Commit 2c736bb4 authored by Russell King
Browse files

Merge tag 'arm-adrl-replacement-for-v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux into devel-stable

Tidy up open coded relative references in asm

Use the newly introduced adr_l/ldr_l/str_l/mov_l assembler macros to
replace open coded VA-to-PA arithmetic in various places in the code. This
avoids the use of literals on v7+ CPUs, reduces the footprint of the code
in most cases, and generally makes the code easier to follow.

Series was posted here, and reviewed by Nicolas Pitre:
https://lore.kernel.org/linux-arm-kernel/20200914095706.3985-1-ardb@kernel.org/
parents ae73ad05 aaac3733
Loading
Loading
Loading
Loading
+3 −15
Original line number Diff line number Diff line
@@ -468,15 +468,10 @@ dtb_check_done:

		/*
		 * Compute the address of the hyp vectors after relocation.
		 * This requires some arithmetic since we cannot directly
		 * reference __hyp_stub_vectors in a PC-relative way.
		 * Call __hyp_set_vectors with the new address so that we
		 * can HVC again after the copy.
		 */
0:		adr	r0, 0b
		movw	r1, #:lower16:__hyp_stub_vectors - 0b
		movt	r1, #:upper16:__hyp_stub_vectors - 0b
		add	r0, r0, r1
		adr_l	r0, __hyp_stub_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
@@ -627,17 +622,11 @@ not_relocated: mov r0, #0
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		adr_l	r0, __hyp_reentry_vectors
		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif
@@ -1440,8 +1429,7 @@ ENTRY(efi_enter_kernel)
		mov	r4, r0			@ preserve image base
		mov	r8, r1			@ preserve DT pointer

 ARM(		adrl	r0, call_cache_fn	)
 THUMB(		adr	r0, call_cache_fn	)
		adr_l	r0, call_cache_fn
		adr	r1, 0f			@ clean the region of code we
		bl	cache_clean_flush	@ may run with the MMU off

+2 −2
Original line number Diff line number Diff line
@@ -259,7 +259,7 @@
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	.long	9998b - .					;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
@@ -270,7 +270,7 @@
	.popsection
#define ALT_UP_B(label)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	.long	9998b - .					;\
	W(b)	. + (label - 9998b)					;\
	.popsection
#else
+1 −1
Original line number Diff line number Diff line
@@ -96,7 +96,7 @@ unsigned long get_wchan(struct task_struct *p);
#define __ALT_SMP_ASM(smp, up)						\
	"9998:	" smp "\n"						\
	"	.pushsection \".alt.smp.init\", \"a\"\n"		\
	"	.long	9998b\n"					\
	"	.long	9998b - .\n"					\
	"	" up "\n"						\
	"	.popsection\n"
#else
+6 −16
Original line number Diff line number Diff line
@@ -170,11 +170,12 @@ ENDPROC(lookup_processor_type)
 *	r9 = cpuid (preserved)
 */
__lookup_processor_type:
	adr	r3, __lookup_processor_type_data
	ldmia	r3, {r4 - r6}
	sub	r3, r3, r4			@ get offset between virt&phys
	add	r5, r5, r3			@ convert virt addresses to
	add	r6, r6, r3			@ physical address space
	/*
	 * Look in <asm/procinfo.h> for information about the __proc_info
	 * structure.
	 */
	adr_l	r5, __proc_info_begin
	adr_l	r6, __proc_info_end
1:	ldmia	r5, {r3, r4}			@ value, mask
	and	r4, r4, r9			@ mask wanted bits
	teq	r3, r4
@@ -186,17 +187,6 @@ __lookup_processor_type:
2:	ret	lr
ENDPROC(__lookup_processor_type)

/*
 * Look in <asm/procinfo.h> for information about the __proc_info structure.
 */
	.align	2
	.type	__lookup_processor_type_data, %object
__lookup_processor_type_data:
	.long	.
	.long	__proc_info_begin
	.long	__proc_info_end
	.size	__lookup_processor_type_data, . - __lookup_processor_type_data

__error_lpae:
#ifdef CONFIG_DEBUG_LL
	adr	r0, str_lpae
+15 −48
Original line number Diff line number Diff line
@@ -103,10 +103,8 @@ ENTRY(stext)
#endif

#ifndef CONFIG_XIP_KERNEL
	adr	r3, 2f
	ldmia	r3, {r4, r8}
	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
	add	r8, r8, r4			@ PHYS_OFFSET
	adr_l	r8, _text			@ __pa(_text)
	sub	r8, r8, #TEXT_OFFSET		@ PHYS_OFFSET
#else
	ldr	r8, =PLAT_PHYS_OFFSET		@ always constant in this case
#endif
@@ -158,10 +156,6 @@ ENTRY(stext)
1:	b	__enable_mmu
ENDPROC(stext)
	.ltorg
#ifndef CONFIG_XIP_KERNEL
2:	.long	.
	.long	PAGE_OFFSET
#endif

/*
 * Setup the initial page tables.  We only setup the barest
@@ -224,11 +218,8 @@ __create_page_tables:
	 * Create identity mapping to cater for __enable_mmu.
	 * This identity mapping will be removed by paging_init().
	 */
	adr	r0, __turn_mmu_on_loc
	ldmia	r0, {r3, r5, r6}
	sub	r0, r0, r3			@ virt->phys offset
	add	r5, r5, r0			@ phys __turn_mmu_on
	add	r6, r6, r0			@ phys __turn_mmu_on_end
	adr_l	r5, __turn_mmu_on		@ _pa(__turn_mmu_on)
	adr_l	r6, __turn_mmu_on_end		@ _pa(__turn_mmu_on_end)
	mov	r5, r5, lsr #SECTION_SHIFT
	mov	r6, r6, lsr #SECTION_SHIFT

@@ -351,11 +342,6 @@ __create_page_tables:
	ret	lr
ENDPROC(__create_page_tables)
	.ltorg
	.align
__turn_mmu_on_loc:
	.long	.
	.long	__turn_mmu_on
	.long	__turn_mmu_on_end

#if defined(CONFIG_SMP)
	.text
@@ -391,10 +377,8 @@ ENTRY(secondary_startup)
	/*
	 * Use the page tables supplied from  __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	lr, r4, r5			@ mmu has been enabled
	add	r3, r7, lr
	adr_l	r3, secondary_data
	mov_l	r12, __secondary_switched
	ldrd	r4, r5, [r3, #0]		@ get secondary_data.pgdir
ARM_BE8(eor	r4, r4, r5)			@ Swap r5 and r4 in BE:
ARM_BE8(eor	r5, r4, r5)			@ it can be done in 3 steps
@@ -409,22 +393,13 @@ ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

	/*
	 * r6  = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #12]			@ get secondary_data.stack
	ldr_l	r7, secondary_data + 12		@ get secondary_data.stack
	mov	sp, r7
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

	.align

	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */


@@ -539,19 +514,11 @@ ARM_BE8(rev r0, r0) @ byteswap if big endian
	retne	lr

__fixup_smp_on_up:
	adr	r0, 1f
	ldmia	r0, {r3 - r5}
	sub	r3, r0, r3
	add	r4, r4, r3
	add	r5, r5, r3
	adr_l	r4, __smpalt_begin
	adr_l	r5, __smpalt_end
	b	__do_fixup_smp_on_up
ENDPROC(__fixup_smp)

	.align
1:	.word	.
	.word	__smpalt_begin
	.word	__smpalt_end

	.pushsection .data
	.align	2
	.globl	smp_on_up
@@ -565,14 +532,15 @@ smp_on_up:
__do_fixup_smp_on_up:
	cmp	r4, r5
	reths	lr
	ldmia	r4!, {r0, r6}
 ARM(	str	r6, [r0, r3]	)
 THUMB(	add	r0, r0, r3	)
	ldmia	r4, {r0, r6}
 ARM(	str	r6, [r0, r4]	)
 THUMB(	add	r0, r0, r4	)
	add	r4, r4, #8
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r0.
 THUMB(	strh	r6, [r0]	)
	b	__do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)
@@ -581,7 +549,6 @@ ENTRY(fixup_smp)
	stmfd	sp!, {r4 - r6, lr}
	mov	r4, r0
	add	r5, r0, r1
	mov	r3, #0
	bl	__do_fixup_smp_on_up
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)
Loading