Commit ae73ad05 authored by Russell King's avatar Russell King
Browse files

Merge tag 'arm-p2v-for-v5.11' of...

Merge tag 'arm-p2v-for-v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux into devel-stable

Implement the necessary changes in the ARM assembler boot code to permit
the relative alignment of the physical and the virtual addresses of the
kernel to be as little as 2 MiB, as opposed to the minimum of 16 MiB we
support today.

Series was posted here, and reviewed by Nicolas Pitre and Linus Walleij:
https://lore.kernel.org/linux-arm-kernel/20200921154117.757-1-ardb@kernel.org/
parents 3650b228 9443076e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -243,7 +243,7 @@ config ARM_PATCH_PHYS_VIRT
	  kernel in system memory.

	  This can only be used with non-XIP MMU kernels where the base
	  of physical memory is at a 16MB boundary.
	  of physical memory is at a 2 MiB boundary.

	  Only disable this option if you know that you do not require
	  this feature (eg, building a kernel for a single machine) and
+84 −0
Original line number Diff line number Diff line
@@ -494,4 +494,88 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#define _ASM_NOKPROBE(entry)
#endif

	/*
	 * __adldst_l - internal helper implementing a PC-relative add/ldr/str
	 * with unlimited range; backs the adr_l/ldr_l/str_l macros below.
	 *
	 * @op:  add, ldr or str
	 * @reg: destination register (add/ldr) or source register (str)
	 * @sym: symbol whose PC-relative offset is used
	 * @tmp: scratch register receiving the offset (callers may pass \reg
	 *       itself for add/ldr)
	 * @c:   optional condition code suffix
	 */
	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	/* pre-v7: no movw/movt, so load the offset from an out-of-line literal */
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
	/* predicate the next three instructions (movw, movt and the \op below) */
 THUMB(	ittt		\c			)
	.endif
	/* v7+: materialize the 32-bit PC-relative offset with a movw/movt pair */
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8			// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	/* ldr/str can fold the PC addition into the addressing mode */
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	/* the add above already produced the full address; just ldr/str via it */
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

	/*
	 * mov_l - move a constant value or [relocated] address into a register
	 *
	 * @dst: destination register
	 * @imm: immediate value, or symbol whose [link-time] address is wanted
	 */
	.macro		mov_l, dst:req, imm:req
	.if		__LINUX_ARM_ARCH__ < 7
	ldr		\dst, =\imm		// pre-v7: literal pool load
	.else
	movw		\dst, #:lower16:\imm
	movt		\dst, #:upper16:\imm
	.endif
	.endm

	/*
	 * adr_l - adr pseudo-op with unlimited range
	 *
	 * @dst: destination register (also used as the scratch register,
	 *       so no separate temporary is needed)
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

	/*
	 * ldr_l - ldr <literal> pseudo-op with unlimited range
	 *
	 * @dst: destination register (doubles as the scratch register for
	 *       the address calculation, which the load then overwrites)
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

	/*
	 * str_l - str <literal> pseudo-op with unlimited range
	 *
	 * @src: source register
	 * @sym: name of the symbol
	 * @tmp: mandatory scratch register - unlike adr_l/ldr_l, \src holds
	 *       the value to be stored and so cannot be clobbered by the
	 *       address calculation
	 * @cond: conditional opcode suffix
	 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm

#endif /* __ASM_ASSEMBLER_H__ */
+5 −0
Original line number Diff line number Diff line
@@ -51,6 +51,7 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_NONE		0
#define R_ARM_PC24		1
#define R_ARM_ABS32		2
#define R_ARM_REL32		3
#define R_ARM_CALL		28
#define R_ARM_JUMP24		29
#define R_ARM_TARGET1		38
@@ -58,11 +59,15 @@ typedef struct user_fp elf_fpregset_t;
#define R_ARM_PREL31		42
#define R_ARM_MOVW_ABS_NC	43
#define R_ARM_MOVT_ABS		44
#define R_ARM_MOVW_PREL_NC	45
#define R_ARM_MOVT_PREL		46

#define R_ARM_THM_CALL		10
#define R_ARM_THM_JUMP24	30
#define R_ARM_THM_MOVW_ABS_NC	47
#define R_ARM_THM_MOVT_ABS	48
#define R_ARM_THM_MOVW_PREL_NC	49
#define R_ARM_THM_MOVT_PREL	50

/*
 * These are used to set parameters in the core dumps.
+40 −17
Original line number Diff line number Diff line
@@ -173,6 +173,7 @@ extern unsigned long vectors_base;
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24	0x81000000
#define __PV_BITS_23_16	0x810000
#define __PV_BITS_7_0	0x81

extern unsigned long __pv_phys_pfn_offset;
@@ -183,43 +184,65 @@ extern const void *__pv_table_begin, *__pv_table_end;
#define PHYS_OFFSET	((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET	(__pv_phys_pfn_offset)

#define __pv_stub(from,to,instr,type)			\
#ifndef CONFIG_THUMB2_KERNEL
#define __pv_stub(from,to,instr)			\
	__asm__("@ __pv_stub\n"				\
	"1:	" instr "	%0, %1, %2\n"		\
	"2:	" instr "	%0, %0, %3\n"		\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.long	1b - ., 2b - .\n"		\
	"	.popsection\n"				\
	: "=r" (to)					\
	: "r" (from), "I" (type))
	: "r" (from), "I" (__PV_BITS_31_24),		\
	  "I"(__PV_BITS_23_16))

#define __pv_add_carry_stub(x, y)			\
	__asm__("@ __pv_add_carry_stub\n"		\
	"0:	movw	%R0, #0\n"			\
	"	adds	%Q0, %1, %R0, lsl #20\n"	\
	"1:	mov	%R0, %2\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	0b - ., 1b - .\n"		\
	"	.popsection\n"				\
	: "=&r" (y)					\
	: "r" (x), "I" (__PV_BITS_7_0)			\
	: "cc")

#define __pv_stub_mov_hi(t)				\
	__asm__ volatile("@ __pv_stub_mov\n"		\
	"1:	mov	%R0, %1\n"			\
#else
#define __pv_stub(from,to,instr)			\
	__asm__("@ __pv_stub\n"				\
	"0:	movw	%0, #0\n"			\
	"	lsl	%0, #21\n"			\
	"	" instr " %0, %1, %0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.long	0b - .\n"			\
	"	.popsection\n"				\
	: "=r" (t)					\
	: "I" (__PV_BITS_7_0))
	: "=&r" (to)					\
	: "r" (from))

#define __pv_add_carry_stub(x, y)			\
	__asm__ volatile("@ __pv_add_carry_stub\n"	\
	"1:	adds	%Q0, %1, %2\n"			\
	__asm__("@ __pv_add_carry_stub\n"		\
	"0:	movw	%R0, #0\n"			\
	"	lsls	%R0, #21\n"			\
	"	adds	%Q0, %1, %R0\n"			\
	"1:	mvn	%R0, #0\n"			\
	"	adc	%R0, %R0, #0\n"			\
	"	.pushsection .pv_table,\"a\"\n"		\
	"	.long	1b\n"				\
	"	.long	0b - ., 1b - .\n"		\
	"	.popsection\n"				\
	: "+r" (y)					\
	: "r" (x), "I" (__PV_BITS_31_24)		\
	: "=&r" (y)					\
	: "r" (x)					\
	: "cc")
#endif

static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
	phys_addr_t t;

	if (sizeof(phys_addr_t) == 4) {
		__pv_stub(x, t, "add", __PV_BITS_31_24);
		__pv_stub(x, t, "add");
	} else {
		__pv_stub_mov_hi(t);
		__pv_add_carry_stub(x, t);
	}
	return t;
@@ -235,7 +258,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
	 * assembler expression receives 32 bit argument
	 * in place where 'r' 32 bit operand is expected.
	 */
	__pv_stub((unsigned long) x, t, "sub", __PV_BITS_31_24);
	__pv_stub((unsigned long) x, t, "sub");
	return t;
}

+1 −0
Original line number Diff line number Diff line
@@ -92,6 +92,7 @@ obj-$(CONFIG_PARAVIRT) += paravirt.o
head-y			:= head$(MMUEXT).o
obj-$(CONFIG_DEBUG_LL)	+= debug.o
obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
obj-$(CONFIG_ARM_PATCH_PHYS_VIRT)	+= phys2virt.o

# This is executed very early using a temporary stack when no memory allocator
# nor global data is available. Everything has to be allocated on the stack.
Loading