Commit 67e7fdfc authored by Steve Capper and committed by Will Deacon
Browse files

arm64: mm: introduce 52-bit userspace support



On arm64 there is optional support for a 52-bit virtual address space.
To exploit this one has to be running with a 64KB page size and be
running on hardware that supports this.

For an arm64 kernel supporting a 48 bit VA with a 64KB page size,
some changes are needed to support a 52-bit userspace:
 * TCR_EL1.T0SZ needs to be 12 instead of 16,
 * TASK_SIZE needs to reflect the new size.

This patch implements the above when the support for 52-bit VAs is
detected at early boot time.

On arm64, userspace address translation is controlled by TTBR0_EL1. As
well as userspace, TTBR0_EL1 controls:
 * The identity mapping,
 * EFI runtime code.

It is possible to run a kernel with an identity mapping that has a
larger VA size than userspace (and for this case __cpu_set_tcr_t0sz()
would set TCR_EL1.T0SZ as appropriate). However, when the conditions for
52-bit userspace are met, it is possible to keep TCR_EL1.T0SZ fixed at
12. Thus in this patch, the TCR_EL1.T0SZ size-changing logic is
disabled.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Steve Capper <steve.capper@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent a96a33b1
Loading
Loading
Loading
Loading
+4 −0
Original line number Diff line number Diff line
@@ -716,6 +716,10 @@ config ARM64_PA_BITS_52

endchoice

# Enabled automatically (def_bool y) when all prerequisites for a 52-bit
# user VA are configured: a 48-bit kernel VA with 64K pages, and either
# hardware PAN or software TTBR0 PAN emulation disabled.
config ARM64_52BIT_VA
	def_bool y
	depends on ARM64_VA_BITS_48 && ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)

config ARM64_PA_BITS
	int
	default 48 if ARM64_PA_BITS_48
+3 −4
Original line number Diff line number Diff line
@@ -357,11 +357,10 @@ alternative_endif
	.endm

/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.macro	tcr_set_t0sz, valreg, t0sz
	// Insert the given T0SZ value into the TCR image held in \valreg
	// (field at TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH bits wide).
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
+3 −0
Original line number Diff line number Diff line
@@ -74,6 +74,9 @@ extern u64 idmap_ptrs_per_pgd;

/*
 * Return true if the ID map needs a T0SZ other than the default
 * TCR_T0SZ(VA_BITS), in which case callers must adjust TCR_EL1.T0SZ.
 */
static inline bool __cpu_uses_extended_idmap(void)
{
	/*
	 * With 52-bit VA support, TCR_EL1.T0SZ is kept fixed and the
	 * T0SZ size-changing logic is disabled (see commit message),
	 * so the idmap never counts as "extended" here.
	 */
	if (IS_ENABLED(CONFIG_ARM64_52BIT_VA))
		return false;

	return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}

+9 −4
Original line number Diff line number Diff line
@@ -19,10 +19,12 @@
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

#define TASK_SIZE_64		(UL(1) << VA_BITS)

#define KERNEL_DS		UL(-1)
#define USER_DS		(TASK_SIZE_64 - 1)
#ifdef CONFIG_ARM64_52BIT_VA
#define USER_DS			((UL(1) << 52) - 1)
#else
#define USER_DS			((UL(1) << VA_BITS) - 1)
#endif /* CONFIG_ARM64_52BIT_VA */

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
@@ -56,6 +58,9 @@

#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS)

extern u64 vabits_user;
#define TASK_SIZE_64		(UL(1) << vabits_user)

#ifdef CONFIG_COMPAT
#define TASK_SIZE_32		UL(0x100000000)
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
+13 −0
Original line number Diff line number Diff line
@@ -318,6 +318,19 @@ __create_page_tables:
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifdef CONFIG_ARM64_52BIT_VA
	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	mov	x5, #52
	cbnz	x6, 1f
#endif
	mov	x5, #VA_BITS
1:
	adr_l	x6, vabits_user
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
Loading