Commit 82724de6 authored by Sandeep Tripathy, committed by Carles Cufi
Browse files

arch: arm64: refactor for EL3 specific init



Zephyr being an OS is typically expected to run at EL1. Arm core
can reset to EL3 which typically requires a firmware to run at EL3
and drop control to lower EL. In that case EL3 init is done by the
firmware allowing the lower EL software to have necessary control.

If Zephyr is entered at EL3 and it is desired to run at EL1, which
is indicated by 'CONFIG_SWITCH_TO_EL1', then Zephyr is responsible
for doing required EL3 initializations to allow lower EL necessary
control.

The entry sequence is modified to have control flow under single
'switch_el'.

Provisions are added by providing weak functions to do platform-specific
init from EL3.

Signed-off-by: Sandeep Tripathy <sandeep.tripathy@broadcom.com>
parent c6f87713
Loading
Loading
Loading
Loading
+66 −33
Original line number Diff line number Diff line
@@ -19,6 +19,11 @@

_ASM_FILE_PROLOGUE

/* Platform may do platform specific EL3 init */
/*
 * z_arch_el3_plat_init - default (weak) hook for platform-specific EL3 setup.
 *
 * Declared as a weak symbol so that a platform can supply its own
 * implementation to perform any additional EL3 configuration; this default
 * does nothing and simply returns.
 *
 * Called from the reset path via `bl` while executing at EL3, before the
 * optional drop to EL1. NOTE(review): as a leaf routine invoked with `bl`,
 * the return address is in x30; an overriding implementation must preserve
 * any registers the reset sequence still relies on — confirm the live-register
 * set at the call site before overriding.
 */
WTEXT(z_arch_el3_plat_init)
SECTION_FUNC(TEXT,z_arch_el3_plat_init)
ret

/**
 *
 * @brief Reset vector
@@ -44,18 +49,63 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset)
GTEXT(__start)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)

#ifdef CONFIG_SWITCH_TO_EL1
	/* Setup vector table */
	adr	x9, _vector_table

	switch_el x1, 3f, 2f, 1f
3:
	/* Disable MMU and async exceptions routing to EL1 */
	msr	sctlr_el1, xzr
	/* Initialize VBAR */
	msr	vbar_el3, x9
	isb

	/* Initialize sctlr_el3 to reset value */
	mov_imm	x1, SCTLR_EL3_RES1
	mrs     x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb

	/* SError, IRQ and FIQ routing enablement in EL3 */
	mrs	x0, scr_el3
	orr	x0, x0, #(SCR_EL3_IRQ | SCR_EL3_FIQ | SCR_EL3_EA)
	msr	scr_el3, x0

	/*
	* Disable access traps to EL3 for CPACR, Trace, FP, ASIMD,
	* SVE from lower EL.
	*/
	mov_imm	x0, CPTR_EL3_RES_VAL
	mov_imm	x1, (CPTR_EL3_TTA | CPTR_EL3_TFP | CPTR_EL3_TCPAC)
	bic	x0, x0, x1
	orr	x0, x0, #(CPTR_EL3_EZ)
	msr	cptr_el3, x0
	isb

	/* Platform specific configurations needed in EL3 */
	bl	z_arch_el3_plat_init

#ifdef CONFIG_SWITCH_TO_EL1
	/*
	* Zephyr entry happened in EL3. Do EL3 specific init before
	* dropping to lower EL.
	*/
	/* Enable access control configuration from lower EL */
	mrs	x0, actlr_el3
	orr     x0, x0, #(ACTLR_EL3_L2ACTLR | ACTLR_EL3_L2ECTLR \
			 | ACTLR_EL3_L2CTLR)
	orr     x0, x0, #(ACTLR_EL3_CPUACTLR | ACTLR_EL3_CPUECTLR)
	msr	actlr_el3, x0

	/* Initialize sctlr_el1 to reset value */
	mov_imm	x0, SCTLR_EL1_RES1
	msr	sctlr_el1, x0

	/* Disable EA/IRQ/FIQ routing to EL3 and set EL1 to AArch64 */
	mov	x0, xzr
	orr	x0, x0, #(SCR_EL3_RW)
	msr	scr_el3, x0

	/* On eret return to EL1 with DAIF masked */
	/* On eret return to secure EL1h with DAIF masked */
	mov	x0, xzr
	orr	x0, x0, #(DAIF_MASK)
	orr	x0, x0, #(SPSR_EL3_TO_EL1)
@@ -65,39 +115,21 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)
	adr	x0, 1f
	msr	elr_el3, x0
	eret
2:
	/* Boot from EL2 not supported */
	bl	.
1:
#endif
	/* Setup vector table */
	adr	x0, _vector_table

	switch_el x1, 3f, 2f, 1f
3:
	/* Initialize VBAR */
	msr	vbar_el3, x0

	/* SError, IRQ and FIQ routing enablement in EL3 */
	mrs	x0, scr_el3
	orr	x0, x0, #(SCR_EL3_IRQ | SCR_EL3_FIQ | SCR_EL3_EA)
	msr	scr_el3, x0

	/* Disable access trapping in EL3 for NEON/FP */
	msr	cptr_el3, xzr

	/*
	 * Enable the instruction cache, stack pointer and data access
	 * alignment checks and disable speculative loads.
	 * alignment checks.
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el3
	orr	x0, x0, x1
	msr	sctlr_el3, x0
	isb
	b	0f

2:
	/* Initialize VBAR */
	msr	vbar_el2, x0
	msr	vbar_el2, x9

	/* SError, IRQ and FIQ routing enablement in EL2 */
	mrs	x0, hcr_el2
@@ -109,29 +141,30 @@ SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)

	/*
	 * Enable the instruction cache, stack pointer and data access
	 * alignment checks and disable speculative loads.
	 * alignment checks.
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el2
	orr	x0, x0, x1
	msr	sctlr_el2, x0
	b	0f

1:
	/* Initialize VBAR */
	msr	vbar_el1, x0
	msr	vbar_el1, x9

	/* Disable access trapping in EL1 for NEON/FP */
	mov	x1, #(CPACR_EL1_FPEN_NOTRAP)
	msr	cpacr_el1, x1
	mov	x0, #(CPACR_EL1_FPEN_NOTRAP)
	msr	cpacr_el1, x0

	/*
	 * Enable the instruction cache, stack pointer and data access
	 * alignment checks and disable speculative loads.
	 * Enable the instruction cache and el1 stack alignment check.
	 */
	mov	x1, #(SCTLR_I_BIT | SCTLR_A_BIT | SCTLR_SA_BIT)
	mov	x1, #(SCTLR_I_BIT | SCTLR_SA_BIT)
	mrs	x0, sctlr_el1
	orr	x0, x0, x1
	msr	sctlr_el1, x0

0:
	isb

+23 −0
Original line number Diff line number Diff line
@@ -22,6 +22,12 @@

#define SPSR_MODE_EL1H		(0x5)

#define SCTLR_EL3_RES1		(BIT(29) | BIT(28) | BIT(23) | \
				BIT(22) | BIT(18) | BIT(16) | \
				BIT(11) | BIT(5) | BIT(4))

#define SCTLR_EL1_RES1		(BIT(29) | BIT(28) | BIT(23) | \
				BIT(22) | BIT(20) | BIT(11))
#define SCTLR_M_BIT		BIT(0)
#define SCTLR_A_BIT		BIT(1)
#define SCTLR_C_BIT		BIT(2)
@@ -36,6 +42,23 @@
#define SCR_EL3_EA		BIT(3)
#define SCR_EL3_RW		BIT(10)

/*
 * TODO: ACTLR is of class implementation defined. All core implementations
 * in armv8a have the same implementation so far w.r.t few controls.
 * When there will be differences we have to create core specific headers.
 */
#define ACTLR_EL3_CPUACTLR	BIT(0)
#define ACTLR_EL3_CPUECTLR	BIT(1)
#define ACTLR_EL3_L2CTLR	BIT(4)
#define ACTLR_EL3_L2ECTLR	BIT(5)
#define ACTLR_EL3_L2ACTLR	BIT(6)

#define CPTR_EL3_RES_VAL	(0x0)
#define CPTR_EL3_EZ		BIT(8)
#define CPTR_EL3_TFP		BIT(9)
#define CPTR_EL3_TTA		BIT(20)
#define CPTR_EL3_TCPAC		BIT(31)

#define HCR_EL2_FMO		BIT(3)
#define HCR_EL2_IMO		BIT(4)
#define HCR_EL2_AMO		BIT(5)