Commit 6cc0c16d authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s: Implement interrupt exit logic in C



Implement the bulk of interrupt return logic in C. The asm return code
must handle a few cases: restoring full GPRs, and emulating stack
store.
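
As a reading aid, here is a minimal sketch of the contract the asm tail
relies on. The two prepare functions and their signatures come from the
header hunk below; the bodies and the two helper predicates are
hypothetical, not the kernel implementation. The C helper is entered
with interrupts hard-disabled, performs the exit work, and its return
value tells the asm whether extra work remains: NVGPR restore on the
user path, the emulated stack store on the kernel path.

    /* Sketch only: illustrates the asm<->C return-value contract. */
    struct pt_regs;                             /* opaque for this sketch */

    /* hypothetical stand-ins for the thread-flag tests the real code does */
    static int exit_work_touched_nvgprs(struct pt_regs *regs) { return 0; }
    static int need_stack_store_emulation(struct pt_regs *regs) { return 0; }

    unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
    {
            /* ... TIF work, math/TM state restore, soft-mask bookkeeping ... */
            return exit_work_touched_nvgprs(regs);      /* non-zero: asm runs REST_NVGPRS */
    }

    unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
    {
            /* ... soft-pending replay, preemption, AMR restore ... */
            return need_stack_store_emulation(regs);    /* non-zero: asm emulates the stdu */
    }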

The stack store emulation is significantly simplified: rather than
creating a new return frame and switching to it before performing
the store, it uses the PACA to keep a scratch register around to
perform the store.
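
To make that concrete, a hedged C illustration of what the emulated
store must achieve (the helper name and frame_size parameter are
illustrative, not kernel code): emulate_loadstore has already written
the updated stack pointer into the saved register image, so the only
remaining work is to store the old r1 value at the new stack top. The
asm stub at the end of the interrupt_return hunk below performs this
store after the GPRs have been reloaded, using a PACA slot as its
single scratch register.

    /* Sketch only: the store half of an emulated "stdu r1,-frame_size(r1)". */
    static inline void emulate_stdu_store(struct pt_regs *regs, unsigned long frame_size)
    {
            unsigned long new_r1 = regs->gpr[1];            /* already updated by emulate_loadstore */
            unsigned long old_r1 = new_r1 + frame_size;     /* value the stdu would have stored */

            *(unsigned long *)new_r1 = old_r1;              /* store at the new top of stack */
    }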

The asm return code is moved into 64e for now. The new logic has made
allowance for 64e, but I don't have a full environment that works well
to test it, and even booting in emulated qemu is not great for stress
testing. 64e shouldn't be too far off working with this, given a bit
more testing and auditing of the logic.

This is slightly faster on a POWER9 (page fault speed increases by about
1.1%), probably due to fewer mtmsrd instructions.

mpe: Includes fixes from Nick for _TIF_EMULATE_STACK_STORE
handling (including the fast_interrupt_return path), for removing
trace_hardirqs_on(), and for the interrupt-return part of the
MSR_VSX restore bug caught by the tm-unavailable selftest.

mpe: Incorporate fix from Nick:

The return-to-kernel path has to replay any soft-pending interrupts if
it is returning to a context that had interrupts soft-enabled. It has
to do this carefully, and must avoid simply enabling interrupts if this
is an irq context, which can cause interrupts to nest multiple times on
the stack and lead to other unexpected issues.

The code which avoided this case got the soft-mask state wrong, and
marked interrupts as enabled before going around again to retry. This
seems to be mostly harmless, except when PREEMPT=y, where it calls
preempt_schedule_irq with irqs apparently enabled and runs into a BUG
in kernel/sched/core.c.
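
A minimal sketch of the corrected ordering, assuming hypothetical
helpers for the paca irq_happened and soft-mask accessors; only
replay_soft_interrupts() is real (its declaration is added in one of
the header hunks below). The point is that the soft-mask stays
"disabled" for the whole retry loop and is only marked enabled once
nothing is pending, so preempt_schedule_irq never sees irqs apparently
enabled.

    extern void replay_soft_interrupts(void);       /* added by this patch */

    /* hypothetical stand-ins for the paca accessors */
    extern int irqs_pending_in_paca(void);
    extern void soft_mask_mark_enabled(void);

    static void kernel_return_replay_sketch(void)
    {
            /*
             * Entered with interrupts hard-disabled and the soft-mask still
             * marked disabled.  Do not simply re-enable interrupts here: if
             * this return is from an irq context, taking a pending interrupt
             * now would nest another interrupt frame on this stack.  Replay
             * in place instead, leaving the soft-mask marked disabled.
             */
            while (irqs_pending_in_paca())
                    replay_soft_interrupts();

            soft_mask_mark_enabled();       /* only now report irqs as soft-enabled */
    }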

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200225173541.1549955-29-npiggin@gmail.com
parent 3282a3da
+2 −0
@@ -99,6 +99,8 @@ void __init machine_init(u64 dt_ptr);
#endif
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);

long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
		      u32 len_high, u32 len_low);
+10 −0
@@ -60,6 +60,12 @@
#include <asm/mmu.h>
#include <asm/ptrace.h>

static inline void kuap_restore_amr(struct pt_regs *regs)
{
	if (mmu_has_feature(MMU_FTR_RADIX_KUAP))
		mtspr(SPRN_AMR, regs->kuap);
}

static inline void kuap_check_amr(void)
{
	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
@@ -136,6 +142,10 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
#else /* CONFIG_PPC_KUAP */
static inline void kuap_restore_amr(struct pt_regs *regs)
{
}

static inline void kuap_check_amr(void)
{
}
+1 −0
@@ -52,6 +52,7 @@
#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void replay_soft_interrupts(void);

extern void timer_interrupt(struct pt_regs *);
extern void timer_broadcast_interrupt(void);
+6 −0
@@ -23,7 +23,13 @@ extern void switch_booke_debug_regs(struct debug_reg *new_debug);

extern int emulate_altivec(struct pt_regs *);

#ifdef CONFIG_PPC_BOOK3S_64
void restore_math(struct pt_regs *regs);
#else
static inline void restore_math(struct pt_regs *regs)
{
}
#endif

void restore_tm_state(struct pt_regs *regs);

+117 −370
@@ -16,6 +16,7 @@

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -228,6 +229,7 @@ _GLOBAL(ret_from_kernel_thread)
	li	r3,0
	b	.Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3E
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
@@ -238,6 +240,7 @@ _GLOBAL(save_nvgprs)
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);
#endif

#ifdef CONFIG_PPC_BOOK3S_64

@@ -301,7 +304,7 @@ flush_count_cache:
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * Finally, we can return to the second process, via interrupt_return.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
@@ -453,408 +456,152 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
	 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
	 * touched, AMR not set, no exit work created, then this can be used.
	 */
	.balign IFETCH_ALIGN_BYTES
	.globl fast_interrupt_return
fast_interrupt_return:
_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	bne	.Lfast_user_interrupt_return
	andi.	r0,r4,MSR_RI
	li	r3,0 /* 0 return value, no EMULATE_STACK_STORE */
	bne+	.Lfast_kernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	. /* should not get here */

	ld	r9, PACA_THREAD_INFO(r13)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return
interrupt_return:
_ASM_NOKPROBE_SYMBOL(interrupt_return)
	REST_NVGPRS(r1)

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	.balign IFETCH_ALIGN_BYTES
	.globl interrupt_return_lite
interrupt_return_lite:
_ASM_NOKPROBE_SYMBOL(interrupt_return_lite)
	ld	r4,_MSR(r1)
	andi.	r0,r4,MSR_PR
	beq	.Lkernel_interrupt_return
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f
	bl	interrupt_exit_user_prepare
	cmpdi	r3,0
	bne-	.Lrestore_nvgprs

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
.Lfast_user_interrupt_return:
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
BEGIN_FTR_SECTION
	ld	r10,_PPR(r1)
	mtspr	SPRN_PPR,r10
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPTION
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr0,r8,0
	bne	restore
	ld	r0,SOFTE(r1)
	andi.	r0,r0,IRQS_DISABLED
	bne	restore
	ld	r3,_CCR(r1)
	ld	r4,_LINK(r1)
	ld	r5,_CTR(r1)
	ld	r6,_XER(r1)
	li	r0,0

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
	bl	preempt_schedule_irq
	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)
	REST_GPR(13, r1)

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * enable hard interrupt but we really should disable interrupts
	 * when we return from the interrupt, and so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		  /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPTION */
	mtcr	r3
	mtlr	r4
	mtctr	r5
	mtspr	SPRN_XER,r6

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACAIRQSOFTMASK(r13)
	andi.	r5,r5,IRQS_DISABLED
	bne	.Lrestore_irq_off
	REST_4GPRS(2, r1)
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

	/* We are enabling, were we already enabled ? Yes, just return */
	andi.	r6,r6,IRQS_DISABLED
	beq	cr0,.Ldo_restore
.Lrestore_nvgprs:
	REST_NVGPRS(r1)
	b	.Lfast_user_interrupt_return

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	.Lrestore_check_irq_replay
	.balign IFETCH_ALIGN_BYTES
.Lkernel_interrupt_return:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	interrupt_exit_kernel_prepare

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi
	 */
.Lrestore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,IRQS_ENABLED
	stb	r0,PACAIRQSOFTMASK(r13);
.Lfast_kernel_interrupt_return:
	cmpdi	cr1,r3,0
	ld	r11,_NIP(r1)
	ld	r12,_MSR(r1)
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12

	/*
	 * Final return path. BookE is handled in a different file
	 */
.Ldo_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
	ldarx	r0,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r3,_LINK(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	kuap_check_amr r5, r6

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	.Lunrecov_restore

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace the value stored in the stack frame may belong to
	 * another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	/* Restore PPR */
	ld	r2,_PPR(r1)
	mtspr	SPRN_PPR,r2
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */
	ld	r5,_XER(r1)
	ld	r6,_CCR(r1)
	li	r0,0

1:	mtspr	SPRN_SRR1,r3
	REST_4GPRS(7, r1)
	REST_2GPRS(11, r1)

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2
	mtlr	r3
	mtctr	r4
	mtspr	SPRN_XER,r5

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	li	r2,0
	std	r2,STACK_FRAME_OVERHEAD-16(r1)

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	std	r0,STACK_FRAME_OVERHEAD-16(r1)

	kuap_restore_amr r4
	REST_4GPRS(2, r1)

	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	bne-	cr1,1f /* emulate stack store */
	mtcr	r6
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also about to hard enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen
	 */
.Lrestore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	/* The interrupt should not have soft enabled. */
	lbz	r7,PACAIRQSOFTMASK(r13)
1:	tdeqi	r7,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	b	.Ldo_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
.Lrestore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	.Lrestore_no_replay
 
	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
	 * to reconcile the IRQ state. Tracing is already accounted for.
	 */
	lbz	r4,PACAIRQHAPPENED(r13)
	ori	r4,r4,PACA_IRQ_HARD_DIS
	stb	r4,PACAIRQHAPPENED(r13)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	*/
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
 	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xf00
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	performance_monitor_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	cmpwi	cr0,r3,0xa00
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */
 
.Lunrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.Lunrecov_restore

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);
1:	/*
	 * Emulate stack store with update. New r1 value was already calculated
	 * and updated in our interrupt regs by emulate_loadstore, but we can't
	 * store the previous value of r1 to the stack before re-loading our
	 * registers from it, otherwise they could be clobbered.  Use
	 * PACA_EXGEN as temporary storage to hold the store data, as
	 * interrupts are disabled here so it won't be clobbered.
	 */
	mtcr	r6
	std	r9,PACA_EXGEN+0(r13)
	addi	r9,r1,INT_FRAME_SIZE /* get original r1 */
	REST_GPR(6, r1)
	REST_GPR(0, r1)
	REST_GPR(1, r1)
	std	r9,0(r1) /* perform store component of stdu */
	ld	r9,PACA_EXGEN+0(r13)

	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_RTAS
/*
Loading