Commit b86fb888 authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman
Browse files

powerpc/32: implement fast entry for syscalls on non BOOKE



This patch implements a fast entry for syscalls.

Syscalls don't have to preserve non volatile registers except LR.

This patch then implements a fast entry for syscalls, where
volatile registers get clobbered.

As this entry is dedicated to syscalls, it always sets MSR_EE
and warns in case MSR_EE was previously off.

It also assumes that the call always comes from user mode; system calls
from the kernel are unexpected.

The overall series improves the null_syscall selftest by 12.5% on an 83xx
and by 17% on an 8xx.

Signed-off-by: default avatarChristophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: default avatarMichael Ellerman <mpe@ellerman.id.au>
parent 40530db7
Loading
Loading
Loading
Loading
+32 −0
Original line number Diff line number Diff line
@@ -342,6 +342,35 @@ stack_ovf:
	SYNC
	RFI

#ifndef CONFIG_BOOKE	/* to be removed once BOOKE uses fast syscall entry */
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Lockdep fixup path for a syscall taken with MSR_EE off.
 *
 * Reached from transfer_to_syscall below when the saved user MSR (in r9)
 * has MSR_EE clear.  Syscalls are not expected with interrupts disabled,
 * so emit a WARN, tell lockdep interrupts are (about to be) on, then
 * enable them for real and re-enter the common syscall path.
 */
trace_syscall_entry_irq_off:
	/*
	 * Syscall shouldn't happen while interrupts are disabled,
	 * so let's do a warning here.
	 */
0:	trap
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
	bl	trace_hardirqs_on

	/* Now enable for real */
	LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
	mtmsr	r10

	/*
	 * The call to trace_hardirqs_on() may clobber the volatile
	 * registers, so reload the syscall number (r0) and arguments
	 * (r3-r8) from the pt_regs frame saved by SYSCALL_ENTRY.
	 */
	REST_GPR(0, r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)
	b	DoSyscall
#endif /* CONFIG_TRACE_IRQFLAGS */

	/*
	 * Landing point of the fast syscall entry (SYSCALL_ENTRY macro),
	 * reached via RFI with r1 = kernel sp, r2 = current and
	 * r9 = user MSR (taken from SRR1).  Falls through into DoSyscall
	 * in the common case.
	 */
	.globl	transfer_to_syscall
transfer_to_syscall:
#ifdef CONFIG_TRACE_IRQFLAGS
	andi.	r12,r9,MSR_EE		/* interrupts enabled in user mode? */
	beq-	trace_syscall_entry_irq_off
#endif /* CONFIG_TRACE_IRQFLAGS */
#endif /* !CONFIG_BOOKE */

/*
 * Handle a system call.
 */
@@ -353,9 +382,11 @@ _GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
#ifdef CONFIG_BOOKE	/* to be removed once BOOKE uses fast syscall entry */
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Make sure interrupts are enabled */
	mfmsr	r11
@@ -1219,6 +1250,7 @@ load_dbcr0:

	.section .bss
	.align	4
	/*
	 * Debug-register save area, 8 bytes per CPU slot: the word at
	 * offset 0 is loaded into DBCR0 on syscall entry, the word at
	 * offset 4 is decremented there (see SYSCALL_ENTRY / load_dbcr0).
	 */
	.global global_dbcr0
global_dbcr0:
	.space	8*NR_CPUS
	.previous
+1 −2
Original line number Diff line number Diff line
@@ -374,8 +374,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_SYS(0xc00, DoSyscall)
	SYSCALL_ENTRY	0xc00

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
+81 −4
Original line number Diff line number Diff line
@@ -73,6 +73,87 @@
	SAVE_2GPRS(7, r11)
.endm

/*
 * Fast syscall entry (runs with the MMU off, real-mode addresses).
 *
 * Builds a pt_regs frame on the current task's kernel stack and jumps
 * to transfer_to_syscall through SRR0/SRR1 + RFI, which also re-enables
 * the MMU.  Only what a syscall must preserve is saved: CR, LR, NIP,
 * MSR, r0-r8 and r1/r2 -- the remaining volatile registers are
 * clobbered (that is the point of the fast path).
 *
 * Assumes the exception was taken from user mode: there is no check of
 * the problem-state bit in SRR1 and no "already on kernel stack" case.
 *
 * Register state handed to transfer_to_syscall:
 *   r1 = kernel sp (virtual), r2 = current, r9 = user MSR (from SRR1).
 */
.macro SYSCALL_ENTRY trapno
	mfspr	r12,SPRN_SPRG_THREAD
	mfcr	r10
	lwz	r11,TASK_STACK-THREAD(r12)	/* r11 = base of kernel stack */
	mflr	r9
	addi	r11,r11,THREAD_SIZE - INT_FRAME_SIZE
	rlwinm	r10,r10,0,4,2	/* Clear SO bit in CR */
	tophys(r11,r11)				/* frame address, MMU still off */
	stw	r10,_CCR(r11)		/* save registers */
	mfspr	r10,SPRN_SRR0
	stw	r9,_LINK(r11)
	mfspr	r9,SPRN_SRR1		/* r9 = user MSR, live until RFI */
	stw	r1,GPR1(r11)
	stw	r1,0(r11)			/* back chain = user sp */
	tovirt(r1,r11)			/* set new kernel sp */
	stw	r10,_NIP(r11)
#ifdef CONFIG_40x
	rlwinm	r9,r9,0,14,12		/* clear MSR_WE (necessary?) */
#else
	LOAD_MSR_KERNEL(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
	MTMSRD(r10)			/* (except for mach check in rtas) */
#endif
	lis	r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
	stw	r2,GPR2(r11)
	addi	r10,r10,STACK_FRAME_REGS_MARKER@l
	stw	r9,_MSR(r11)
	/* NOTE(review): +1 presumably marks this as a partial-regs
	 * (syscall) frame for FULL_REGS() -- confirm against _TRAP users */
	li	r2, \trapno + 1
	stw	r10,8(r11)
	stw	r2,_TRAP(r11)
	SAVE_GPR(0, r11)		/* syscall number */
	SAVE_4GPRS(3, r11)		/* syscall arguments r3-r6 */
	SAVE_2GPRS(7, r11)		/* ... and r7-r8 */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	addi	r2,r12,-THREAD
	stw	r11,PT_REGS(r12)	/* thread->regs = pt_regs pointer */
#if defined(CONFIG_40x)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
#endif
	ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#if defined(CONFIG_40x)
	beq+	3f			/* cr0.eq set above: IDM not set */
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif

3:
	tovirt(r2, r2)			/* set r2 to current */
	lis	r11, transfer_to_syscall@h
	ori	r11, r11, transfer_to_syscall@l
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * If MSR is changing we need to keep interrupts disabled at this point
	 * otherwise we might risk taking an interrupt before we tell lockdep
	 * they are enabled.
	 */
	LOAD_MSR_KERNEL(r10, MSR_KERNEL)
	rlwimi	r10, r9, 0, MSR_EE	/* inherit user MSR_EE only */
#else
	LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
#endif
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	SYNC
	RFI				/* jump to handler, enable MMU */
.endm

/*
 * Note: code which follows this uses cr0.eq (set if from kernel),
 * r11, r12 (SRR0), and r9 (SRR1).
@@ -119,8 +200,4 @@ label:
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_SYS(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL | MSR_EE, transfer_to_handler, \
			  ret_from_except)

#endif /* __HEAD_32_H__ */
+1 −2
Original line number Diff line number Diff line
@@ -348,8 +348,7 @@ _ENTRY(saved_ksp_limit)

/* 0x0C00 - System Call Exception */
	START_EXCEPTION(0x0C00,	SystemCall)
	EXCEPTION_PROLOG
	EXC_XFER_SYS(0xc00, DoSyscall)
	SYSCALL_ENTRY	0xc00

	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
+1 −2
Original line number Diff line number Diff line
@@ -185,8 +185,7 @@ Alignment:
/* System call */
	. = 0xc00
SystemCall:
	EXCEPTION_PROLOG
	EXC_XFER_SYS(0xc00, DoSyscall)
	SYSCALL_ENTRY	0xc00

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)