Commit b5b447b6 authored by Valentin Schneider, committed by Thomas Gleixner
Browse files

x86/entry: Remove unneeded need_resched() loop



Since the enabling and disabling of IRQs within preempt_schedule_irq() is
contained in a need_resched() loop, there is no need for the outer
architecture specific loop.

Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Link: https://lkml.kernel.org/r/20190311224752.8337-14-valentin.schneider@arm.com
parent 79a3aaa7
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -766,13 +766,12 @@ END(ret_from_exception)
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
-.Lneed_resched:
 	cmpl	$0, PER_CPU_VAR(__preempt_count)
 	jnz	restore_all_kernel
 	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz	restore_all_kernel
 	call	preempt_schedule_irq
-	jmp	.Lneed_resched
+	jmp	restore_all_kernel
 END(resume_kernel)
 #endif

+1 −2
Original line number Diff line number Diff line
@@ -645,10 +645,9 @@ retint_kernel:
 	/* Check if we need preemption */
 	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
 	jnc	1f
-0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
+	cmpl	$0, PER_CPU_VAR(__preempt_count)
 	jnz	1f
 	call	preempt_schedule_irq
-	jmp	0b
 1:
 #endif
 	/*