Commit 48593975 authored by Thomas Gleixner, committed by Ingo Molnar

x86: Use CONFIG_PREEMPTION



CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by
CONFIG_PREEMPT_RT. Both PREEMPT and PREEMPT_RT require the same
functionality which today depends on CONFIG_PREEMPT.

Switch the entry code, preempt and kprobes conditionals over to
CONFIG_PREEMPTION.
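
The Kconfig wiring itself (PREEMPT and PREEMPT_RT each select PREEMPTION) is not part of this patch; what matters for the hunks below is the effective preprocessor view. A minimal standalone C sketch of that view, assuming a build that chose one of the two models (the #if here stands in for Kconfig's "select", which is the real mechanism):

#include <stdio.h>

/* Illustrative only: pretend .config picked one preemption model.
 * In the real tree the common symbol comes from "select PREEMPTION"
 * in Kconfig, not from cpp, but C code sees the same result. */
#define CONFIG_PREEMPT 1			/* or CONFIG_PREEMPT_RT */

#if defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_RT)
# define CONFIG_PREEMPTION 1
#endif

int main(void)
{
#ifdef CONFIG_PREEMPTION
	puts("fully preemptible kernel (PREEMPT or PREEMPT_RT)");
#else
	puts("not a fully preemptible kernel");
#endif
	return 0;
}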

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20190726212124.608488448@linutronix.de


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 92616606
arch/x86/entry/entry_32.S +3 −3
@@ -63,7 +63,7 @@
 * enough to patch inline, increasing performance.
 */

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
@@ -1084,7 +1084,7 @@ restore_all:
	INTERRUPT_RETURN

restore_all_kernel:
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	.Lno_preempt
@@ -1364,7 +1364,7 @@ ENTRY(xen_hypervisor_callback)
ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
arch/x86/entry/entry_64.S +2 −2
@@ -662,7 +662,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)

/* Returning to kernel space */
retint_kernel:
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
	/* Interrupts are off */
	/* Check if we need preemption */
	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
@@ -1113,7 +1113,7 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
	call	xen_evtchn_do_upcall
	LEAVE_IRQ_STACK

-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
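
As a reading aid for the retint_kernel hunk above: preemption on return to kernel space happens only if the interrupted context had interrupts enabled (EFLAGS.IF, bit 9) and __preempt_count reads zero. On x86 the need-resched flag is folded into __preempt_count as an inverted bit, so the single cmpl against zero tests "preempt count is zero" and "reschedule pending" at once. A hypothetical userspace C sketch of that decision (all names below are stand-ins for illustration, not kernel API):

#include <stdio.h>

#define X86_EFLAGS_IF (1UL << 9)	/* interrupt-enable flag, bit 9 */

/* Stand-in for the per-cpu __preempt_count; zero means both
 * "preemption allowed" and "reschedule pending" on x86. */
static unsigned int fake_preempt_count;

static void retint_kernel_sketch(unsigned long saved_eflags)
{
	/* btl $9, EFLAGS(%rsp): never preempt if irqs were off */
	if (!(saved_eflags & X86_EFLAGS_IF)) {
		puts("interrupts were off: plain return");
		return;
	}
	/* cmpl $0, __preempt_count: nonzero means "no", for either reason */
	if (fake_preempt_count != 0) {
		puts("preemption disabled or no resched pending: plain return");
		return;
	}
	puts("call preempt_schedule_irq()");
}

int main(void)
{
	retint_kernel_sketch(X86_EFLAGS_IF);	/* would preempt */
	fake_preempt_count = 1;
	retint_kernel_sketch(X86_EFLAGS_IF);	/* would not */
	retint_kernel_sketch(0);		/* irqs were off: never */
	return 0;
}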
arch/x86/entry/thunk_32.S +1 −1
@@ -34,7 +34,7 @@
	THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
#endif

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
	THUNK ___preempt_schedule, preempt_schedule
	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
	EXPORT_SYMBOL(___preempt_schedule)
arch/x86/entry/thunk_64.S +2 −2
@@ -46,7 +46,7 @@
	THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
#endif

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
	THUNK ___preempt_schedule, preempt_schedule
	THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
	EXPORT_SYMBOL(___preempt_schedule)
@@ -55,7 +55,7 @@

#if defined(CONFIG_TRACE_IRQFLAGS) \
 || defined(CONFIG_DEBUG_LOCK_ALLOC) \
- || defined(CONFIG_PREEMPT)
+ || defined(CONFIG_PREEMPTION)
.L_restore:
	popq %r11
	popq %r10
arch/x86/include/asm/preempt.h +1 −1
@@ -102,7 +102,7 @@ static __always_inline bool should_resched(int preempt_offset)
	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}

-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
  extern asmlinkage void ___preempt_schedule(void);
# define __preempt_schedule() \
	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
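
The ___preempt_schedule called here is the asm thunk updated in the thunk_32.S/thunk_64.S hunks above; it saves and restores all caller-clobbered registers around the real preempt_schedule(), which is what lets the call be issued from inline asm without a clobber list. For context, the main consumer is the generic preempt_enable(); a simplified excerpt (trimmed from include/linux/preempt.h of this era):

/* Simplified: when the preempt count drops to zero and a reschedule
 * is pending, enter the scheduler via the register-preserving thunk. */
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)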