Commit c1a280b6 authored by Thomas Gleixner, committed by Ingo Molnar

sched/preempt: Use CONFIG_PREEMPTION where appropriate



CONFIG_PREEMPTION is selected by CONFIG_PREEMPT and by
CONFIG_PREEMPT_RT. Both PREEMPT and PREEMPT_RT require the same
functionality, which today depends on CONFIG_PREEMPT.

Switch the preemption code, scheduler and init task over to use
CONFIG_PREEMPTION.

That's the first step towards RT in that area. The more complex changes are
coming separately.
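For orientation, a minimal preprocessor model of that select relationship (an assumption-level sketch, not code from this patch; in Kconfig terms, both PREEMPT and PREEMPT_RT select PREEMPTION):

/* Hypothetical macros modelling the Kconfig selects: either
 * preemption model implies the common CONFIG_PREEMPTION symbol. */
#if defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_RT)
# define CONFIG_PREEMPTION 1
#endif

#ifdef CONFIG_PREEMPTION
/* built for full-preempt and RT kernels alike */
#endif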

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/20190726212124.117528401@linutronix.de


Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2a11c76e
include/asm-generic/preempt.h +2 −2
@@ -78,11 +78,11 @@ static __always_inline bool should_resched(int preempt_offset)
 			tif_need_resched());
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 extern asmlinkage void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
 extern asmlinkage void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
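For context, a hedged usage sketch (hypothetical caller, not part of this patch): with the guard widened, any preempt_enable() in a CONFIG_PREEMPTION=y build, whether PREEMPT or PREEMPT_RT, can end up in the preempt_schedule() declared above.

#include <linux/preempt.h>

/* Hypothetical helper illustrating the call path. */
static void example_percpu_section(void)
{
	preempt_disable();	/* raise preempt_count */
	/* ... touch per-CPU state that must not migrate ... */
	preempt_enable();	/* may call __preempt_schedule() if a
				 * reschedule became pending meanwhile */
}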
include/linux/preempt.h +3 −3
@@ -182,7 +182,7 @@ do { \

 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -203,7 +203,7 @@ do { \
 		__preempt_schedule(); \
 } while (0)
 
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -217,7 +217,7 @@ do { \
 } while (0)
 
 #define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #define preempt_disable_notrace() \
 do { \
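The _notrace variants whose definition begins above follow the same pattern; a hedged sketch of their consumer side (hypothetical tracer hook): a reschedule taken on enable must not recurse into the tracer, hence __preempt_schedule_notrace().

#include <linux/preempt.h>

/* Hypothetical hook running in function-tracing context. */
static void example_trace_hook(void)
{
	preempt_disable_notrace();	/* no tracing instrumentation */
	/* ... record an event ... */
	preempt_enable_notrace();	/* uses __preempt_schedule_notrace()
					 * under CONFIG_PREEMPTION */
}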
include/linux/sched.h +3 −3
@@ -1767,7 +1767,7 @@ static inline int test_tsk_need_resched(struct task_struct *tsk)
  * value indicates whether a reschedule was done in fact.
  * cond_resched_lock() will drop the spinlock before scheduling,
  */
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 extern int _cond_resched(void);
 #else
 static inline int _cond_resched(void) { return 0; }
@@ -1796,12 +1796,12 @@ static inline void cond_resched_rcu(void)
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * task waiting?: (technically does not depend on CONFIG_PREEMPTION,
  * but a general need for low latency)
  */
 static inline int spin_needbreak(spinlock_t *lock)
 {
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	return spin_is_contended(lock);
 #else
 	return 0;
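A hedged sketch of the canonical consumer (hypothetical loop; cond_resched_lock() is the existing API that combines the spin_needbreak() check with dropping the lock):

#include <linux/sched.h>
#include <linux/spinlock.h>

/* Hypothetical scan that holds a lock for many iterations. */
static void example_scan(spinlock_t *lock)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < 1024; i++) {
		/* ... one unit of work under the lock ... */
		cond_resched_lock(lock);	/* checks spin_needbreak() and
						 * need_resched(); drops the
						 * lock and reschedules only
						 * when required */
	}
	spin_unlock(lock);
}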
init/init_task.c +1 −1
@@ -174,7 +174,7 @@ struct task_struct init_task
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.ret_stack	= NULL,
 #endif
-#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
+#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
 	.trace_recursion = 0,
 #endif
 #ifdef CONFIG_LIVEPATCH
init/main.c +1 −1
@@ -433,7 +433,7 @@ noinline void __ref rest_init(void)

 	/*
 	 * Enable might_sleep() and smp_processor_id() checks.
-	 * They cannot be enabled earlier because with CONFIG_PREEMPT=y
+	 * They cannot be enabled earlier because with CONFIG_PREEMPTION=y
 	 * kernel_thread() would trigger might_sleep() splats. With
 	 * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
 	 * already, but it's stuck on the kthreadd_done completion.
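For illustration of the checks being switched on here, a hedged sketch (hypothetical function; the splat requires CONFIG_DEBUG_ATOMIC_SLEEP): once enabled, might_sleep() from atomic context reports "BUG: sleeping function called from invalid context".

#include <linux/kernel.h>
#include <linux/preempt.h>

/* Hypothetical offender: sleeping call inside a preempt-off region. */
static void example_bad_sleep(void)
{
	preempt_disable();
	might_sleep();		/* splats once the checks are enabled */
	preempt_enable();
}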