Commit 284a8c93 authored by Paul E. McKenney

rcu: Per-CPU operation cleanups to rcu_*_qs() functions



The rcu_bh_qs(), rcu_preempt_qs(), and rcu_sched_qs() functions use
old-style per-CPU variable access and write to ->passed_quiesce even
if it is already set.  This commit therefore updates them to use the new-style
per-CPU variable access functions and avoids the spurious writes.
This commit also eliminates the "cpu" argument to these functions because
they are always invoked on the indicated CPU.

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 1d082fd0
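
The before/after pattern at the heart of this commit, as a minimal standalone sketch (the per-CPU variable my_data and the record_qs_*() helpers are hypothetical stand-ins for rcu_sched_data/rcu_bh_data/rcu_preempt_data and the rcu_*_qs() functions; this is an illustration, not kernel code):

#include <linux/percpu.h>

/* Hypothetical stand-in for struct rcu_data. */
struct my_data {
	unsigned long gpnum;
	int passed_quiesce;
};
static DEFINE_PER_CPU(struct my_data, my_data);

/* Old style: takes a cpu argument, dereferences via per_cpu(), and
 * writes ->passed_quiesce unconditionally, even when it is already set. */
static void record_qs_old(int cpu)
{
	struct my_data *mdp = &per_cpu(my_data, cpu);

	mdp->passed_quiesce = 1;	/* spurious write if already 1 */
}

/* New style: no cpu argument (the caller must be running on the CPU in
 * question with preemption disabled), and the store happens only when
 * the flag is not yet set. */
static void record_qs_new(void)
{
	if (!__this_cpu_read(my_data.passed_quiesce))
		__this_cpu_write(my_data.passed_quiesce, 1);
}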
include/linux/rcupdate.h (+2 −2)
@@ -261,8 +261,8 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
+void rcu_sched_qs(void);
+void rcu_bh_qs(void);
 void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 void rcu_idle_enter(void);
include/linux/rcutiny.h (+1 −1)
@@ -80,7 +80,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 
 static inline void rcu_note_context_switch(int cpu)
 {
-	rcu_sched_qs(cpu);
+	rcu_sched_qs();
 }
 
 /*
kernel/rcu/tiny.c (+5 −5)
@@ -72,7 +72,7 @@ static void rcu_idle_enter_common(long long newval)
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
 	}
-	rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */
+	rcu_sched_qs(); /* implies rcu_bh_qs() */
 	barrier();
 	rcu_dynticks_nesting = newval;
 }
@@ -217,7 +217,7 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
 	unsigned long flags;
 
@@ -231,7 +231,7 @@ void rcu_sched_qs(int cpu)
 /*
  * Record an rcu_bh quiescent state.
  */
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
 	unsigned long flags;
 
@@ -251,9 +251,9 @@ void rcu_check_callbacks(int cpu, int user)
 {
 	RCU_TRACE(check_cpu_stalls());
 	if (user || rcu_is_cpu_rrupt_from_idle())
-		rcu_sched_qs(cpu);
+		rcu_sched_qs();
 	else if (!in_softirq())
-		rcu_bh_qs(cpu);
+		rcu_bh_qs();
 	if (user)
 		rcu_note_voluntary_context_switch(current);
 }
kernel/rcu/tree.c (+18 −16)
@@ -188,22 +188,24 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
  * one since the start of the grace period, this just sets a flag.
  * The caller must have disabled preemption.
  */
-void rcu_sched_qs(int cpu)
+void rcu_sched_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_sched"),
+				       __this_cpu_read(rcu_sched_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_sched_data.passed_quiesce, 1);
+	}
 }
 
-void rcu_bh_qs(int cpu)
+void rcu_bh_qs(void)
 {
-	struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
+	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_bh"),
+				       __this_cpu_read(rcu_bh_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+	}
 }
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
@@ -278,7 +280,7 @@ static void rcu_momentary_dyntick_idle(void)
 void rcu_note_context_switch(int cpu)
 {
 	trace_rcu_utilization(TPS("Start context switch"));
-	rcu_sched_qs(cpu);
+	rcu_sched_qs();
 	rcu_preempt_note_context_switch(cpu);
 	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
 		rcu_momentary_dyntick_idle();
@@ -2395,8 +2397,8 @@ void rcu_check_callbacks(int cpu, int user)
 		 * at least not while the corresponding CPU is online.
 		 */
 
-		rcu_sched_qs(cpu);
-		rcu_bh_qs(cpu);
+		rcu_sched_qs();
+		rcu_bh_qs();
 
 	} else if (!in_softirq()) {
 
@@ -2407,7 +2409,7 @@ void rcu_check_callbacks(int cpu, int user)
 		 * critical section, so note it.
 		 */
 
-		rcu_bh_qs(cpu);
+		rcu_bh_qs();
 	}
 	rcu_preempt_check_callbacks(cpu);
 	if (rcu_pending(cpu))
kernel/rcu/tree_plugin.h (+15 −12)
@@ -158,15 +158,17 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
 * As with the other rcu_*_qs() functions, callers to this function
 * must disable preemption.
 */
-static void rcu_preempt_qs(int cpu)
-{
-	struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
-
-	if (rdp->passed_quiesce == 0)
-		trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
-	rdp->passed_quiesce = 1;
-	current->rcu_read_unlock_special.b.need_qs = false;
+static void rcu_preempt_qs(void)
+{
+	if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+		trace_rcu_grace_period(TPS("rcu_preempt"),
+				       __this_cpu_read(rcu_preempt_data.gpnum),
+				       TPS("cpuqs"));
+		__this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+		barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
+		current->rcu_read_unlock_special.b.need_qs = false;
+	}
 }
 
 /*
  * We have entered the scheduler, and the current task might soon be
@@ -256,7 +258,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 	 * grace period, then the fact that the task has been enqueued
 	 * means that we continue to block the current grace period.
 	 */
-	rcu_preempt_qs(cpu);
+	rcu_preempt_qs();
 }
 
 /*
@@ -352,7 +354,7 @@ void rcu_read_unlock_special(struct task_struct *t)
 	 */
 	special = t->rcu_read_unlock_special;
 	if (special.b.need_qs) {
-		rcu_preempt_qs(smp_processor_id());
+		rcu_preempt_qs();
 		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;
@@ -651,11 +653,12 @@ static void rcu_preempt_check_callbacks(int cpu)
 	struct task_struct *t = current;
 
 	if (t->rcu_read_lock_nesting == 0) {
-		rcu_preempt_qs(cpu);
+		rcu_preempt_qs();
 		return;
 	}
 	if (t->rcu_read_lock_nesting > 0 &&
-	    per_cpu(rcu_preempt_data, cpu).qs_pending)
+	    per_cpu(rcu_preempt_data, cpu).qs_pending &&
+	    !per_cpu(rcu_preempt_data, cpu).passed_quiesce)
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
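
One subtlety in the tree_plugin.h hunk above: rcu_preempt_qs() now clears ->rcu_read_unlock_special.b.need_qs only after a barrier(), and rcu_preempt_check_callbacks() requests a quiescent state only while passed_quiesce is still clear. A plausible reading of that coordination, as a compilable userspace sketch (report_qs() models rcu_preempt_qs() in process context, check_qs() models rcu_preempt_check_callbacks() arriving by interrupt on the same CPU; this is an interpretation, not taken from the commit):

#include <stdbool.h>

static volatile bool passed_quiesce;	/* models rcu_preempt_data.passed_quiesce */
static volatile bool need_qs;		/* models ->rcu_read_unlock_special.b.need_qs */

#define compiler_barrier()	__asm__ __volatile__("" ::: "memory")

void report_qs(void)			/* process context, preemption disabled */
{
	if (!passed_quiesce) {
		passed_quiesce = true;
		/* Keep the store above ahead of the clear below: if the
		 * compiler emitted need_qs = false first, an interrupt
		 * landing between the two stores would still see
		 * passed_quiesce == false and re-request a quiescent
		 * state that has in fact just been reported. */
		compiler_barrier();
		need_qs = false;
	}
}

void check_qs(void)			/* interrupt on the same CPU */
{
	if (!passed_quiesce)
		need_qs = true;
}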
