Commit 77339e61 authored by Lai Jiangshan, committed by Paul E. McKenney

rcu: Provide wrappers for uses of ->rcu_read_lock_nesting



This commit provides wrapper functions for uses of ->rcu_read_lock_nesting
to improve readability and to ease future changes to support inlining
of __rcu_read_lock() and __rcu_read_unlock().

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent c51f83c3
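
Before the diff itself, one note: the write-side helpers added below pair with the pre-existing rcu_preempt_depth() getter, so every access to ->rcu_read_lock_nesting funnels through one small accessor set. A minimal sketch of the getter, assuming the long-standing definition in include/linux/rcupdate.h (mainline spells it as a macro; the function form here is for illustration only):

/*
 * Illustrative sketch: read the current task's RCU read-side
 * nesting depth.  The kernel defines this as a macro in
 * include/linux/rcupdate.h; a function is shown for clarity.
 */
static inline int rcu_preempt_depth(void)
{
	return current->rcu_read_lock_nesting;
}

With reads and writes centralized this way, a later patch can change how the depth is stored or updated (for example, to support inlining __rcu_read_lock() and __rcu_read_unlock()) without touching the call sites converted below.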
kernel/rcu/tree_exp.h +2 −2
@@ -610,7 +610,7 @@ static void rcu_exp_handler(void *unused)
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
-	if (!t->rcu_read_lock_nesting) {
+	if (!rcu_preempt_depth()) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
@@ -634,7 +634,7 @@ static void rcu_exp_handler(void *unused)
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
-	if (t->rcu_read_lock_nesting > 0) {
+	if (rcu_preempt_depth() > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			rdp->exp_deferred_qs = true;
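
The two hunks above turn on a single predicate: the interrupted task is outside any RCU read-side critical section exactly when its nesting depth is zero. A sketch of the immediate-report condition as the handler applies it (the helper name is illustrative, not part of the patch):

/*
 * Illustrative only: may rcu_exp_handler() report an expedited QS
 * immediately?  True when the interrupted task holds no
 * rcu_read_lock() and the CPU is neither preempt- nor
 * softirq-disabled, or is idle (an extended quiescent state).
 */
static bool exp_qs_reportable_now(void)
{
	return !rcu_preempt_depth() &&
	       (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		rcu_dynticks_curr_cpu_in_eqs());
}

When this does not hold, the quiescent state is deferred, which is what the second hunk's rcu_preempt_depth() > 0 branch records via rdp->exp_deferred_qs.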
kernel/rcu/tree_plugin.h +34 −19
@@ -290,8 +290,8 @@ void rcu_note_context_switch(bool preempt)

	trace_rcu_utilization(TPS("Start context switch"));
	lockdep_assert_irqs_disabled();
-	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
-	if (t->rcu_read_lock_nesting > 0 &&
+	WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
+	if (rcu_preempt_depth() > 0 &&
	    !t->rcu_read_unlock_special.b.blocked) {

		/* Possibly blocking in an RCU read-side critical section. */
@@ -348,6 +348,21 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
#define RCU_NEST_NMAX (-INT_MAX / 2)
#define RCU_NEST_PMAX (INT_MAX / 2)

+static void rcu_preempt_read_enter(void)
+{
+	current->rcu_read_lock_nesting++;
+}
+
+static void rcu_preempt_read_exit(void)
+{
+	current->rcu_read_lock_nesting--;
+}
+
+static void rcu_preempt_depth_set(int val)
+{
+	current->rcu_read_lock_nesting = val;
+}
+
/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
@@ -355,9 +370,9 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 */
void __rcu_read_lock(void)
{
-	current->rcu_read_lock_nesting++;
+	rcu_preempt_read_enter();
	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
-		WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
+		WARN_ON_ONCE(rcu_preempt_depth() > RCU_NEST_PMAX);
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -373,19 +388,19 @@ void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

-	if (t->rcu_read_lock_nesting != 1) {
-		--t->rcu_read_lock_nesting;
+	if (rcu_preempt_depth() != 1) {
+		rcu_preempt_read_exit();
	} else {
		barrier();  /* critical section before exit code. */
-		t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
+		rcu_preempt_depth_set(-RCU_NEST_BIAS);
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
-		t->rcu_read_lock_nesting = 0;
+		rcu_preempt_depth_set(0);
	}
	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
-		int rrln = t->rcu_read_lock_nesting;
+		int rrln = rcu_preempt_depth();

		WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
	}
@@ -539,7 +554,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
-	       t->rcu_read_lock_nesting <= 0;
+	       rcu_preempt_depth() <= 0;
}

/*
@@ -552,16 +567,16 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
static void rcu_preempt_deferred_qs(struct task_struct *t)
{
	unsigned long flags;
-	bool couldrecurse = t->rcu_read_lock_nesting >= 0;
+	bool couldrecurse = rcu_preempt_depth() >= 0;

	if (!rcu_preempt_need_deferred_qs(t))
		return;
	if (couldrecurse)
-		t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
+		rcu_preempt_depth_set(rcu_preempt_depth() - RCU_NEST_BIAS);
	local_irq_save(flags);
	rcu_preempt_deferred_qs_irqrestore(t, flags);
	if (couldrecurse)
-		t->rcu_read_lock_nesting += RCU_NEST_BIAS;
+		rcu_preempt_depth_set(rcu_preempt_depth() + RCU_NEST_BIAS);
}

/*
@@ -672,7 +687,7 @@ static void rcu_flavor_sched_clock_irq(int user)
	if (user || rcu_is_cpu_rrupt_from_idle()) {
		rcu_note_voluntary_context_switch(current);
	}
-	if (t->rcu_read_lock_nesting > 0 ||
+	if (rcu_preempt_depth() > 0 ||
	    (preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK))) {
		/* No QS, force context switch if deferred. */
		if (rcu_preempt_need_deferred_qs(t)) {
@@ -682,13 +697,13 @@ static void rcu_flavor_sched_clock_irq(int user)
	} else if (rcu_preempt_need_deferred_qs(t)) {
		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
		return;
-	} else if (!t->rcu_read_lock_nesting) {
+	} else if (!rcu_preempt_depth()) {
		rcu_qs(); /* Report immediate QS. */
		return;
	}

	/* If GP is oldish, ask for help from rcu_read_unlock_special(). */
-	if (t->rcu_read_lock_nesting > 0 &&
+	if (rcu_preempt_depth() > 0 &&
	    __this_cpu_read(rcu_data.core_needs_qs) &&
	    __this_cpu_read(rcu_data.cpu_no_qs.b.norm) &&
	    !t->rcu_read_unlock_special.b.need_qs &&
@@ -709,11 +724,11 @@ void exit_rcu(void)
	struct task_struct *t = current;

	if (unlikely(!list_empty(&current->rcu_node_entry))) {
-		t->rcu_read_lock_nesting = 1;
+		rcu_preempt_depth_set(1);
		barrier();
		WRITE_ONCE(t->rcu_read_unlock_special.b.blocked, true);
-	} else if (unlikely(t->rcu_read_lock_nesting)) {
-		t->rcu_read_lock_nesting = 1;
+	} else if (unlikely(rcu_preempt_depth())) {
+		rcu_preempt_depth_set(1);
	} else {
		return;
	}
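
A closing note on the bias arithmetic the new wrappers carry: when the outermost rcu_read_unlock() runs, the depth is parked at -RCU_NEST_BIAS while rcu_read_unlock_special() executes, so any nested lock/unlock pair taken from an interrupt during that window sees a depth other than 1 and stays on the fast path, preventing recursion into the slow path. A self-contained userspace sketch of that counter protocol (RCU_NEST_BIAS is assumed to be INT_MAX, matching tree_plugin.h of this era; everything else is illustrative, not kernel code):

#include <limits.h>
#include <stdio.h>

#define RCU_NEST_BIAS	INT_MAX	/* assumed value, per tree_plugin.h */

static int depth;	/* stand-in for current->rcu_read_lock_nesting */

static void read_lock(void)
{
	depth++;			/* rcu_preempt_read_enter() */
}

static void read_unlock(void)
{
	if (depth != 1) {
		depth--;		/* fast path: nested, or biased negative */
	} else {
		depth = -RCU_NEST_BIAS;	/* park negative: nested unlocks stay fast */
		/* rcu_read_unlock_special(t) would run here */
		depth = 0;		/* rcu_preempt_depth_set(0) */
	}
}

int main(void)
{
	read_lock();
	read_lock();	/* nested critical section */
	read_unlock();	/* fast path: depth 2 -> 1 */
	read_unlock();	/* outermost: slow path taken once */
	printf("final depth: %d\n", depth);
	return 0;
}

Running it prints "final depth: 0": the nested unlock only decrements, and the outermost unlock alone takes the slow path. The RCU_NEST_NMAX/RCU_NEST_PMAX bounds in the hunks above are the sanity window the PROVE_LOCKING warnings check this counter against.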