Commit 1bb33644 authored by Paul E. McKenney's avatar Paul E. McKenney
Browse files

rcu: Rename rcu_data's ->deferred_qs to ->exp_deferred_qs



The rcu_data structure's ->deferred_qs field is used to indicate that the
current CPU is blocking an expedited grace period (perhaps a future one).
Given that it is used only for expedited grace periods, its current name
is misleading, so this commit renames it to ->exp_deferred_qs.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
parent eddded80
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -154,7 +154,7 @@ struct rcu_data {
	bool		core_needs_qs;	/* Core waits for quiesc state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible ->gp_seq wrap. */
-	bool		deferred_qs;	/* This CPU awaiting a deferred QS? */
+	bool		exp_deferred_qs; /* This CPU awaiting a deferred QS? */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
+4 −4
Original line number Diff line number Diff line
@@ -250,7 +250,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
-	WRITE_ONCE(rdp->deferred_qs, false);
+	WRITE_ONCE(rdp->exp_deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

@@ -616,7 +616,7 @@ static void rcu_exp_handler(void *unused)
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
-			rdp->deferred_qs = true;
+			rdp->exp_deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
@@ -638,7 +638,7 @@ static void rcu_exp_handler(void *unused)
	if (t->rcu_read_lock_nesting > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
-			rdp->deferred_qs = true;
+			rdp->exp_deferred_qs = true;
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -661,7 +661,7 @@ static void rcu_exp_handler(void *unused)
	 *
	 * Otherwise, force a context switch after the CPU enables everything.
	 */
-	rdp->deferred_qs = true;
+	rdp->exp_deferred_qs = true;
	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
		rcu_preempt_deferred_qs(t);
+7 −7
Original line number Diff line number Diff line
@@ -237,10 +237,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
	 * no need to check for a subsequent expedited GP.  (Though we are
	 * still in a quiescent state in any case.)
	 */
-	if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
+	if (blkd_state & RCU_EXP_BLKD && rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);
	else
-		WARN_ON_ONCE(rdp->deferred_qs);
+		WARN_ON_ONCE(rdp->exp_deferred_qs);
}

/*
@@ -337,7 +337,7 @@ void rcu_note_context_switch(bool preempt)
	 * means that we continue to block the current grace period.
	 */
	rcu_qs();
-	if (rdp->deferred_qs)
+	if (rdp->exp_deferred_qs)
		rcu_report_exp_rdp(rdp);
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
@@ -451,7 +451,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
	 */
	special = t->rcu_read_unlock_special;
	rdp = this_cpu_ptr(&rcu_data);
-	if (!special.s && !rdp->deferred_qs) {
+	if (!special.s && !rdp->exp_deferred_qs) {
		local_irq_restore(flags);
		return;
	}
@@ -459,7 +459,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
	if (special.b.need_qs) {
		rcu_qs();
		t->rcu_read_unlock_special.b.need_qs = false;
-		if (!t->rcu_read_unlock_special.s && !rdp->deferred_qs) {
+		if (!t->rcu_read_unlock_special.s && !rdp->exp_deferred_qs) {
			local_irq_restore(flags);
			return;
		}
@@ -471,7 +471,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
	 * tasks are handled when removing the task from the
	 * blocked-tasks list below.
	 */
-	if (rdp->deferred_qs) {
+	if (rdp->exp_deferred_qs) {
		rcu_report_exp_rdp(rdp);
		if (!t->rcu_read_unlock_special.s) {
			local_irq_restore(flags);
@@ -560,7 +560,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 */
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
-	return (__this_cpu_read(rcu_data.deferred_qs) ||
+	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
		READ_ONCE(t->rcu_read_unlock_special.s)) &&
	       t->rcu_read_lock_nesting <= 0;
}