Commit 6c7d7dbf authored by Paul E. McKenney

rcu: Rename sync_rcu_preempt_exp_done() to sync_rcu_exp_done()



Now that the RCU flavors have been consolidated, there is one common
function for checking to see if an expedited RCU grace period has
completed, namely sync_rcu_preempt_exp_done().  Because this function is
no longer specific to RCU-preempt, this commit removes the "_preempt" from
its name.  This commit also changes sync_rcu_preempt_exp_done_unlocked()
to sync_rcu_exp_done_unlocked() for the same reason.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 4bc6b745
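
For context, the pair being renamed follows a common locked-check/unlocked-wrapper shape: sync_rcu_exp_done() expects its caller to already hold the rcu_node structure's ->lock, while sync_rcu_exp_done_unlocked() acquires and releases that lock around the same check. Below is a minimal user-space sketch of that shape, assuming a pthread mutex in place of the rcu_node ->lock; the names (fake_rnp, fake_exp_done(), fake_exp_done_unlocked()) and the "done" condition are illustrative only and not part of the kernel.

/* Hypothetical sketch of the locked/unlocked helper pattern; not kernel code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_rnp {
	pthread_mutex_t lock;	/* stands in for the rcu_node ->lock */
	unsigned long expmask;	/* nonzero while something still blocks the expedited GP */
};

/* Caller must already hold rnp->lock (the sync_rcu_exp_done() shape). */
static bool fake_exp_done(struct fake_rnp *rnp)
{
	return rnp->expmask == 0;
}

/* Takes and drops the lock itself (the sync_rcu_exp_done_unlocked() shape). */
static bool fake_exp_done_unlocked(struct fake_rnp *rnp)
{
	bool ret;

	pthread_mutex_lock(&rnp->lock);
	ret = fake_exp_done(rnp);
	pthread_mutex_unlock(&rnp->lock);
	return ret;
}

int main(void)
{
	struct fake_rnp rnp = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.expmask = 0,
	};

	printf("expedited grace period done? %s\n",
	       fake_exp_done_unlocked(&rnp) ? "yes" : "no");
	return 0;
}

Built with something like "gcc -pthread", this prints "yes" because expmask starts at zero. In the diff below, the unlocked form is the one polled from swait_event_timeout_exclusive() in synchronize_sched_expedited_wait(), where no rcu_node lock is held.
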
kernel/rcu/tree_exp.h  +9 −10

@@ -148,7 +148,7 @@ static void __maybe_unused sync_exp_reset_tree(void)
  *
  * Caller must hold the specificed rcu_node structure's ->lock
  */
-static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
+static bool sync_rcu_exp_done(struct rcu_node *rnp)
 {
 	raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -157,17 +157,16 @@ static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 }
 
 /*
- * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
- * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
- * itself
+ * Like sync_rcu_exp_done(), but this function assumes the caller doesn't
+ * hold the rcu_node's ->lock, and will acquire and release the lock itself
  */
-static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
+static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
 {
 	unsigned long flags;
 	bool ret;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	ret = sync_rcu_preempt_exp_done(rnp);
+	ret = sync_rcu_exp_done(rnp);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
 	return ret;
@@ -191,7 +190,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
 	unsigned long mask;
 
 	for (;;) {
-		if (!sync_rcu_preempt_exp_done(rnp)) {
+		if (!sync_rcu_exp_done(rnp)) {
 			if (!rnp->expmask)
 				rcu_initiate_boost(rnp, flags);
 			else
@@ -471,9 +470,9 @@ static void synchronize_sched_expedited_wait(void)
 	for (;;) {
 		ret = swait_event_timeout_exclusive(
 				rcu_state.expedited_wq,
-				sync_rcu_preempt_exp_done_unlocked(rnp_root),
+				sync_rcu_exp_done_unlocked(rnp_root),
 				jiffies_stall);
-		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
+		if (ret > 0 || sync_rcu_exp_done_unlocked(rnp_root))
 			return;
 		WARN_ON(ret < 0);  /* workqueues should not be signaled. */
 		if (rcu_cpu_stall_suppress)
@@ -507,7 +506,7 @@ static void synchronize_sched_expedited_wait(void)
 			rcu_for_each_node_breadth_first(rnp) {
 				if (rnp == rnp_root)
 					continue; /* printed unconditionally */
-				if (sync_rcu_preempt_exp_done_unlocked(rnp))
+				if (sync_rcu_exp_done_unlocked(rnp))
 					continue;
 				pr_cont(" l=%u:%d-%d:%#lx/%c",
 					rnp->level, rnp->grplo, rnp->grphi,
kernel/rcu/tree_plugin.h  +2 −2

@@ -485,7 +485,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
 		WARN_ON_ONCE(rnp->completedqs == rnp->gp_seq &&
 			     (!empty_norm || rnp->qsmask));
-		empty_exp = sync_rcu_preempt_exp_done(rnp);
+		empty_exp = sync_rcu_exp_done(rnp);
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 		np = rcu_next_node_entry(t, rnp);
 		list_del_init(&t->rcu_node_entry);
@@ -509,7 +509,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 		 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
 		 * so we must take a snapshot of the expedited state.
 		 */
-		empty_exp_now = sync_rcu_preempt_exp_done(rnp);
+		empty_exp_now = sync_rcu_exp_done(rnp);
 		if (!empty_norm && !rcu_preempt_blocked_readers_cgp(rnp)) {
 			trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
 							 rnp->gp_seq,