Commit 49918a54 authored by Paul E. McKenney

rcu: Clean up flavor-related definitions and comments in tree.c

parent 679d3f30
+32 −42
@@ -382,12 +382,11 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 }
 
 /*
- * Register a quiescent state for all RCU flavors.  If there is an
+ * Register an urgently needed quiescent state.  If there is an
  * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
- * dyntick-idle quiescent state visible to other CPUs (but only for those
- * RCU flavors in desperate need of a quiescent state, which will normally
- * be none of them).  Either way, do a lightweight quiescent state for
- * all RCU flavors.
+ * dyntick-idle quiescent state visible to other CPUs, which will in
+ * some cases serve for expedited as well as normal grace periods.
+ * Either way, register a lightweight quiescent state.
  *
  * The barrier() calls are redundant in the common case when this is
  * called externally, but just in case this is called from within this
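
The rewritten comment describes a two-tier scheme: every call registers a cheap lightweight quiescent state, and the heavy-weight rcu_momentary_dyntick_idle() path runs only when one is urgently requested. A minimal userspace sketch of that shape, with all names (urgent_qs_needed, register_qs, and so on) invented for illustration rather than taken from the kernel:

#include <stdbool.h>
#include <stdio.h>

static bool urgent_qs_needed;          /* set when an emergency QS is requested */
static unsigned long lightweight_qs;   /* count of cheap quiescent states */

static void heavy_weight_qs(void)      /* stand-in for rcu_momentary_dyntick_idle() */
{
	printf("heavy-weight quiescent state\n");
}

static void register_qs(void)
{
	if (urgent_qs_needed) {        /* expensive path only in an emergency */
		urgent_qs_needed = false;
		heavy_weight_qs();
	}
	lightweight_qs++;              /* cheap path taken on every call */
}

int main(void)
{
	register_qs();                 /* lightweight only */
	urgent_qs_needed = true;
	register_qs();                 /* heavy-weight, then lightweight */
	printf("lightweight count: %lu\n", lightweight_qs);
	return 0;
}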
@@ -564,7 +563,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 
 /*
- * Return the root node of the specified rcu_state structure.
+ * Return the root node of the rcu_state structure.
  */
 static struct rcu_node *rcu_get_root(void)
 {
@@ -949,11 +948,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
  * Disable preemption to avoid false positives that could otherwise
  * happen due to the current CPU number being sampled, this task being
  * preempted, its old CPU being taken offline, resuming on some other CPU,
- * then determining that its old CPU is now offline.  Because there are
- * multiple flavors of RCU, and because this function can be called in the
- * midst of updating the flavors while a given CPU coming online or going
- * offline, it is necessary to check all flavors.  If any of the flavors
- * believe that given CPU is online, it is considered to be online.
+ * then determining that its old CPU is now offline.
  *
  * Disable checking if in an NMI handler because we cannot safely
  * report errors from NMI handlers anyway.  In addition, it is OK to use
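
The surviving comment text still captures the race: sample the CPU number, get preempted and migrated, then test a CPU that has since gone offline. A userspace sketch of the guard it describes, using no-op stubs for the kernel primitives (everything here is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* No-op stand-ins for the kernel primitives. */
static void preempt_disable(void)  { }
static void preempt_enable(void)   { }
static int  smp_processor_id(void) { return 0; }
static bool cpu_online(int cpu)    { return cpu == 0; }

static bool current_cpu_is_online(void)
{
	bool ret;

	preempt_disable();  /* forbid migration between sample and test */
	ret = cpu_online(smp_processor_id());
	preempt_enable();
	return ret;
}

int main(void)
{
	printf("online: %d\n", current_cpu_is_online());
	return 0;
}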
@@ -1563,11 +1558,10 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
 }
 
 /*
- * Awaken the grace-period kthread for the specified flavor of RCU.
- * Don't do a self-awaken, and don't bother awakening when there is
- * nothing for the grace-period kthread to do (as in several CPUs
- * raced to awaken, and we lost), and finally don't try to awaken
- * a kthread that has not yet been created.
+ * Awaken the grace-period kthread.  Don't do a self-awaken, and don't
+ * bother awakening when there is nothing for the grace-period kthread
+ * to do (as in several CPUs raced to awaken, and we lost), and finally
+ * don't try to awaken a kthread that has not yet been created.
  */
 static void rcu_gp_kthread_wake(void)
 {
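
The reworded comment lists three reasons to skip the wakeup: it would be a self-awaken, there is nothing to do, or the kthread does not exist yet. A sketch of those guards in order (struct and variable names made up for the example):

#include <stdbool.h>
#include <stdio.h>

struct kthread { const char *name; };

static struct kthread gp = { "rcu_gp" };
static struct kthread *gp_kthread = &gp;  /* NULL until spawned */
static struct kthread *current_task;      /* the caller's own thread */
static bool gp_work_pending = true;

static void gp_kthread_wake(void)
{
	if (current_task == gp_kthread ||  /* no self-awaken */
	    !gp_work_pending ||            /* lost the race to another waker */
	    !gp_kthread)                   /* kthread not yet created */
		return;
	printf("waking %s\n", gp_kthread->name);
}

int main(void)
{
	gp_kthread_wake();      /* wakes: all three guards pass */
	gp_work_pending = false;
	gp_kthread_wake();      /* silently returns: nothing to do */
	return 0;
}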
@@ -2119,13 +2113,13 @@ static int __noreturn rcu_gp_kthread(void *unused)
 }
 
 /*
- * Report a full set of quiescent states to the specified rcu_state data
- * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
- * kthread if another grace period is required.  Whether we wake
- * the grace-period kthread or it awakens itself for the next round
- * of quiescent-state forcing, that kthread will clean up after the
- * just-completed grace period.  Note that the caller must hold rnp->lock,
- * which is released before return.
+ * Report a full set of quiescent states to the rcu_state data structure.
+ * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
+ * another grace period is required.  Whether we wake the grace-period
+ * kthread or it awakens itself for the next round of quiescent-state
+ * forcing, that kthread will clean up after the just-completed grace
+ * period.  Note that the caller must hold rnp->lock, which is released
+ * before return.
  */
 static void rcu_report_qs_rsp(unsigned long flags)
 	__releases(rcu_get_root()->lock)
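
The comment's locking contract, caller acquires rnp->lock and the callee releases it before returning, is what the __releases() annotation above documents. A pthread sketch of the same hand-off pattern (names invented for the example):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static int gp_in_progress = 1;

/* Called with root_lock held; releases it before returning. */
static void report_qs_and_unlock(void)
{
	gp_in_progress = 0;               /* record the full set of QSes */
	pthread_mutex_unlock(&root_lock); /* the contract: drop caller's lock */
	/* any kthread wakeup would happen here, outside the lock */
}

int main(void)
{
	pthread_mutex_lock(&root_lock);
	report_qs_and_unlock();           /* returns with root_lock released */
	printf("gp_in_progress = %d\n", gp_in_progress);
	return 0;
}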
@@ -2212,7 +2206,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
 /*
  * Record a quiescent state for all tasks that were previously queued
  * on the specified rcu_node structure and that were blocking the current
- * RCU grace period.  The caller must hold the specified rnp->lock with
+ * RCU grace period.  The caller must hold the corresponding rnp->lock with
  * irqs disabled, and this lock is released upon return, but irqs remain
  * disabled.
  */
@@ -2714,11 +2708,11 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
 }
 
 /*
- * Schedule RCU callback invocation.  If the specified type of RCU
- * does not support RCU priority boosting, just do a direct call,
- * otherwise wake up the per-CPU kernel kthread.  Note that because we
- * are running on the current CPU with softirqs disabled, the
- * rcu_cpu_kthread_task cannot disappear out from under us.
+ * Schedule RCU callback invocation.  If the running implementation of RCU
+ * does not support RCU priority boosting, just do a direct call, otherwise
+ * wake up the per-CPU kernel kthread.  Note that because we are running
+ * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
+ * cannot disappear out from under us.
  */
 static void invoke_rcu_callbacks(struct rcu_data *rdp)
 {
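
The comment's branch, direct invocation when priority boosting is not supported and a per-CPU kthread wakeup when it is, can be sketched as follows (all names are placeholders, and the direct path is simplified to a plain call):

#include <stdbool.h>
#include <stdio.h>

static const bool priority_boosting = false;  /* build-time configuration */

static void invoke_callbacks_directly(void) { printf("direct call\n"); }
static void wake_per_cpu_kthread(void)      { printf("kthread wakeup\n"); }

static void schedule_callback_invocation(void)
{
	if (!priority_boosting)
		invoke_callbacks_directly();  /* cheap, runs right here */
	else
		wake_per_cpu_kthread();       /* boostable kthread context */
}

int main(void)
{
	schedule_callback_invocation();
	return 0;
}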
@@ -2959,11 +2953,10 @@ EXPORT_SYMBOL_GPL(cond_synchronize_rcu);

 /*
  * Check to see if there is any immediate RCU-related work to be done by
- * the current CPU, for the specified type of RCU, returning 1 if so and
- * zero otherwise.  The checks are in order of increasing expense: checks
- * that can be carried out against CPU-local state are performed first.
- * However, we must check for CPU stalls first, else we might not get
- * a chance.
+ * the current CPU, returning 1 if so and zero otherwise.  The checks are
+ * in order of increasing expense: checks that can be carried out against
+ * CPU-local state are performed first.  However, we must check for CPU
+ * stalls first, else we might not get a chance.
  */
 static int rcu_pending(void)
 {
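
The comment pins down an ordering rule: the stall check must come first so it cannot be starved, and the remaining checks run cheapest first, short-circuiting on the first hit. In sketch form (predicates invented for illustration):

#include <stdbool.h>
#include <stdio.h>

static bool stall_suspected(void)         { return false; }
static bool cpu_local_work_pending(void)  { return true;  }
static bool shared_state_needs_work(void) { return false; }

static int work_pending(void)
{
	if (stall_suspected())            /* must run first or it may never run */
		return 1;
	if (cpu_local_work_pending())     /* CPU-local state: cheapest */
		return 1;
	if (shared_state_needs_work())    /* shared state: most expensive */
		return 1;
	return 0;
}

int main(void)
{
	printf("pending: %d\n", work_pending());
	return 0;
}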
@@ -3070,10 +3063,7 @@ static void rcu_barrier_func(void *unused)
 	}
 }
 
-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
- */
+/* Orchestrate an RCU barrier, waiting for all RCU callbacks to complete.  */
 static void _rcu_barrier(void)
 {
 	int cpu;
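
The one-line comment compresses the barrier's mechanism: queue a sentinel callback behind every CPU's pending callbacks, then wait until each sentinel has run, at which point everything queued before the barrier must have run too. A single-threaded sketch of that counting scheme (all names illustrative, not the kernel's):

#include <stdio.h>

#define NR_CPUS 4

static int sentinels_outstanding;

static void sentinel_callback(void)  /* runs after a CPU's earlier callbacks */
{
	sentinels_outstanding--;
}

static void barrier_wait(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sentinels_outstanding++;     /* queue one sentinel per CPU */
	while (sentinels_outstanding)
		sentinel_callback();         /* stand-in for the callbacks running */
	printf("all prior callbacks complete\n");
}

int main(void)
{
	barrier_wait();
	return 0;
}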
@@ -3393,7 +3383,7 @@ void rcu_report_dead(unsigned int cpu)
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
-	/* QS for any half-done expedited RCU-sched GP. */
+	/* QS for any half-done expedited grace period. */
 	preempt_disable();
 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 	preempt_enable();
@@ -3482,7 +3472,7 @@ static int rcu_pm_notify(struct notifier_block *self,
 }
 
 /*
- * Spawn the kthreads that handle each RCU flavor's grace periods.
+ * Spawn the kthreads that handle RCU's grace periods.
  */
 static int __init rcu_spawn_gp_kthread(void)
 {
@@ -3545,7 +3535,7 @@ void rcu_scheduler_starting(void)
 }
 
 /*
- * Helper function for rcu_init() that initializes one rcu_state structure.
+ * Helper function for rcu_init() that initializes the rcu_state structure.
  */
 static void __init rcu_init_one(void)
 {
@@ -3707,7 +3697,7 @@ static void __init rcu_init_geometry(void)

 /*
  * Dump out the structure of the rcu_node combining tree associated
- * with the rcu_state structure referenced by rsp.
+ * with the rcu_state structure.
  */
 static void __init rcu_dump_rcu_node_tree(void)
 {