Commit 8b2f63ab authored by Paul E. McKenney
Browse files

rcu: Abstract the dynticks snapshot operation



This commit is the second step towards full abstraction of all accesses to
the ->dynticks counter, implementing the previously open-coded atomic
add of zero in a new rcu_dynticks_snap() function.  This abstraction will
ease changes to the ->dynticks counter operation.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
parent 6563de9d
Loading
Loading
Loading
Loading
+16 −3
Original line number Diff line number Diff line
@@ -281,6 +281,17 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
};

/*
 * Return the current value of the ->dynticks counter, using a full-barrier
 * atomic operation (an atomic add of zero) so that the snapshot can be
 * reliably ordered against, and compared with, earlier and later snapshots.
 */
static int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	return atomic_add_return(0, &rdtp->dynticks);
}

/*
 * Do a double-increment of the ->dynticks counter to emulate a
 * momentary idle-CPU quiescent state.
@@ -1049,7 +1060,9 @@ void rcu_nmi_exit(void)
 */
bool notrace __rcu_is_watching(void)
{
	/* Low-order bit of ->dynticks is set iff this CPU is non-idle. */
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	return atomic_read(&rdtp->dynticks) & 0x1;
}

/**
@@ -1132,7 +1145,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
static int dyntick_save_progress_counter(struct rcu_data *rdp,
					 bool *isidle, unsigned long *maxj)
{
	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
	rcu_sysidle_check_cpu(rdp, isidle, maxj);
	if ((rdp->dynticks_snap & 0x1) == 0) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
@@ -1157,7 +1170,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
	int *rcrmp;
	unsigned int snap;

	curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
	curr = (unsigned int)rcu_dynticks_snap(rdp->dynticks);
	snap = (unsigned int)rdp->dynticks_snap;

	/*
+2 −4
Original line number Diff line number Diff line
@@ -356,10 +356,9 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
		mask_ofl_test = 0;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

			rdp->exp_dynticks_snap =
				atomic_add_return(0, &rdtp->dynticks);
				rcu_dynticks_snap(rdp->dynticks);
			if (raw_smp_processor_id() == cpu ||
			    !(rdp->exp_dynticks_snap & 0x1) ||
			    !(rnp->qsmaskinitnext & rdp->grpmask))
@@ -380,12 +379,11 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

			if (!(mask_ofl_ipi & mask))
				continue;
retry_ipi:
			if (atomic_add_return(0, &rdtp->dynticks) !=
			if (rcu_dynticks_snap(rdp->dynticks) !=
			    rdp->exp_dynticks_snap) {
				mask_ofl_test |= mask;
				continue;