Commit 66e4c33b authored by Paul E. McKenney's avatar Paul E. McKenney
Browse files

rcu: Force tick on for nohz_full CPUs not reaching quiescent states



CPUs running for long time periods in the kernel in nohz_full mode
might leave the scheduling-clock interrupt disabled for the full
duration of their in-kernel execution.  This can (among other things)
delay grace periods.  This commit therefore forces the tick back on
for any nohz_full CPU that is failing to pass through a quiescent state
upon return from interrupt, which the resched_cpu() will induce.

Reported-by: Joel Fernandes <joel@joelfernandes.org>
[ paulmck: Clear ->rcu_forced_tick as reported by Joel Fernandes testing. ]
[ paulmck: Apply Joel Fernandes TICK_DEP_MASK_RCU->TICK_DEP_BIT_RCU fix. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 79ba7ff5
Loading
Loading
Loading
Loading
+31 −7
Original line number Diff line number Diff line
@@ -651,6 +651,12 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
	 */
	if (rdp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2, rdp->dynticks);
		if (tick_nohz_full_cpu(rdp->cpu) &&
		    rdp->dynticks_nmi_nesting == 2 &&
		    rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
			rdp->rcu_forced_tick = true;
			tick_dep_set_cpu(rdp->cpu, TICK_DEP_MASK_RCU);
		}
		WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
			   rdp->dynticks_nmi_nesting - 2);
		return;
@@ -886,6 +892,18 @@ void rcu_irq_enter_irqson(void)
	local_irq_restore(flags);
}

/*
 * If the scheduler-clock interrupt was enabled on a nohz_full CPU
 * in order to get to a quiescent state, disable it.
 */
void rcu_disable_tick_upon_qs(struct rcu_data *rdp)
{
	/* Nothing to do unless this CPU is nohz_full with a forced tick. */
	if (!tick_nohz_full_cpu(rdp->cpu) || !rdp->rcu_forced_tick)
		return;

	/* Drop RCU's tick dependency and record that the tick is no longer forced. */
	tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
	rdp->rcu_forced_tick = false;
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
@@ -1980,6 +1998,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
		if (!offloaded)
			needwake = rcu_accelerate_cbs(rnp, rdp);

		rcu_disable_tick_upon_qs(rdp);
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
		/* ^^^ Released rnp->lock */
		if (needwake)
@@ -2265,6 +2284,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
@@ -2289,8 +2309,11 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
			if ((rnp->qsmask & bit) != 0) {
				if (f(per_cpu_ptr(&rcu_data, cpu)))
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (f(rdp)) {
					mask |= bit;
					rcu_disable_tick_upon_qs(rdp);
				}
			}
		}
		if (mask != 0) {
@@ -3160,6 +3183,7 @@ void rcu_cpu_starting(unsigned int cpu)
	rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
	rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
	if (rnp->qsmask & mask) { /* RCU waiting on incoming CPU? */
		rcu_disable_tick_upon_qs(rdp);
		/* Report QS -after- changing ->qsmaskinitnext! */
		rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
	} else {
+1 −0
Original line number Diff line number Diff line
@@ -181,6 +181,7 @@ struct rcu_data {
	atomic_t dynticks;		/* Even value for idle, else odd. */
	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
	bool rcu_forced_tick;		/* Forced tick to provide QS. */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;			/* All CPU's CBs lazy at idle start? */
	unsigned long last_accelerate;	/* Last jiffy CBs were accelerated. */