Commit 9ea366f6 authored by Paul E. McKenney's avatar Paul E. McKenney Committed by Thomas Gleixner
Browse files

rcu: Make RCU IRQ enter/exit functions rely on in_nmi()



The rcu_nmi_enter_common() and rcu_nmi_exit_common() functions take an
"irq" parameter that indicates whether these functions have been invoked from
an irq handler (irq==true) or an NMI handler (irq==false).

However, recent changes have applied notrace to a few critical functions
such that rcu_nmi_enter_common() and rcu_nmi_exit_common() may now rely on
in_nmi().  Note that in_nmi() works no differently than before, but rather
that tracing is now prohibited in code regions where in_nmi() would
incorrectly report NMI state.

Therefore remove the "irq" parameter and inline rcu_nmi_enter_common() and
rcu_nmi_exit_common() into rcu_nmi_enter() and rcu_nmi_exit(),
respectively.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Link: https://lkml.kernel.org/r/20200505134101.617130349@linutronix.de
parent ff5c4f5c
Loading
Loading
Loading
Loading
+15 −32
Original line number Diff line number Diff line
@@ -664,16 +664,18 @@ noinstr void rcu_user_enter(void)
}
#endif /* CONFIG_NO_HZ_FULL */

/*
/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit_common(), be sure to test
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
static __always_inline void rcu_nmi_exit_common(bool irq)
noinstr void rcu_nmi_exit(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

@@ -704,7 +706,7 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

	if (irq)
	if (!in_nmi())
		rcu_prepare_for_idle();
	instrumentation_end();

@@ -712,21 +714,10 @@ static __always_inline void rcu_nmi_exit_common(bool irq)
	rcu_dynticks_eqs_enter();
	// ... but is no longer watching here.

	if (irq)
	if (!in_nmi())
		rcu_dynticks_task_enter();
}

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void noinstr rcu_nmi_exit(void)
{
	rcu_nmi_exit_common(false);
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
@@ -749,7 +740,7 @@ void noinstr rcu_nmi_exit(void)
void noinstr rcu_irq_exit(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_exit_common(true);
	rcu_nmi_exit();
}

/*
@@ -838,7 +829,7 @@ void noinstr rcu_user_exit(void)
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter_common - inform RCU of entry to NMI context
 * rcu_nmi_enter - inform RCU of entry to NMI context
 * @irq: Is this call from rcu_irq_enter?
 *
 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
@@ -847,10 +838,10 @@ void noinstr rcu_user_exit(void)
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter_common(), be sure to test
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
static __always_inline void rcu_nmi_enter_common(bool irq)
noinstr void rcu_nmi_enter(void)
{
	long incby = 2;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
@@ -868,18 +859,18 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {

		if (irq)
		if (!in_nmi())
			rcu_dynticks_task_exit();

		// RCU is not watching here ...
		rcu_dynticks_eqs_exit();
		// ... but is watching here.

		if (irq)
		if (!in_nmi())
			rcu_cleanup_after_idle();

		incby = 1;
	} else if (irq) {
	} else if (!in_nmi()) {
		instrumentation_begin();
		if (tick_nohz_full_cpu(rdp->cpu) &&
		    rdp->dynticks_nmi_nesting == DYNTICK_IRQ_NONIDLE &&
@@ -913,14 +904,6 @@ static __always_inline void rcu_nmi_enter_common(bool irq)
	barrier();
}

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 */
noinstr void rcu_nmi_enter(void)
{
	rcu_nmi_enter_common(false);
}

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
@@ -946,7 +929,7 @@ noinstr void rcu_nmi_enter(void)
noinstr void rcu_irq_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_nmi_enter_common(true);
	rcu_nmi_enter();
}

/*