Commit 69ea03b5 authored by Peter Zijlstra, committed by Thomas Gleixner
Browse files

hardirq/nmi: Allow nested nmi_enter()



Since there are already a number of sites (ARM64, PowerPC) that effectively
nest nmi_enter(), make the primitive support this before adding even more.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lkml.kernel.org/r/20200505134100.864179229@linutronix.de
parent 28f6bf9e
Loading
Loading
Loading
Loading
+2 −12
Original line number Diff line number Diff line
@@ -251,21 +251,11 @@ asmlinkage __kprobes notrace unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;
	bool do_nmi_exit = false;

	/*
	 * nmi_enter() deals with printk() re-entrance and use of RCU when
	 * RCU believed this CPU was idle. Because critical events can
	 * interrupt normal events, we may already be in_nmi().
	 */
	if (!in_nmi()) {
	nmi_enter();
		do_nmi_exit = true;
	}

	ret = _sdei_handler(regs, arg);

	if (do_nmi_exit)
	nmi_exit();

	return ret;
+2 −6
Original line number Diff line number Diff line
@@ -906,16 +906,12 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)

/*
 * SError (System Error) interrupt entry handler.
 *
 * Runs in NMI-like context: panics if the error is not containable,
 * i.e. it is not a RAS error at all, or it is a fatal RAS error.
 *
 * NOTE(review): this text is a diff rendering with +/- markers lost;
 * the was_in_nmi guard lines below are the ones this commit removes
 * (nmi_enter() becomes nestable), while the bare nmi_enter()/nmi_exit()
 * calls are the post-image — confirm against the upstream tree.
 */
asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
	/* Pre-nesting guard: only enter NMI context if not already in it. */
	const bool was_in_nmi = in_nmi();

	if (!was_in_nmi)
	nmi_enter();

	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);

	if (!was_in_nmi)
	nmi_exit();
}

+6 −16
Original line number Diff line number Diff line
@@ -441,14 +441,8 @@ nonrecoverable:
void system_reset_exception(struct pt_regs *regs)
{
	unsigned long hsrr0, hsrr1;
	bool nested = in_nmi();
	bool saved_hsrrs = false;

	/*
	 * Avoid crashes in case of nested NMI exceptions. Recoverability
	 * is determined by RI and in_nmi
	 */
	if (!nested)
	nmi_enter();

	/*
@@ -521,7 +515,6 @@ out:
		mtspr(SPRN_HSRR1, hsrr1);
	}

	if (!nested)
	nmi_exit();

	/* What should we do here? We could issue a shutdown or hard reset. */
@@ -823,8 +816,7 @@ int machine_check_generic(struct pt_regs *regs)
void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;
	bool nested = in_nmi();
	if (!nested)

	nmi_enter();

	__this_cpu_inc(irq_stat.mce_exceptions);
@@ -851,7 +843,6 @@ void machine_check_exception(struct pt_regs *regs)
	if (check_io_access(regs))
		goto bail;

	if (!nested)
	nmi_exit();

	die("Machine check", regs, SIGBUS);
@@ -863,7 +854,6 @@ void machine_check_exception(struct pt_regs *regs)
	return;

bail:
	if (!nested)
	nmi_exit();
}

+4 −1
Original line number Diff line number Diff line
@@ -65,13 +65,16 @@ extern void irq_exit(void);
#define arch_nmi_exit()		do { } while (0)
#endif

/*
 * nmi_enter() can nest up to 15 times; see NMI_BITS.
 */
#define nmi_enter()						\
	do {							\
		arch_nmi_enter();				\
		printk_nmi_enter();				\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		BUG_ON(in_nmi() == NMI_MASK);			\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		lockdep_hardirq_enter();			\
+2 −2
Original line number Diff line number Diff line
@@ -26,13 +26,13 @@
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00100000
 *             NMI_MASK:	0x00f00000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	1
#define NMI_BITS	4

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)