hardirq/nmi: Allow nested nmi_enter()
author		Peter Zijlstra <peterz@infradead.org>
		Wed, 19 Feb 2020 08:46:47 +0000 (09:46 +0100)
committer	Thomas Gleixner <tglx@linutronix.de>
		Tue, 19 May 2020 13:51:17 +0000 (15:51 +0200)
Since there are already a number of sites (ARM64, PowerPC) that effectively
nest nmi_enter(), make the primitive support this before adding even more.
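
To make the nesting scheme concrete: the NMI nesting depth is tracked in a
dedicated bit field of preempt_count, next to the hardirq and softirq counts.
The following is a minimal user-space model of that accounting (illustrative
only, not kernel code: it mirrors the NMI_BITS/NMI_MASK values from the
include/linux/preempt.h hunk below, replaces BUG_ON() with assert(), and omits
HARDIRQ_OFFSET as well as the printk/lockdep/ftrace/RCU hooks of the real
nmi_enter()/nmi_exit()).

	/*
	 * User-space model of the preempt_count NMI field after this patch:
	 * NMI_BITS grows from 1 to 4, so up to 15 nested nmi_enter() calls
	 * can be accounted for before the (modelled) BUG_ON fires.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define PREEMPT_BITS	8
	#define SOFTIRQ_BITS	8
	#define HARDIRQ_BITS	4
	#define NMI_BITS	4

	#define NMI_SHIFT	(PREEMPT_BITS + SOFTIRQ_BITS + HARDIRQ_BITS)	/* 20 */
	#define NMI_MASK	(((1UL << NMI_BITS) - 1) << NMI_SHIFT)		/* 0x00f00000 */
	#define NMI_OFFSET	(1UL << NMI_SHIFT)

	static unsigned long preempt_count;	/* per-CPU in the kernel */

	#define in_nmi()	(preempt_count & NMI_MASK)

	static void nmi_enter(void)
	{
		assert(in_nmi() != NMI_MASK);	/* models BUG_ON(in_nmi() == NMI_MASK) */
		preempt_count += NMI_OFFSET;	/* real macro also adds HARDIRQ_OFFSET */
	}

	static void nmi_exit(void)
	{
		assert(in_nmi());
		preempt_count -= NMI_OFFSET;
	}

	int main(void)
	{
		nmi_enter();		/* e.g. a normal event */
		nmi_enter();		/* e.g. a critical event preempting it */
		printf("nesting depth: %lu\n", in_nmi() >> NMI_SHIFT);	/* prints 2 */
		nmi_exit();
		nmi_exit();
		return 0;
	}
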

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Marc Zyngier <maz@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lkml.kernel.org/r/20200505134100.864179229@linutronix.de
arch/arm64/kernel/sdei.c
arch/arm64/kernel/traps.c
arch/powerpc/kernel/traps.c
include/linux/hardirq.h
include/linux/preempt.h

diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index d6259da..e396e69 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -251,22 +251,12 @@ asmlinkage __kprobes notrace unsigned long
 __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
 {
        unsigned long ret;
-       bool do_nmi_exit = false;
 
-       /*
-        * nmi_enter() deals with printk() re-entrance and use of RCU when
-        * RCU believed this CPU was idle. Because critical events can
-        * interrupt normal events, we may already be in_nmi().
-        */
-       if (!in_nmi()) {
-               nmi_enter();
-               do_nmi_exit = true;
-       }
+       nmi_enter();
 
        ret = _sdei_handler(regs, arg);
 
-       if (do_nmi_exit)
-               nmi_exit();
+       nmi_exit();
 
        return ret;
 }
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index cf402be..c728f16 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -906,17 +906,13 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
 
 asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
 {
-       const bool was_in_nmi = in_nmi();
-
-       if (!was_in_nmi)
-               nmi_enter();
+       nmi_enter();
 
        /* non-RAS errors are not containable */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
                arm64_serror_panic(regs, esr);
 
-       if (!was_in_nmi)
-               nmi_exit();
+       nmi_exit();
 }
 
 asmlinkage void enter_from_user_mode(void)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 3fca222..b44dd75 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -441,15 +441,9 @@ nonrecoverable:
 void system_reset_exception(struct pt_regs *regs)
 {
        unsigned long hsrr0, hsrr1;
-       bool nested = in_nmi();
        bool saved_hsrrs = false;
 
-       /*
-        * Avoid crashes in case of nested NMI exceptions. Recoverability
-        * is determined by RI and in_nmi
-        */
-       if (!nested)
-               nmi_enter();
+       nmi_enter();
 
        /*
         * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
@@ -521,8 +515,7 @@ out:
                mtspr(SPRN_HSRR1, hsrr1);
        }
 
-       if (!nested)
-               nmi_exit();
+       nmi_exit();
 
        /* What should we do here? We could issue a shutdown or hard reset. */
 }
@@ -823,9 +816,8 @@ int machine_check_generic(struct pt_regs *regs)
 void machine_check_exception(struct pt_regs *regs)
 {
        int recover = 0;
-       bool nested = in_nmi();
-       if (!nested)
-               nmi_enter();
+
+       nmi_enter();
 
        __this_cpu_inc(irq_stat.mce_exceptions);
 
@@ -851,8 +843,7 @@ void machine_check_exception(struct pt_regs *regs)
        if (check_io_access(regs))
                goto bail;
 
-       if (!nested)
-               nmi_exit();
+       nmi_exit();
 
        die("Machine check", regs, SIGBUS);
 
@@ -863,8 +854,7 @@ void machine_check_exception(struct pt_regs *regs)
        return;
 
 bail:
-       if (!nested)
-               nmi_exit();
+       nmi_exit();
 }
 
 void SMIException(struct pt_regs *regs)
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 7c8b82f..a043ad8 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -65,13 +65,16 @@ extern void irq_exit(void);
 #define arch_nmi_exit()                do { } while (0)
 #endif
 
+/*
+ * nmi_enter() can nest up to 15 times; see NMI_BITS.
+ */
 #define nmi_enter()                                            \
        do {                                                    \
                arch_nmi_enter();                               \
                printk_nmi_enter();                             \
                lockdep_off();                                  \
                ftrace_nmi_enter();                             \
-               BUG_ON(in_nmi());                               \
+               BUG_ON(in_nmi() == NMI_MASK);                   \
                preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
                rcu_nmi_enter();                                \
                lockdep_hardirq_enter();                        \
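
The practical effect for callers is visible in the arch hunks above: the
"if (!in_nmi())" guards go away and nesting is expressed directly, with the NMI
field of preempt_count keeping the depth. Below is an illustration of the
resulting call pattern, reusing the user-space model sketched near the top of
this commit; handle_normal_event() and handle_critical_event() are made-up
names standing in for, e.g., the SDEI normal and critical event paths.

	/* Illustration only: nested nmi_enter()/nmi_exit(), using the model above. */
	static void handle_critical_event(void)
	{
		nmi_enter();		/* depth 2: in_nmi() was already non-zero */
		/* ... handle the critical event ... */
		nmi_exit();
	}

	static void handle_normal_event(void)
	{
		nmi_enter();		/* depth 1 */
		/* ... a critical event may preempt us anywhere in here ... */
		handle_critical_event();
		nmi_exit();
	}
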
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index bc3f1ae..7d9c1c0 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
  *         PREEMPT_MASK:       0x000000ff
  *         SOFTIRQ_MASK:       0x0000ff00
  *         HARDIRQ_MASK:       0x000f0000
- *             NMI_MASK:       0x00100000
+ *             NMI_MASK:       0x00f00000
  * PREEMPT_NEED_RESCHED:       0x80000000
  */
 #define PREEMPT_BITS   8
 #define SOFTIRQ_BITS   8
 #define HARDIRQ_BITS   4
-#define NMI_BITS       1
+#define NMI_BITS       4
 
 #define PREEMPT_SHIFT  0
 #define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
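
Worked out: the four NMI bits sit directly above the preempt (8), softirq (8)
and hardirq (4) fields, i.e. at bits 20-23, so NMI_MASK becomes
0xf << 20 = 0x00f00000 and the field can account for (1 << 4) - 1 = 15 nested
nmi_enter() calls. That is exactly the limit the new
BUG_ON(in_nmi() == NMI_MASK) check and the "can nest up to 15 times" comment in
the include/linux/hardirq.h hunk refer to.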