x86/entry: Implement user mode C entry points for #DB and #MCE
author Thomas Gleixner <tglx@linutronix.de>
Tue, 25 Feb 2020 22:33:29 +0000 (23:33 +0100)
committer Thomas Gleixner <tglx@linutronix.de>
Thu, 11 Jun 2020 13:15:00 +0000 (15:15 +0200)
The MCE entry point uses the same mechanism as the IST entry point for
now. For #DB, split out the inner workings and keep just the
nmi_enter()/nmi_exit() magic in the IST variant. Fix up the ASM code to
emit the proper noist_##cfunc call.
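
For reference, the noist_##cfunc name is what ties the fixed-up ASM path
to the new C entry points: the *_USER variants expand to a handler
prefixed with noist_, which is exactly the symbol the from-user-mode
idtentry_body invocation calls. A minimal standalone sketch of that
token-pasting scheme (toy code only; the real macros live in
arch/x86/include/asm/idtentry.h and additionally carry noinstr/__visible
annotations):

  #include <stdio.h>

  struct pt_regs { int user_mode; };    /* stand-in for the real struct */

  /* Toy models of DEFINE_IDTENTRY_RAW / DEFINE_IDTENTRY_NOIST */
  #define DEFINE_IDTENTRY_RAW(func)     void func(struct pt_regs *regs)
  #define DEFINE_IDTENTRY_NOIST(func)   DEFINE_IDTENTRY_RAW(noist_##func)

  /* IST entry point: what the kernel mode (paranoid) ASM path calls */
  DEFINE_IDTENTRY_RAW(exc_machine_check)
  {
          printf("kernel mode: nmi_enter()/handler/nmi_exit()\n");
  }

  /* User mode entry point: what "idtentry_body ... noist_\cfunc" resolves to */
  DEFINE_IDTENTRY_NOIST(exc_machine_check)
  {
          printf("user mode: idtentry_enter()/handler/idtentry_exit()\n");
  }

  int main(void)
  {
          struct pt_regs regs = { .user_mode = 1 };

          /* The ASM stub makes this choice based on the saved CS */
          if (regs.user_mode)
                  noist_exc_machine_check(&regs);
          else
                  exc_machine_check(&regs);
          return 0;
  }

The real DEFINE_IDTENTRY_MCE_USER()/DEFINE_IDTENTRY_DEBUG_USER() used
below follow the same naming pattern, so the ASM stub only needs the
noist_ prefix to select the user mode variant.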

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Link: https://lkml.kernel.org/r/20200505135315.177564104@linutronix.de
arch/x86/entry/entry_64.S
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/traps.c

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index eeb4285..d302839 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -657,7 +657,7 @@ SYM_CODE_START(\asmsym)
 
        /* Switch to the regular task stack and use the noist entry point */
 .Lfrom_usermode_switch_stack_\@:
-       idtentry_body vector \cfunc, has_error_code=0 sane=1
+       idtentry_body vector noist_\cfunc, has_error_code=0 sane=1
 
 _ASM_NOKPROBE(\asmsym)
 SYM_CODE_END(\asmsym)
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 3177652..a72c013 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -1904,24 +1904,50 @@ static void unexpected_machine_check(struct pt_regs *regs)
 /* Call the installed machine check handler for this CPU setup. */
 void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
 
-DEFINE_IDTENTRY_MCE(exc_machine_check)
+static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 {
+       /*
+        * Only required when from kernel mode. See
+        * mce_check_crashing_cpu() for details.
+        */
        if (machine_check_vector == do_machine_check &&
            mce_check_crashing_cpu())
                return;
 
-       if (user_mode(regs))
-               idtentry_enter(regs);
-       else
-               nmi_enter();
+       nmi_enter();
+       machine_check_vector(regs);
+       nmi_exit();
+}
 
+static __always_inline void exc_machine_check_user(struct pt_regs *regs)
+{
+       idtentry_enter(regs);
        machine_check_vector(regs);
+       idtentry_exit(regs);
+}
 
+#ifdef CONFIG_X86_64
+/* MCE hit kernel mode */
+DEFINE_IDTENTRY_MCE(exc_machine_check)
+{
+       exc_machine_check_kernel(regs);
+}
+
+/* The user mode variant. */
+DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
+{
+       exc_machine_check_user(regs);
+}
+#else
+/* 32bit unified entry point */
+DEFINE_IDTENTRY_MCE(exc_machine_check)
+{
        if (user_mode(regs))
-               idtentry_exit(regs);
+               exc_machine_check_user(regs);
        else
-               nmi_exit();
+               exc_machine_check_kernel(regs);
 }
+#endif
 
 /*
  * Called for each booted CPU to set up machine checks.
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 569408a..4f248c5 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -775,20 +775,12 @@ static __always_inline void debug_exit(unsigned long dr7)
  *
  * May run on IST stack.
  */
-DEFINE_IDTENTRY_DEBUG(exc_debug)
+static noinstr void handle_debug(struct pt_regs *regs, unsigned long dr6)
 {
        struct task_struct *tsk = current;
-       unsigned long dr6, dr7;
        int user_icebp = 0;
        int si_code;
 
-       debug_enter(&dr6, &dr7);
-
-       if (user_mode(regs))
-               idtentry_enter(regs);
-       else
-               nmi_enter();
-
        /*
         * The SDM says "The processor clears the BTF flag when it
         * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
@@ -800,7 +792,7 @@ DEFINE_IDTENTRY_DEBUG(exc_debug)
                     is_sysenter_singlestep(regs))) {
                dr6 &= ~DR_STEP;
                if (!dr6)
-                       goto exit;
+                       return;
                /*
                 * else we might have gotten a single-step trap and hit a
                 * watchpoint at the same time, in which case we should fall
@@ -821,12 +813,12 @@ DEFINE_IDTENTRY_DEBUG(exc_debug)
 
 #ifdef CONFIG_KPROBES
        if (kprobe_debug_handler(regs))
-               goto exit;
+               return;
 #endif
 
        if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, 0,
                       SIGTRAP) == NOTIFY_STOP)
-               goto exit;
+               return;
 
        /*
         * Let others (NMI) know that the debug stack is in use
@@ -842,7 +834,7 @@ DEFINE_IDTENTRY_DEBUG(exc_debug)
                                 X86_TRAP_DB);
                cond_local_irq_disable(regs);
                debug_stack_usage_dec();
-               goto exit;
+               return;
        }
 
        if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
@@ -861,14 +853,60 @@ DEFINE_IDTENTRY_DEBUG(exc_debug)
                send_sigtrap(regs, 0, si_code);
        cond_local_irq_disable(regs);
        debug_stack_usage_dec();
+}
+
+static __always_inline void exc_debug_kernel(struct pt_regs *regs,
+                                            unsigned long dr6)
+{
+       nmi_enter();
+       handle_debug(regs, dr6);
+       nmi_exit();
+}
+
+static __always_inline void exc_debug_user(struct pt_regs *regs,
+                                          unsigned long dr6)
+{
+       idtentry_enter(regs);
+       handle_debug(regs, dr6);
+       idtentry_exit(regs);
+}
+
+#ifdef CONFIG_X86_64
+/* IST stack entry */
+DEFINE_IDTENTRY_DEBUG(exc_debug)
+{
+       unsigned long dr6, dr7;
+
+       debug_enter(&dr6, &dr7);
+       exc_debug_kernel(regs, dr6);
+       debug_exit(dr7);
+}
+
+/* User entry, runs on regular task stack */
+DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
+{
+       unsigned long dr6, dr7;
+
+       debug_enter(&dr6, &dr7);
+       exc_debug_user(regs, dr6);
+       debug_exit(dr7);
+}
+#else
+/* 32 bit does not have separate entry points. */
+DEFINE_IDTENTRY_DEBUG(exc_debug)
+{
+       unsigned long dr6, dr7;
+
+       debug_enter(&dr6, &dr7);
 
-exit:
        if (user_mode(regs))
-               idtentry_exit(regs);
+               exc_debug_user(regs, dr6);
        else
-               nmi_exit();
+               exc_debug_kernel(regs, dr6);
+
        debug_exit(dr7);
 }
+#endif
 
 /*
  * Note that we play around with the 'TS' bit in an attempt to get