x86/reboot: VMCLEAR active VMCSes before emergency reboot
author Sean Christopherson <seanjc@google.com>
Fri, 21 Jul 2023 20:18:41 +0000 (13:18 -0700)
committer Sean Christopherson <seanjc@google.com>
Thu, 3 Aug 2023 22:37:14 +0000 (15:37 -0700)
VMCLEAR active VMCSes before any emergency reboot, not just if the kernel
may kexec into a new kernel after a crash.  Per Intel's SDM, the VMX
architecture doesn't require the CPU to flush the VMCS cache on INIT.  If
an emergency reboot doesn't RESET CPUs, cached VMCSes could theoretically
be retained and written back to memory only after the new kernel boots,
i.e. the stale write-backs could effectively corrupt the new kernel's
memory.

Opportunistically drop the explicit "= NULL" initialization of the global
callback pointer while moving its definition, to make checkpatch happy.

Cc: Andrew Cooper <Andrew.Cooper3@citrix.com>
Link: https://lore.kernel.org/r/20230721201859.2307736-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
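
For context, the mechanism being moved is an RCU-published function
pointer: kvm_intel installs its VMCLEAR helper at module load, and the
emergency path samples the pointer under rcu_read_lock() so the callback
cannot be unloaded mid-call. A condensed sketch, assembled from the hunks
below (kernel context assumed, not compilable stand-alone):

    /* reboot.c: invoked on the emergency/crash path. */
    static inline void cpu_crash_vmclear_loaded_vmcss(void)
    {
            crash_vmclear_fn *do_vmclear;

            rcu_read_lock();
            do_vmclear = rcu_dereference(crash_vmclear_loaded_vmcss);
            if (do_vmclear)
                    do_vmclear();
            rcu_read_unlock();
    }

    /* vmx.c: publish the callback at module init ... */
    rcu_assign_pointer(crash_vmclear_loaded_vmcss,
                       crash_vmclear_local_loaded_vmcss);

    /* ... and unpublish at exit; synchronize_rcu() waits out any
     * reader that already dereferenced the old value. */
    RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
    synchronize_rcu();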
arch/x86/include/asm/kexec.h
arch/x86/include/asm/reboot.h
arch/x86/kernel/crash.c
arch/x86/kernel/reboot.c
arch/x86/kvm/vmx/vmx.c

arch/x86/include/asm/kexec.h
index 5b77bbc..8190469 100644
@@ -205,8 +205,6 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image);
 #endif
 #endif
 
-typedef void crash_vmclear_fn(void);
-extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
 extern void kdump_nmi_shootdown_cpus(void);
 
 #endif /* __ASSEMBLY__ */
arch/x86/include/asm/reboot.h
index 9177b43..dc20172 100644
@@ -25,6 +25,8 @@ void __noreturn machine_real_restart(unsigned int type);
 #define MRR_BIOS       0
 #define MRR_APM                1
 
+typedef void crash_vmclear_fn(void);
+extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
 void cpu_emergency_disable_virtualization(void);
 
 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
arch/x86/kernel/crash.c
index cdd92ab..54cd959 100644
@@ -48,27 +48,6 @@ struct crash_memmap_data {
        unsigned int type;
 };
 
-/*
- * This is used to VMCLEAR all VMCSs loaded on the
- * processor. And when loading kvm_intel module, the
- * callback function pointer will be assigned.
- *
- * protected by rcu.
- */
-crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
-EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
-
-static inline void cpu_crash_vmclear_loaded_vmcss(void)
-{
-       crash_vmclear_fn *do_vmclear_operation = NULL;
-
-       rcu_read_lock();
-       do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
-       if (do_vmclear_operation)
-               do_vmclear_operation();
-       rcu_read_unlock();
-}
-
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
 static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
@@ -76,11 +55,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
        crash_save_cpu(regs, cpu);
 
        /*
-        * VMCLEAR VMCSs loaded on all cpus if needed.
-        */
-       cpu_crash_vmclear_loaded_vmcss();
-
-       /*
         * Disable Intel PT to stop its logging
         */
        cpu_emergency_stop_pt();
@@ -133,11 +107,6 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 
        crash_smp_send_stop();
 
-       /*
-        * VMCLEAR VMCSs loaded on this cpu if needed.
-        */
-       cpu_crash_vmclear_loaded_vmcss();
-
        cpu_emergency_disable_virtualization();
 
        /*
arch/x86/kernel/reboot.c
index 3adbe97..3fa4c67 100644
@@ -787,6 +787,26 @@ void machine_crash_shutdown(struct pt_regs *regs)
 }
 #endif
 
+/*
+ * This is used to VMCLEAR all VMCSs loaded on the
+ * processor. And when loading kvm_intel module, the
+ * callback function pointer will be assigned.
+ *
+ * protected by rcu.
+ */
+crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+
+static inline void cpu_crash_vmclear_loaded_vmcss(void)
+{
+       crash_vmclear_fn *do_vmclear_operation = NULL;
+
+       rcu_read_lock();
+       do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
+       if (do_vmclear_operation)
+               do_vmclear_operation();
+       rcu_read_unlock();
+}
 
 /* This is the CPU performing the emergency shutdown work. */
 int crashing_cpu = -1;
@@ -798,6 +818,8 @@ int crashing_cpu = -1;
  */
 void cpu_emergency_disable_virtualization(void)
 {
+       cpu_crash_vmclear_loaded_vmcss();
+
        cpu_emergency_vmxoff();
        cpu_emergency_svm_disable();
 }
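
Note the ordering in the function above: the VMCLEAR callback must run
before cpu_emergency_vmxoff(), because VMCLEAR is a VMX instruction and
faults with #UD once the CPU has left VMX operation. Annotated for
clarity (the comments here are editorial, not part of the patch):

    void cpu_emergency_disable_virtualization(void)
    {
            /* VMCLEAR while this CPU is still in VMX operation;
             * after VMXOFF, VMCLEAR would #UD. No-op if kvm_intel
             * never published a callback. */
            cpu_crash_vmclear_loaded_vmcss();

            cpu_emergency_vmxoff();
            cpu_emergency_svm_disable();
    }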
arch/x86/kvm/vmx/vmx.c
index e6d1ce2..7535147 100644
@@ -41,7 +41,7 @@
 #include <asm/idtentry.h>
 #include <asm/io.h>
 #include <asm/irq_remapping.h>
-#include <asm/kexec.h>
+#include <asm/reboot.h>
 #include <asm/perf_event.h>
 #include <asm/mmu_context.h>
 #include <asm/mshyperv.h>
@@ -725,7 +725,6 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
        return ret;
 }
 
-#ifdef CONFIG_KEXEC_CORE
 static void crash_vmclear_local_loaded_vmcss(void)
 {
        int cpu = raw_smp_processor_id();
@@ -735,7 +734,6 @@ static void crash_vmclear_local_loaded_vmcss(void)
                            loaded_vmcss_on_cpu_link)
                vmcs_clear(v->vmcs);
 }
-#endif /* CONFIG_KEXEC_CORE */
 
 static void __loaded_vmcs_clear(void *arg)
 {
@@ -8573,10 +8571,9 @@ static void __vmx_exit(void)
 {
        allow_smaller_maxphyaddr = false;
 
-#ifdef CONFIG_KEXEC_CORE
        RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
        synchronize_rcu();
-#endif
+
        vmx_cleanup_l1d_flush();
 }
 
@@ -8623,10 +8620,9 @@ static int __init vmx_init(void)
                pi_init_cpu(cpu);
        }
 
-#ifdef CONFIG_KEXEC_CORE
        rcu_assign_pointer(crash_vmclear_loaded_vmcss,
                           crash_vmclear_local_loaded_vmcss);
-#endif
+
        vmx_check_vmcs12_offsets();
 
        /*