x86/l1tf: Introduce vmx status variable
authorThomas Gleixner <tglx@linutronix.de>
Fri, 13 Jul 2018 14:23:16 +0000 (16:23 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 15 Aug 2018 16:12:55 +0000 (18:12 +0200)
commit 72c6d2db64fa18c996ece8f06e499509e6c9a37e upstream

Store the effective mitigation of VMX in a status variable and use it to
report the VMX state in the l1tf sysfs file.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20180713142322.433098358@linutronix.de
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/vmx.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/vmx.c

index 7c300299e12eaf862e48715e4bd6e23cd42574da..6b9e556acef7beea6405e0d796d82a746839bbd7 100644 (file)
@@ -571,4 +571,13 @@ enum vm_instruction_error_number {
        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
 };
 
+enum vmx_l1d_flush_state {
+       VMENTER_L1D_FLUSH_AUTO,
+       VMENTER_L1D_FLUSH_NEVER,
+       VMENTER_L1D_FLUSH_COND,
+       VMENTER_L1D_FLUSH_ALWAYS,
+};
+
+extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+
 #endif
index 4dab6fb9db9b204b18ddd75bed312e18d788b5d8..511e6d63d9dd4b2c5f7cffff6137a721c1f7404e 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/processor-flags.h>
 #include <asm/fpu/internal.h>
 #include <asm/msr.h>
+#include <asm/vmx.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 #include <asm/pgtable.h>
@@ -636,6 +637,12 @@ void x86_spec_ctrl_setup_ap(void)
 
 #undef pr_fmt
 #define pr_fmt(fmt)    "L1TF: " fmt
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
+EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+#endif
+
 static void __init l1tf_select_mitigation(void)
 {
        u64 half_pa;
@@ -665,6 +672,32 @@ static void __init l1tf_select_mitigation(void)
 
 #ifdef CONFIG_SYSFS
 
+#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
+
+#if IS_ENABLED(CONFIG_KVM_INTEL)
+static const char *l1tf_vmx_states[] = {
+       [VMENTER_L1D_FLUSH_AUTO]        = "auto",
+       [VMENTER_L1D_FLUSH_NEVER]       = "vulnerable",
+       [VMENTER_L1D_FLUSH_COND]        = "conditional cache flushes",
+       [VMENTER_L1D_FLUSH_ALWAYS]      = "cache flushes",
+};
+
+static ssize_t l1tf_show_state(char *buf)
+{
+       if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
+               return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+
+       return sprintf(buf, "%s; VMX: SMT %s, L1D %s\n", L1TF_DEFAULT_MSG,
+                      cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled",
+                      l1tf_vmx_states[l1tf_vmx_mitigation]);
+}
+#else
+static ssize_t l1tf_show_state(char *buf)
+{
+       return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
+}
+#endif
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
 {
@@ -692,9 +725,8 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 
        case X86_BUG_L1TF:
                if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
-                       return sprintf(buf, "Mitigation: Page Table Inversion\n");
+                       return l1tf_show_state(buf);
                break;
-
        default:
                break;
        }
index 1ae158a6d306dc4816b283565edb1ccf3aa3c7cd..860f73746d83d6510a25fbd383deb03d10600ec0 100644 (file)
@@ -196,19 +196,13 @@ extern const ulong vmx_return;
 
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
 
-/* These MUST be in sync with vmentry_l1d_param order. */
-enum vmx_l1d_flush_state {
-       VMENTER_L1D_FLUSH_NEVER,
-       VMENTER_L1D_FLUSH_COND,
-       VMENTER_L1D_FLUSH_ALWAYS,
-};
-
 static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
 
 static const struct {
        const char *option;
        enum vmx_l1d_flush_state cmd;
 } vmentry_l1d_param[] = {
+       {"auto",        VMENTER_L1D_FLUSH_AUTO},
        {"never",       VMENTER_L1D_FLUSH_NEVER},
        {"cond",        VMENTER_L1D_FLUSH_COND},
        {"always",      VMENTER_L1D_FLUSH_ALWAYS},
@@ -12533,8 +12527,12 @@ static int __init vmx_setup_l1d_flush(void)
 {
        struct page *page;
 
+       if (!boot_cpu_has_bug(X86_BUG_L1TF))
+               return 0;
+
+       l1tf_vmx_mitigation = vmentry_l1d_flush;
+
        if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
-           !boot_cpu_has_bug(X86_BUG_L1TF) ||
            vmx_l1d_use_msr_save_list())
                return 0;
 
@@ -12549,12 +12547,14 @@ static int __init vmx_setup_l1d_flush(void)
        return 0;
 }
 
-static void vmx_free_l1d_flush_pages(void)
+static void vmx_cleanup_l1d_flush(void)
 {
        if (vmx_l1d_flush_pages) {
                free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
                vmx_l1d_flush_pages = NULL;
        }
+       /* Restore state so sysfs ignores VMX */
+       l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 }
 
 static int __init vmx_init(void)
@@ -12568,7 +12568,7 @@ static int __init vmx_init(void)
        r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
                     __alignof__(struct vcpu_vmx), THIS_MODULE);
        if (r) {
-               vmx_free_l1d_flush_pages();
+               vmx_cleanup_l1d_flush();
                return r;
        }
 
@@ -12589,7 +12589,7 @@ static void __exit vmx_exit(void)
 
        kvm_exit();
 
-       vmx_free_l1d_flush_pages();
+       vmx_cleanup_l1d_flush();
 }
 
 module_init(vmx_init)