x86/bugs: Report Intel retbleed vulnerability
author     Peter Zijlstra <peterz@infradead.org>
           Fri, 24 Jun 2022 11:48:58 +0000 (13:48 +0200)
committer  Borislav Petkov <bp@suse.de>
           Mon, 27 Jun 2022 08:33:59 +0000 (10:33 +0200)
Skylake suffers from RSB underflow speculation issues; report this
vulnerability and its mitigation (spectre_v2=ibrs).

  [jpoimboe: cleanups, eibrs]
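
An illustrative example of the resulting user-visible state (an assumption
about the sysfs plumbing added earlier in this series, not part of this
patch's diff): on an affected part booted with spectre_v2=ibrs, one would
expect roughly

    $ cat /sys/devices/system/cpu/vulnerabilities/retbleed
    Mitigation: IBRS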

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
arch/x86/include/asm/msr-index.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c

diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index d27e058..059c7e5 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -93,6 +93,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
 #define ARCH_CAP_RDCL_NO               BIT(0)  /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL              BIT(1)  /* Enhanced IBRS support */
+#define ARCH_CAP_RSBA                  BIT(2)  /* RET may use alternative branch predictors */
 #define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3)  /* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO                        BIT(4)  /*
                                                 * Not susceptible to Speculative Store Bypass
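
Not part of this hunk, but for context: a minimal C sketch of how the new
capability bit is consumed at boot, assuming the existing boot_cpu_has() and
rdmsrl() helpers (this mirrors what the common.c hunk below does via
x86_read_arch_cap_msr()):

	u64 ia32_cap = 0;

	/* MSR 0x10a exists only when enumerated via CPUID.(EAX=7):EDX[29]. */
	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	/* RSBA: on RSB underflow, RET may use alternative (trainable) predictors. */
	if (ia32_cap & ARCH_CAP_RSBA)
		setup_force_cpu_bug(X86_BUG_RETBLEED);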
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c546a9e..05f29db 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -790,12 +790,17 @@ static int __init nospectre_v1_cmdline(char *str)
 }
 early_param("nospectre_v1", nospectre_v1_cmdline);
 
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+       SPECTRE_V2_NONE;
+
 #undef pr_fmt
 #define pr_fmt(fmt)     "RETBleed: " fmt
 
 enum retbleed_mitigation {
        RETBLEED_MITIGATION_NONE,
        RETBLEED_MITIGATION_UNRET,
+       RETBLEED_MITIGATION_IBRS,
+       RETBLEED_MITIGATION_EIBRS,
 };
 
 enum retbleed_mitigation_cmd {
@@ -807,6 +812,8 @@ enum retbleed_mitigation_cmd {
 const char * const retbleed_strings[] = {
        [RETBLEED_MITIGATION_NONE]      = "Vulnerable",
        [RETBLEED_MITIGATION_UNRET]     = "Mitigation: untrained return thunk",
+       [RETBLEED_MITIGATION_IBRS]      = "Mitigation: IBRS",
+       [RETBLEED_MITIGATION_EIBRS]     = "Mitigation: Enhanced IBRS",
 };
 
 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
@@ -849,6 +856,7 @@ early_param("retbleed", retbleed_parse_cmdline);
 
 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
 #define RETBLEED_COMPILER_MSG "WARNING: kernel not compiled with RETPOLINE or -mfunction-return capable compiler!\n"
+#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
 
 static void __init retbleed_select_mitigation(void)
 {
@@ -865,12 +873,15 @@ static void __init retbleed_select_mitigation(void)
 
        case RETBLEED_CMD_AUTO:
        default:
-               if (!boot_cpu_has_bug(X86_BUG_RETBLEED))
-                       break;
-
                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
                    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
                        retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+
+               /*
+                * The Intel mitigation (IBRS) was already selected in
+                * spectre_v2_select_mitigation().
+                */
+
                break;
        }
 
@@ -900,15 +911,31 @@ static void __init retbleed_select_mitigation(void)
                break;
        }
 
+       /*
+        * Let IBRS trump all on Intel without affecting the effects of the
+        * retbleed= cmdline option.
+        */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
+               switch (spectre_v2_enabled) {
+               case SPECTRE_V2_IBRS:
+                       retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
+                       break;
+               case SPECTRE_V2_EIBRS:
+               case SPECTRE_V2_EIBRS_RETPOLINE:
+               case SPECTRE_V2_EIBRS_LFENCE:
+                       retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
+                       break;
+               default:
+                       pr_err(RETBLEED_INTEL_MSG);
+               }
+       }
+
        pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
 }
 
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
-       SPECTRE_V2_NONE;
-
 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
        SPECTRE_V2_USER_NONE;
 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4089c17..75a5c72 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1237,24 +1237,24 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPPINGS(BROADWELL_G,     X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(BROADWELL_X,     X86_STEPPING_ANY,               MMIO),
        VULNBL_INTEL_STEPPINGS(BROADWELL,       X86_STEPPING_ANY,               SRBDS),
-       VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(SKYLAKE_L,       X86_STEPPING_ANY,               SRBDS),
        VULNBL_INTEL_STEPPINGS(SKYLAKE_X,       BIT(3) | BIT(4) | BIT(6) |
-                                               BIT(7) | BIT(0xB),              MMIO),
-       VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO),
+                                               BIT(7) | BIT(0xB),              MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPINGS(0x3, 0x3),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(SKYLAKE,         X86_STEPPING_ANY,               SRBDS),
-       VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x9, 0xC),        SRBDS | MMIO),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x9, 0xC),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(KABYLAKE_L,      X86_STEPPINGS(0x0, 0x8),        SRBDS),
-       VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x9, 0xD),        SRBDS | MMIO),
+       VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x9, 0xD),        SRBDS | MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(KABYLAKE,        X86_STEPPINGS(0x0, 0x8),        SRBDS),
-       VULNBL_INTEL_STEPPINGS(ICELAKE_L,       X86_STEPPINGS(0x5, 0x5),        MMIO | MMIO_SBDS),
+       VULNBL_INTEL_STEPPINGS(ICELAKE_L,       X86_STEPPINGS(0x5, 0x5),        MMIO | MMIO_SBDS | RETBLEED),
        VULNBL_INTEL_STEPPINGS(ICELAKE_D,       X86_STEPPINGS(0x1, 0x1),        MMIO),
        VULNBL_INTEL_STEPPINGS(ICELAKE_X,       X86_STEPPINGS(0x4, 0x6),        MMIO),
-       VULNBL_INTEL_STEPPINGS(COMETLAKE,       BIT(2) | BIT(3) | BIT(5),       MMIO | MMIO_SBDS),
-       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS),
-       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x0, 0x0),        MMIO),
-       VULNBL_INTEL_STEPPINGS(LAKEFIELD,       X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS),
-       VULNBL_INTEL_STEPPINGS(ROCKETLAKE,      X86_STEPPINGS(0x1, 0x1),        MMIO),
+       VULNBL_INTEL_STEPPINGS(COMETLAKE,       BIT(2) | BIT(3) | BIT(5),       MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(COMETLAKE_L,     X86_STEPPINGS(0x0, 0x0),        MMIO | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(LAKEFIELD,       X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS | RETBLEED),
+       VULNBL_INTEL_STEPPINGS(ROCKETLAKE,      X86_STEPPINGS(0x1, 0x1),        MMIO | RETBLEED),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,    X86_STEPPINGS(0x1, 0x1),        MMIO | MMIO_SBDS),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,  X86_STEPPING_ANY,               MMIO),
        VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,  X86_STEPPINGS(0x0, 0x0),        MMIO | MMIO_SBDS),
@@ -1364,7 +1364,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
            !arch_cap_mmio_immune(ia32_cap))
                setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
 
-       if (cpu_matches(cpu_vuln_blacklist, RETBLEED))
+       if ((cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA)))
                setup_force_cpu_bug(X86_BUG_RETBLEED);
 
        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
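
Taken together, on an affected Intel CPU with the default retbleed=auto, the
reported state follows the spectre_v2 selection (illustrative; strings as
added above, prefixed by the "RETBleed: " pr_fmt):

	spectre_v2=ibrs    -> "RETBleed: Mitigation: IBRS"
	spectre_v2=eibrs   -> "RETBleed: Mitigation: Enhanced IBRS"
	spectre_v2=off     -> "RETBleed: Vulnerable", preceded in dmesg by the
	                      RETBLEED_INTEL_MSG warning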