Merge branch 'x86-iopl-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[platform/kernel/linux-rpi.git] / arch / x86 / kernel / cpu / common.c
index 6f6ca6b..baa2fed 100644
@@ -562,8 +562,9 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
        return NULL;            /* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
-__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
+/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
+__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
+__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
 
 void load_percpu_segment(int cpu)
 {
@@ -1013,13 +1014,14 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #endif
 }
 
-#define NO_SPECULATION BIT(0)
-#define NO_MELTDOWN    BIT(1)
-#define NO_SSB         BIT(2)
-#define NO_L1TF        BIT(3)
-#define NO_MDS         BIT(4)
-#define MSBDS_ONLY     BIT(5)
-#define NO_SWAPGS      BIT(6)
+#define NO_SPECULATION         BIT(0)
+#define NO_MELTDOWN            BIT(1)
+#define NO_SSB                 BIT(2)
+#define NO_L1TF                BIT(3)
+#define NO_MDS                 BIT(4)
+#define MSBDS_ONLY             BIT(5)
+#define NO_SWAPGS              BIT(6)
+#define NO_ITLB_MULTIHIT       BIT(7)
 
 #define VULNWL(_vendor, _family, _model, _whitelist)   \
        { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1040,27 +1042,27 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
        VULNWL(NSC,     5, X86_MODEL_ANY,       NO_SPECULATION),
 
        /* Intel Family 6 */
-       VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION),
-       VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION),
-       VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION),
-       VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION),
-       VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION),
-
-       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_SILVERMONT_D,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_SALTWELL,             NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SALTWELL_TABLET,      NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SALTWELL_MID,         NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_BONNELL,              NO_SPECULATION | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_BONNELL_MID,          NO_SPECULATION | NO_ITLB_MULTIHIT),
+
+       VULNWL_INTEL(ATOM_SILVERMONT,           NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SILVERMONT_D,         NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_SILVERMONT_MID,       NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_AIRMONT,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(XEON_PHI_KNL,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(XEON_PHI_KNM,              NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
        VULNWL_INTEL(CORE_YONAH,                NO_SSB),
 
-       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_AIRMONT_NP,           NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_AIRMONT_MID,          NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_AIRMONT_NP,           NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
-       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_GOLDMONT_D,           NO_MDS | NO_L1TF | NO_SWAPGS),
-       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS),
+       VULNWL_INTEL(ATOM_GOLDMONT,             NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_GOLDMONT_D,           NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_INTEL(ATOM_GOLDMONT_PLUS,        NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
        /*
         * Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1070,15 +1072,17 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
         * good enough for our purposes.
         */
 
+       VULNWL_INTEL(ATOM_TREMONT_D,            NO_ITLB_MULTIHIT),
+
        /* AMD Family 0xf - 0x12 */
-       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
-       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(0x0f,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x10,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x11,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_AMD(0x12,        NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
        /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
-       VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
+       VULNWL_AMD(X86_FAMILY_ANY,      NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+       VULNWL_HYGON(X86_FAMILY_ANY,    NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
        {}
 };
 
@@ -1089,19 +1093,30 @@ static bool __init cpu_matches(unsigned long which)
        return m && !!(m->driver_data & which);
 }
 
-static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+u64 x86_read_arch_cap_msr(void)
 {
        u64 ia32_cap = 0;
 
+       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+       return ia32_cap;
+}
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+       u64 ia32_cap = x86_read_arch_cap_msr();
+
+       /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
+       if (!cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+               setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
+
        if (cpu_matches(NO_SPECULATION))
                return;
 
        setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
        setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
-       if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
-               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
-
        if (!cpu_matches(NO_SSB) && !(ia32_cap & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
@@ -1118,6 +1133,21 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        if (!cpu_matches(NO_SWAPGS))
                setup_force_cpu_bug(X86_BUG_SWAPGS);
 
+       /*
+        * When the CPU is not mitigated for TAA (TAA_NO=0), set the TAA bug when:
+        *      - TSX is supported or
+        *      - TSX_CTRL is present
+        *
+        * The TSX_CTRL check is needed for cases where TSX could be disabled
+        * before the kernel boots, e.g. by kexec.
+        * The TSX_CTRL check alone is not sufficient when the microcode update
+        * is not present, or when running as a guest that doesn't get TSX_CTRL.
+        */
+       if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+           (cpu_has(c, X86_FEATURE_RTM) ||
+            (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+               setup_force_cpu_bug(X86_BUG_TAA);
+
        if (cpu_matches(NO_MELTDOWN))
                return;
 
@@ -1551,6 +1581,8 @@ void __init identify_boot_cpu(void)
 #endif
        cpu_detect_tlb(&boot_cpu_data);
        setup_cr_pinning();
+
+       tsx_init();
 }
 
 void identify_secondary_cpu(struct cpuinfo_x86 *c)
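
The __aligned(sizeof(unsigned long)) annotation in the first hunk matters because setup_force_cpu_cap() and setup_clear_cpu_cap() hand these __u32 arrays to the atomic bitops as unsigned long *, so on x86-64 every bit operation is an 8-byte access; a merely 4-byte-aligned array could let such a locked access straddle a cache-line boundary and cause a split lock. Below is a minimal user-space sketch of that word/bit arithmetic, illustrative only: the NCAPINTS/NBUGINTS values and helper names are stand-ins, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

#define NCAPINTS 19     /* stand-in value; the real one lives in cpufeatures.h */
#define NBUGINTS 1

/* Same alignment the patch adds, so the casted accesses below stay within one 8-byte word */
static uint32_t caps_set[NCAPINTS + NBUGINTS]
        __attribute__((aligned(sizeof(unsigned long))));

/* Non-atomic model of set_bit(): identical word/bit split, minus the LOCK prefix */
static void set_bit_long(unsigned int nr, unsigned long *addr)
{
        addr[nr / (8 * sizeof(unsigned long))] |= 1UL << (nr % (8 * sizeof(unsigned long)));
}

int main(void)
{
        /*
         * Bit 33 lands in the 8-byte word covering caps_set[0] and caps_set[1];
         * the kernel performs the same __u32-to-unsigned-long cast and builds
         * with -fno-strict-aliasing, which keeps this pattern well defined.
         */
        set_bit_long(33, (unsigned long *)caps_set);
        printf("caps_set[1] = %#x, alignment mod 8 = %zu\n",
               caps_set[1], (size_t)((uintptr_t)caps_set % 8));
        return 0;
}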
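
Both new bug-bit decisions in cpu_set_bug_bits() key off MSR_IA32_ARCH_CAPABILITIES, which the patch now reads once through the new x86_read_arch_cap_msr() helper; that is what lets the ITLB_MULTIHIT check run ahead of the NO_SPECULATION early return. A condensed user-space sketch of the two conditions follows, illustrative only: the function names are invented here, while the bit positions mirror msr-index.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_PSCHANGE_MC_NO (1ULL << 6)   /* no machine check on page-size change */
#define ARCH_CAP_TSX_CTRL_MSR   (1ULL << 7)   /* MSR_IA32_TSX_CTRL is present */
#define ARCH_CAP_TAA_NO         (1ULL << 8)   /* CPU not affected by TSX Async Abort */

/* Mirrors: !cpu_matches(NO_ITLB_MULTIHIT) && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO) */
static bool has_itlb_multihit_bug(uint64_t ia32_cap, bool whitelisted)
{
        return !whitelisted && !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO);
}

/* Mirrors the TAA condition: TAA_NO clear, and RTM supported or TSX_CTRL present */
static bool has_taa_bug(uint64_t ia32_cap, bool has_rtm)
{
        if (ia32_cap & ARCH_CAP_TAA_NO)
                return false;
        return has_rtm || (ia32_cap & ARCH_CAP_TSX_CTRL_MSR);
}

int main(void)
{
        /* A CPU with RTM enumerated and TAA_NO clear gets X86_BUG_TAA */
        printf("TAA bug: %d\n", has_taa_bug(0, true));
        /* A whitelisted part (e.g. the Atoms above) never gets X86_BUG_ITLB_MULTIHIT */
        printf("ITLB_MULTIHIT bug: %d\n", has_itlb_multihit_bug(0, true));
        return 0;
}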