x86/cpu: Switch to arch_cpu_finalize_init()
authorThomas Gleixner <tglx@linutronix.de>
Tue, 1 Aug 2023 14:36:24 +0000 (16:36 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 Aug 2023 18:03:46 +0000 (20:03 +0200)
commit 7c7077a72674402654f3291354720cd73cdf649e upstream

check_bugs() is a dumping ground for finalizing the CPU bringup. Only parts of
it have to do with actual CPU bugs.

Split it apart into arch_cpu_finalize_init() and cpu_select_mitigations().

Fixup the bogus 32bit comments while at it.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230613224545.019583869@linutronix.de
Signed-off-by: Daniel Sneddon <daniel.sneddon@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/Kconfig
arch/x86/include/asm/bugs.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h

index b3d5706579d43248f476619828bb966270b8b7fb..3867f102cde13642e0b26674f1e82f98587fd39a 100644 (file)
@@ -69,6 +69,7 @@ config X86
        select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE
        select ARCH_HAS_ACPI_TABLE_UPGRADE      if ACPI
        select ARCH_HAS_CACHE_LINE_SIZE
+       select ARCH_HAS_CPU_FINALIZE_INIT
        select ARCH_HAS_CURRENT_STACK_POINTER
        select ARCH_HAS_DEBUG_VIRTUAL
        select ARCH_HAS_DEBUG_VM_PGTABLE        if !X86_PAE
index 92ae283899409df20a0865b1734d1864feccb5a6..f25ca2d709d404f2a49ec3935a7a5a1658f81470 100644 (file)
@@ -4,8 +4,6 @@
 
 #include <asm/processor.h>
 
-extern void check_bugs(void);
-
 #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
 int ppro_with_ram_bug(void);
 #else
index f54992887491e034048cadb8668546cb53a28a47..d3732e97ad140003f95e29bec3408a131c80f371 100644 (file)
@@ -9,7 +9,6 @@
  *     - Andrew D. Balsa (code cleanup).
  */
 #include <linux/init.h>
-#include <linux/utsname.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/nospec.h>
@@ -27,8 +26,6 @@
 #include <asm/msr.h>
 #include <asm/vmx.h>
 #include <asm/paravirt.h>
-#include <asm/alternative.h>
-#include <asm/set_memory.h>
 #include <asm/intel-family.h>
 #include <asm/e820/api.h>
 #include <asm/hypervisor.h>
@@ -124,21 +121,8 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
 
-void __init check_bugs(void)
+void __init cpu_select_mitigations(void)
 {
-       identify_boot_cpu();
-
-       /*
-        * identify_boot_cpu() initialized SMT support information, let the
-        * core code know.
-        */
-       cpu_smt_check_topology();
-
-       if (!IS_ENABLED(CONFIG_SMP)) {
-               pr_info("CPU: ");
-               print_cpu_info(&boot_cpu_data);
-       }
-
        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
@@ -175,39 +159,6 @@ void __init check_bugs(void)
        md_clear_select_mitigation();
        srbds_select_mitigation();
        l1d_flush_select_mitigation();
-
-       arch_smt_update();
-
-#ifdef CONFIG_X86_32
-       /*
-        * Check whether we are able to run this kernel safely on SMP.
-        *
-        * - i386 is no longer supported.
-        * - In order to run on anything without a TSC, we need to be
-        *   compiled for a i486.
-        */
-       if (boot_cpu_data.x86 < 4)
-               panic("Kernel requires i486+ for 'invlpg' and other features");
-
-       init_utsname()->machine[1] =
-               '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
-       alternative_instructions();
-
-       fpu__init_check_bugs();
-#else /* CONFIG_X86_64 */
-       alternative_instructions();
-
-       /*
-        * Make sure the first 2MB area is not mapped by huge pages
-        * There are typically fixed size MTRRs in there and overlapping
-        * MTRRs into large pages causes slow downs.
-        *
-        * Right now we don't do that with gbpages because there seems
-        * very little benefit for that case.
-        */
-       if (!direct_gbpages)
-               set_memory_4k((unsigned long)__va(0), 1);
-#endif
 }
 
 /*
index d298d70f74ce66b28b57b6c8b24b7e409f9301d9..20c3c3ea69760aa9d23b15839ab052572d84a5d7 100644 (file)
 #include <linux/kprobes.h>
 #include <linux/kgdb.h>
 #include <linux/smp.h>
+#include <linux/cpu.h>
 #include <linux/io.h>
 #include <linux/syscore_ops.h>
 #include <linux/pgtable.h>
+#include <linux/utsname.h>
 
+#include <asm/alternative.h>
 #include <asm/cmdline.h>
 #include <asm/stackprotector.h>
 #include <asm/perf_event.h>
@@ -58,6 +61,7 @@
 #include <asm/intel-family.h>
 #include <asm/cpu_device_id.h>
 #include <asm/uv/uv.h>
+#include <asm/set_memory.h>
 #include <asm/sigframe.h>
 #include <asm/traps.h>
 #include <asm/sev.h>
@@ -2369,3 +2373,52 @@ void arch_smt_update(void)
        /* Check whether IPI broadcasting can be enabled */
        apic_smt_update();
 }
+
+void __init arch_cpu_finalize_init(void)
+{
+       identify_boot_cpu();
+
+       /*
+        * identify_boot_cpu() initialized SMT support information, let the
+        * core code know.
+        */
+       cpu_smt_check_topology();
+
+       if (!IS_ENABLED(CONFIG_SMP)) {
+               pr_info("CPU: ");
+               print_cpu_info(&boot_cpu_data);
+       }
+
+       cpu_select_mitigations();
+
+       arch_smt_update();
+
+       if (IS_ENABLED(CONFIG_X86_32)) {
+               /*
+                * Check whether this is a real i386 which is not longer
+                * supported and fixup the utsname.
+                */
+               if (boot_cpu_data.x86 < 4)
+                       panic("Kernel requires i486+ for 'invlpg' and other features");
+
+               init_utsname()->machine[1] =
+                       '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
+       }
+
+       alternative_instructions();
+
+       if (IS_ENABLED(CONFIG_X86_64)) {
+               /*
+                * Make sure the first 2MB area is not mapped by huge pages
+                * There are typically fixed size MTRRs in there and overlapping
+                * MTRRs into large pages causes slow downs.
+                *
+                * Right now we don't do that with gbpages because there seems
+                * very little benefit for that case.
+                */
+               if (!direct_gbpages)
+                       set_memory_4k((unsigned long)__va(0), 1);
+       } else {
+               fpu__init_check_bugs();
+       }
+}
index 7c9b5893c30aba0d3eb21c758264292d26a2e767..61dbb9b216e6f803a55a6293d312b3ca0ed5ef41 100644 (file)
@@ -79,6 +79,7 @@ extern void detect_ht(struct cpuinfo_x86 *c);
 extern void check_null_seg_clears_base(struct cpuinfo_x86 *c);
 
 unsigned int aperfmperf_get_khz(int cpu);
+void cpu_select_mitigations(void);
 
 extern void x86_spec_ctrl_setup_ap(void);
 extern void update_srbds_msr(void);