x86/mtrr: Move 32-bit code from mtrr.c to legacy.c
authorJuergen Gross <jgross@suse.com>
Tue, 2 May 2023 12:09:23 +0000 (14:09 +0200)
committerBorislav Petkov (AMD) <bp@alien8.de>
Thu, 1 Jun 2023 13:04:33 +0000 (15:04 +0200)
There is some code in mtrr.c which is relevant for old 32-bit CPUs
only. Move it to a new source file, legacy.c.

While moving mtrr_init_finialize(), fix the spelling of its name to
mtrr_init_finalize().

Suggested-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Tested-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/20230502120931.20719-9-jgross@suse.com
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
arch/x86/kernel/cpu/mtrr/Makefile
arch/x86/kernel/cpu/mtrr/legacy.c [new file with mode: 0644]
arch/x86/kernel/cpu/mtrr/mtrr.c
arch/x86/kernel/cpu/mtrr/mtrr.h

index cc4f9f1..aee4bc5 100644 (file)
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y          := mtrr.o if.o generic.o cleanup.o
-obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
+obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o legacy.o
 
diff --git a/arch/x86/kernel/cpu/mtrr/legacy.c b/arch/x86/kernel/cpu/mtrr/legacy.c
new file mode 100644 (file)
index 0000000..7d379fb
--- /dev/null
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/types.h>
+#include <linux/syscore_ops.h>
+#include <asm/cpufeature.h>
+#include <asm/mtrr.h>
+#include <asm/processor.h>
+#include "mtrr.h"
+
+void mtrr_set_if(void)
+{
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               /* Pre-Athlon (K6) AMD CPU MTRRs */
+               if (cpu_feature_enabled(X86_FEATURE_K6_MTRR))
+                       mtrr_if = &amd_mtrr_ops;
+               break;
+       case X86_VENDOR_CENTAUR:
+               if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR))
+                       mtrr_if = &centaur_mtrr_ops;
+               break;
+       case X86_VENDOR_CYRIX:
+               if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR))
+                       mtrr_if = &cyrix_mtrr_ops;
+               break;
+       default:
+               break;
+       }
+}
+
+/*
+ * The suspend/resume methods are only for CPUs without MTRR. CPUs using generic
+ * MTRR driver don't require this.
+ */
+struct mtrr_value {
+       mtrr_type       ltype;
+       unsigned long   lbase;
+       unsigned long   lsize;
+};
+
+static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
+
+static int mtrr_save(void)
+{
+       int i;
+
+       for (i = 0; i < num_var_ranges; i++) {
+               mtrr_if->get(i, &mtrr_value[i].lbase,
+                               &mtrr_value[i].lsize,
+                               &mtrr_value[i].ltype);
+       }
+       return 0;
+}
+
+static void mtrr_restore(void)
+{
+       int i;
+
+       for (i = 0; i < num_var_ranges; i++) {
+               if (mtrr_value[i].lsize) {
+                       mtrr_if->set(i, mtrr_value[i].lbase,
+                                    mtrr_value[i].lsize,
+                                    mtrr_value[i].ltype);
+               }
+       }
+}
+
+static struct syscore_ops mtrr_syscore_ops = {
+       .suspend        = mtrr_save,
+       .resume         = mtrr_restore,
+};
+
+void mtrr_register_syscore(void)
+{
+       /*
+        * The CPU has no MTRR and seems to not support SMP. They have
+        * specific drivers, we use a tricky method to support
+        * suspend/resume for them.
+        *
+        * TBD: is there any system with such CPU which supports
+        * suspend/resume? If no, we should remove the code.
+        */
+       register_syscore_ops(&mtrr_syscore_ops);
+}
index 007ecca..b7793a4 100644 (file)
@@ -541,49 +541,6 @@ int arch_phys_wc_index(int handle)
 }
 EXPORT_SYMBOL_GPL(arch_phys_wc_index);
 
-/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
- * MTRR driver doesn't require this
- */
-struct mtrr_value {
-       mtrr_type       ltype;
-       unsigned long   lbase;
-       unsigned long   lsize;
-};
-
-static struct mtrr_value mtrr_value[MTRR_MAX_VAR_RANGES];
-
-static int mtrr_save(void)
-{
-       int i;
-
-       for (i = 0; i < num_var_ranges; i++) {
-               mtrr_if->get(i, &mtrr_value[i].lbase,
-                               &mtrr_value[i].lsize,
-                               &mtrr_value[i].ltype);
-       }
-       return 0;
-}
-
-static void mtrr_restore(void)
-{
-       int i;
-
-       for (i = 0; i < num_var_ranges; i++) {
-               if (mtrr_value[i].lsize) {
-                       mtrr_if->set(i, mtrr_value[i].lbase,
-                                    mtrr_value[i].lsize,
-                                    mtrr_value[i].ltype);
-               }
-       }
-}
-
-
-
-static struct syscore_ops mtrr_syscore_ops = {
-       .suspend        = mtrr_save,
-       .resume         = mtrr_restore,
-};
-
 int __initdata changed_by_mtrr_cleanup;
 
 /**
@@ -611,27 +568,10 @@ void __init mtrr_bp_init(void)
                return;
        }
 
-       if (generic_mtrrs) {
+       if (generic_mtrrs)
                mtrr_if = &generic_mtrr_ops;
-       } else {
-               switch (boot_cpu_data.x86_vendor) {
-               case X86_VENDOR_AMD:
-                       /* Pre-Athlon (K6) AMD CPU MTRRs */
-                       if (cpu_feature_enabled(X86_FEATURE_K6_MTRR))
-                               mtrr_if = &amd_mtrr_ops;
-                       break;
-               case X86_VENDOR_CENTAUR:
-                       if (cpu_feature_enabled(X86_FEATURE_CENTAUR_MCR))
-                               mtrr_if = &centaur_mtrr_ops;
-                       break;
-               case X86_VENDOR_CYRIX:
-                       if (cpu_feature_enabled(X86_FEATURE_CYRIX_ARR))
-                               mtrr_if = &cyrix_mtrr_ops;
-                       break;
-               default:
-                       break;
-               }
-       }
+       else
+               mtrr_set_if();
 
        if (mtrr_enabled()) {
                /* Get the number of variable MTRR ranges. */
@@ -673,7 +613,7 @@ void mtrr_save_state(void)
        smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
 }
 
-static int __init mtrr_init_finialize(void)
+static int __init mtrr_init_finalize(void)
 {
        if (!mtrr_enabled())
                return 0;
@@ -684,16 +624,8 @@ static int __init mtrr_init_finialize(void)
                return 0;
        }
 
-       /*
-        * The CPU has no MTRR and seems to not support SMP. They have
-        * specific drivers, we use a tricky method to support
-        * suspend/resume for them.
-        *
-        * TBD: is there any system with such CPU which supports
-        * suspend/resume? If no, we should remove the code.
-        */
-       register_syscore_ops(&mtrr_syscore_ops);
+       mtrr_register_syscore();
 
        return 0;
 }
-subsys_initcall(mtrr_init_finialize);
+subsys_initcall(mtrr_init_finalize);
index 6f3312b..e1e8864 100644 (file)
@@ -61,6 +61,13 @@ extern u32 phys_hi_rsvd;
 void mtrr_state_warn(void);
 const char *mtrr_attrib_to_str(int x);
 void mtrr_wrmsr(unsigned, unsigned, unsigned);
+#ifdef CONFIG_X86_32
+void mtrr_set_if(void);
+void mtrr_register_syscore(void);
+#else
+static inline void mtrr_set_if(void) { }
+static inline void mtrr_register_syscore(void) { }
+#endif
 
 /* CPU specific mtrr_ops vectors. */
 extern const struct mtrr_ops amd_mtrr_ops;