x86/mtrr: Split MTRR-specific handling from cache dis/enabling
author: Juergen Gross <jgross@suse.com>
Wed, 2 Nov 2022 07:47:02 +0000 (08:47 +0100)
committer: Borislav Petkov <bp@suse.de>
Thu, 10 Nov 2022 12:12:44 +0000 (13:12 +0100)
Split the MTRR-specific actions from cache_disable() and cache_enable()
into new functions mtrr_disable() and mtrr_enable().

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20221102074713.21493-6-jgross@suse.com
Signed-off-by: Borislav Petkov <bp@suse.de>
arch/x86/include/asm/mtrr.h
arch/x86/kernel/cpu/mtrr/generic.c

index 76d7260..12a16ca 100644 (file)
@@ -48,6 +48,8 @@ extern void mtrr_aps_init(void);
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
+void mtrr_disable(void);
+void mtrr_enable(void);
 #  else
 static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
 {
@@ -87,6 +89,8 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 #define set_mtrr_aps_delayed_init() do {} while (0)
 #define mtrr_aps_init() do {} while (0)
 #define mtrr_bp_restore() do {} while (0)
+#define mtrr_disable() do {} while (0)
+#define mtrr_enable() do {} while (0)
 #  endif
 
 #ifdef CONFIG_COMPAT
index 2f3fc28..0db0770 100644 (file)
@@ -716,6 +716,21 @@ static unsigned long set_mtrr_state(void)
        return change_mask;
 }
 
+void mtrr_disable(void)
+{
+       /* Save MTRR state */
+       rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+
+       /* Disable MTRRs, and set the default type to uncached */
+       mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+}
+
+void mtrr_enable(void)
+{
+       /* Intel (P6) standard MTRRs */
+       mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+}
+
 /*
  * Disable and enable caches. Needed for changing MTRRs and the PAT MSR.
  *
@@ -764,11 +779,8 @@ void cache_disable(void) __acquires(cache_disable_lock)
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
        flush_tlb_local();
 
-       /* Save MTRR state */
-       rdmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
-
-       /* Disable MTRRs, and set the default type to uncached */
-       mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+       if (cpu_feature_enabled(X86_FEATURE_MTRR))
+               mtrr_disable();
 
        /* Again, only flush caches if we have to. */
        if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
@@ -781,8 +793,8 @@ void cache_enable(void) __releases(cache_disable_lock)
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
        flush_tlb_local();
 
-       /* Intel (P6) standard MTRRs */
-       mtrr_wrmsr(MSR_MTRRdefType, deftype_lo, deftype_hi);
+       if (cpu_feature_enabled(X86_FEATURE_MTRR))
+               mtrr_enable();
 
        /* Enable caches */
        write_cr0(read_cr0() & ~X86_CR0_CD);