From a153f254e5cdf8fa3a1df90a6ffed3063fede154 Mon Sep 17 00:00:00 2001
From: Juergen Gross
Date: Tue, 2 May 2023 14:09:20 +0200
Subject: [PATCH] x86/xen: Set MTRR state when running as Xen PV initial domain

When running as Xen PV initial domain (aka dom0), MTRRs are disabled
by the hypervisor, but the system should nevertheless use correct
cache memory types. This has always kind of worked, as disabled MTRRs
resulted in disabled PAT, too, so that the kernel avoided code paths
resulting in inconsistencies. This bypassed all of the sanity checks
the kernel is doing with enabled MTRRs in order to avoid memory
mappings with conflicting memory types.

This has been changed recently, leading to PAT being accepted as
enabled while MTRRs stayed disabled. The result is that
mtrr_type_lookup() no longer honors all memory type requests, but
returns WB even if UC- was requested. This led to driver failures
during initialization of some devices.

In reality MTRRs are still in effect, but they are under complete
control of the Xen hypervisor. It is possible, however, to retrieve
the MTRR settings from the hypervisor.

In order to fix those problems, overwrite the MTRR state via
mtrr_overwrite_state() with the MTRR data from the hypervisor, if the
system is running as a Xen dom0.

Fixes: 72cbc8f04fe2 ("x86/PAT: Have pat_enabled() properly reflect state when running on Xen")
Signed-off-by: Juergen Gross
Signed-off-by: Borislav Petkov (AMD)
Reviewed-by: Boris Ostrovsky
Tested-by: Michael Kelley
Link: https://lore.kernel.org/r/20230502120931.20719-6-jgross@suse.com
---
 arch/x86/xen/enlighten_pv.c | 52 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 093b78c..8732b85 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -68,6 +68,7 @@
 #include
 #include
 #include
+#include <asm/mtrr.h>
 #include
 #include
 #include
@@ -119,6 +120,54 @@ static int __init parse_xen_msr_safe(char *str)
 }
 early_param("xen_msr_safe", parse_xen_msr_safe);
 
+/* Get MTRR settings from Xen and put them into mtrr_state. */
+static void __init xen_set_mtrr_data(void)
+{
+#ifdef CONFIG_MTRR
+	struct xen_platform_op op = {
+		.cmd = XENPF_read_memtype,
+		.interface_version = XENPF_INTERFACE_VERSION,
+	};
+	unsigned int reg;
+	unsigned long mask;
+	uint32_t eax, width;
+	static struct mtrr_var_range var[MTRR_MAX_VAR_RANGES] __initdata;
+
+	/* Get physical address width (only 64-bit cpus supported). */
+	width = 36;
+	eax = cpuid_eax(0x80000000);
+	if ((eax >> 16) == 0x8000 && eax >= 0x80000008) {
+		eax = cpuid_eax(0x80000008);
+		width = eax & 0xff;
+	}
+
+	for (reg = 0; reg < MTRR_MAX_VAR_RANGES; reg++) {
+		op.u.read_memtype.reg = reg;
+		if (HYPERVISOR_platform_op(&op))
+			break;
+
+		/*
+		 * Only called in dom0, which has all RAM PFNs mapped at
+		 * RAM MFNs, and all PCI space etc. is identity mapped.
+		 * This means we can treat MFN == PFN regarding MTRR settings.
+		 */
+		var[reg].base_lo = op.u.read_memtype.type;
+		var[reg].base_lo |= op.u.read_memtype.mfn << PAGE_SHIFT;
+		var[reg].base_hi = op.u.read_memtype.mfn >> (32 - PAGE_SHIFT);
+		mask = ~((op.u.read_memtype.nr_mfns << PAGE_SHIFT) - 1);
+		mask &= (1UL << width) - 1;
+		if (mask)
+			mask |= MTRR_PHYSMASK_V;
+		var[reg].mask_lo = mask;
+		var[reg].mask_hi = mask >> 32;
+	}
+
+	/* Only overwrite MTRR state if any MTRR could be got from Xen. */
+	if (reg)
+		mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE);
+#endif
+}
+
 static void __init xen_pv_init_platform(void)
 {
 	/* PV guests can't operate virtio devices without grants. */
@@ -135,6 +184,9 @@ static void __init xen_pv_init_platform(void)
 
 	/* pvclock is in shared info area */
 	xen_init_time_ops();
+
+	if (xen_initial_domain())
+		xen_set_mtrr_data();
 }
 
 static void __init xen_pv_guest_late_init(void)
-- 
2.7.4
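
As an aside, not part of the patch: the least obvious part of xen_set_mtrr_data() is how a Xen-reported (mfn, nr_mfns, type) range is turned into the variable-range MTRR base/mask pair handed to mtrr_overwrite_state(). The stand-alone C sketch below reproduces that bit manipulation in user space so it can be inspected in isolation. It assumes 4 KiB pages, uses a single 64-bit value where the kernel splits the result into mask_lo/mask_hi, and the DEMO_* constants and demo range are made up for the example; the kernel takes the real values from its own headers.

/*
 * Sketch only: mimics the per-slot base/mask encoding done in
 * xen_set_mtrr_data().  The DEMO_* constants and the values in main()
 * are assumptions for this example.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT	12		/* 4 KiB pages */
#define DEMO_PHYSMASK_V	(1ULL << 11)	/* valid bit in the PHYSMASK register */

/* Encode a range of nr_mfns pages starting at mfn into a PHYSBASE/PHYSMASK pair. */
static void encode_var_mtrr(uint64_t mfn, uint64_t nr_mfns, uint8_t type,
			    unsigned int width, uint64_t *base, uint64_t *mask)
{
	/* PHYSBASE: physical base address with the memory type in the low bits. */
	*base = (mfn << DEMO_PAGE_SHIFT) | type;

	/* PHYSMASK: ones above the range size, clipped to the physical address width. */
	*mask = ~((nr_mfns << DEMO_PAGE_SHIFT) - 1) & ((1ULL << width) - 1);

	/* A zero mask marks an unused slot; only a real range gets the valid bit. */
	if (*mask)
		*mask |= DEMO_PHYSMASK_V;
}

int main(void)
{
	uint64_t base, mask;

	/* 256 MiB uncachable (type 0) range at 0xf0000000, 36-bit address width. */
	encode_var_mtrr(0xf0000000ULL >> DEMO_PAGE_SHIFT,
			0x10000000ULL >> DEMO_PAGE_SHIFT, 0, 36, &base, &mask);
	printf("PHYSBASE=%#llx PHYSMASK=%#llx\n",
	       (unsigned long long)base, (unsigned long long)mask);
	return 0;
}

For that demo range the program prints PHYSBASE=0xf0000000 PHYSMASK=0xff0000800: the mask has ones in bits 35..28 (everything above the 256 MiB range, up to the 36-bit width) plus the valid bit (bit 11), which matches what the loop in the patch stores into mask_lo/mask_hi.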