From: Sean Christopherson <seanjc@google.com>
Date: Wed, 20 Apr 2022 00:27:47 +0000 (+0000)
Subject: KVM: x86/mmu: Use enable_mmio_caching to track if MMIO caching is enabled
X-Git-Tag: v6.6.17~7255^2~89
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8b9e74bfbf8c7020498a9ea600bd4c0f1915134d;p=platform%2Fkernel%2Flinux-rpi.git

KVM: x86/mmu: Use enable_mmio_caching to track if MMIO caching is enabled

Clear enable_mmio_caching if hardware can't support MMIO caching and use
the dedicated flag to detect if MMIO caching is enabled instead of
assuming shadow_mmio_value==0 means MMIO caching is disabled.  TDX will
use a zero value even when caching is enabled, and is_mmio_spte() isn't
so hot that it needs to avoid an extra memory access, i.e. there's no
reason to be super clever.  And the clever approach may not even be more
performant, e.g. gcc-11 lands the extra check on a non-zero value inline,
but puts the enable_mmio_caching check out-of-line, i.e. avoids the few
extra uops for non-MMIO SPTEs.

Cc: Isaku Yamahata
Cc: Kai Huang
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220420002747.3287931-1-seanjc@google.com>
Signed-off-by: Paolo Bonzini
---

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7778558..7b08841 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3036,7 +3036,7 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa
 	 * and only if L1's MAXPHYADDR is inaccurate with respect to
 	 * the hardware's).
 	 */
-	if (unlikely(!shadow_mmio_value) ||
+	if (unlikely(!enable_mmio_caching) ||
 	    unlikely(fault->gfn > kvm_mmu_max_gfn())) {
 		*ret_val = RET_PF_EMULATE;
 		return true;
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index aab7857..3d611f0 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -19,7 +19,7 @@
 #include <asm/memtype.h>
 #include <asm/vmx.h>
 
-static bool __read_mostly enable_mmio_caching = true;
+bool __read_mostly enable_mmio_caching = true;
 module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
 
 u64 __read_mostly shadow_host_writable_mask;
@@ -351,6 +351,9 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
 	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
 		mmio_value = 0;
 
+	if (!mmio_value)
+		enable_mmio_caching = false;
+
 	shadow_mmio_value = mmio_value;
 	shadow_mmio_mask = mmio_mask;
 	shadow_mmio_access_mask = access_mask;
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index e4abeb5..43eceb82 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -5,6 +5,8 @@
 
 #include "mmu_internal.h"
 
+extern bool __read_mostly enable_mmio_caching;
+
 /*
  * A MMU present SPTE is backed by actual memory and may or may not be present
  * in hardware.  E.g. MMIO SPTEs are not considered present.  Use bit 11, as it
@@ -204,7 +206,7 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
 static inline bool is_mmio_spte(u64 spte)
 {
 	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
-	       likely(shadow_mmio_value);
+	       likely(enable_mmio_caching);
 }
 
 static inline bool is_shadow_present_pte(u64 pte)
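
For readers following the changelog's reasoning, below is a minimal,
self-contained C sketch of the pattern being fixed; it is hypothetical
illustration, not KVM code. It shows why a sentinel check on
shadow_mmio_value == 0 breaks once zero becomes a legal MMIO value (the
TDX case cited above), while a dedicated enable_mmio_caching flag keeps
"caching disabled" and "value happens to be zero" distinct.

/*
 * Hypothetical, simplified model of the change above -- not KVM code.
 * Build: cc -o demo demo.c && ./demo
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t shadow_mmio_value;
static uint64_t shadow_mmio_mask;
static bool enable_mmio_caching = true;

/* Old scheme: infer "caching disabled" from shadow_mmio_value == 0. */
static bool is_mmio_spte_old(uint64_t spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
	       shadow_mmio_value;
}

/* New scheme: consult the dedicated flag instead of the sentinel. */
static bool is_mmio_spte_new(uint64_t spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
	       enable_mmio_caching;
}

int main(void)
{
	/* TDX-like setup: caching is enabled, but the MMIO value is zero. */
	shadow_mmio_mask = 0x7;
	shadow_mmio_value = 0;

	uint64_t spte = 0;	/* matches mask/value, i.e. is an MMIO SPTE */

	/* The sentinel check misclassifies: prints 0 despite caching being on. */
	printf("old scheme: %d\n", is_mmio_spte_old(spte));
	/* The flag check classifies correctly: prints 1. */
	printf("new scheme: %d\n", is_mmio_spte_new(spte));
	return 0;
}

With a non-zero shadow_mmio_value the two schemes agree; the flag only
changes behavior for the zero-value case, which is exactly what the
patch needs for TDX.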