KVM: x86/mmu: Skip filling the gfn cache for guaranteed direct MMU topups
Author:     Sean Christopherson <sean.j.christopherson@intel.com>
AuthorDate: Fri, 3 Jul 2020 02:35:36 +0000 (19:35 -0700)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Thu, 9 Jul 2020 17:29:41 +0000 (13:29 -0400)
Don't bother filling the gfn array cache when the caller is using a fully
direct MMU, i.e. will never need gfn arrays for its shadow pages.

Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-13-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/mmu/mmu.c
 arch/x86/kvm/mmu/paging_tmpl.h

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a9fb427..3d477a3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1102,7 +1102,7 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
        }
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
 {
        int r;
 
@@ -1115,10 +1115,12 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
                                   PT64_ROOT_MAX_LEVEL);
        if (r)
                return r;
-       r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
-                                  PT64_ROOT_MAX_LEVEL);
-       if (r)
-               return r;
+       if (maybe_indirect) {
+               r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+                                          PT64_ROOT_MAX_LEVEL);
+               if (r)
+                       return r;
+       }
        return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                      PT64_ROOT_MAX_LEVEL);
 }
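
Taken together, the hunk above leaves the helper reading roughly as follows. The top-up calls above the removed lines sit outside the hunk's context, so their cache names and capacity arguments are reconstructed from this series and should be treated as assumptions:

	static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
	{
		int r;

		/* Assumed head of the function (not visible in the hunk). */
		r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
					   1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
		if (r)
			return r;
		r = mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
					   PT64_ROOT_MAX_LEVEL);
		if (r)
			return r;
		/* Only indirect (shadow) MMUs need gfn arrays for their pages. */
		if (maybe_indirect) {
			r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
						   PT64_ROOT_MAX_LEVEL);
			if (r)
				return r;
		}
		return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					      PT64_ROOT_MAX_LEVEL);
	}
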
@@ -4132,7 +4134,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        if (fast_page_fault(vcpu, gpa, error_code))
                return RET_PF_RETRY;
 
-       r = mmu_topup_memory_caches(vcpu);
+       r = mmu_topup_memory_caches(vcpu, false);
        if (r)
                return r;
 
@@ -5168,7 +5170,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 {
        int r;
 
-       r = mmu_topup_memory_caches(vcpu);
+       r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
        if (r)
                goto out;
        r = mmu_alloc_roots(vcpu);
@@ -5362,7 +5364,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         * or not since pte prefetch is skipped if it does not have
         * enough objects in the cache.
         */
-       mmu_topup_memory_caches(vcpu);
+       mmu_topup_memory_caches(vcpu, true);
 
        spin_lock(&vcpu->kvm->mmu_lock);
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index e3e9f92..0172a94 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -816,7 +816,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
                return RET_PF_EMULATE;
        }
 
-       r = mmu_topup_memory_caches(vcpu);
+       r = mmu_topup_memory_caches(vcpu, true);
        if (r)
                return r;
 
@@ -904,7 +904,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
         * No need to check return value here, rmap_can_add() can
         * help us to skip pte prefetch later.
         */
-       mmu_topup_memory_caches(vcpu);
+       mmu_topup_memory_caches(vcpu, true);
 
        if (!VALID_PAGE(root_hpa)) {
                WARN_ON(1);
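
Net effect at the call sites, summarized (an illustrative fragment, not part of the patch; all names are taken from the hunks above):

	mmu_topup_memory_caches(vcpu, false);                        /* direct_page_fault(): MMU guaranteed direct */
	mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);  /* kvm_mmu_load(): direct unless shadowing */
	mmu_topup_memory_caches(vcpu, true);                         /* kvm_mmu_pte_write(), FNAME(page_fault), FNAME(invlpg) */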