KVM: x86/mmu: Consolidate "page" variant of memory cache helpers
Author:     Sean Christopherson <sean.j.christopherson@intel.com>
AuthorDate: Fri, 3 Jul 2020 02:35:26 +0000 (19:35 -0700)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Thu, 9 Jul 2020 17:29:37 +0000 (13:29 -0400)
Drop the "page" variants of the topup/free memory cache helpers, using
the existence of an associated kmem_cache to select the correct alloc
or free routine.

No functional change intended.
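
For reference, the consolidated topup helper reads roughly as follows after
this patch. This is a reconstruction from the hunks below, not a verbatim
quote of the file: the "void *obj;" declaration and the trailing "return 0;"
are inferred from the surrounding context (and from the removed "page"
variant, which had the same shape).

	static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
					  int min)
	{
		void *obj;

		if (cache->nobjs >= min)
			return 0;
		while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
			/* A NULL kmem_cache means the cache holds full pages. */
			if (cache->kmem_cache)
				obj = kmem_cache_zalloc(cache->kmem_cache,
							GFP_KERNEL_ACCOUNT);
			else
				obj = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
			if (!obj)
				return cache->nobjs >= min ? 0 : -ENOMEM;
			cache->objects[cache->nobjs++] = obj;
		}
		return 0;
	}

mmu_free_memory_cache() mirrors the same check on mc->kmem_cache to pick
kmem_cache_free() versus free_page(), as shown in the diff below.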

Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-3-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 4175390..317a2c5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1068,7 +1068,10 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               obj = kmem_cache_zalloc(cache->kmem_cache, GFP_KERNEL_ACCOUNT);
+               if (cache->kmem_cache)
+                       obj = kmem_cache_zalloc(cache->kmem_cache, GFP_KERNEL_ACCOUNT);
+               else
+                       obj = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
                if (!obj)
                        return cache->nobjs >= min ? 0 : -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
@@ -1083,30 +1086,12 @@ static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
 
 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 {
-       while (mc->nobjs)
-               kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
-}
-
-static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
-                                      int min)
-{
-       void *page;
-
-       if (cache->nobjs >= min)
-               return 0;
-       while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
-               if (!page)
-                       return cache->nobjs >= min ? 0 : -ENOMEM;
-               cache->objects[cache->nobjs++] = page;
+       while (mc->nobjs) {
+               if (mc->kmem_cache)
+                       kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
+               else
+                       free_page((unsigned long)mc->objects[--mc->nobjs]);
        }
-       return 0;
-}
-
-static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
-{
-       while (mc->nobjs)
-               free_page((unsigned long)mc->objects[--mc->nobjs]);
 }
 
 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
@@ -1117,7 +1102,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
                                   8 + PTE_PREFETCH_NUM);
        if (r)
                goto out;
-       r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
+       r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache, 8);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache, 4);
@@ -1128,7 +1113,7 @@ out:
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
        mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
-       mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
+       mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
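
The NULL check works because only the slab-backed caches have a kmem_cache
associated with them. A minimal sketch of that wiring, assuming the
assignments added by the preceding patch in this series live in
kvm_mmu_create() (the exact placement and surrounding code are not part of
this diff):

	/* Slab-backed caches: serviced by kmem_cache_zalloc()/kmem_cache_free(). */
	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;

	/*
	 * mmu_page_cache.kmem_cache is never assigned and stays NULL (the
	 * vcpu structure is zero-initialized), so the consolidated helpers
	 * fall through to __get_free_page()/free_page() for that cache.
	 */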