KVM: x86/mmu: Prepend "kvm_" to memory cache helpers that will be global
author	Sean Christopherson <sean.j.christopherson@intel.com>
	Fri, 3 Jul 2020 02:35:37 +0000 (19:35 -0700)
committer	Paolo Bonzini <pbonzini@redhat.com>
	Thu, 9 Jul 2020 17:29:41 +0000 (13:29 -0400)
Rename the memory cache helpers that will soon be moved to common code
and be made globally available via linux/kvm_host.h.  "mmu" alone is
not a sufficient namespace for globally available KVM symbols.

Opportunistically add "nr_" in mmu_memory_cache_free_objects() to make
it clear the function returns the number of free objects, as opposed to
freeing existing objects.
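
For reference, a sketch of how the renamed helpers could be declared once
hoisted to common code; the final location and exact declarations are
assumptions based on the plan above, not part of this patch:

	/* include/linux/kvm_host.h (hypothetical follow-up patch) */
	int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
	int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
	void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
	void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);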

Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-14-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index 3d477a3..942b6a9 100644
@@ -1072,7 +1072,7 @@ static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, gfp_t gfp_flags)
                return (void *)__get_free_page(gfp_flags);
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
+static int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
 {
        void *obj;
 
@@ -1087,12 +1087,12 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
        return 0;
 }
 
-static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *mc)
+static int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
 {
        return mc->nobjs;
 }
 
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+static void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 {
        while (mc->nobjs) {
                if (mc->kmem_cache)
@@ -1107,33 +1107,33 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
        int r;
 
        /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
-       r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
-                                  1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
+       r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
+                                      1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
        if (r)
                return r;
-       r = mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
-                                  PT64_ROOT_MAX_LEVEL);
+       r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
+                                      PT64_ROOT_MAX_LEVEL);
        if (r)
                return r;
        if (maybe_indirect) {
-               r = mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
-                                          PT64_ROOT_MAX_LEVEL);
+               r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
+                                              PT64_ROOT_MAX_LEVEL);
                if (r)
                        return r;
        }
-       return mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
-                                     PT64_ROOT_MAX_LEVEL);
+       return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
+                                         PT64_ROOT_MAX_LEVEL);
 }
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-       mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
-       mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
-       mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
-       mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+static void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 {
        void *p;
 
@@ -1147,7 +1147,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
 
 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
 {
-       return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
+       return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
 }
 
 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
@@ -1418,7 +1418,7 @@ static bool rmap_can_add(struct kvm_vcpu *vcpu)
        struct kvm_mmu_memory_cache *mc;
 
        mc = &vcpu->arch.mmu_pte_list_desc_cache;
-       return mmu_memory_cache_free_objects(mc);
+       return kvm_mmu_memory_cache_nr_free_objects(mc);
 }
 
 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
@@ -2090,10 +2090,10 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
 {
        struct kvm_mmu_page *sp;
 
-       sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
-       sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
+       sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
+       sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
        if (!direct)
-               sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
+               sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
 
        /*
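
For context, the topup/alloc split exists because the MMU consumes cache
objects while holding mmu_lock, where sleeping allocations are forbidden;
callers fill the caches in a sleepable context beforehand.  A minimal
sketch of that pattern (hypothetical caller, for illustration only):

	static int example_alloc_shadow_page(struct kvm_vcpu *vcpu)
	{
		struct kvm_mmu_page *sp;
		int r;

		/* May sleep: fill the per-vCPU caches before taking mmu_lock. */
		r = mmu_topup_memory_caches(vcpu, true);
		if (r)
			return r;

		spin_lock(&vcpu->kvm->mmu_lock);
		/* Must not sleep: consume objects preallocated above. */
		sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
		sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
		spin_unlock(&vcpu->kvm->mmu_lock);
		return 0;
	}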