KVM: arm64: Use common KVM implementation of MMU memory caches
author Sean Christopherson <sean.j.christopherson@intel.com>
Fri, 3 Jul 2020 02:35:42 +0000 (19:35 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 9 Jul 2020 17:29:43 +0000 (13:29 -0400)
Move to the common MMU memory cache implementation now that the common
code and arm64's existing code are semantically compatible.

No functional change intended.

Cc: Marc Zyngier <maz@kernel.org>
Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200703023545.8771-19-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_types.h [new file with mode: 0644]
arch/arm64/kvm/mmu.c
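
For context: the common implementation adopted here was added earlier in this
series, with the struct in include/linux/kvm_types.h and the helpers in
virt/kvm/kvm_main.c, declared in include/linux/kvm_host.h. A rough sketch of
the API surface arm64 now consumes (paraphrased for illustration, not a
verbatim kernel excerpt):

    /* Paraphrased from include/linux/kvm_host.h. */
    int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
    void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
    void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

kvm_mmu_topup_memory_cache() fills the cache with preallocated pages ahead of
a page fault and returns -ENOMEM on allocation failure;
kvm_mmu_memory_cache_alloc() pops a preallocated object while mmu_lock is
held; kvm_mmu_free_memory_cache() releases whatever is left. These match the
semantics of the arm64-private helpers removed from mmu.c below.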

diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 35a6815..ff9cbb6 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 generic-y += early_ioremap.h
-generic-y += kvm_types.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 335170b..23d1f41 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -97,18 +97,6 @@ struct kvm_arch {
        bool return_nisv_io_abort_to_user;
 };
 
-#define KVM_NR_MEM_OBJS     40
-
-/*
- * We don't want allocation failures within the mmu code, so we preallocate
- * enough memory for a single page fault in a cache.
- */
-struct kvm_mmu_memory_cache {
-       int nobjs;
-       gfp_t gfp_zero;
-       void *objects[KVM_NR_MEM_OBJS];
-};
-
 struct kvm_vcpu_fault_info {
        u32 esr_el2;            /* Hyp Syndrom Register */
        u64 far_el2;            /* Hyp Fault Address Register */
diff --git a/arch/arm64/include/asm/kvm_types.h b/arch/arm64/include/asm/kvm_types.h
new file mode 100644
index 0000000..9a126b9
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_types.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_ARM64_KVM_TYPES_H
+#define _ASM_ARM64_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+
+#endif /* _ASM_ARM64_KVM_TYPES_H */
+
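
The common code keys off this macro: include/linux/kvm_types.h defines
struct kvm_mmu_memory_cache only when the architecture supplies
KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, which sizes the preallocated-object array.
Roughly (paraphrased, not verbatim):

    /* Paraphrased from include/linux/kvm_types.h. */
    #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
    struct kvm_mmu_memory_cache {
            int nobjs;
            gfp_t gfp_zero;
            struct kmem_cache *kmem_cache;
            void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
    };
    #endif

The value 40 preserves the capacity of the KVM_NR_MEM_OBJS array removed from
kvm_host.h above, so cache behavior is unchanged.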
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 5220623..ba66e9a 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -124,37 +124,6 @@ static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
        put_page(virt_to_page(pudp));
 }
 
-static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, int min)
-{
-       void *page;
-
-       if (cache->nobjs >= min)
-               return 0;
-       while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT |
-                                              cache->gfp_zero);
-               if (!page)
-                       return -ENOMEM;
-               cache->objects[cache->nobjs++] = page;
-       }
-       return 0;
-}
-
-static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
-{
-       while (mc->nobjs)
-               free_page((unsigned long)mc->objects[--mc->nobjs]);
-}
-
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
-{
-       void *p;
-
-       BUG_ON(!mc || !mc->nobjs);
-       p = mc->objects[--mc->nobjs];
-       return p;
-}
-
 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
        p4d_t *p4d_table __maybe_unused = stage2_p4d_offset(kvm, pgd, 0UL);
@@ -1131,7 +1100,7 @@ static p4d_t *stage2_get_p4d(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
        if (stage2_pgd_none(kvm, *pgd)) {
                if (!cache)
                        return NULL;
-               p4d = mmu_memory_cache_alloc(cache);
+               p4d = kvm_mmu_memory_cache_alloc(cache);
                stage2_pgd_populate(kvm, pgd, p4d);
                get_page(virt_to_page(pgd));
        }
@@ -1149,7 +1118,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
        if (stage2_p4d_none(kvm, *p4d)) {
                if (!cache)
                        return NULL;
-               pud = mmu_memory_cache_alloc(cache);
+               pud = kvm_mmu_memory_cache_alloc(cache);
                stage2_p4d_populate(kvm, p4d, pud);
                get_page(virt_to_page(p4d));
        }
@@ -1170,7 +1139,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
        if (stage2_pud_none(kvm, *pud)) {
                if (!cache)
                        return NULL;
-               pmd = mmu_memory_cache_alloc(cache);
+               pmd = kvm_mmu_memory_cache_alloc(cache);
                stage2_pud_populate(kvm, pud, pmd);
                get_page(virt_to_page(pud));
        }
@@ -1376,7 +1345,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
        if (stage2_pud_none(kvm, *pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
-               pmd = mmu_memory_cache_alloc(cache);
+               pmd = kvm_mmu_memory_cache_alloc(cache);
                stage2_pud_populate(kvm, pud, pmd);
                get_page(virt_to_page(pud));
        }
@@ -1401,7 +1370,7 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
-               pte = mmu_memory_cache_alloc(cache);
+               pte = kvm_mmu_memory_cache_alloc(cache);
                kvm_pmd_populate(pmd, pte);
                get_page(virt_to_page(pmd));
        }
@@ -1468,7 +1437,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
-       struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, };
+       struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
 
        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);
@@ -1479,8 +1448,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                if (writable)
                        pte = kvm_s2pte_mkwrite(pte);
 
-               ret = mmu_topup_memory_cache(&cache,
-                                            kvm_mmu_cache_min_pages(kvm));
+               ret = kvm_mmu_topup_memory_cache(&cache,
+                                                kvm_mmu_cache_min_pages(kvm));
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
@@ -1494,7 +1463,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
        }
 
 out:
-       mmu_free_memory_cache(&cache);
+       kvm_mmu_free_memory_cache(&cache);
        return ret;
 }
 
@@ -1880,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        mmap_read_unlock(current->mm);
 
        /* We need minimum second+third level pages */
-       ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
+       ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
        if (ret)
                return ret;
 
@@ -2303,7 +2272,7 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
 
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-       mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+       kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
 }
 
 phys_addr_t kvm_mmu_get_httbr(void)
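
Taken together, callers keep the same topup-then-consume pattern under the
common API. An illustrative sketch mirroring the kvm_phys_addr_ioremap()
hunks above (not a verbatim kernel excerpt; the work under the lock is
elided):

    struct kvm_mmu_memory_cache cache = { 0, __GFP_ZERO, NULL, };
    int ret;

    /* Fill the cache up front; this path may sleep and allocate. */
    ret = kvm_mmu_topup_memory_cache(&cache, kvm_mmu_cache_min_pages(kvm));
    if (ret)
            goto out;

    spin_lock(&kvm->mmu_lock);
    /*
     * Under mmu_lock, stage-2 table pages are taken from the prefilled
     * cache via kvm_mmu_memory_cache_alloc(), so the table walk never
     * hits an allocation failure at this point.
     */
    spin_unlock(&kvm->mmu_lock);

out:
    kvm_mmu_free_memory_cache(&cache);      /* Return any unused pages. */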