KVM: arm64/mmu: count KVM s2 mmu usage in secondary pagetable stats
author     Yosry Ahmed <yosryahmed@google.com>
           Tue, 23 Aug 2022 00:46:39 +0000 (00:46 +0000)
committer  Sean Christopherson <seanjc@google.com>
           Tue, 30 Aug 2022 14:44:25 +0000 (07:44 -0700)
Count the pages used by KVM on arm64 for the stage-2 MMU under the
secondary pagetable stats (e.g. "SecPageTables" in /proc/meminfo).
This gives better visibility into the memory consumption of the KVM
MMU, in the same way that normal user page tables are accounted.

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220823004639.2387269-5-yosryahmed@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
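
For context, every path converted below funnels into
kvm_account_pgtable_pages(), a helper added earlier in this series
(in include/linux/kvm_host.h). As a rough sketch, assuming the
NR_SECONDARY_PAGETABLE node stat introduced by the same series, the
helper amounts to:

static inline void kvm_account_pgtable_pages(void *virt, int nr)
{
	/* Charge (nr > 0) or uncharge (nr < 0) pages to "SecPageTables" */
	mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
}

Positive counts charge pages and negative counts uncharge them, which
is why the stage-2 free and put paths below pass negated values.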
arch/arm64/kvm/mmu.c

index c9a13e4..34c5fee 100644
@@ -92,9 +92,13 @@ static bool kvm_is_device_pfn(unsigned long pfn)
 static void *stage2_memcache_zalloc_page(void *arg)
 {
        struct kvm_mmu_memory_cache *mc = arg;
+       void *virt;
 
        /* Allocated with __GFP_ZERO, so no need to zero */
-       return kvm_mmu_memory_cache_alloc(mc);
+       virt = kvm_mmu_memory_cache_alloc(mc);
+       if (virt)
+               kvm_account_pgtable_pages(virt, 1);
+       return virt;
 }
 
 static void *kvm_host_zalloc_pages_exact(size_t size)
@@ -102,6 +106,21 @@ static void *kvm_host_zalloc_pages_exact(size_t size)
        return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
 }
 
+static void *kvm_s2_zalloc_pages_exact(size_t size)
+{
+       void *virt = kvm_host_zalloc_pages_exact(size);
+
+       if (virt)
+               kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT));
+       return virt;
+}
+
+static void kvm_s2_free_pages_exact(void *virt, size_t size)
+{
+       kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT));
+       free_pages_exact(virt, size);
+}
+
 static void kvm_host_get_page(void *addr)
 {
        get_page(virt_to_page(addr));
@@ -112,6 +131,15 @@ static void kvm_host_put_page(void *addr)
        put_page(virt_to_page(addr));
 }
 
+static void kvm_s2_put_page(void *addr)
+{
+       struct page *p = virt_to_page(addr);
+       /* Dropping last refcount, the page will be freed */
+       if (page_count(p) == 1)
+               kvm_account_pgtable_pages(addr, -1);
+       put_page(p);
+}
+
 static int kvm_host_page_count(void *addr)
 {
        return page_count(virt_to_page(addr));
@@ -625,10 +653,10 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
 
 static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
        .zalloc_page            = stage2_memcache_zalloc_page,
-       .zalloc_pages_exact     = kvm_host_zalloc_pages_exact,
-       .free_pages_exact       = free_pages_exact,
+       .zalloc_pages_exact     = kvm_s2_zalloc_pages_exact,
+       .free_pages_exact       = kvm_s2_free_pages_exact,
        .get_page               = kvm_host_get_page,
-       .put_page               = kvm_host_put_page,
+       .put_page               = kvm_s2_put_page,
        .page_count             = kvm_host_page_count,
        .phys_to_virt           = kvm_host_va,
        .virt_to_phys           = kvm_host_pa,
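
Of the converted callbacks, only the put path needs special handling:
kvm_s2_put_page() uncharges only when it is about to drop the last
reference (page_count() == 1), so the stat tracks pages that are
actually freed rather than every put_page() call.

The result is visible from userspace: "SecPageTables" in /proc/meminfo
grows as KVM populates a guest's stage-2 tables and shrinks when they
are torn down. A minimal, hypothetical test program (not part of this
patch) that prints the counter before and after starting a VM could
look like:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	/* Print the "SecPageTables:" line from /proc/meminfo */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "SecPageTables:", 14))
			fputs(line, stdout);
	fclose(f);
	return 0;
}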