KVM: selftests: Add hugepage support for x86-64
author Sean Christopherson <seanjc@google.com>
Tue, 22 Jun 2021 20:05:28 +0000 (13:05 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 24 Jun 2021 15:47:55 +0000 (11:47 -0400)
Add x86-64 hugepage support in the form of an x86-only variant of
virt_pg_map() that takes an explicit page size.  To keep things simple,
follow the existing logic for 4k pages and disallow creating a hugepage
if the upper-level entry is present, even if the desired pfn matches.

Opportunistically fix a double "beyond" reported by checkpatch.
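
As a usage sketch (not part of this patch; the VM setup and addresses are
illustrative and assume a memslot backs the mapped gpa), a selftest can
exercise the new helper alongside the existing one:

    struct kvm_vm *vm = vm_create_default(0, 0, guest_code);

    /* Existing callers are unchanged: virt_pg_map() still maps 4k pages. */
    virt_pg_map(vm, 0x401000, 0x401000);

    /* New: map a 2M hugepage; both vaddr and paddr must be 2M-aligned. */
    __virt_pg_map(vm, 0x40000000, 0x40000000, X86_PAGE_SIZE_2M);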

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622200529.3650424-19-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/x86_64/processor.c

index 9a5b47d..f211269 100644
@@ -412,6 +412,14 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void);
 void vcpu_set_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpuid);
 
+enum x86_page_size {
+       X86_PAGE_SIZE_4K = 0,
+       X86_PAGE_SIZE_2M,
+       X86_PAGE_SIZE_1G,
+};
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+                  enum x86_page_size page_size);
+
 /*
  * Basic CPU control in CR0
  */
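
The enum encoding above is load-bearing: each value is also the paging level
at which the walk terminates, so the mapping size follows from 9 bits of
index per level on top of the 12-bit page offset, exactly as the pg_size
computation in __virt_pg_map() below:

    1ull << (9 * X86_PAGE_SIZE_4K + 12) == 0x1000       /* 4k page, PTE  */
    1ull << (9 * X86_PAGE_SIZE_2M + 12) == 0x200000     /* 2M page, PDE  */
    1ull << (9 * X86_PAGE_SIZE_1G + 12) == 0x40000000   /* 1G page, PDPE */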
index fc33acf..5e0e3a1 100644
@@ -198,55 +198,90 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
 static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
                                                    uint64_t pt_pfn,
                                                    uint64_t vaddr,
-                                                   int level)
+                                                   uint64_t paddr,
+                                                   int level,
+                                                   enum x86_page_size page_size)
 {
        struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
 
        if (!pte->present) {
-               pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
                pte->writable = true;
                pte->present = true;
+               pte->page_size = (level == page_size);
+               if (pte->page_size)
+                       pte->pfn = paddr >> vm->page_shift;
+               else
+                       pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
+       } else {
+               /*
+                * Entry already present.  Assert that the caller doesn't want
+                * a hugepage at this level, and that there isn't a hugepage at
+                * this level.
+                */
+               TEST_ASSERT(level != page_size,
+                           "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
+                           page_size, vaddr);
+               TEST_ASSERT(!pte->page_size,
+                           "Cannot create page table at level: %u, vaddr: 0x%lx\n",
+                           level, vaddr);
        }
        return pte;
 }
 
-void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+                  enum x86_page_size page_size)
 {
+       const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
        struct pageUpperEntry *pml4e, *pdpe, *pde;
        struct pageTableEntry *pte;
 
-       TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
-               "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
-
-       TEST_ASSERT((vaddr % vm->page_size) == 0,
-               "Virtual address not on page boundary,\n"
-               "  vaddr: 0x%lx vm->page_size: 0x%x",
-               vaddr, vm->page_size);
-       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
-               (vaddr >> vm->page_shift)),
-               "Invalid virtual address, vaddr: 0x%lx",
-               vaddr);
-       TEST_ASSERT((paddr % vm->page_size) == 0,
-               "Physical address not on page boundary,\n"
-               "  paddr: 0x%lx vm->page_size: 0x%x",
-               paddr, vm->page_size);
+       TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
+                   "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+
+       TEST_ASSERT((vaddr % pg_size) == 0,
+                   "Virtual address not aligned,\n"
+                   "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
+       TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
+                   "Invalid virtual address, vaddr: 0x%lx", vaddr);
+       TEST_ASSERT((paddr % pg_size) == 0,
+                   "Physical address not aligned,\n"
+                   "  paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
-               "Physical address beyond beyond maximum supported,\n"
-               "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
-               paddr, vm->max_gfn, vm->page_size);
+                   "Physical address beyond maximum supported,\n"
+                   "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+                   paddr, vm->max_gfn, vm->page_size);
+
+       /*
+        * Allocate upper level page tables, if not already present.  Return
+        * early if a hugepage was created.
+        */
+       pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
+                                     vaddr, paddr, 3, page_size);
+       if (pml4e->page_size)
+               return;
+
+       pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
+       if (pdpe->page_size)
+               return;
 
-       /* Allocate upper level page tables, if not already present. */
-       pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift, vaddr, 3);
-       pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, 2);
-       pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, 1);
+       pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
+       if (pde->page_size)
+               return;
 
        /* Fill in page table entry. */
        pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
+       TEST_ASSERT(!pte->present,
+                   "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
        pte->pfn = paddr >> vm->page_shift;
        pte->writable = true;
        pte->present = 1;
 }
 
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
+{
+       __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
+}
+
 void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
        struct pageUpperEntry *pml4e, *pml4e_start;
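
To illustrate the restriction called out in the commit message, installing a
hugepage over a range that already has a lower-granularity mapping trips the
new assert in virt_create_upper_pte(), even if the pfn matches (hypothetical
snippet, addresses illustrative):

    virt_pg_map(vm, 0x200000, 0x200000);                     /* installs PDE + 4k PTE */
    __virt_pg_map(vm, 0x200000, 0x200000, X86_PAGE_SIZE_2M); /* asserts: level == page_size */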