KVM: selftests: Use the right memslot for code, page-tables, and data allocations
authorRicardo Koller <ricarkol@google.com>
Mon, 17 Oct 2022 19:58:29 +0000 (19:58 +0000)
committerMarc Zyngier <maz@kernel.org>
Thu, 10 Nov 2022 19:10:27 +0000 (19:10 +0000)
Now that kvm_vm allows specifying different memslots for code, page tables,
and data, use the appropriate memslot when making allocations in
common/library code. Change them accordingly:

- code (allocated by lib/elf) use the CODE memslot
- stacks, exception tables, and other core data pages (like the TSS in x86)
  use the DATA memslot
- page tables and the PGD use the PT memslot
- test data (anything allocated with vm_vaddr_alloc()) uses the TEST_DATA
  memslot

No functional change intended. All allocators keep using memslot #0.

Cc: Sean Christopherson <seanjc@google.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Signed-off-by: Ricardo Koller <ricarkol@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Andrew Jones <andrew.jones@linux.dev>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221017195834.2295901-10-ricarkol@google.com
tools/testing/selftests/kvm/include/kvm_util_base.h
tools/testing/selftests/kvm/lib/aarch64/processor.c
tools/testing/selftests/kvm/lib/elf.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/riscv/processor.c
tools/testing/selftests/kvm/lib/s390x/processor.c
tools/testing/selftests/kvm/lib/x86_64/processor.c

index 6442aa9..b0da75a 100644 (file)
@@ -407,7 +407,11 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
+vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+                           enum kvm_mem_region_type type);
 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
+vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
+                                enum kvm_mem_region_type type);
 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
 
 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
index 6ff2b9d..2883dfd 100644 (file)
@@ -82,7 +82,8 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
                return;
 
        vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
-                                    KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+                                    KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+                                    vm->memslots[MEM_REGION_PT]);
        vm->pgd_created = true;
 }
 
@@ -332,8 +333,9 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 
        stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
                                             vm->page_size;
-       stack_vaddr = vm_vaddr_alloc(vm, stack_size,
-                                    DEFAULT_ARM64_GUEST_STACK_VADDR_MIN);
+       stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+                                      DEFAULT_ARM64_GUEST_STACK_VADDR_MIN,
+                                      MEM_REGION_DATA);
 
        aarch64_vcpu_setup(vcpu, init);
 
@@ -438,8 +440,8 @@ unexpected_exception:
 
 void vm_init_descriptor_tables(struct kvm_vm *vm)
 {
-       vm->handlers = vm_vaddr_alloc(vm, sizeof(struct handlers),
-                       vm->page_size);
+       vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers),
+                                       vm->page_size, MEM_REGION_DATA);
 
        *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
 }
index 9f54c09..51f280c 100644 (file)
@@ -161,7 +161,8 @@ void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename)
                seg_vend |= vm->page_size - 1;
                size_t seg_size = seg_vend - seg_vstart + 1;
 
-               vm_vaddr_t vaddr = vm_vaddr_alloc(vm, seg_size, seg_vstart);
+               vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart,
+                                                   MEM_REGION_CODE);
                TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate "
                        "virtual memory for segment at requested min addr,\n"
                        "  segment idx: %u\n"
index f3dfa4e..5ad4aca 100644 (file)
@@ -1226,32 +1226,15 @@ va_found:
        return pgidx_start * vm->page_size;
 }
 
-/*
- * VM Virtual Address Allocate
- *
- * Input Args:
- *   vm - Virtual Machine
- *   sz - Size in bytes
- *   vaddr_min - Minimum starting virtual address
- *
- * Output Args: None
- *
- * Return:
- *   Starting guest virtual address
- *
- * Allocates at least sz bytes within the virtual address space of the vm
- * given by vm.  The allocated bytes are mapped to a virtual address >=
- * the address given by vaddr_min.  Note that each allocation uses a
- * a unique set of pages, with the minimum real allocation being at least
- * a page.
- */
-vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
+                           enum kvm_mem_region_type type)
 {
        uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
 
        virt_pgd_alloc(vm);
        vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
-                                             KVM_UTIL_MIN_PFN * vm->page_size, 0);
+                                             KVM_UTIL_MIN_PFN * vm->page_size,
+                                             vm->memslots[type]);
 
        /*
         * Find an unused range of virtual page addresses of at least
@@ -1273,6 +1256,30 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
 }
 
 /*
+ * VM Virtual Address Allocate
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   sz - Size in bytes
+ *   vaddr_min - Minimum starting virtual address
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Starting guest virtual address
+ *
+ * Allocates at least sz bytes within the virtual address space of the vm
+ * given by vm.  The allocated bytes are mapped to a virtual address >=
+ * the address given by vaddr_min.  Note that each allocation uses a
+ * a unique set of pages, with the minimum real allocation being at least
+ * a page. The allocated physical space comes from the TEST_DATA memory region.
+ */
+vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
+{
+       return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA);
+}
+
+/*
  * VM Virtual Address Allocate Pages
  *
  * Input Args:
@@ -1291,6 +1298,11 @@ vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
        return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
 }
 
+vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type)
+{
+       return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type);
+}
+
 /*
  * VM Virtual Address Allocate Page
  *
@@ -1856,7 +1868,8 @@ vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
 
 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
 {
-       return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+       return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+                                vm->memslots[MEM_REGION_PT]);
 }
 
 /*
index ac7fc9d..d146ca7 100644 (file)
@@ -61,7 +61,8 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
                return;
 
        vm->pgd = vm_phy_pages_alloc(vm, nr_pages,
-                                    KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+                                    KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+                                    vm->memslots[MEM_REGION_PT]);
        vm->pgd_created = true;
 }
 
@@ -288,8 +289,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 
        stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size :
                                             vm->page_size;
-       stack_vaddr = vm_vaddr_alloc(vm, stack_size,
-                                    DEFAULT_RISCV_GUEST_STACK_VADDR_MIN);
+       stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+                                      DEFAULT_RISCV_GUEST_STACK_VADDR_MIN,
+                                      MEM_REGION_DATA);
 
        vcpu = __vm_vcpu_add(vm, vcpu_id);
        riscv_vcpu_mmu_setup(vcpu);
index 89d7340..1594512 100644 (file)
@@ -21,7 +21,8 @@ void virt_arch_pgd_alloc(struct kvm_vm *vm)
                return;
 
        paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
-                                  KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+                                  KVM_GUEST_PAGE_TABLE_MIN_PADDR,
+                                  vm->memslots[MEM_REGION_PT]);
        memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);
 
        vm->pgd = paddr;
@@ -167,8 +168,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
                    vm->page_size);
 
-       stack_vaddr = vm_vaddr_alloc(vm, stack_size,
-                                    DEFAULT_GUEST_STACK_VADDR_MIN);
+       stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
+                                      DEFAULT_GUEST_STACK_VADDR_MIN,
+                                      MEM_REGION_DATA);
 
        vcpu = __vm_vcpu_add(vm, vcpu_id);
 
index 39c4409..b199dde 100644 (file)
@@ -552,7 +552,7 @@ unmapped_gva:
 static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
 {
        if (!vm->gdt)
-               vm->gdt = vm_vaddr_alloc_page(vm);
+               vm->gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
 
        dt->base = vm->gdt;
        dt->limit = getpagesize();
@@ -562,7 +562,7 @@ static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
                                int selector)
 {
        if (!vm->tss)
-               vm->tss = vm_vaddr_alloc_page(vm);
+               vm->tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
 
        memset(segp, 0, sizeof(*segp));
        segp->base = vm->tss;
@@ -647,8 +647,9 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        vm_vaddr_t stack_vaddr;
        struct kvm_vcpu *vcpu;
 
-       stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
-                                    DEFAULT_GUEST_STACK_VADDR_MIN);
+       stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
+                                      DEFAULT_GUEST_STACK_VADDR_MIN,
+                                      MEM_REGION_DATA);
 
        vcpu = __vm_vcpu_add(vm, vcpu_id);
        vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
@@ -1145,8 +1146,8 @@ void vm_init_descriptor_tables(struct kvm_vm *vm)
        extern void *idt_handlers;
        int i;
 
-       vm->idt = vm_vaddr_alloc_page(vm);
-       vm->handlers = vm_vaddr_alloc_page(vm);
+       vm->idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
+       vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA);
        /* Handlers have the same address in both address spaces.*/
        for (i = 0; i < NUM_INTERRUPTS; i++)
                set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,