1 // SPDX-License-Identifier: GPL-2.0-only
3 * tools/testing/selftests/kvm/lib/kvm_util.c
5 * Copyright (C) 2018, Google LLC.
8 #define _GNU_SOURCE /* for program_invocation_name */
11 #include "processor.h"
15 #include <sys/types.h>
18 #include <linux/kernel.h>
20 #define KVM_UTIL_MIN_PFN 2
22 static int vcpu_mmap_sz(void);
24 int open_path_or_exit(const char *path, int flags)
28 fd = open(path, flags);
29 __TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno);
35 * Open KVM_DEV_PATH if available, otherwise exit the entire program.
38 * flags - The flags to pass when opening KVM_DEV_PATH.
41 * The opened file descriptor of /dev/kvm.
43 static int _open_kvm_dev_path_or_exit(int flags)
45 return open_path_or_exit(KVM_DEV_PATH, flags);
48 int open_kvm_dev_path_or_exit(void)
50 return _open_kvm_dev_path_or_exit(O_RDONLY);
62 * On success, the value corresponding to the capability (KVM_CAP_*)
63 * specified by the value of cap. On failure a TEST_ASSERT failure
66 * Looks up and returns the value corresponding to the capability
67 * (KVM_CAP_*) given by cap.
69 unsigned int kvm_check_cap(long cap)
74 kvm_fd = open_kvm_dev_path_or_exit();
75 ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
76 TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
80 return (unsigned int)ret;
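/*
 * Usage sketch (illustrative; example_* helpers are not part of this file's
 * API): because kvm_check_cap() asserts on ioctl failure, callers can
 * consume the returned value directly, e.g. to size vCPU arrays.
 */
static inline unsigned int example_max_vcpus(void)
{
	unsigned int max = kvm_check_cap(KVM_CAP_MAX_VCPUS);

	/* KVM reports at least 1; fall back defensively anyway. */
	return max ?: 1;
}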
83 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
85 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL))
86 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size);
88 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size);
89 vm->dirty_ring_size = ring_size;
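/*
 * Usage sketch (illustrative): the ring size passed to KVM is in bytes and
 * must hold a power-of-2 number of struct kvm_dirty_gfn entries; 4096
 * entries is an arbitrary example. Enable the ring before creating vCPUs,
 * then map each vCPU's ring with vcpu_map_dirty_ring().
 */
static inline void example_enable_dirty_ring(struct kvm_vm *vm)
{
	vm_enable_dirty_ring(vm, 4096 * sizeof(struct kvm_dirty_gfn));
}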
92 static void vm_open(struct kvm_vm *vm)
94 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR);
96 TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT));
98 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type);
99 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd));
102 const char *vm_guest_mode_string(uint32_t i)
104 static const char * const strings[] = {
105 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
106 [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages",
107 [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages",
108 [VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages",
109 [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages",
110 [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
111 [VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages",
112 [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
113 [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
114 [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
115 [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
116 [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages",
117 [VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages",
118 [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages",
119 [VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages",
121 _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
122 "Missing new mode strings?");
124 TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);
129 const struct vm_guest_mode_params vm_guest_mode_params[] = {
130 [VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 },
131 [VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 },
132 [VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 },
133 [VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 },
134 [VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 },
135 [VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
136 [VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 },
137 [VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
138 [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
139 [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
140 [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
141 [VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 },
142 [VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 },
143 [VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 },
144 [VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 },
146 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
147 "Missing new mode params?");
149 struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages)
153 pr_debug("%s: mode='%s' pages='%ld'\n", __func__,
154 vm_guest_mode_string(mode), nr_pages);
156 vm = calloc(1, sizeof(*vm));
157 TEST_ASSERT(vm != NULL, "Insufficient Memory");
159 INIT_LIST_HEAD(&vm->vcpus);
160 vm->regions.gpa_tree = RB_ROOT;
161 vm->regions.hva_tree = RB_ROOT;
162 hash_init(vm->regions.slot_hash);
167 vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
168 vm->va_bits = vm_guest_mode_params[mode].va_bits;
169 vm->page_size = vm_guest_mode_params[mode].page_size;
170 vm->page_shift = vm_guest_mode_params[mode].page_shift;
172 /* Setup mode specific traits. */
174 case VM_MODE_P52V48_4K:
175 vm->pgtable_levels = 4;
177 case VM_MODE_P52V48_64K:
178 vm->pgtable_levels = 3;
180 case VM_MODE_P48V48_4K:
181 vm->pgtable_levels = 4;
183 case VM_MODE_P48V48_64K:
184 vm->pgtable_levels = 3;
186 case VM_MODE_P40V48_4K:
187 case VM_MODE_P36V48_4K:
188 vm->pgtable_levels = 4;
190 case VM_MODE_P40V48_64K:
191 case VM_MODE_P36V48_64K:
192 vm->pgtable_levels = 3;
194 case VM_MODE_P48V48_16K:
195 case VM_MODE_P40V48_16K:
196 case VM_MODE_P36V48_16K:
197 vm->pgtable_levels = 4;
199 case VM_MODE_P36V47_16K:
200 vm->pgtable_levels = 3;
202 case VM_MODE_PXXV48_4K:
204 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
206 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
207 * it doesn't take effect unless CR4.LA57 is set, which it
208 * isn't for this VM_MODE.
210 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
211 "Linear address width (%d bits) not supported",
213 pr_debug("Guest physical address width detected: %d\n",
215 vm->pgtable_levels = 4;
218 TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
221 case VM_MODE_P47V64_4K:
222 vm->pgtable_levels = 5;
224 case VM_MODE_P44V64_4K:
225 vm->pgtable_levels = 5;
228 TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
232 if (vm->pa_bits != 40)
233 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
238 /* Limit to VA-bit canonical virtual addresses. */
239 vm->vpages_valid = sparsebit_alloc();
240 sparsebit_set_num(vm->vpages_valid,
241 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
242 sparsebit_set_num(vm->vpages_valid,
243 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
244 (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
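/*
 * Worked example for va_bits = 48 and 4K pages (page_shift = 12): the low
 * canonical half is vpages [0, 1ULL << 35), i.e. VAs
 * [0, 0x0000800000000000); the high half starts at VA 0xffff800000000000
 * (vpage 0xffff800000000000 >> 12) and spans another 1ULL << 35 pages.
 */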
246 /* Limit physical addresses to PA-bits. */
247 vm->max_gfn = vm_compute_max_gfn(vm);
249 /* Allocate and setup memory for guest. */
250 vm->vpages_mapped = sparsebit_alloc();
252 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
258 static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
259 uint32_t nr_runnable_vcpus,
260 uint64_t extra_mem_pages)
264 TEST_ASSERT(nr_runnable_vcpus,
265 "Use vm_create_barebones() for VMs that _never_ have vCPUs\n");
267 TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
268 "nr_vcpus = %d too large for host, max-vcpus = %d",
269 nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
272 * Arbitrarily allocate 512 pages (2MB when page size is 4KB) for the
273 * test code and other per-VM assets that will be loaded into memslot0.
277 /* Account for the per-vCPU stacks on behalf of the test. */
278 nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS;
281 * Account for the number of pages needed for the page tables. The
282 * maximum page table size for a memory region will be when the
283 * smallest page size is used. Considering each page contains x page
284 * table descriptors, the total extra size for page tables (for extra
285 * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
286 * than N/x*2.
288 nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;
290 return vm_adjust_num_guest_pages(mode, nr_pages);
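/*
 * Worked example (assuming 4K pages, DEFAULT_STACK_PGS = 5 and
 * PTES_PER_MIN_PAGE = 512): one runnable vCPU with no extra pages gives
 * nr_pages = 512 + 5 = 517, plus 517 / 512 * 2 = 2 page-table pages, i.e.
 * 519 pages before the mode adjustment.
 */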
293 struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
294 uint64_t nr_extra_pages)
296 uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus,
300 vm = ____vm_create(mode, nr_pages);
302 kvm_vm_elf_load(vm, program_invocation_name);
305 vm_create_irqchip(vm);
311 * VM Create with customized parameters
314 * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
315 * nr_vcpus - VCPU count
316 * extra_mem_pages - Non-slot0 physical memory total size
317 * guest_code - Guest entry point
323 * Pointer to opaque structure that describes the created VM.
325 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
326 * extra_mem_pages is only used to calculate the maximum page table size;
327 * no real memory is allocated for non-slot0 memory in this function.
329 struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
330 uint64_t extra_mem_pages,
331 void *guest_code, struct kvm_vcpu *vcpus[])
336 TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array");
338 vm = __vm_create(mode, nr_vcpus, extra_mem_pages);
340 for (i = 0; i < nr_vcpus; ++i)
341 vcpus[i] = vm_vcpu_add(vm, i, guest_code);
346 struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
347 uint64_t extra_mem_pages,
350 struct kvm_vcpu *vcpus[1];
353 vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages,
364 * vm - VM that has been released before
368 * Reopens the file descriptors associated with the VM and reinstates the
369 * global state, such as the irqchip and the memory regions that are mapped
372 void kvm_vm_restart(struct kvm_vm *vmp)
375 struct userspace_mem_region *region;
378 if (vmp->has_irqchip)
379 vm_create_irqchip(vmp);
381 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
382 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
383 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
384 " rc: %i errno: %i\n"
385 " slot: %u flags: 0x%x\n"
386 " guest_phys_addr: 0x%llx size: 0x%llx",
387 ret, errno, region->region.slot,
388 region->region.flags,
389 region->region.guest_phys_addr,
390 region->region.memory_size);
394 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm,
397 return __vm_vcpu_add(vm, vcpu_id);
400 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm)
404 return vm_vcpu_recreate(vm, 0);
408 * Userspace Memory Region Find
411 * vm - Virtual Machine
412 * start - Starting VM physical address
413 * end - Ending VM physical address, inclusive.
418 * Pointer to overlapping region, NULL if no such region.
420 * Searches for a region with any physical memory that overlaps with
421 * any portion of the guest physical addresses from start to end
422 * inclusive. If multiple overlapping regions exist, a pointer to any
423 * of the regions is returned. NULL is returned only when no overlapping
424 * region exists.
426 static struct userspace_mem_region *
427 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
429 struct rb_node *node;
431 for (node = vm->regions.gpa_tree.rb_node; node; ) {
432 struct userspace_mem_region *region =
433 container_of(node, struct userspace_mem_region, gpa_node);
434 uint64_t existing_start = region->region.guest_phys_addr;
435 uint64_t existing_end = region->region.guest_phys_addr
436 + region->region.memory_size - 1;
437 if (start <= existing_end && end >= existing_start)
440 if (start < existing_start)
441 node = node->rb_left;
443 node = node->rb_right;
450 * KVM Userspace Memory Region Find
453 * vm - Virtual Machine
454 * start - Starting VM physical address
455 * end - Ending VM physical address, inclusive.
460 * Pointer to overlapping region, NULL if no such region.
462 * Public interface to userspace_mem_region_find. Allows tests to look up
463 * the memslot data structure for a given range of guest physical memory.
465 struct kvm_userspace_memory_region *
466 kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
469 struct userspace_mem_region *region;
471 region = userspace_mem_region_find(vm, start, end);
475 return &region->region;
478 __weak void vcpu_arch_free(struct kvm_vcpu *vcpu)
487 * vcpu - VCPU to remove
491 * Return: None, TEST_ASSERT failures for all error conditions
493 * Removes a vCPU from a VM and frees its resources.
495 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
499 if (vcpu->dirty_gfns) {
500 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
501 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
502 vcpu->dirty_gfns = NULL;
505 ret = munmap(vcpu->run, vcpu_mmap_sz());
506 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
508 ret = close(vcpu->fd);
509 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
511 list_del(&vcpu->list);
513 vcpu_arch_free(vcpu);
517 void kvm_vm_release(struct kvm_vm *vmp)
519 struct kvm_vcpu *vcpu, *tmp;
522 list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
523 vm_vcpu_rm(vmp, vcpu);
525 ret = close(vmp->fd);
526 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
528 ret = close(vmp->kvm_fd);
529 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret));
532 static void __vm_mem_region_delete(struct kvm_vm *vm,
533 struct userspace_mem_region *region,
539 rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
540 rb_erase(&region->hva_node, &vm->regions.hva_tree);
541 hash_del(&region->slot_node);
544 region->region.memory_size = 0;
545 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
547 sparsebit_free(&region->unused_phy_pages);
548 ret = munmap(region->mmap_start, region->mmap_size);
549 TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret));
555 * Destroys and frees the VM pointed to by vmp.
557 void kvm_vm_free(struct kvm_vm *vmp)
560 struct hlist_node *node;
561 struct userspace_mem_region *region;
566 /* Free cached stats metadata and close FD */
568 free(vmp->stats_desc);
569 close(vmp->stats_fd);
572 /* Free userspace_mem_regions. */
573 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
574 __vm_mem_region_delete(vmp, region, false);
576 /* Free sparsebit arrays. */
577 sparsebit_free(&vmp->vpages_valid);
578 sparsebit_free(&vmp->vpages_mapped);
582 /* Free the structure describing the VM. */
586 int kvm_memfd_alloc(size_t size, bool hugepages)
588 int memfd_flags = MFD_CLOEXEC;
592 memfd_flags |= MFD_HUGETLB;
594 fd = memfd_create("kvm_selftest", memfd_flags);
595 TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd));
597 r = ftruncate(fd, size);
598 TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r));
600 r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size);
601 TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r));
607 * Memory Compare, host virtual to guest virtual
610 * hva - Starting host virtual address
611 * vm - Virtual Machine
612 * gva - Starting guest virtual address
613 * len - number of bytes to compare
617 * Input/Output Args: None
620 * Returns 0 if the bytes starting at hva for a length of len
621 * are equal to the guest virtual bytes starting at gva. Returns
622 * a value < 0 if the bytes at hva are less than those at gva.
623 * Otherwise a value > 0 is returned.
625 * Compares the bytes starting at the host virtual address hva, for
626 * a length of len, to the guest bytes starting at the guest virtual
627 * address given by gva.
629 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
634 * Compare a batch of bytes until either a mismatch is found
635 * or all the bytes have been compared.
637 for (uintptr_t offset = 0; offset < len; offset += amt) {
638 uintptr_t ptr1 = (uintptr_t)hva + offset;
641 * Determine host address for guest virtual address
644 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
647 * Determine amount to compare on this pass.
648 * Don't allow the comparison to cross a page boundary.
651 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
652 amt = vm->page_size - (ptr1 % vm->page_size);
653 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
654 amt = vm->page_size - (ptr2 % vm->page_size);
656 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
657 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
660 * Perform the comparison. If there is a difference
661 * return that result to the caller; otherwise continue
662 * looking for a mismatch.
664 int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
670 * No mismatch found. Let the caller know the two memory
671 * regions are equal.
676 static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
677 struct userspace_mem_region *region)
679 struct rb_node **cur, *parent;
681 for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
682 struct userspace_mem_region *cregion;
684 cregion = container_of(*cur, typeof(*cregion), gpa_node);
686 if (region->region.guest_phys_addr <
687 cregion->region.guest_phys_addr)
688 cur = &(*cur)->rb_left;
690 TEST_ASSERT(region->region.guest_phys_addr !=
691 cregion->region.guest_phys_addr,
692 "Duplicate GPA in region tree");
694 cur = &(*cur)->rb_right;
698 rb_link_node(&region->gpa_node, parent, cur);
699 rb_insert_color(&region->gpa_node, gpa_tree);
702 static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
703 struct userspace_mem_region *region)
705 struct rb_node **cur, *parent;
707 for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
708 struct userspace_mem_region *cregion;
710 cregion = container_of(*cur, typeof(*cregion), hva_node);
712 if (region->host_mem < cregion->host_mem)
713 cur = &(*cur)->rb_left;
715 TEST_ASSERT(region->host_mem !=
717 "Duplicate HVA in region tree");
719 cur = &(*cur)->rb_right;
723 rb_link_node(&region->hva_node, parent, cur);
724 rb_insert_color(&region->hva_node, hva_tree);
728 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
729 uint64_t gpa, uint64_t size, void *hva)
731 struct kvm_userspace_memory_region region = {
734 .guest_phys_addr = gpa,
736 .userspace_addr = (uintptr_t)hva,
739 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region);
742 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
743 uint64_t gpa, uint64_t size, void *hva)
745 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva);
747 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)",
748 errno, strerror(errno));
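/*
 * Usage sketch (illustrative): the double-underscore variant returns the
 * raw ioctl result, letting a test assert that bad arguments are rejected
 * instead of aborting. An unaligned GPA is guaranteed to be rejected.
 */
static inline void example_expect_unaligned_gpa_fails(struct kvm_vm *vm,
						      void *hva)
{
	int r = __vm_set_user_memory_region(vm, 0, 0, 1, 0x10000, hva);

	TEST_ASSERT(r == -1 && errno == EINVAL, "Unaligned GPA not rejected");
}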
752 * VM Userspace Memory Region Add
755 * vm - Virtual Machine
756 * src_type - Storage source for this region.
757 * NULL to use anonymous memory.
758 * guest_paddr - Starting guest physical address
759 * slot - KVM region slot
760 * npages - Number of physical pages
761 * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
767 * Allocates a memory area of the number of pages specified by npages
768 * and maps it to the VM specified by vm, at a starting physical address
769 * given by guest_paddr. The region is created with a KVM region slot
770 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The
771 * region is created with the flags given by flags.
773 void vm_userspace_mem_region_add(struct kvm_vm *vm,
774 enum vm_mem_backing_src_type src_type,
775 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
779 struct userspace_mem_region *region;
780 size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
783 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
784 "Number of guest pages is not compatible with the host. "
785 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
787 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
788 "address not on a page boundary.\n"
789 " guest_paddr: 0x%lx vm->page_size: 0x%x",
790 guest_paddr, vm->page_size);
791 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
792 <= vm->max_gfn, "Physical range beyond maximum "
793 "supported physical address,\n"
794 " guest_paddr: 0x%lx npages: 0x%lx\n"
795 " vm->max_gfn: 0x%lx vm->page_size: 0x%x",
796 guest_paddr, npages, vm->max_gfn, vm->page_size);
799 * Confirm a mem region with an overlapping address doesn't
800 * exist.
802 region = (struct userspace_mem_region *) userspace_mem_region_find(
803 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
805 TEST_FAIL("overlapping userspace_mem_region already "
807 " requested guest_paddr: 0x%lx npages: 0x%lx "
809 " existing guest_paddr: 0x%lx size: 0x%lx",
810 guest_paddr, npages, vm->page_size,
811 (uint64_t) region->region.guest_phys_addr,
812 (uint64_t) region->region.memory_size);
814 /* Confirm no region with the requested slot already exists. */
815 hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
817 if (region->region.slot != slot)
820 TEST_FAIL("A mem region with the requested slot "
822 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
823 " existing slot: %u paddr: 0x%lx size: 0x%lx",
824 slot, guest_paddr, npages,
826 (uint64_t) region->region.guest_phys_addr,
827 (uint64_t) region->region.memory_size);
830 /* Allocate and initialize new mem region structure. */
831 region = calloc(1, sizeof(*region));
832 TEST_ASSERT(region != NULL, "Insufficient Memory");
833 region->mmap_size = npages * vm->page_size;
836 /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
837 alignment = 0x100000;
843 * When using THP, mmap is not guaranteed to return a hugepage-aligned
844 * address, so we have to pad the mmap. Padding is not needed for HugeTLB
845 * because mmap will always return an address aligned to the HugeTLB
846 * page size.
848 if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
849 alignment = max(backing_src_pagesz, alignment);
851 ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
853 /* Add enough memory to align up if necessary */
855 region->mmap_size += alignment;
858 if (backing_src_is_shared(src_type))
859 region->fd = kvm_memfd_alloc(region->mmap_size,
860 src_type == VM_MEM_SRC_SHARED_HUGETLB);
862 region->mmap_start = mmap(NULL, region->mmap_size,
863 PROT_READ | PROT_WRITE,
864 vm_mem_backing_src_alias(src_type)->flag,
866 TEST_ASSERT(region->mmap_start != MAP_FAILED,
867 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
869 TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
870 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
871 "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
872 region->mmap_start, backing_src_pagesz);
874 /* Align host address */
875 region->host_mem = align_ptr_up(region->mmap_start, alignment);
877 /* As needed perform madvise */
878 if ((src_type == VM_MEM_SRC_ANONYMOUS ||
879 src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
880 ret = madvise(region->host_mem, npages * vm->page_size,
881 src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
882 TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
883 region->host_mem, npages * vm->page_size,
884 vm_mem_backing_src_alias(src_type)->name);
887 region->unused_phy_pages = sparsebit_alloc();
888 sparsebit_set_num(region->unused_phy_pages,
889 guest_paddr >> vm->page_shift, npages);
890 region->region.slot = slot;
891 region->region.flags = flags;
892 region->region.guest_phys_addr = guest_paddr;
893 region->region.memory_size = npages * vm->page_size;
894 region->region.userspace_addr = (uintptr_t) region->host_mem;
895 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
896 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
897 " rc: %i errno: %i\n"
898 " slot: %u flags: 0x%x\n"
899 " guest_phys_addr: 0x%lx size: 0x%lx",
900 ret, errno, slot, flags,
901 guest_paddr, (uint64_t) region->region.memory_size);
903 /* Add to quick lookup data structures */
904 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
905 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
906 hash_add(vm->regions.slot_hash, &region->slot_node, slot);
908 /* If shared memory, create an alias. */
909 if (region->fd >= 0) {
910 region->mmap_alias = mmap(NULL, region->mmap_size,
911 PROT_READ | PROT_WRITE,
912 vm_mem_backing_src_alias(src_type)->flag,
914 TEST_ASSERT(region->mmap_alias != MAP_FAILED,
915 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
917 /* Align host alias address */
918 region->host_alias = align_ptr_up(region->mmap_alias, alignment);
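/*
 * Usage sketch (illustrative): add a 16-page anonymous memslot with dirty
 * logging enabled. The GPA and slot number are arbitrary examples and must
 * not collide with existing regions.
 */
static inline void example_add_logged_slot(struct kvm_vm *vm)
{
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    0x10000000, 1, 16,
				    KVM_MEM_LOG_DIRTY_PAGES);
}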
926 * vm - Virtual Machine
927 * memslot - KVM memory slot ID
932 * Pointer to the memory region structure that describes the memory
933 * region using the KVM memory slot ID given by memslot. TEST_ASSERT
934 * failure on error (e.g. currently no memory region using memslot as a
935 * KVM memory slot ID).
937 struct userspace_mem_region *
938 memslot2region(struct kvm_vm *vm, uint32_t memslot)
940 struct userspace_mem_region *region;
942 hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
944 if (region->region.slot == memslot)
947 fprintf(stderr, "No mem region with the requested slot found,\n"
948 " requested slot: %u\n", memslot);
949 fputs("---- vm dump ----\n", stderr);
950 vm_dump(stderr, vm, 2);
951 TEST_FAIL("Mem region not found");
956 * VM Memory Region Flags Set
959 * vm - Virtual Machine
960 * flags - Flags to set for the memory region (e.g. KVM_MEM_LOG_DIRTY_PAGES)
966 * Sets the flags of the memory region specified by the value of slot,
967 * to the values given by flags.
969 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
972 struct userspace_mem_region *region;
974 region = memslot2region(vm, slot);
976 region->region.flags = flags;
978 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
980 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
981 " rc: %i errno: %i slot: %u flags: 0x%x",
982 ret, errno, slot, flags);
986 * VM Memory Region Move
989 * vm - Virtual Machine
990 * slot - Slot of the memory region to move
991 * new_gpa - Starting guest physical address
997 * Change the gpa of a memory region.
999 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
1001 struct userspace_mem_region *region;
1004 region = memslot2region(vm, slot);
1006 region->region.guest_phys_addr = new_gpa;
1008 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region->region);
1010 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
1011 "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
1012 ret, errno, slot, new_gpa);
1016 * VM Memory Region Delete
1019 * vm - Virtual Machine
1020 * slot - Slot of the memory region to delete
1026 * Delete a memory region.
1028 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
1030 __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
1033 /* Returns the size of a vCPU's kvm_run structure. */
1034 static int vcpu_mmap_sz(void)
1038 dev_fd = open_kvm_dev_path_or_exit();
1040 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
1041 TEST_ASSERT(ret >= sizeof(struct kvm_run),
1042 KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret));
1049 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id)
1051 struct kvm_vcpu *vcpu;
1053 list_for_each_entry(vcpu, &vm->vcpus, list) {
1054 if (vcpu->id == vcpu_id)
1062 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
1063 * No additional vCPU setup is done. Returns the vCPU.
1065 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
1067 struct kvm_vcpu *vcpu;
1069 /* Confirm a vcpu with the specified id doesn't already exist. */
1070 TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id);
1072 /* Allocate and initialize new vcpu structure. */
1073 vcpu = calloc(1, sizeof(*vcpu));
1074 TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
1078 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id);
1079 TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd));
1081 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size "
1082 "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
1083 vcpu_mmap_sz(), sizeof(*vcpu->run));
1084 vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
1085 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
1086 TEST_ASSERT(vcpu->run != MAP_FAILED,
1087 __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED));
1089 /* Add to linked-list of VCPUs. */
1090 list_add(&vcpu->list, &vm->vcpus);
1096 * VM Virtual Address Unused Gap
1099 * vm - Virtual Machine
1101 * vaddr_min - Minimum Virtual Address
1106 * Lowest virtual address at or above vaddr_min, with at least
1107 * sz unused bytes. TEST_ASSERT failure if no area of at least
1108 * size sz is available.
1110 * Within the VM specified by vm, locates the lowest starting virtual
1111 * address >= vaddr_min that has at least sz unallocated bytes. A
1112 * TEST_ASSERT failure occurs for invalid input or if no area of at
1113 * least sz unallocated bytes >= vaddr_min is available.
1115 static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
1116 vm_vaddr_t vaddr_min)
1118 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
1120 /* Determine lowest permitted virtual page index. */
1121 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
1122 if ((pgidx_start * vm->page_size) < vaddr_min)
1125 /* Loop over section with enough valid virtual page indexes. */
1126 if (!sparsebit_is_set_num(vm->vpages_valid,
1127 pgidx_start, pages))
1128 pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
1129 pgidx_start, pages);
1132 * Are there enough unused virtual pages available at
1133 * the currently proposed starting virtual page index?
1134 * If not, adjust the proposed starting index to the next
1135 * possible one.
1137 if (sparsebit_is_clear_num(vm->vpages_mapped,
1138 pgidx_start, pages))
1140 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
1141 pgidx_start, pages);
1142 if (pgidx_start == 0)
1146 * If needed, adjust the proposed starting virtual address
1147 * to the next range of valid virtual addresses.
1149 if (!sparsebit_is_set_num(vm->vpages_valid,
1150 pgidx_start, pages)) {
1151 pgidx_start = sparsebit_next_set_num(
1152 vm->vpages_valid, pgidx_start, pages);
1153 if (pgidx_start == 0)
1156 } while (pgidx_start != 0);
1159 TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
1165 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
1166 pgidx_start, pages),
1167 "Unexpected, invalid virtual page index range,\n"
1168 " pgidx_start: 0x%lx\n"
1170 pgidx_start, pages);
1171 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
1172 pgidx_start, pages),
1173 "Unexpected, pages already mapped,\n"
1174 " pgidx_start: 0x%lx\n"
1176 pgidx_start, pages);
1178 return pgidx_start * vm->page_size;
1182 * VM Virtual Address Allocate
1185 * vm - Virtual Machine
1186 * sz - Size in bytes
1187 * vaddr_min - Minimum starting virtual address
1192 * Starting guest virtual address
1194 * Allocates at least sz bytes within the virtual address space of the vm
1195 * given by vm. The allocated bytes are mapped to a virtual address >=
1196 * the address given by vaddr_min. Note that each allocation uses
1197 * a unique set of pages, with the minimum real allocation being at least
1198 * a page.
1200 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
1202 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
1205 vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
1206 KVM_UTIL_MIN_PFN * vm->page_size, 0);
1209 * Find an unused range of virtual page addresses of at least
1212 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
1214 /* Map the virtual pages. */
1215 for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
1216 pages--, vaddr += vm->page_size, paddr += vm->page_size) {
1218 virt_pg_map(vm, vaddr, paddr);
1220 sparsebit_set(vm->vpages_mapped,
1221 vaddr >> vm->page_shift);
1228 * VM Virtual Address Allocate Pages
1231 * vm - Virtual Machine
1236 * Starting guest virtual address
1238 * Allocates at least N system pages worth of bytes within the virtual address
1241 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
1243 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
1247 * VM Virtual Address Allocate Page
1250 * vm - Virtual Machine
1255 * Starting guest virtual address
1257 * Allocates at least one system page worth of bytes within the virtual address
1260 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
1262 return vm_vaddr_alloc_pages(vm, 1);
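/*
 * Usage sketch (illustrative): allocate one guest page and zero it through
 * its host mapping before handing the GVA to guest code.
 */
static inline vm_vaddr_t example_alloc_zeroed_page(struct kvm_vm *vm)
{
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);

	memset(addr_gva2hva(vm, gva), 0, vm->page_size);
	return gva;
}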
1266 * Map a range of VM virtual address to the VM's physical address
1269 * vm - Virtual Machine
1270 * vaddr - Virtual address to map
1271 * paddr - VM Physical Address
1272 * npages - The number of pages to map
1278 * Within the VM given by @vm, creates a virtual translation for
1279 * @npages starting at @vaddr to the page range starting at @paddr.
1281 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
1282 unsigned int npages)
1284 size_t page_size = vm->page_size;
1285 size_t size = npages * page_size;
1287 TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
1288 TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
1291 virt_pg_map(vm, vaddr, paddr);
1298 * Address VM Physical to Host Virtual
1301 * vm - Virtual Machine
1302 * gpa - VM physical address
1307 * Equivalent host virtual address
1309 * Locates the memory region containing the VM physical address given
1310 * by gpa, within the VM given by vm. When found, the host virtual
1311 * address providing the memory to the vm physical address is returned.
1312 * A TEST_ASSERT failure occurs if no region containing gpa exists.
1314 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1316 struct userspace_mem_region *region;
1318 region = userspace_mem_region_find(vm, gpa, gpa);
1320 TEST_FAIL("No vm physical memory at 0x%lx", gpa);
1324 return (void *)((uintptr_t)region->host_mem
1325 + (gpa - region->region.guest_phys_addr));
1329 * Address Host Virtual to VM Physical
1332 * vm - Virtual Machine
1333 * hva - Host virtual address
1338 * Equivalent VM physical address
1340 * Locates the memory region containing the host virtual address given
1341 * by hva, within the VM given by vm. When found, the equivalent
1342 * VM physical address is returned. A TEST_ASSERT failure occurs if no
1343 * region containing hva exists.
1345 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1347 struct rb_node *node;
1349 for (node = vm->regions.hva_tree.rb_node; node; ) {
1350 struct userspace_mem_region *region =
1351 container_of(node, struct userspace_mem_region, hva_node);
1353 if (hva >= region->host_mem) {
1354 if (hva <= (region->host_mem
1355 + region->region.memory_size - 1))
1356 return (vm_paddr_t)((uintptr_t)
1357 region->region.guest_phys_addr
1358 + (hva - (uintptr_t)region->host_mem));
1360 node = node->rb_right;
1362 node = node->rb_left;
1365 TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
1370 * Address VM physical to Host Virtual *alias*.
1373 * vm - Virtual Machine
1374 * gpa - VM physical address
1379 * Equivalent address within the host virtual *alias* area, or NULL
1380 * (without failing the test) if the guest memory is not shared (so
1383 * Create a writable, shared virtual=>physical alias for the specific GPA.
1384 * The primary use case is to allow the host selftest to manipulate guest
1385 * memory without mapping said memory in the guest's address space. And, for
1386 * userfaultfd-based demand paging, to do so without triggering userfaults.
1388 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
1390 struct userspace_mem_region *region;
1393 region = userspace_mem_region_find(vm, gpa, gpa);
1397 if (!region->host_alias)
1400 offset = gpa - region->region.guest_phys_addr;
1401 return (void *) ((uintptr_t) region->host_alias + offset);
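/*
 * Usage sketch (illustrative): dirty a guest page through the shared alias,
 * e.g. while the primary mapping is registered with userfaultfd.
 */
static inline void example_write_via_alias(struct kvm_vm *vm, vm_paddr_t gpa,
					   uint8_t val)
{
	uint8_t *hva = addr_gpa2alias(vm, gpa);

	TEST_ASSERT(hva, "GPA 0x%lx is not backed by shared memory", gpa);
	*hva = val;
}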
1404 /* Create an interrupt controller chip for the specified VM. */
1405 void vm_create_irqchip(struct kvm_vm *vm)
1407 vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL);
1409 vm->has_irqchip = true;
1412 int _vcpu_run(struct kvm_vcpu *vcpu)
1417 rc = __vcpu_run(vcpu);
1418 } while (rc == -1 && errno == EINTR);
1420 assert_on_unhandled_exception(vcpu);
1426 * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR.
1427 * Assert if KVM returns an error (other than -EINTR).
1429 void vcpu_run(struct kvm_vcpu *vcpu)
1431 int ret = _vcpu_run(vcpu);
1433 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret));
1436 void vcpu_run_complete_io(struct kvm_vcpu *vcpu)
1440 vcpu->run->immediate_exit = 1;
1441 ret = __vcpu_run(vcpu);
1442 vcpu->run->immediate_exit = 0;
1444 TEST_ASSERT(ret == -1 && errno == EINTR,
1445 "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
1450 * Get the list of guest registers which are supported for
1451 * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer;
1452 * it is the caller's responsibility to free the list.
1454 struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu)
1456 struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
1459 ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &reg_list_n);
1460 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
1462 reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
1463 reg_list->n = reg_list_n.n;
1464 vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list);
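/*
 * Usage sketch (illustrative): the caller owns the returned list and must
 * free it.
 */
static inline uint64_t example_nr_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_reg_list *list = vcpu_get_reg_list(vcpu);
	uint64_t n = list->n;

	free(list);
	return n;
}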
1468 void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu)
1470 uint32_t page_size = vcpu->vm->page_size;
1471 uint32_t size = vcpu->vm->dirty_ring_size;
1473 TEST_ASSERT(size > 0, "Should enable dirty ring first");
1475 if (!vcpu->dirty_gfns) {
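/*
 * The first two mmap() attempts are expected to fail: KVM must refuse
 * private and executable mappings of the dirty ring, hence the asserts
 * below that addr == MAP_FAILED. Only the shared read-write mapping
 * may succeed.
 */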
1478 addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd,
1479 page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1480 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
1482 addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd,
1483 page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1484 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
1486 addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd,
1487 page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1488 TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
1490 vcpu->dirty_gfns = addr;
1491 vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
1494 return vcpu->dirty_gfns;
1501 int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
1503 struct kvm_device_attr attribute = {
1509 return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
1512 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type)
1514 struct kvm_create_device create_dev = {
1516 .flags = KVM_CREATE_DEVICE_TEST,
1519 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
1522 int __kvm_create_device(struct kvm_vm *vm, uint64_t type)
1524 struct kvm_create_device create_dev = {
1531 err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev);
1532 TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value");
1533 return err ? : create_dev.fd;
1536 int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val)
1538 struct kvm_device_attr kvmattr = {
1542 .addr = (uintptr_t)val,
1545 return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr);
1548 int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val)
1550 struct kvm_device_attr kvmattr = {
1554 .addr = (uintptr_t)val,
1557 return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr);
1561 * IRQ related functions.
1564 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
1566 struct kvm_irq_level irq_level = {
1571 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
1574 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
1576 int ret = _kvm_irq_line(vm, irq, level);
1578 TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret));
1581 struct kvm_irq_routing *kvm_gsi_routing_create(void)
1583 struct kvm_irq_routing *routing;
1586 size = sizeof(struct kvm_irq_routing);
1587 /* Allocate space for the max number of entries: this wastes 196 KBs. */
1588 size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
1589 routing = calloc(1, size);
1595 void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
1596 uint32_t gsi, uint32_t pin)
1601 assert(routing->nr < KVM_MAX_IRQ_ROUTES);
1604 routing->entries[i].gsi = gsi;
1605 routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
1606 routing->entries[i].flags = 0;
1607 routing->entries[i].u.irqchip.irqchip = 0;
1608 routing->entries[i].u.irqchip.pin = pin;
1612 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
1617 ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing);
1623 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
1627 ret = _kvm_gsi_routing_write(vm, routing);
1628 TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret));
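/*
 * Usage sketch (illustrative): build and commit a small routing table. GSI
 * and pin numbers are arbitrary examples; note that the write helpers take
 * ownership of (free) the table.
 */
static inline void example_route_two_gsis(struct kvm_vm *vm)
{
	struct kvm_irq_routing *routing = kvm_gsi_routing_create();

	kvm_gsi_routing_irqchip_add(routing, 0, 0);
	kvm_gsi_routing_irqchip_add(routing, 1, 1);
	kvm_gsi_routing_write(vm, routing);
}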
1635 * vm - Virtual Machine
1636 * indent - Left margin indent amount
1639 * stream - Output FILE stream
1643 * Dumps the current state of the VM given by vm, to the FILE stream
1644 * given by stream.
1646 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1649 struct userspace_mem_region *region;
1650 struct kvm_vcpu *vcpu;
1652 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
1653 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
1654 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
1655 fprintf(stream, "%*sMem Regions:\n", indent, "");
1656 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
1657 fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
1658 "host_virt: %p\n", indent + 2, "",
1659 (uint64_t) region->region.guest_phys_addr,
1660 (uint64_t) region->region.memory_size,
1662 fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
1663 sparsebit_dump(stream, region->unused_phy_pages, 0);
1665 fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
1666 sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
1667 fprintf(stream, "%*spgd_created: %u\n", indent, "",
1669 if (vm->pgd_created) {
1670 fprintf(stream, "%*sVirtual Translation Tables:\n",
1672 virt_dump(stream, vm, indent + 4);
1674 fprintf(stream, "%*sVCPUs:\n", indent, "");
1676 list_for_each_entry(vcpu, &vm->vcpus, list)
1677 vcpu_dump(stream, vcpu, indent + 2);
1680 /* Known KVM exit reasons */
1681 static struct exit_reason {
1682 unsigned int reason;
1684 } exit_reasons_known[] = {
1685 {KVM_EXIT_UNKNOWN, "UNKNOWN"},
1686 {KVM_EXIT_EXCEPTION, "EXCEPTION"},
1687 {KVM_EXIT_IO, "IO"},
1688 {KVM_EXIT_HYPERCALL, "HYPERCALL"},
1689 {KVM_EXIT_DEBUG, "DEBUG"},
1690 {KVM_EXIT_HLT, "HLT"},
1691 {KVM_EXIT_MMIO, "MMIO"},
1692 {KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
1693 {KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
1694 {KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
1695 {KVM_EXIT_INTR, "INTR"},
1696 {KVM_EXIT_SET_TPR, "SET_TPR"},
1697 {KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
1698 {KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
1699 {KVM_EXIT_S390_RESET, "S390_RESET"},
1700 {KVM_EXIT_DCR, "DCR"},
1701 {KVM_EXIT_NMI, "NMI"},
1702 {KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
1703 {KVM_EXIT_OSI, "OSI"},
1704 {KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
1705 {KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
1706 {KVM_EXIT_X86_RDMSR, "RDMSR"},
1707 {KVM_EXIT_X86_WRMSR, "WRMSR"},
1708 {KVM_EXIT_XEN, "XEN"},
1709 #ifdef KVM_EXIT_MEMORY_NOT_PRESENT
1710 {KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
1715 * Exit Reason String
1718 * exit_reason - Exit reason
1723 * Constant string pointer describing the exit reason.
1725 * Locates and returns a constant string that describes the KVM exit
1726 * reason given by exit_reason. If no such string is found, a constant
1727 * string of "Unknown" is returned.
1729 const char *exit_reason_str(unsigned int exit_reason)
1733 for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
1734 if (exit_reason == exit_reasons_known[n1].reason)
1735 return exit_reasons_known[n1].name;
1742 * Physical Contiguous Page Allocator
1745 * vm - Virtual Machine
1746 * num - number of pages
1747 * paddr_min - Physical address minimum
1748 * memslot - Memory region to allocate page from
1753 * Starting physical address
1755 * Within the VM specified by vm, locates a range of available physical
1756 * pages at or above paddr_min. If found, the pages are marked as in use
1757 * and their base address is returned. A TEST_ASSERT failure occurs if
1758 * not enough pages are available at or above paddr_min.
1760 vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
1761 vm_paddr_t paddr_min, uint32_t memslot)
1763 struct userspace_mem_region *region;
1764 sparsebit_idx_t pg, base;
1766 TEST_ASSERT(num > 0, "Must allocate at least one page");
1768 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
1769 "not divisible by page size.\n"
1770 " paddr_min: 0x%lx page_size: 0x%x",
1771 paddr_min, vm->page_size);
1773 region = memslot2region(vm, memslot);
1774 base = pg = paddr_min >> vm->page_shift;
1777 for (; pg < base + num; ++pg) {
1778 if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
1779 base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
1783 } while (pg && pg != base + num);
1786 fprintf(stderr, "No guest physical page available, "
1787 "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
1788 paddr_min, vm->page_size, memslot);
1789 fputs("---- vm dump ----\n", stderr);
1790 vm_dump(stderr, vm, 2);
1794 for (pg = base; pg < base + num; ++pg)
1795 sparsebit_clear(region->unused_phy_pages, pg);
1797 return base * vm->page_size;
1800 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
1803 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
1806 /* Arbitrary minimum physical address used for virtual translation tables. */
1807 #define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
1809 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
1811 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
1815 * Address Guest Virtual to Host Virtual
1818 * vm - Virtual Machine
1819 * gva - VM virtual address
1824 * Equivalent host virtual address
1826 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
1828 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
1831 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm)
1833 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
1836 static unsigned int vm_calc_num_pages(unsigned int num_pages,
1837 unsigned int page_shift,
1838 unsigned int new_page_shift,
1841 unsigned int n = 1 << (new_page_shift - page_shift);
1843 if (page_shift >= new_page_shift)
1844 return num_pages * (1 << (page_shift - new_page_shift));
1846 return num_pages / n + !!(ceil && num_pages % n);
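/*
 * Worked example: 5 guest pages at page_shift = 16 (64K) become
 * 5 * (1 << (16 - 12)) = 80 host pages at page_shift = 12 (4K); converting
 * 80 host 4K pages back yields 80 / 16 = 5 guest 64K pages, with ceil only
 * relevant when the division has a remainder.
 */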
1849 static inline int getpageshift(void)
1851 return __builtin_ffs(getpagesize()) - 1;
1855 vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
1857 return vm_calc_num_pages(num_guest_pages,
1858 vm_guest_mode_params[mode].page_shift,
1859 getpageshift(), true);
1863 vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
1865 return vm_calc_num_pages(num_host_pages, getpageshift(),
1866 vm_guest_mode_params[mode].page_shift, false);
1869 unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
1872 n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
1873 return vm_adjust_num_guest_pages(mode, n);
1877 * Read binary stats descriptors
1880 * stats_fd - the file descriptor for the binary stats file from which to read
1881 * header - the binary stats metadata header corresponding to the given FD
1886 * A pointer to a newly allocated series of stat descriptors.
1887 * Caller is responsible for freeing the returned kvm_stats_desc.
1889 * Read the stats descriptors from the binary stats interface.
1891 struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
1892 struct kvm_stats_header *header)
1894 struct kvm_stats_desc *stats_desc;
1895 ssize_t desc_size, total_size, ret;
1897 desc_size = get_stats_descriptor_size(header);
1898 total_size = header->num_desc * desc_size;
1900 stats_desc = calloc(header->num_desc, desc_size);
1901 TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors");
1903 ret = pread(stats_fd, stats_desc, total_size, header->desc_offset);
1904 TEST_ASSERT(ret == total_size, "Read KVM stats descriptors");
1910 * Read stat data for a particular stat
1913 * stats_fd - the file descriptor for the binary stats file from which to read
1914 * header - the binary stats metadata header corresponding to the given FD
1915 * desc - the binary stat metadata for the particular stat to be read
1916 * max_elements - the maximum number of 8-byte values to read into data
1919 * data - the buffer into which stat data should be read
1921 * Read the data values of a specified stat from the binary stats interface.
1923 void read_stat_data(int stats_fd, struct kvm_stats_header *header,
1924 struct kvm_stats_desc *desc, uint64_t *data,
1925 size_t max_elements)
1927 size_t nr_elements = min_t(ssize_t, desc->size, max_elements);
1928 size_t size = nr_elements * sizeof(*data);
1931 TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name);
1932 TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name);
1934 ret = pread(stats_fd, data, size,
1935 header->data_offset + desc->offset);
1937 TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)",
1938 desc->name, errno, strerror(errno));
1939 TEST_ASSERT(ret == size,
1940 "pread() on stat '%s' read %ld bytes, wanted %lu bytes",
1941 desc->name, ret, size);
1945 * Read the data of the named stat
1948 * vm - the VM for which the stat should be read
1949 * stat_name - the name of the stat to read
1950 * max_elements - the maximum number of 8-byte values to read into data
1953 * data - the buffer into which stat data should be read
1955 * Read the data values of a specified stat from the binary stats interface.
1957 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
1958 size_t max_elements)
1960 struct kvm_stats_desc *desc;
1964 if (!vm->stats_fd) {
1965 vm->stats_fd = vm_get_stats_fd(vm);
1966 read_stats_header(vm->stats_fd, &vm->stats_header);
1967 vm->stats_desc = read_stats_descriptors(vm->stats_fd,
1971 size_desc = get_stats_descriptor_size(&vm->stats_header);
1973 for (i = 0; i < vm->stats_header.num_desc; ++i) {
1974 desc = (void *)vm->stats_desc + (i * size_desc);
1976 if (strcmp(desc->name, stat_name))
1979 read_stat_data(vm->stats_fd, &vm->stats_header, desc,
1980 data, max_elements);
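/*
 * Usage sketch (illustrative; the stat name is an assumption and may vary
 * by architecture): read a single scalar VM stat by name.
 */
static inline uint64_t example_read_scalar_stat(struct kvm_vm *vm)
{
	uint64_t val;

	__vm_get_stat(vm, "remote_tlb_flush", &val, 1);
	return val;
}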