/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util_base.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_BASE_H
#define SELFTEST_KVM_UTIL_BASE_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"

#include <asm/atomic.h>

#include <sys/ioctl.h>

#include "sparsebit.h"

/*
 * Provide a version of static_assert() that is guaranteed to have an optional
 * message param.  If _ISOC11_SOURCE is defined, glibc (/usr/include/assert.h)
 * #undefs and #defines static_assert() as a direct alias to _Static_assert(),
 * i.e. effectively makes the message mandatory.  Many KVM selftests #define
 * _GNU_SOURCE for various reasons, and _GNU_SOURCE implies _ISOC11_SOURCE.  As
 * a result, static_assert() behavior is non-deterministic and may or may not
 * require a message depending on #include order.
 */
#define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
#define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)

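/*
 * Example (illustrative): kvm_static_assert() accepts an optional message
 * regardless of #include order; without one, the stringified expression
 * (#expr) is used as the message:
 *
 *      kvm_static_assert(sizeof(uint64_t) == 8);
 *      kvm_static_assert(sizeof(uint64_t) == 8, "uint64_t must be 8 bytes");
 */
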
#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

struct userspace_mem_region {
        struct kvm_userspace_memory_region region;
        struct sparsebit *unused_phy_pages;
        enum vm_mem_backing_src_type backing_src_type;
        struct rb_node gpa_node;
        struct rb_node hva_node;
        struct hlist_node slot_node;
};

struct kvm_vcpu {
        struct list_head list;
        uint32_t id;
        int fd;
        struct kvm_cpuid2 *cpuid;
        struct kvm_dirty_gfn *dirty_gfns;
        uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
        struct rb_root gpa_tree;
        struct rb_root hva_tree;
        DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
        MEM_REGION_CODE,
        MEM_REGION_DATA,
        MEM_REGION_PT,
        MEM_REGION_TEST_DATA,
        NR_MEM_REGIONS,
};

struct kvm_vm {
        int fd;
        unsigned int pgtable_levels;
        unsigned int page_size;
        unsigned int page_shift;
        struct list_head vcpus;
        struct userspace_mem_regions regions;
        struct sparsebit *vpages_valid;
        struct sparsebit *vpages_mapped;
        vm_paddr_t ucall_mmio_addr;
        uint32_t dirty_ring_size;

        /* Cache of information for binary stats interface */
        int stats_fd;
        struct kvm_stats_header stats_header;
        struct kvm_stats_desc *stats_desc;

        /*
         * KVM region slots.  These are the default memslots used by page
         * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
         * memslot.
         */
        uint32_t memslots[NR_MEM_REGIONS];
};

#define kvm_for_each_vcpu(vm, i, vcpu)                          \
        for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)         \
                if (!((vcpu) = vm->vcpus[i]))                   \
                        continue;                               \
                else

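/*
 * Example (illustrative): iterate over every vCPU that has been added to
 * the VM, skipping unused vCPU IDs; the trailing "else" binds the loop
 * body to present vCPUs only:
 *
 *      struct kvm_vcpu *vcpu;
 *      int i;
 *
 *      kvm_for_each_vcpu(vm, i, vcpu)
 *              vcpu_run(vcpu);
 */
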
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
                                                             enum kvm_mem_region_type type)
{
        assert(type < NR_MEM_REGIONS);
        return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR              0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR  0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN   0xab6000
#define DEFAULT_STACK_PGS               5

enum vm_guest_mode {
        VM_MODE_P52V48_4K,
        VM_MODE_P52V48_64K,
        VM_MODE_P48V48_4K,
        VM_MODE_P48V48_16K,
        VM_MODE_P48V48_64K,
        VM_MODE_P40V48_4K,
        VM_MODE_P40V48_16K,
        VM_MODE_P40V48_64K,
        VM_MODE_PXXV48_4K,      /* For 48bits VA but ANY bits PA */
        VM_MODE_P47V64_4K,
        VM_MODE_P44V64_4K,
        VM_MODE_P36V48_4K,
        VM_MODE_P36V48_16K,
        VM_MODE_P36V48_64K,
        VM_MODE_P36V47_16K,
        NUM_VM_MODES,
};

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT                 vm_mode_default
#define MIN_PAGE_SHIFT                  12U
#define ptes_per_page(page_size)        ((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT                 VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT                  12U
#define ptes_per_page(page_size)        ((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT                 VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT                  12U
#define ptes_per_page(page_size)        ((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT                 VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT                  12U
#define ptes_per_page(page_size)        ((page_size) / 8)

#endif

#define MIN_PAGE_SIZE           (1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE       ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
        unsigned int pa_bits;
        unsigned int va_bits;
        unsigned int page_size;
        unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
        return kvm_check_cap(cap);
}

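/*
 * Example (illustrative): gate a test on a KVM capability, e.g. via
 * TEST_REQUIRE() from test_util.h (the specific capability here is just
 * an example):
 *
 *      TEST_REQUIRE(kvm_has_cap(KVM_CAP_DIRTY_LOG_RING));
 */
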
#define __KVM_SYSCALL_ERROR(_name, _ret) \
        "%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

#define __KVM_IOCTL_ERROR(_name, _ret)  __KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)

#define kvm_do_ioctl(fd, cmd, arg)                                              \
({                                                                              \
        kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));   \
        ioctl(fd, cmd, arg);                                                    \
})

#define __kvm_ioctl(kvm_fd, cmd, arg)                           \
        kvm_do_ioctl(kvm_fd, cmd, arg)

#define _kvm_ioctl(kvm_fd, cmd, name, arg)                      \
({                                                              \
        int ret = __kvm_ioctl(kvm_fd, cmd, arg);                \
                                                                \
        TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));        \
})

#define kvm_ioctl(kvm_fd, cmd, arg) \
        _kvm_ioctl(kvm_fd, cmd, #cmd, arg)

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)                                \
({                                                              \
        static_assert_is_vm(vm);                                \
        kvm_do_ioctl((vm)->fd, cmd, arg);                       \
})

#define _vm_ioctl(vm, cmd, name, arg)                           \
({                                                              \
        int ret = __vm_ioctl(vm, cmd, arg);                     \
                                                                \
        TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));        \
})

#define vm_ioctl(vm, cmd, arg) \
        _vm_ioctl(vm, cmd, #cmd, arg)

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)                            \
({                                                              \
        static_assert_is_vcpu(vcpu);                            \
        kvm_do_ioctl((vcpu)->fd, cmd, arg);                     \
})

#define _vcpu_ioctl(vcpu, cmd, name, arg)                       \
({                                                              \
        int ret = __vcpu_ioctl(vcpu, cmd, arg);                 \
                                                                \
        TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));        \
})

#define vcpu_ioctl(vcpu, cmd, arg) \
        _vcpu_ioctl(vcpu, cmd, #cmd, arg)

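/*
 * Example (illustrative): the plain wrappers assert success, while the
 * double-underscore variants return the raw ioctl() result for tests that
 * expect or tolerate failure:
 *
 *      struct kvm_regs regs;
 *      int r;
 *
 *      vcpu_ioctl(vcpu, KVM_GET_REGS, &regs);          // asserts on error
 *      r = __vcpu_ioctl(vcpu, KVM_GET_REGS, &regs);    // caller checks r
 */
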
/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
        int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

        TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
        return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
        struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

        return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
        struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

        vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
                       size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
        struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

        vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
                                          uint64_t first_page, uint32_t num_pages)
{
        struct kvm_clear_dirty_log args = {
                .dirty_bitmap = log,
                .slot = slot,
                .first_page = first_page,
                .num_pages = num_pages
        };

        vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

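/*
 * Example (illustrative): harvest and then reset dirty state for a memslot.
 * The bitmap must be sized for the slot, and clearing requires the host to
 * support manual dirty log protection; TEST_MEM_SLOT, bitmap and num_pages
 * are hypothetical names for this sketch:
 *
 *      kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT, bitmap);
 *      ... process the dirty pages ...
 *      kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT, bitmap, 0, num_pages);
 */
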
static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
        return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
        int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

        TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
        return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
        ssize_t ret;

        ret = pread(stats_fd, header, sizeof(*header), 0);
        TEST_ASSERT(ret == sizeof(*header),
                    "Failed to read '%lu' header bytes, ret = '%ld'",
                    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
                                              struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
        /*
         * The base size of the descriptor is defined by KVM's ABI, but the
         * size of the name field is variable, as far as KVM's ABI is
         * concerned.  For a given instance of KVM, the name field is the same
         * size for all stats and is provided in the overall stats header.
         */
        return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
                                                          int index,
                                                          struct kvm_stats_header *header)
{
        /*
         * Note, size_desc includes the size of the name field, which is
         * variable, i.e. this is NOT equivalent to &stats[index].
         */
        return (void *)stats + index * get_stats_descriptor_size(header);
}

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
                    struct kvm_stats_desc *desc, uint64_t *data,
                    size_t max_elements);

void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
                   size_t max_elements);

static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
{
        uint64_t data;

        __vm_get_stat(vm, stat_name, &data, 1);
        return data;
}

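/*
 * Example (illustrative): read a single binary stat by name; the name must
 * match an entry in the VM's stats descriptors (the x86 VM stat
 * "nx_lpage_splits" is used here as an assumption):
 *
 *      uint64_t splits = vm_get_stat(vm, "nx_lpage_splits");
 */
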
void vm_create_irqchip(struct kvm_vm *vm);

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
                               uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
                                uint64_t gpa, uint64_t size, void *hva);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
        enum vm_mem_backing_src_type src_type,
        uint64_t guest_paddr, uint32_t slot, uint64_t npages,
        uint32_t flags);

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);

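/*
 * Example (illustrative): back 512 pages of guest memory at 1GiB with
 * anonymous memory in memslot 1; the GPA and slot number are arbitrary
 * values chosen for this sketch:
 *
 *      vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *                                  1ul << 30, 1, 512, 0);
 */
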
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
                            enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
                                 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
              unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
        return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
                                   uint64_t arg0)
{
        struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

        vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *debug)
{
        vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
{
        vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}

static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
                                     struct kvm_mp_state *mp_state)
{
        vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}

static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}

static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}

static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}

static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}

static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
        struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

        return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}

static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
        struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

        return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
        struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

        vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
        struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

        vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

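/*
 * Example (illustrative): read and write a register via the ONE_REG API
 * using an arch-specific register ID; aarch64's ARM64_CORE_REG() helper is
 * assumed here, see the arch headers for real register IDs:
 *
 *      uint64_t x0;
 *
 *      vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), &x0);
 *      vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[0]), x0 + 1);
 */
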
#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
                                   struct kvm_vcpu_events *events)
{
        vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}

static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
                                   struct kvm_vcpu_events *events)
{
        vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif

#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
                                         struct kvm_nested_state *state)
{
        vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}

static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
                                          struct kvm_nested_state *state)
{
        return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
                                         struct kvm_nested_state *state)
{
        vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif

static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
        int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

        TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
        return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
        int ret = __kvm_has_device_attr(dev_fd, group, attr);

        TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
                                       uint64_t attr, void *val)
{
        int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

        TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
                                       uint64_t attr, void *val)
{
        int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

        TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
                                         uint64_t attr)
{
        return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
                                        uint64_t attr)
{
        kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
                                         uint64_t attr, void *val)
{
        return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
                                        uint64_t attr, void *val)
{
        kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
                                         uint64_t attr, void *val)
{
        return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
                                        uint64_t attr, void *val)
{
        kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
        int fd = __kvm_create_device(vm, type);

        TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
        return fd;
}

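/*
 * Example (illustrative): create an in-kernel device; the device type must
 * be supported by the host, e.g. an arm64 vGICv3 (the type shown is an
 * assumption for this sketch):
 *
 *      int gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
 */
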
void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - vCPU whose entry-point arguments are to be set
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args.  Each of the variable args is expected to be of type uint64_t.
 * The maximum value of @num is architecture-specific.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);

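/*
 * Example (illustrative): pass two arguments to the guest's entry point;
 * guest_code's prototype is an assumption for this sketch:
 *
 *      // void guest_code(uint64_t gpa, uint64_t nr_pages);
 *      vcpu_args_set(vcpu, 2, test_gpa, nr_pages);
 */
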
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES 4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
                                 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

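/*
 * Example (illustrative): build a GSI routing table and commit it to the
 * VM; the GSI and pin values here are arbitrary:
 *
 *      struct kvm_irq_routing *routing = kvm_gsi_routing_create();
 *
 *      kvm_gsi_routing_irqchip_add(routing, 0, 0);
 *      kvm_gsi_routing_irqchip_add(routing, 1, 1);
 *      kvm_gsi_routing_write(vm, routing);
 */
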
const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
                             uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
                              vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(enum vm_guest_mode mode);
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
                           uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
        return ____vm_create(VM_MODE_DEFAULT);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
        return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
                                      uint64_t extra_mem_pages,
                                      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
                                                  void *guest_code,
                                                  struct kvm_vcpu *vcpus[])
{
        return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
                                      guest_code, vcpus);
}

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
                                         uint64_t extra_mem_pages,
                                         void *guest_code);

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
                                                     void *guest_code)
{
        return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}

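/*
 * Example (illustrative): typical single-vCPU test setup and teardown;
 * guest_main is a hypothetical guest entry point:
 *
 *      struct kvm_vcpu *vcpu;
 *      struct kvm_vm *vm;
 *
 *      vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 *      vcpu_run(vcpu);
 *      kvm_vm_free(vm);
 */
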
struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
                            int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);

static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
        unsigned int n;

        n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
        /* s390 requires 1M aligned guest sizes */
        n = (n + 255) & ~255;
#endif
        return n;
}

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
                                 uint64_t end);

#define sync_global_to_guest(vm, g) ({                          \
        typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));     \
        memcpy(_p, &(g), sizeof(g));                            \
})

#define sync_global_from_guest(vm, g) ({                        \
        typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));     \
        memcpy(&(g), _p, sizeof(g));                            \
})

/*
 * Write a global value, but only in the VM's (guest's) domain.  Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({                       \
        typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));     \
        typeof(g) _val = val;                                   \
                                                                \
        memcpy(_p, &(_val), sizeof(g));                         \
})

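/*
 * Example (illustrative): propagate a host-side global into the guest's
 * copy before running, and read it back afterwards; guest_flag is a
 * hypothetical global shared with the guest code:
 *
 *      guest_flag = 1;
 *      sync_global_to_guest(vm, guest_flag);
 *      vcpu_run(vcpu);
 *      sync_global_from_guest(vm, guest_flag);
 */
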
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
                    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
                             uint8_t indent)
{
        vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                                  void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                                           void *guest_code)
{
        return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
                                                uint32_t vcpu_id)
{
        return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
        virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
        virt_arch_pg_map(vm, vaddr, paddr);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
        return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
        return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

#endif /* SELFTEST_KVM_UTIL_BASE_H */