check_supported(c);
- vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+ vm = vm_create_barebones();
prepare_vcpu_init(c, &init);
vm_vcpu_add(vm, 0);
aarch64_vcpu_setup(vm, 0, &init);
struct kvm_vm *vm;
int ret;
- vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+ vm = vm_create_barebones();
vm_vcpu_add(vm, 0);
ret = __vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
struct kvm_vm *vm;
int ret;
- vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+ vm = vm_create_barebones();
vm_vcpu_add(vm, 0);
vm_vcpu_add(vm, 1);
}
/* Get the preferred target type and copy that to init2 for later use */
- vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+ vm = vm_create_barebones();
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
kvm_vm_free(vm);
init2 = init1;
const char *vm_guest_mode_string(uint32_t i);
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint64_t phy_pages);
-struct kvm_vm *vm_create(uint64_t phy_pages);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
+static inline struct kvm_vm *vm_create_barebones(void)
+{
+ return __vm_create(VM_MODE_DEFAULT, 0);
+}
+
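
A minimal usage sketch for the new helper, assuming only calls already visible in this patch (vm_create_barebones(), vm_vcpu_add(), kvm_vm_free() are taken from the call sites above):

	struct kvm_vm *vm;

	/* No guest memory or guest code is needed, just the VM fd. */
	vm = vm_create_barebones();
	vm_vcpu_add(vm, 0);
	/* ... issue ioctls that don't require actually running the vCPU ... */
	kvm_vm_free(vm);
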
/*
* Create a VM with reasonable defaults
*
vms = malloc(sizeof(vms[0]) * max_vm);
TEST_ASSERT(vms, "Allocate memory for storing VM pointers");
for (i = 0; i < max_vm; ++i) {
- vms[i] = vm_create(DEFAULT_GUEST_PHY_PAGES);
+ vms[i] = vm_create_barebones();
for (j = 0; j < max_vcpu; ++j)
vm_vcpu_add(vms[i], j);
}
pr_info("Testing creating %d vCPUs, with IDs %d...%d.\n",
num_vcpus, first_vcpu_id, first_vcpu_id + num_vcpus - 1);
- vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+ vm = vm_create_barebones();
for (i = first_vcpu_id; i < first_vcpu_id + num_vcpus; i++)
/* This asserts that the vCPU was created. */
return vm;
}
-/*
- * VM Create
- *
- * Input Args:
- * phy_pages - Physical memory pages
- *
- * Output Args: None
- *
- * Return:
- * Pointer to opaque structure that describes the created VM.
- *
- * Creates a VM with the default physical/virtual address widths and page size.
- * When phy_pages is non-zero, a memory region of phy_pages physical pages
- * is created and mapped starting at guest physical address 0.
- */
-struct kvm_vm *vm_create(uint64_t phy_pages)
-{
- return __vm_create(VM_MODE_DEFAULT, phy_pages);
-}
-
struct kvm_vm *vm_create_without_vcpus(enum vm_guest_mode mode, uint64_t pages)
{
struct kvm_vm *vm;
* (without failing the test) if the guest memory is not shared (so
* no alias exists).
*
- * When vm_create() and related functions are called with a shared memory
- * src_type, we also create a writable, shared alias mapping of the
- * underlying guest memory. This allows the host to manipulate guest memory
- * without mapping that memory in the guest's address space. And, for
- * userfaultfd-based demand paging, we can do so without triggering userfaults.
+ * Create a writable, shared virtual=>physical alias for the specific GPA.
+ * The primary use case is to allow the host selftest to manipulate guest
+ * memory without mapping said memory in the guest's address space. And, for
+ * userfaultfd-based demand paging, to do so without triggering userfaults.
*/
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
{
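
A hedged sketch of how a caller might use the alias; the GPA and size below are illustrative values, not taken from this patch:

	/* Look up the host-side alias for a guest physical address. */
	uint8_t *alias = addr_gpa2alias(vm, 0x10000 /* hypothetical GPA */);

	/*
	 * Writing through the alias modifies guest memory without touching
	 * the guest's own mapping, so no userfaults are triggered.
	 */
	memset(alias, 0xaa, 0x1000);
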
pr_info("Testing KVM_RUN with zero added memory regions\n");
- vm = vm_create(0);
+ vm = vm_create_barebones();
vm_vcpu_add(vm, VCPU_ID);
vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul);
"KVM_CAP_NR_MEMSLOTS should be greater than 0");
pr_info("Allowed number of memory slots: %i\n", max_mem_slots);
- vm = vm_create(0);
+ vm = vm_create_barebones();
	/* Check that memory slots can be added up to the maximum allowed */
pr_info("Adding slots 0..%i, each memory region with %dK size\n",
struct kvm_vm *vm;
int ret;
- vm = vm_create(0);
+ vm = vm_create_barebones();
/* Get KVM_CAP_MAX_VCPU_ID cap supported in KVM */
ret = vm_check_cap(vm, KVM_CAP_MAX_VCPU_ID);
* use it to verify all supported CR4 bits can be set prior to defining
* the vCPU model, i.e. without doing KVM_SET_CPUID2.
*/
- vm = vm_create(DEFAULT_GUEST_PHY_PAGES);
+ vm = vm_create_barebones();
vm_vcpu_add(vm, VCPU_ID);
vcpu_sregs_get(vm, VCPU_ID, &sregs);
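
For illustration, the verification step might look like the following; calc_supported_cr4_feature_bits() is assumed from the surrounding test and is not defined in this patch:

	/*
	 * OR in every CR4 bit the host reports as supported; setting them
	 * must succeed even though KVM_SET_CPUID2 was never performed.
	 */
	sregs.cr4 |= calc_supported_cr4_feature_bits();
	vcpu_sregs_set(vm, VCPU_ID, &sregs);
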
struct kvm_sev_launch_start start = { 0 };
int i;
- vm = vm_create(0);
+ vm = vm_create_barebones();
sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
vm_vcpu_add(vm, i);
struct kvm_vm *vm;
int i;
- vm = vm_create(0);
+ vm = vm_create_barebones();
if (!with_vcpus)
return vm;
*sev_es_vm_no_vmsa;
int ret;
- vm_no_vcpu = vm_create(0);
+ vm_no_vcpu = vm_create_barebones();
vm_no_sev = aux_vm_create(true);
ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev);
TEST_ASSERT(ret == -1 && errno == EINVAL,
sev_vm = sev_vm_create(/* es= */ false);
sev_es_vm = sev_vm_create(/* es= */ true);
- sev_es_vm_no_vmsa = vm_create(0);
+ sev_es_vm_no_vmsa = vm_create_barebones();
sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
vm_vcpu_add(sev_es_vm_no_vmsa, 1);