return rdmsr(MSR_IA32_VMX_BASIC);
}
-void prepare_for_vmx_operation(void);
-void prepare_vmcs(void *guest_rip, void *guest_rsp);
-struct kvm_vm *vm_create_default_vmx(uint32_t vcpuid,
- vmx_guest_code_t guest_code);
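+/*
+ * Each VMX area is tracked three ways: as a guest-virtual pointer (the bare
+ * member, usable when the structure is accessed from guest code), as a
+ * host-virtual address for the test harness, and as a guest-physical address
+ * for the VMX instructions that take one (VMXON, VMCLEAR, VMPTRLD).
+ */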
+struct vmx_pages {
+ void *vmxon_hva;
+ uint64_t vmxon_gpa;
+ void *vmxon;
+
+ void *vmcs_hva;
+ uint64_t vmcs_gpa;
+ void *vmcs;
+
+ void *msr_hva;
+ uint64_t msr_gpa;
+ void *msr;
+};
+
+struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
+bool prepare_for_vmx_operation(struct vmx_pages *vmx);
+void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
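+/*
+ * Sketch of the intended use (not a complete test): the host side allocates
+ * the areas and hands the guest-virtual address to the vCPU, and the L1
+ * guest then enters VMX operation and programs the VMCS before launching L2:
+ *
+ *   vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ *   vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+ *   ...
+ *   // in the L1 guest
+ *   GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ *   prepare_vmcs(vmx_pages, l2_guest_code,
+ *                &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ */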
#endif /* !SELFTEST_KVM_VMX_H */
#include "x86.h"
#include "vmx.h"
-/* Create a default VM for VMX tests.
+/* Allocate memory regions for nested VMX tests.
*
* Input Args:
- * vcpuid - The id of the single VCPU to add to the VM.
- * guest_code - The vCPU's entry point
+ * vm - The VM to allocate guest-virtual addresses in.
*
- * Output Args: None
+ * Output Args:
+ * p_vmx_gva - The guest virtual address for the struct vmx_pages.
*
* Return:
- * Pointer to opaque structure that describes the created VM.
+ * Pointer to structure with the addresses of the VMX areas.
*/
-struct kvm_vm *
-vm_create_default_vmx(uint32_t vcpuid, vmx_guest_code_t guest_code)
+struct vmx_pages *
+vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
- struct kvm_cpuid2 *cpuid;
- struct kvm_vm *vm;
- vm_vaddr_t vmxon_vaddr;
- vm_paddr_t vmxon_paddr;
- vm_vaddr_t vmcs_vaddr;
- vm_paddr_t vmcs_paddr;
-
- vm = vm_create_default(vcpuid, (void *) guest_code);
-
- /* Enable nesting in CPUID */
- vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
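+ /* Page holding the vmx_pages descriptor itself, shared with the guest. */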
+ vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);
/* Setup of a region of guest memory for the vmxon region. */
- vmxon_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0);
- vmxon_paddr = addr_gva2gpa(vm, vmxon_vaddr);
+ vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
+ vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);
/* Setup of a region of guest memory for a vmcs. */
- vmcs_vaddr = vm_vaddr_alloc(vm, getpagesize(), 0, 0, 0);
- vmcs_paddr = addr_gva2gpa(vm, vmcs_vaddr);
+ vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
+ vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);
- vcpu_args_set(vm, vcpuid, 4, vmxon_vaddr, vmxon_paddr, vmcs_vaddr,
- vmcs_paddr);
+ /* Setup of a region of guest memory for the MSR bitmap. */
+ vmx->msr = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+ vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
+ vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
- return vm;
+ *p_vmx_gva = vmx_gva;
+ return vmx;
}
-void prepare_for_vmx_operation(void)
+bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
uint64_t feature_control;
uint64_t required;
feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
if ((feature_control & required) != required)
wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required);
+
+ /* Enter VMX root operation. */
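+ /*
+  * The VMXON region must begin with the VMCS revision identifier, which
+  * vmcs_revision() reads from MSR_IA32_VMX_BASIC.
+  */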
+ *(uint32_t *)(vmx->vmxon) = vmcs_revision();
+ if (vmxon(vmx->vmxon_gpa))
+ return false;
+
+ /* Load a VMCS. */
+ *(uint32_t *)(vmx->vmcs) = vmcs_revision();
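+ /* VMCLEAR puts the VMCS into the clear state before VMPTRLD makes it current. */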
+ if (vmclear(vmx->vmcs_gpa))
+ return false;
+
+ if (vmptrld(vmx->vmcs_gpa))
+ return false;
+
+ return true;
}
/*
* Initialize the control fields to the most basic settings possible.
*/
-static inline void init_vmcs_control_fields(void)
+static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
vmwrite(VIRTUAL_PROCESSOR_ID, 0);
vmwrite(POSTED_INTR_NV, 0);
vmwrite(CR4_GUEST_HOST_MASK, 0);
vmwrite(CR0_READ_SHADOW, get_cr0());
vmwrite(CR4_READ_SHADOW, get_cr4());
+
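+ /*
+  * Point the VMCS at the MSR bitmap page.  Assuming the freshly allocated
+  * bitmap is zero-filled, enabling CPU_BASED_USE_MSR_BITMAPS means MSR
+  * accesses in the guest do not cause VM-exits.
+  */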
+ vmwrite(MSR_BITMAP, vmx->msr_gpa);
}
/*
vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}
-void prepare_vmcs(void *guest_rip, void *guest_rsp)
+void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
{
- init_vmcs_control_fields();
+ init_vmcs_control_fields(vmx);
init_vmcs_host_state();
init_vmcs_guest_state(guest_rip, guest_rsp);
}
PORT_DONE,
};
-struct vmx_page {
- vm_vaddr_t virt;
- vm_paddr_t phys;
-};
-
enum {
VMXON_PAGE = 0,
VMCS_PAGE,
/* The virtual machine object. */
static struct kvm_vm *vm;
-/* Array of vmx_page descriptors that is shared with the guest. */
-struct vmx_page *vmx_pages;
-
#define exit_to_l0(_port, _arg) do_exit_to_l0(_port, (unsigned long) (_arg))
static void do_exit_to_l0(uint16_t port, unsigned long arg)
{
__asm__ __volatile__("vmcall");
}
-static void l1_guest_code(struct vmx_page *vmx_pages)
+static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
- prepare_for_vmx_operation();
-
- /* Enter VMX root operation. */
- *(uint32_t *)vmx_pages[VMXON_PAGE].virt = vmcs_revision();
- GUEST_ASSERT(!vmxon(vmx_pages[VMXON_PAGE].phys));
-
- /* Load a VMCS. */
- *(uint32_t *)vmx_pages[VMCS_PAGE].virt = vmcs_revision();
- GUEST_ASSERT(!vmclear(vmx_pages[VMCS_PAGE].phys));
- GUEST_ASSERT(!vmptrld(vmx_pages[VMCS_PAGE].phys));
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
/* Prepare the VMCS for L2 execution. */
- prepare_vmcs(l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
- vmwrite(MSR_BITMAP, vmx_pages[MSR_BITMAP_PAGE].phys);
vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);
/* Jump into L2. First, test failure to load guest CR3. */
exit_to_l0(PORT_DONE, 0);
}
-static void allocate_vmx_page(struct vmx_page *page)
-{
- vm_vaddr_t virt;
-
- virt = vm_vaddr_alloc(vm, PAGE_SIZE, 0, 0, 0);
- memset(addr_gva2hva(vm, virt), 0, PAGE_SIZE);
-
- page->virt = virt;
- page->phys = addr_gva2gpa(vm, virt);
-}
-
-static vm_vaddr_t allocate_vmx_pages(void)
-{
- vm_vaddr_t vmx_pages_vaddr;
- int i;
-
- vmx_pages_vaddr = vm_vaddr_alloc(
- vm, sizeof(struct vmx_page) * NUM_VMX_PAGES, 0, 0, 0);
-
- vmx_pages = (void *) addr_gva2hva(vm, vmx_pages_vaddr);
-
- for (i = 0; i < NUM_VMX_PAGES; i++)
- allocate_vmx_page(&vmx_pages[i]);
-
- return vmx_pages_vaddr;
-}
-
void report(int64_t val)
{
printf("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
int main(int argc, char *argv[])
{
- vm_vaddr_t vmx_pages_vaddr;
+ struct vmx_pages *vmx_pages;
+ vm_vaddr_t vmx_pages_gva;
struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
if (!(entry->ecx & CPUID_VMX)) {
exit(KSFT_SKIP);
}
- vm = vm_create_default_vmx(VCPU_ID, (void *) l1_guest_code);
+ vm = vm_create_default(VCPU_ID, (void *) l1_guest_code);
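+ /* Enable nesting in CPUID. */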
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
/* Allocate VMX pages and shared descriptors (vmx_pages). */
- vmx_pages_vaddr = allocate_vmx_pages();
- vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_vaddr);
+ vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
for (;;) {
volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
struct kvm_regs regs;
vcpu_run(vm, VCPU_ID);
+ vcpu_regs_get(vm, VCPU_ID, &regs);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Got exit_reason other than KVM_EXIT_IO: %u (%s),\n",
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s), rip=%lx\n",
run->exit_reason,
- exit_reason_str(run->exit_reason));
-
- vcpu_regs_get(vm, VCPU_ID, &regs);
+ exit_reason_str(run->exit_reason), regs.rip);
switch (run->io.port) {
case PORT_ABORT: