KVM: selftests: state_test: test bare VMXON migration
author Vitaly Kuznetsov <vkuznets@redhat.com>
Tue, 16 Oct 2018 16:50:08 +0000 (18:50 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 16 Oct 2018 22:30:18 +0000 (00:30 +0200)
Split prepare_for_vmx_operation() into prepare_for_vmx_operation() and
load_vmcs() so we can inject GUEST_SYNC() in between.
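
With the split, the guest can now checkpoint while it is in VMX root
operation but before any VMCS has been made current, which is the "bare
VMXON" state the subject line refers to. The resulting call sequence in
state_test (excerpted from the hunk below):

    GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); /* checks + VMXON */
    GUEST_SYNC(3);              /* save/restore here: VMXON on, no current VMCS */
    GUEST_ASSERT(load_vmcs(vmx_pages));                 /* VMCLEAR + VMPTRLD */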

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/include/x86_64/vmx.h
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/x86_64/state_test.c
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c

diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 12ebd83..4bbee85 100644
@@ -548,5 +548,6 @@ struct vmx_pages {
 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
 bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
+bool load_vmcs(struct vmx_pages *vmx);
 
 #endif /* SELFTEST_KVM_VMX_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index d7c4014..cc356da 100644
@@ -107,6 +107,11 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
        if (vmxon(vmx->vmxon_gpa))
                return false;
 
+       return true;
+}
+
+bool load_vmcs(struct vmx_pages *vmx)
+{
        /* Load a VMCS. */
        *(uint32_t *)(vmx->vmcs) = vmcs_revision();
        if (vmclear(vmx->vmcs_gpa))
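
The hunk above shows only the head of the moved body. For reference, the
complete load_vmcs() after the split presumably reads as below; the
vmptrld() call and the shadow-VMCS initialization are assumed to be carried
over unchanged from the old tail of prepare_for_vmx_operation() (a sketch,
not the verbatim file):

    bool load_vmcs(struct vmx_pages *vmx)
    {
            /* Load a VMCS: VMCLEAR to initialize it, VMPTRLD to make it current. */
            *(uint32_t *)(vmx->vmcs) = vmcs_revision();
            if (vmclear(vmx->vmcs_gpa))
                    return false;

            if (vmptrld(vmx->vmcs_gpa))
                    return false;

            /* Set up the shadow VMCS revision id (bit 31 set); don't load it yet. */
            *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
            if (vmclear(vmx->shadow_vmcs_gpa))
                    return false;

            return true;
    }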
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 43df194..03da41f 100644
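The state_test.c changes do two things: add the new GUEST_SYNC(3)
checkpoint between prepare_for_vmx_operation() and load_vmcs(), and bump
every subsequent sync point by one so the stage numbers stay strictly
increasing.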
@@ -26,20 +26,20 @@ static bool have_nested_state;
 
 void l2_guest_code(void)
 {
-       GUEST_SYNC(5);
+       GUEST_SYNC(6);
 
         /* Exit to L1 */
        vmcall();
 
        /* L1 has now set up a shadow VMCS for us.  */
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
-       GUEST_SYNC(9);
+       GUEST_SYNC(10);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
        GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
-       GUEST_SYNC(10);
+       GUEST_SYNC(11);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
        GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
-       GUEST_SYNC(11);
+       GUEST_SYNC(12);
 
        /* Done, exit to L1 and never come back.  */
        vmcall();
@@ -52,15 +52,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 
        GUEST_ASSERT(vmx_pages->vmcs_gpa);
        GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+       GUEST_SYNC(3);
+       GUEST_ASSERT(load_vmcs(vmx_pages));
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
 
-       GUEST_SYNC(3);
+       GUEST_SYNC(4);
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
 
        prepare_vmcs(vmx_pages, l2_guest_code,
                     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 
-       GUEST_SYNC(4);
+       GUEST_SYNC(5);
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
@@ -72,7 +74,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
        GUEST_ASSERT(!vmresume());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
 
-       GUEST_SYNC(6);
+       GUEST_SYNC(7);
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
 
        GUEST_ASSERT(!vmresume());
@@ -85,12 +87,12 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
 
        GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
        GUEST_ASSERT(vmlaunch());
-       GUEST_SYNC(7);
+       GUEST_SYNC(8);
        GUEST_ASSERT(vmlaunch());
        GUEST_ASSERT(vmresume());
 
        vmwrite(GUEST_RIP, 0xc0ffee);
-       GUEST_SYNC(8);
+       GUEST_SYNC(9);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
 
        GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
@@ -101,7 +103,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
        GUEST_ASSERT(vmlaunch());
        GUEST_ASSERT(vmresume());
-       GUEST_SYNC(12);
+       GUEST_SYNC(13);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
        GUEST_ASSERT(vmlaunch());
        GUEST_ASSERT(vmresume());
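
Why a sync point there exercises migration: state_test's host side saves
the complete vCPU state (including nested state) at every UCALL_SYNC and
restores it into a freshly restarted VM, so stage 3 now performs a
save/restore cycle with VMXON executed but no current VMCS. A simplified
sketch of that host loop (not part of this diff; helper names are assumed
from the kvm selftests library of this period):

    for (stage = 1;; stage++) {
            _vcpu_run(vm, VCPU_ID);
            /* ... verify we got UCALL_SYNC with the expected stage ... */

            state = vcpu_save_state(vm, VCPU_ID);   /* KVM_GET_NESTED_STATE etc. */
            kvm_vm_release(vm);

            /* Restore into a new VM; stage 3 lands here in bare-VMXON state. */
            kvm_vm_restart(vm, O_RDWR);
            vm_vcpu_add(vm, VCPU_ID, 0, 0);
            vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
            vcpu_load_state(vm, VCPU_ID, state);
    }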
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 38a91a5..18fa64d 100644
@@ -94,6 +94,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
        check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
 
        GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+       GUEST_ASSERT(load_vmcs(vmx_pages));
 
        /* Prepare the VMCS for L2 execution. */
        prepare_vmcs(vmx_pages, l2_guest_code,
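
Every caller of prepare_for_vmx_operation() now needs a matching
load_vmcs() call; vmx_tsc_adjust_test is the only other user and gains the
call here, with its behavior otherwise unchanged.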