KVM: selftests: smm_test: Test SMM enter from L2
author	Vitaly Kuznetsov <vkuznets@redhat.com>
	Mon, 28 Jun 2021 10:44:25 +0000 (12:44 +0200)
committer	Paolo Bonzini <pbonzini@redhat.com>
	Thu, 15 Jul 2021 14:19:44 +0000 (10:19 -0400)
Two additional tests are added:
- SMM triggered from L2 does not corrupt L1 host state.
- Save/restore during SMM triggered from L2 does not corrupt guest/host
  state.
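
For context, injecting an SMI from the test's userspace side comes down to the
KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS ioctls with KVM_VCPUEVENT_VALID_SMM
set; a minimal standalone sketch (using raw ioctls and a hypothetical vcpu_fd
instead of the selftest wrappers vcpu_events_get()/vcpu_events_set() used by
the patch below) would look roughly like:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Sketch only: queue a pending SMI on an already-created vCPU fd. */
    static void queue_smi(int vcpu_fd)
    {
            struct kvm_vcpu_events events = {};

            /* Fetch the current events so only the SMM fields change. */
            ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events);

            events.smi.pending = 1;
            events.flags |= KVM_VCPUEVENT_VALID_SMM;

            ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
    }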

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210628104425.391276-7-vkuznets@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
tools/testing/selftests/kvm/x86_64/smm_test.c

index c1f8318..d0fe2fd 100644
@@ -53,15 +53,28 @@ static inline void sync_with_host(uint64_t phase)
                     : "+a" (phase));
 }
 
-void self_smi(void)
+static void self_smi(void)
 {
        x2apic_write_reg(APIC_ICR,
                         APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
 }
 
-void guest_code(void *arg)
+static void l2_guest_code(void)
 {
+       sync_with_host(8);
+
+       sync_with_host(10);
+
+       vmcall();
+}
+
+static void guest_code(void *arg)
+{
+       #define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
        uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
+       struct svm_test_data *svm = arg;
+       struct vmx_pages *vmx_pages = arg;
 
        sync_with_host(1);
 
@@ -74,21 +87,50 @@ void guest_code(void *arg)
        sync_with_host(4);
 
        if (arg) {
-               if (cpu_has_svm())
-                       generic_svm_setup(arg, NULL, NULL);
-               else
-                       GUEST_ASSERT(prepare_for_vmx_operation(arg));
+               if (cpu_has_svm()) {
+                       generic_svm_setup(svm, l2_guest_code,
+                                         &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+               } else {
+                       GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+                       GUEST_ASSERT(load_vmcs(vmx_pages));
+                       prepare_vmcs(vmx_pages, l2_guest_code,
+                                    &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+               }
 
                sync_with_host(5);
 
                self_smi();
 
                sync_with_host(7);
+
+               if (cpu_has_svm()) {
+                       run_guest(svm->vmcb, svm->vmcb_gpa);
+                       svm->vmcb->save.rip += 3;
+                       run_guest(svm->vmcb, svm->vmcb_gpa);
+               } else {
+                       vmlaunch();
+                       vmresume();
+               }
+
+               /* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */
+               sync_with_host(12);
        }
 
        sync_with_host(DONE);
 }
 
+void inject_smi(struct kvm_vm *vm)
+{
+       struct kvm_vcpu_events events;
+
+       vcpu_events_get(vm, VCPU_ID, &events);
+
+       events.smi.pending = 1;
+       events.flags |= KVM_VCPUEVENT_VALID_SMM;
+
+       vcpu_events_set(vm, VCPU_ID, &events);
+}
+
 int main(int argc, char *argv[])
 {
        vm_vaddr_t nested_gva = 0;
@@ -147,6 +189,22 @@ int main(int argc, char *argv[])
                            "Unexpected stage: #%x, got %x",
                            stage, stage_reported);
 
+               /*
+                * Enter SMM during L2 execution and check that we correctly
+                * return from it. Do not perform save/restore while in SMM yet.
+                */
+               if (stage == 8) {
+                       inject_smi(vm);
+                       continue;
+               }
+
+               /*
+                * Perform save/restore while the guest is in SMM triggered
+                * during L2 execution.
+                */
+               if (stage == 10)
+                       inject_smi(vm);
+
                state = vcpu_save_state(vm, VCPU_ID);
                kvm_vm_release(vm);
                kvm_vm_restart(vm, O_RDWR);