KVM: SVM: introduce nested_run_pending
author: Paolo Bonzini <pbonzini@redhat.com>
Thu, 23 Apr 2020 17:22:27 +0000 (13:22 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Wed, 13 May 2020 16:14:21 +0000 (12:14 -0400)
We want to inject vmexits immediately from svm_check_nested_events,
so that the interrupt/NMI window requests happen in inject_pending_event
right after it returns.

This however has the same issue as in vmx_check_nested_events, so
introduce a nested_run_pending flag with the exact same purpose
of delaying vmexit injection after the vmentry.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

index 1429f50..7a724ea 100644 (file)
@@ -414,6 +414,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 
        copy_vmcb_control_area(hsave, vmcb);
 
+       svm->nested.nested_run_pending = 1;
        enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
 
        if (!nested_svm_vmrun_msrpm(svm)) {
@@ -815,7 +816,8 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        bool block_nested_events =
-               kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required;
+               kvm_event_needs_reinjection(vcpu) || svm->nested.exit_required ||
+               svm->nested.nested_run_pending;
 
        if (kvm_cpu_has_interrupt(vcpu) && nested_exit_on_intr(svm)) {
                if (block_nested_events)
index b627564..c2a4e2d 100644 (file)
@@ -3429,6 +3429,7 @@ static enum exit_fastpath_completion svm_vcpu_run(struct kvm_vcpu *vcpu)
        sync_cr8_to_lapic(vcpu);
 
        svm->next_rip = 0;
+       svm->nested.nested_run_pending = 0;
 
        svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 
index 98c2890..435f332 100644 (file)
@@ -97,6 +97,10 @@ struct nested_state {
        /* A VMEXIT is required but not yet emulated */
        bool exit_required;
 
+       /* A VMRUN has started but has not yet been performed, so
+        * we cannot inject a nested vmexit yet.  */
+       bool nested_run_pending;
+
        /* cache for intercepts of the guest */
        u32 intercept_cr;
        u32 intercept_dr;