KVM: x86: Formalize blocking of nested pending exceptions
author		Sean Christopherson <seanjc@google.com>
		Tue, 30 Aug 2022 23:16:02 +0000 (23:16 +0000)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Mon, 26 Sep 2022 16:03:08 +0000 (12:03 -0400)
Capture nested_run_pending as block_nested_exceptions so that the logic
for why exceptions are blocked needs to be documented only once instead
of at every place that employs it.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Link: https://lore.kernel.org/r/20220830231614.3580124-16-seanjc@google.com
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/nested.c
arch/x86/kvm/vmx/nested.c
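
As a quick orientation before the diff, here is a minimal sketch of the
pattern the patch introduces.  It paraphrases the hunks below rather than
quoting the tree, and nested_run_pending() is a hypothetical stand-in for
the vendor-specific svm->nested.nested_run_pending and
vmx->nested.nested_run_pending fields.

	/* Sketch only; the vendor-specific exception/VM-Exit plumbing is elided. */
	static int check_nested_events_sketch(struct kvm_vcpu *vcpu)
	{
		/*
		 * Only a pending nested run blocks a pending exception; an
		 * already-injected event does not, as the exception occurred
		 * while that event was being delivered.
		 */
		bool block_nested_exceptions = nested_run_pending(vcpu);

		/*
		 * Non-exception events are recognized only at instruction
		 * boundaries, so a needed reinjection blocks them as well.
		 */
		bool block_nested_events = block_nested_exceptions ||
					   kvm_event_needs_reinjection(vcpu);

		if (vcpu->arch.exception.pending) {
			if (block_nested_exceptions)
				return -EBUSY;
			/* ... forward the exception to L1 if it is intercepted ... */
		}

		/* ... all remaining event checks test block_nested_events ... */
		return 0;
	}

Capturing the condition in a named local documents the rationale once, so a
future change to what blocks a pending exception only has to touch the
single assignment instead of every check site.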

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index bbfbcea..2ecc64c 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1360,10 +1360,22 @@ static inline bool nested_exit_on_init(struct vcpu_svm *svm)
 
 static int svm_check_nested_events(struct kvm_vcpu *vcpu)
 {
-       struct vcpu_svm *svm = to_svm(vcpu);
-       bool block_nested_events =
-               kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
        struct kvm_lapic *apic = vcpu->arch.apic;
+       struct vcpu_svm *svm = to_svm(vcpu);
+       /*
+        * Only a pending nested run blocks a pending exception.  If there is a
+        * previously injected event, the pending exception occurred while said
+        * event was being delivered and thus needs to be handled.
+        */
+       bool block_nested_exceptions = svm->nested.nested_run_pending;
+       /*
+        * New events (not exceptions) are only recognized at instruction
+        * boundaries.  If an event needs reinjection, then KVM is handling a
+        * VM-Exit that occurred _during_ instruction execution; new events are
+        * blocked until the instruction completes.
+        */
+       bool block_nested_events = block_nested_exceptions ||
+                                  kvm_event_needs_reinjection(vcpu);
 
        if (lapic_in_kernel(vcpu) &&
            test_bit(KVM_APIC_INIT, &apic->pending_events)) {
@@ -1376,13 +1388,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
        }
 
        if (vcpu->arch.exception.pending) {
-               /*
-                * Only a pending nested run can block a pending exception.
-                * Otherwise an injected NMI/interrupt should either be
-                * lost or delivered to the nested hypervisor in the EXITINTINFO
-                * vmcb field, while delivering the pending exception.
-                */
-               if (svm->nested.nested_run_pending)
+               if (block_nested_exceptions)
                         return -EBUSY;
                if (!nested_exit_on_exception(svm))
                        return 0;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8e7f8ce..68533ae 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3945,11 +3945,23 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
 
 static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
 {
+       struct kvm_lapic *apic = vcpu->arch.apic;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qual;
-       bool block_nested_events =
-           vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
-       struct kvm_lapic *apic = vcpu->arch.apic;
+       /*
+        * Only a pending nested run blocks a pending exception.  If there is a
+        * previously injected event, the pending exception occurred while said
+        * event was being delivered and thus needs to be handled.
+        */
+       bool block_nested_exceptions = vmx->nested.nested_run_pending;
+       /*
+        * New events (not exceptions) are only recognized at instruction
+        * boundaries.  If an event needs reinjection, then KVM is handling a
+        * VM-Exit that occurred _during_ instruction execution; new events are
+        * blocked until the instruction completes.
+        */
+       bool block_nested_events = block_nested_exceptions ||
+                                  kvm_event_needs_reinjection(vcpu);
 
        if (lapic_in_kernel(vcpu) &&
                test_bit(KVM_APIC_INIT, &apic->pending_events)) {
@@ -3988,15 +4000,10 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
         * for TSS T flag #DBs).  KVM also doesn't save/restore pending MTF
         * across SMI/RSM as it should; that needs to be addressed in order to
         * prioritize SMI over MTF and trap-like #DBs.
-        *
-        * Note that only a pending nested run can block a pending exception.
-        * Otherwise an injected NMI/interrupt should either be
-        * lost or delivered to the nested hypervisor in the IDT_VECTORING_INFO,
-        * while delivering the pending exception.
         */
        if (vcpu->arch.exception.pending &&
            !(vmx_get_pending_dbg_trap(vcpu) & ~DR6_BT)) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_exceptions)
                        return -EBUSY;
                if (!nested_vmx_check_exception(vcpu, &exit_qual))
                        goto no_vmexit;
@@ -4013,7 +4020,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
        }
 
        if (vcpu->arch.exception.pending) {
-               if (vmx->nested.nested_run_pending)
+               if (block_nested_exceptions)
                        return -EBUSY;
                if (!nested_vmx_check_exception(vcpu, &exit_qual))
                        goto no_vmexit;