KVM: SVM: Don't defer NMI unblocking until next exit for SEV-ES guests
author Sean Christopherson <seanjc@google.com>
Thu, 15 Jun 2023 06:37:56 +0000 (16:37 +1000)
committer Sean Christopherson <seanjc@google.com>
Fri, 28 Jul 2023 23:13:18 +0000 (16:13 -0700)
Immediately mark NMIs as unmasked in response to #VMGEXIT(NMI complete)
instead of setting awaiting_iret_completion and waiting until the *next*
VM-Exit to unmask NMIs.  The whole point of "NMI complete" is that the
guest is responsible for telling the hypervisor when it's safe to inject
an NMI, i.e. there's no need to wait.  And because there's no IRET to
single-step, the next VM-Exit could be a long time coming, i.e. KVM could
incorrectly hold an NMI pending for far longer than what is required and
expected.

Opportunistically fix a stale reference to HF_IRET_MASK.

Fixes: 916b54a7688b ("KVM: x86: Move HF_NMI_MASK and HF_IRET_MASK into "struct vcpu_svm"")
Fixes: 4444dfe4050b ("KVM: SVM: Add NMI support for an SEV-ES guest")
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20230615063757.3039121-9-aik@amd.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c

index b35cd670ce667701bf657fc8f5d16f2f6f448e1d..2cd15783dfb949bfb16c3d6109a57f1d65dbdcf7 100644 (file)
@@ -2900,7 +2900,10 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
                                            svm->sev_es.ghcb_sa);
                break;
        case SVM_VMGEXIT_NMI_COMPLETE:
-               ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
+               ++vcpu->stat.nmi_window_exits;
+               svm->nmi_masked = false;
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+               ret = 1;
                break;
        case SVM_VMGEXIT_AP_HLT_LOOP:
                ret = kvm_emulate_ap_reset_hold(vcpu);
index 8fad9ebb51266675e9c1bc6d6dcf750b653ed99f..b15bc158ecc68e66ce5d524e341073b90d18a099 100644 (file)
@@ -2535,12 +2535,13 @@ static int iret_interception(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       WARN_ON_ONCE(sev_es_guest(vcpu->kvm));
+
        ++vcpu->stat.nmi_window_exits;
        svm->awaiting_iret_completion = true;
 
        svm_clr_iret_intercept(svm);
-       if (!sev_es_guest(vcpu->kvm))
-               svm->nmi_iret_rip = kvm_rip_read(vcpu);
+       svm->nmi_iret_rip = kvm_rip_read(vcpu);
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 1;
@@ -3950,12 +3951,11 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
        svm->soft_int_injected = false;
 
        /*
-        * If we've made progress since setting HF_IRET_MASK, we've
+        * If we've made progress since setting awaiting_iret_completion, we've
         * executed an IRET and can allow NMI injection.
         */
        if (svm->awaiting_iret_completion &&
-           (sev_es_guest(vcpu->kvm) ||
-            kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
+           kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
                svm->awaiting_iret_completion = false;
                svm->nmi_masked = false;
                kvm_make_request(KVM_REQ_EVENT, vcpu);