KVM: x86: do not define KVM_REQ_SMI if SMM disabled
Author: Paolo Bonzini <pbonzini@redhat.com>
Thu, 29 Sep 2022 17:20:16 +0000 (13:20 -0400)
Committer: Paolo Bonzini <pbonzini@redhat.com>
Wed, 9 Nov 2022 17:31:20 +0000 (12:31 -0500)
This ensures that all the relevant code is compiled out; in fact,
the process_smi stub can be removed too.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220929172016.319443-9-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/smm.h
arch/x86/kvm/x86.c

index b9f6f85..24a2152 100644 (file)
@@ -81,7 +81,9 @@
 #define KVM_REQ_NMI                    KVM_ARCH_REQ(9)
 #define KVM_REQ_PMU                    KVM_ARCH_REQ(10)
 #define KVM_REQ_PMI                    KVM_ARCH_REQ(11)
+#ifdef CONFIG_KVM_SMM
 #define KVM_REQ_SMI                    KVM_ARCH_REQ(12)
+#endif
 #define KVM_REQ_MASTERCLOCK_UPDATE     KVM_ARCH_REQ(13)
 #define KVM_REQ_MCLOCK_INPROGRESS \
        KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
index 8debe81..53c8139 100644 (file)
@@ -27,7 +27,6 @@ void process_smi(struct kvm_vcpu *vcpu);
 #else
 static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
 static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
-static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
 
 /*
  * emulator_leave_smm is used as a function pointer, so the
index 9ac51c8..73c3203 100644 (file)
@@ -5026,8 +5026,10 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
        process_nmi(vcpu);
 
+#ifdef CONFIG_KVM_SMM
        if (kvm_check_request(KVM_REQ_SMI, vcpu))
                process_smi(vcpu);
+#endif
 
        /*
         * KVM's ABI only allows for one exception to be migrated.  Luckily,
@@ -10266,8 +10268,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                }
                if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
                        record_steal_time(vcpu);
+#ifdef CONFIG_KVM_SMM
                if (kvm_check_request(KVM_REQ_SMI, vcpu))
                        process_smi(vcpu);
+#endif
                if (kvm_check_request(KVM_REQ_NMI, vcpu))
                        process_nmi(vcpu);
                if (kvm_check_request(KVM_REQ_PMU, vcpu))
@@ -12628,7 +12632,9 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
                return true;
 
        if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+#ifdef CONFIG_KVM_SMM
                kvm_test_request(KVM_REQ_SMI, vcpu) ||
+#endif
                 kvm_test_request(KVM_REQ_EVENT, vcpu))
                return true;