KVM: x86: compile out vendor-specific code if SMM is disabled
author		Paolo Bonzini <pbonzini@redhat.com>
		Thu, 29 Sep 2022 17:20:14 +0000 (13:20 -0400)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Wed, 9 Nov 2022 17:31:19 +0000 (12:31 -0500)
Vendor-specific code that deals with SMI injection and saving/restoring
SMM state is not needed if CONFIG_KVM_SMM is disabled, so remove the
four callbacks smi_allowed, enter_smm, leave_smm and enable_smi_window.
The users in svm/nested.c and x86.c also have to be compiled out; the
amount of #ifdef'ed code is small and it's not worth moving it to
smm.c.
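
The single #ifdef in kvm-x86-ops.h suffices because that header is an
x-macro list: each includer defines KVM_X86_OP before including it, so
guarding one entry compiles out the callback's declaration, its static
call and its default definition all at once. A lightly simplified sketch
of the two expansions (taken from the surrounding headers, with details
elided):

    /* arch/x86/include/asm/kvm_host.h: declare one static call per op */
    #define KVM_X86_OP(func) \
    	DECLARE_STATIC_CALL(kvm_x86_##func, *(((struct kvm_x86_ops *)0)->func));
    #include <asm/kvm-x86-ops.h>

    /* arch/x86/kvm/x86.c: define the (initially NULL) static calls */
    #define KVM_X86_OP(func) \
    	DEFINE_STATIC_CALL_NULL(kvm_x86_##func, \
    				*(((struct kvm_x86_ops *)0)->func));
    #include <asm/kvm-x86-ops.h>

This is also why the #ifdefs at the call sites below are mandatory rather
than cosmetic: once the entry is guarded, static_call(kvm_x86_smi_allowed)
and friends simply do not exist in a !CONFIG_KVM_SMM build.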

enter_smm is now used only within #ifdef CONFIG_KVM_SMM, and the stub
can therefore be removed.
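
For reference, the CONFIG_KVM_SMM knob that gates all of this was added
earlier in the series; its Kconfig entry reads approximately as follows
(wording from memory and may differ slightly):

    config KVM_SMM
    	bool "System Management Mode emulation"
    	default y
    	depends on KVM
    	help
    	  Provides support for KVM to emulate System Management Mode (SMM)
    	  in virtual machines.  This can be used by the virtual machine
    	  firmware to implement UEFI secure boot.

    	  If unsure, say Y.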

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20220929172016.319443-7-pbonzini@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/smm.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 82ba4a5..ea58e67 100644
@@ -110,10 +110,12 @@ KVM_X86_OP_OPTIONAL_RET0(dy_apicv_has_pending_interrupt)
 KVM_X86_OP_OPTIONAL(set_hv_timer)
 KVM_X86_OP_OPTIONAL(cancel_hv_timer)
 KVM_X86_OP(setup_mce)
+#ifdef CONFIG_KVM_SMM
 KVM_X86_OP(smi_allowed)
 KVM_X86_OP(enter_smm)
 KVM_X86_OP(leave_smm)
 KVM_X86_OP(enable_smi_window)
+#endif
 KVM_X86_OP_OPTIONAL(mem_enc_ioctl)
 KVM_X86_OP_OPTIONAL(mem_enc_register_region)
 KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 612ef60..3e5e54d 100644
@@ -1612,10 +1612,12 @@ struct kvm_x86_ops {
 
        void (*setup_mce)(struct kvm_vcpu *vcpu);
 
+#ifdef CONFIG_KVM_SMM
        int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
        int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
        int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
        void (*enable_smi_window)(struct kvm_vcpu *vcpu);
+#endif
 
        int (*mem_enc_ioctl)(struct kvm *kvm, void __user *argp);
        int (*mem_enc_register_region)(struct kvm *kvm, struct kvm_enc_region *argp);
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
index 0e1bd8b..8debe81 100644
@@ -27,7 +27,6 @@ void process_smi(struct kvm_vcpu *vcpu);
 #else
 static inline int kvm_inject_smi(struct kvm_vcpu *vcpu) { return -ENOTTY; }
 static inline bool is_smm(struct kvm_vcpu *vcpu) { return false; }
-static inline void enter_smm(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
 static inline void process_smi(struct kvm_vcpu *vcpu) { WARN_ON_ONCE(1); }
 
 /*
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index cc0fd75..b258d69 100644
@@ -1378,6 +1378,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
                return 0;
        }
 
+#ifdef CONFIG_KVM_SMM
        if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
                if (block_nested_events)
                        return -EBUSY;
@@ -1386,6 +1387,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
                nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
                return 0;
        }
+#endif
 
        if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
                if (block_nested_events)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 4cc014b..d28de3e 100644
@@ -4373,6 +4373,7 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
        vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+#ifdef CONFIG_KVM_SMM
 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -4522,6 +4523,7 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
                /* We must be in SMM; RSM will cause a vmexit anyway.  */
        }
 }
+#endif
 
 static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
                                        void *insn, int insn_len)
@@ -4797,10 +4799,12 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .pi_update_irte = avic_pi_update_irte,
        .setup_mce = svm_setup_mce,
 
+#ifdef CONFIG_KVM_SMM
        .smi_allowed = svm_smi_allowed,
        .enter_smm = svm_enter_smm,
        .leave_smm = svm_leave_smm,
        .enable_smi_window = svm_enable_smi_window,
+#endif
 
        .mem_enc_ioctl = sev_mem_enc_ioctl,
        .mem_enc_register_region = sev_mem_enc_register_region,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6a0b658..6be991b 100644
@@ -7932,6 +7932,7 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
                        ~FEAT_CTL_LMCE_ENABLED;
 }
 
+#ifdef CONFIG_KVM_SMM
 static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
        /* we need a nested vmexit to enter SMM, postpone if run is pending */
@@ -7986,6 +7987,7 @@ static void vmx_enable_smi_window(struct kvm_vcpu *vcpu)
 {
        /* RSM will cause a vmexit anyway.  */
 }
+#endif
 
 static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
 {
@@ -8153,10 +8155,12 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 
        .setup_mce = vmx_setup_mce,
 
+#ifdef CONFIG_KVM_SMM
        .smi_allowed = vmx_smi_allowed,
        .enter_smm = vmx_enter_smm,
        .leave_smm = vmx_leave_smm,
        .enable_smi_window = vmx_enable_smi_window,
+#endif
 
        .can_emulate_instruction = vmx_can_emulate_instruction,
        .apic_init_signal_blocked = vmx_apic_init_signal_blocked,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0a80cd1..9ac51c8 100644
@@ -9919,6 +9919,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
         * in order to make progress and get back here for another iteration.
         * The kvm_x86_ops hooks communicate this by returning -EBUSY.
         */
+#ifdef CONFIG_KVM_SMM
        if (vcpu->arch.smi_pending) {
                r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
                if (r < 0)
@@ -9931,6 +9932,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
                } else
                        static_call(kvm_x86_enable_smi_window)(vcpu);
        }
+#endif
 
        if (vcpu->arch.nmi_pending) {
                r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
@@ -12580,10 +12582,12 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
             static_call(kvm_x86_nmi_allowed)(vcpu, false)))
                return true;
 
+#ifdef CONFIG_KVM_SMM
        if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
            (vcpu->arch.smi_pending &&
             static_call(kvm_x86_smi_allowed)(vcpu, false)))
                return true;
+#endif
 
        if (kvm_arch_interrupt_allowed(vcpu) &&
            (kvm_cpu_has_interrupt(vcpu) ||