KVM: x86: Drop "pre_" from enter/leave_smm() helpers
author: Sean Christopherson <seanjc@google.com>
Wed, 9 Jun 2021 18:56:19 +0000 (11:56 -0700)
committer: Paolo Bonzini <pbonzini@redhat.com>
Thu, 17 Jun 2021 17:09:35 +0000 (13:09 -0400)
Now that .post_leave_smm() is gone, drop "pre_" from the remaining
helpers.  The helpers aren't invoked purely before SMI/RSM processing,
e.g. both helpers are invoked after state is snapshotted (from regs or
SMRAM), and the RSM helper is invoked after some amount of register state
has been stuffed.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210609185619.992058-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/emulate.c
arch/x86/kvm/kvm_emulate.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index aeb5f1136718165e9b16af3d94303c6a280e9110..a12a4987154eec46c600cf0f87f4c281e2c4e69f 100644 (file)
@@ -109,8 +109,8 @@ KVM_X86_OP_NULL(set_hv_timer)
 KVM_X86_OP_NULL(cancel_hv_timer)
 KVM_X86_OP(setup_mce)
 KVM_X86_OP(smi_allowed)
-KVM_X86_OP(pre_enter_smm)
-KVM_X86_OP(pre_leave_smm)
+KVM_X86_OP(enter_smm)
+KVM_X86_OP(leave_smm)
 KVM_X86_OP(enable_smi_window)
 KVM_X86_OP_NULL(mem_enc_op)
 KVM_X86_OP_NULL(mem_enc_reg_region)
index ced3e3b94b777629d2b9e098552fa0c07d983764..921de30c23c53cfa282d0d5bcbedd214b6ec9b9e 100644 (file)
@@ -1372,8 +1372,8 @@ struct kvm_x86_ops {
        void (*setup_mce)(struct kvm_vcpu *vcpu);
 
        int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
-       int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-       int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
+       int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+       int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
        void (*enable_smi_window)(struct kvm_vcpu *vcpu);
 
        int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
index 83520a9f171eee3e9c8293dbd64a01f3cca54357..2837110e66eda5952d8110f874e158c2a4bcf067 100644 (file)
@@ -2574,11 +2574,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
        }
 
        /*
-        * Give pre_leave_smm() a chance to make ISA-specific changes to the
-        * vCPU state (e.g. enter guest mode) before loading state from the SMM
+        * Give leave_smm() a chance to make ISA-specific changes to the vCPU
+        * state (e.g. enter guest mode) before loading state from the SMM
         * state-save area.
         */
-       if (ctxt->ops->pre_leave_smm(ctxt, buf))
+       if (ctxt->ops->leave_smm(ctxt, buf))
                goto emulate_shutdown;
 
 #ifdef CONFIG_X86_64
index 3ee701b0ef10386fdc7728bdc0a473c0caa0332b..68b420289d7edeb0219073930292a31342bb9c52 100644 (file)
@@ -231,8 +231,7 @@ struct x86_emulate_ops {
 
        unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
        void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
-       int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
-                            const char *smstate);
+       int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate);
        void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
        int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
 };
index 1e2c635d308c9eb000d42ce5dedeaf022b849574..e7bec71a3d9b0771940d545c5e99a3409b065c59 100644 (file)
@@ -4258,7 +4258,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
        return !svm_smi_blocked(vcpu);
 }
 
-static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        int ret;
@@ -4280,7 +4280,7 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_host_map map;
@@ -4555,8 +4555,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .setup_mce = svm_setup_mce,
 
        .smi_allowed = svm_smi_allowed,
-       .pre_enter_smm = svm_pre_enter_smm,
-       .pre_leave_smm = svm_pre_leave_smm,
+       .enter_smm = svm_enter_smm,
+       .leave_smm = svm_leave_smm,
        .enable_smi_window = svm_enable_smi_window,
 
        .mem_enc_op = svm_mem_enc_op,
index 76586ce9cf769af45cc21045b91b9736b42440e4..51bbde75b1fd460f16b4ef6c4ccfa5a73e994f63 100644 (file)
@@ -7544,7 +7544,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
        return !is_smm(vcpu);
 }
 
-static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -7558,7 +7558,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int ret;
@@ -7736,8 +7736,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .setup_mce = vmx_setup_mce,
 
        .smi_allowed = vmx_smi_allowed,
-       .pre_enter_smm = vmx_pre_enter_smm,
-       .pre_leave_smm = vmx_pre_leave_smm,
+       .enter_smm = vmx_enter_smm,
+       .leave_smm = vmx_leave_smm,
        .enable_smi_window = vmx_enable_smi_window,
 
        .can_emulate_instruction = vmx_can_emulate_instruction,
index 9a268728399ebc708080e481f942b0f8067f3f35..8d88e4513294c4e82fef7ee9f01f6a92d8cb497b 100644 (file)
@@ -7216,10 +7216,10 @@ static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
        kvm_smm_changed(vcpu, false);
 }
 
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
                                  const char *smstate)
 {
-       return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate);
+       return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate);
 }
 
 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
@@ -7274,7 +7274,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .set_nmi_mask        = emulator_set_nmi_mask,
        .get_hflags          = emulator_get_hflags,
        .exiting_smm         = emulator_exiting_smm,
-       .pre_leave_smm       = emulator_pre_leave_smm,
+       .leave_smm           = emulator_leave_smm,
        .triple_fault        = emulator_triple_fault,
        .set_xcr             = emulator_set_xcr,
 };
@@ -9006,11 +9006,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
                enter_smm_save_state_32(vcpu, buf);
 
        /*
-        * Give pre_enter_smm() a chance to make ISA-specific changes to the
-        * vCPU state (e.g. leave guest mode) after we've saved the state into
-        * the SMM state-save area.
+        * Give enter_smm() a chance to make ISA-specific changes to the vCPU
+        * state (e.g. leave guest mode) after we've saved the state into the
+        * SMM state-save area.
         */
-       static_call(kvm_x86_pre_enter_smm)(vcpu, buf);
+       static_call(kvm_x86_enter_smm)(vcpu, buf);
 
        kvm_smm_changed(vcpu, true);
        kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));