KVM: x86: smm: use smram structs in the common code
authorMaxim Levitsky <mlevitsk@redhat.com>
Tue, 25 Oct 2022 12:47:35 +0000 (15:47 +0300)
committerPaolo Bonzini <pbonzini@redhat.com>
Wed, 9 Nov 2022 17:31:23 +0000 (12:31 -0500)
Use kvm_smram union instead of raw arrays in the common smm code.

Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20221025124741.228045-18-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/smm.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c

index e9862ea..4443869 100644 (file)
@@ -206,6 +206,7 @@ typedef enum exit_fastpath_completion fastpath_t;
 
 struct x86_emulate_ctxt;
 struct x86_exception;
+union kvm_smram;
 enum x86_intercept;
 enum x86_intercept_stage;
 
@@ -1616,8 +1617,8 @@ struct kvm_x86_ops {
 
 #ifdef CONFIG_KVM_SMM
        int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
-       int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-       int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
+       int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
+       int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
        void (*enable_smi_window)(struct kvm_vcpu *vcpu);
 #endif
 
index 2e6ec79..ba2733e 100644 (file)
@@ -295,17 +295,18 @@ void enter_smm(struct kvm_vcpu *vcpu)
        struct kvm_segment cs, ds;
        struct desc_ptr dt;
        unsigned long cr0;
-       char buf[512];
+       union kvm_smram smram;
 
        check_smram_offsets();
 
-       memset(buf, 0, 512);
+       memset(smram.bytes, 0, sizeof(smram.bytes));
+
 #ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-               enter_smm_save_state_64(vcpu, buf);
+               enter_smm_save_state_64(vcpu, smram.bytes);
        else
 #endif
-               enter_smm_save_state_32(vcpu, buf);
+               enter_smm_save_state_32(vcpu, smram.bytes);
 
        /*
         * Give enter_smm() a chance to make ISA-specific changes to the vCPU
@@ -315,12 +316,12 @@ void enter_smm(struct kvm_vcpu *vcpu)
         * Kill the VM in the unlikely case of failure, because the VM
         * can be in undefined state in this case.
         */
-       if (static_call(kvm_x86_enter_smm)(vcpu, buf))
+       if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
                goto error;
 
        kvm_smm_changed(vcpu, true);
 
-       if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)))
+       if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)))
                goto error;
 
        if (static_call(kvm_x86_get_nmi_mask)(vcpu))
@@ -480,7 +481,7 @@ static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
 }
 
 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
-                            const char *smstate)
+                            u8 *smstate)
 {
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        struct kvm_segment desc;
@@ -541,7 +542,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 
 #ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
-                            const char *smstate)
+                            u8 *smstate)
 {
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        struct kvm_segment desc;
@@ -612,13 +613,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 {
        struct kvm_vcpu *vcpu = ctxt->vcpu;
        unsigned long cr0;
-       char buf[512];
+       union kvm_smram smram;
        u64 smbase;
        int ret;
 
        smbase = vcpu->arch.smbase;
 
-       ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, buf, sizeof(buf));
+       ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram));
        if (ret < 0)
                return X86EMUL_UNHANDLEABLE;
 
@@ -675,13 +676,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
         * state (e.g. enter guest mode) before loading state from the SMM
         * state-save area.
         */
-       if (static_call(kvm_x86_leave_smm)(vcpu, buf))
+       if (static_call(kvm_x86_leave_smm)(vcpu, &smram))
                return X86EMUL_UNHANDLEABLE;
 
 #ifdef CONFIG_X86_64
        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-               return rsm_load_state_64(ctxt, buf);
+               return rsm_load_state_64(ctxt, smram.bytes);
        else
 #endif
-               return rsm_load_state_32(ctxt, buf);
+               return rsm_load_state_32(ctxt, smram.bytes);
 }
index d28de3e..9d08214 100644 (file)
@@ -4401,12 +4401,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
        return 1;
 }
 
-static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_host_map map_save;
        int ret;
 
+       char *smstate = (char *)smram;
+
        if (!is_guest_mode(vcpu))
                return 0;
 
@@ -4448,7 +4450,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_host_map map, map_save;
@@ -4456,6 +4458,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
        struct vmcb *vmcb12;
        int ret;
 
+       const char *smstate = (const char *)smram;
+
        if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
                return 0;
 
index 6be991b..aca8852 100644 (file)
@@ -7941,7 +7941,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
        return !is_smm(vcpu);
 }
 
-static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
@@ -7962,7 +7962,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        return 0;
 }
 
-static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        int ret;