KVM: nSVM: prepare to handle errors from enter_svm_guest_mode()
author    Vitaly Kuznetsov <vkuznets@redhat.com>
          Fri, 10 Jul 2020 14:11:52 +0000 (16:11 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 10 Jul 2020 16:55:13 +0000 (12:55 -0400)
Some operations in enter_svm_guest_mode() may fail, e.g. the return
value of kvm_set_cr3() is currently suppressed. Prepare the code to
propagate errors.

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200710141157.1640173-5-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
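
[Editor's note: a minimal, self-contained user-space sketch of the error-propagation
pattern this patch prepares for, not KVM code. A helper that used to return void now
returns int, and the caller unwinds through goto labels on failure, much like
nested_svm_vmrun()'s new out_exit_err/out labels below. All names (setup_stage,
load_resources, run_guest) are hypothetical.]

	#include <stdio.h>
	#include <stdbool.h>

	/* Was "void setup_stage(void)"; now reports failure to the caller. */
	static int setup_stage(bool fail)
	{
		if (fail)
			return -1;	/* an inner error that used to be suppressed, now propagated */
		return 0;
	}

	static bool load_resources(bool fail)
	{
		return !fail;
	}

	static int run_guest(bool setup_fails, bool load_fails)
	{
		int ret = 1;	/* "handled" result, as in nested_svm_vmrun() */

		if (setup_stage(setup_fails))
			goto out_exit_err;

		if (load_resources(load_fails))
			goto out;	/* success path skips the error unwind */

	out_exit_err:
		/* common error unwind: undo state, report a synthetic exit */
		printf("error path taken\n");

	out:
		/* common cleanup that runs on both success and failure */
		printf("cleanup\n");
		return ret;
	}

	int main(void)
	{
		run_guest(false, false);	/* success */
		run_guest(true, false);		/* setup failure -> out_exit_err */
		return 0;
	}
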
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index bd0df1b..bca9865 100644
@@ -404,7 +404,7 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
        vmcb_mark_all_dirty(svm->vmcb);
 }
 
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
                          struct vmcb *nested_vmcb)
 {
        svm->nested.vmcb = vmcb_gpa;
@@ -413,6 +413,8 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        nested_prepare_vmcb_control(svm);
 
        svm_set_gif(svm, true);
+
+       return 0;
 }
 
 int nested_svm_vmrun(struct vcpu_svm *svm)
@@ -490,18 +492,22 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
        copy_vmcb_control_area(&hsave->control, &vmcb->control);
 
        svm->nested.nested_run_pending = 1;
-       enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);
 
-       if (!nested_svm_vmrun_msrpm(svm)) {
-               svm->nested.nested_run_pending = 0;
+       if (enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb))
+               goto out_exit_err;
 
-               svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
-               svm->vmcb->control.exit_code_hi = 0;
-               svm->vmcb->control.exit_info_1  = 0;
-               svm->vmcb->control.exit_info_2  = 0;
+       if (nested_svm_vmrun_msrpm(svm))
+               goto out;
 
-               nested_svm_vmexit(svm);
-       }
+out_exit_err:
+       svm->nested.nested_run_pending = 0;
+
+       svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
+       svm->vmcb->control.exit_code_hi = 0;
+       svm->vmcb->control.exit_info_1  = 0;
+       svm->vmcb->control.exit_info_2  = 0;
+
+       nested_svm_vmexit(svm);
 
 out:
        kvm_vcpu_unmap(&svm->vcpu, &map, true);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 13f923c..41f791e 100644
@@ -3889,6 +3889,7 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
        struct kvm_host_map map;
        u64 guest;
        u64 vmcb;
+       int ret = 0;
 
        guest = GET_SMSTATE(u64, smstate, 0x7ed8);
        vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
@@ -3897,10 +3898,11 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
                if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
                        return 1;
                nested_vmcb = map.hva;
-               enter_svm_guest_mode(svm, vmcb, nested_vmcb);
+               ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
                kvm_vcpu_unmap(&svm->vcpu, &map, true);
        }
-       return 0;
+
+       return ret;
 }
 
 static void enable_smi_window(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 121b198..a798e17 100644
@@ -387,8 +387,8 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
        return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_NMI));
 }
 
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-                         struct vmcb *nested_vmcb);
+int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+                        struct vmcb *nested_vmcb);
 void svm_leave_nested(struct vcpu_svm *svm);
 int nested_svm_vmrun(struct vcpu_svm *svm);
 void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);