        vmcb_mark_all_dirty(svm->vmcb);
}
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-                          struct vmcb *nested_vmcb)
+int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+                         struct vmcb *nested_vmcb)
{
        svm->nested.vmcb = vmcb_gpa;
        nested_prepare_vmcb_control(svm);
        svm_set_gif(svm, true);
+
+       return 0;
}
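At this point nothing in enter_svm_guest_mode() can fail, so the new int return is unconditionally 0; the signature change is plumbing so that fallible setup can later move inside this function and have its result checked by callers. A minimal sketch of the intent, using a hypothetical helper that is not part of this patch:

        int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
                                 struct vmcb *nested_vmcb)
        {
                int ret;

                /* load_l2_state() is hypothetical, standing in for a future
                 * fallible step such as loading L2's CR3. */
                ret = load_l2_state(svm, nested_vmcb);
                if (ret)
                        return ret;

                return 0;
        }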
int nested_svm_vmrun(struct vcpu_svm *svm)
        copy_vmcb_control_area(&hsave->control, &vmcb->control);

        svm->nested.nested_run_pending = 1;
-       enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb);
-       if (!nested_svm_vmrun_msrpm(svm)) {
-               svm->nested.nested_run_pending = 0;
-               svm->vmcb->control.exit_code = SVM_EXIT_ERR;
-               svm->vmcb->control.exit_code_hi = 0;
-               svm->vmcb->control.exit_info_1 = 0;
-               svm->vmcb->control.exit_info_2 = 0;
-               nested_svm_vmexit(svm);
-       }
+       if (enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb))
+               goto out_exit_err;
+
+       if (nested_svm_vmrun_msrpm(svm))
+               goto out;
+
+out_exit_err:
+       svm->nested.nested_run_pending = 0;
+
+       svm->vmcb->control.exit_code = SVM_EXIT_ERR;
+       svm->vmcb->control.exit_code_hi = 0;
+       svm->vmcb->control.exit_info_1 = 0;
+       svm->vmcb->control.exit_info_2 = 0;
+
+       nested_svm_vmexit(svm);
out:
        kvm_vcpu_unmap(&svm->vcpu, &map, true);
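The interleaved hunk above is easier to check when read back as straight-line code. Reconstructed from the diff alone (unchanged surrounding lines elided), the tail of nested_svm_vmrun() now reads:

        svm->nested.nested_run_pending = 1;

        if (enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb))
                goto out_exit_err;

        if (nested_svm_vmrun_msrpm(svm))
                goto out;

out_exit_err:
        svm->nested.nested_run_pending = 0;

        svm->vmcb->control.exit_code = SVM_EXIT_ERR;
        svm->vmcb->control.exit_code_hi = 0;
        svm->vmcb->control.exit_info_1 = 0;
        svm->vmcb->control.exit_info_2 = 0;

        nested_svm_vmexit(svm);

out:
        kvm_vcpu_unmap(&svm->vcpu, &map, true);

The fall-through is deliberate: when nested_svm_vmrun_msrpm() succeeds, control jumps over the error block to out; when either step fails, nested_run_pending is cleared and the failed VMRUN is reported to L1 as a #VMEXIT with exit code SVM_EXIT_ERR, matching what hardware reports for a VMRUN that cannot complete. The next hunk is the RSM (leave-SMM) path in svm.c: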
        struct kvm_host_map map;
        u64 guest;
        u64 vmcb;
+       int ret = 0;
        guest = GET_SMSTATE(u64, smstate, 0x7ed8);
        vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
        if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
                return 1;
        nested_vmcb = map.hva;
-       enter_svm_guest_mode(svm, vmcb, nested_vmcb);
+       ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
        kvm_vcpu_unmap(&svm->vcpu, &map, true);
}
-       return 0;
+
+       return ret;
}
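In the leave-SMM path shown above (svm_pre_leave_smm() at the time of this change), the result of the nested entry is now captured rather than dropped, and the vmcb mapping is released unconditionally before the function returns, so an error cannot leak the mapping. Read back from the hunk:

        nested_vmcb = map.hva;
        ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
        /* Unmap first: the mapping must not outlive this block, even on error. */
        kvm_vcpu_unmap(&svm->vcpu, &map, true);

A nonzero return now propagates to the RSM emulation code, which can treat the instruction as failed instead of resuming a half-entered L2. The final hunk updates the prototype in svm.h to match.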
static void enable_smi_window(struct kvm_vcpu *vcpu)
        return (svm->nested.ctl.intercept & (1ULL << INTERCEPT_NMI));
}
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-                          struct vmcb *nested_vmcb);
+int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+                         struct vmcb *nested_vmcb);
void svm_leave_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct vcpu_svm *svm);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);