KVM: nSVM: extract preparation of VMCB for nested run
Author:     Paolo Bonzini <pbonzini@redhat.com>
AuthorDate: Mon, 18 May 2020 14:56:43 +0000 (10:56 -0400)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Mon, 1 Jun 2020 08:25:58 +0000 (04:25 -0400)
Split out filling svm->vmcb.save and svm->vmcb.control before VMRUN.
Only the latter will be useful when restoring nested SVM state.

This patch introduces no semantic change, so the MMU setup is still
done in nested_prepare_vmcb_save.  The next patch will clean things up.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
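
The motivation is easiest to see from the restore side. Below is a minimal
sketch of how a later nested-state restore handler could reuse only the
control half of the split; the function name and the surrounding details are
illustrative assumptions, not part of this patch:

	/*
	 * Hypothetical restore path (not part of this patch): after
	 * migration, L2's save-area state arrives through the regular
	 * KVM register ioctls, so only the control fields of the nested
	 * VMCB need to be replayed into svm->vmcb.
	 */
	static int svm_restore_nested_state_sketch(struct vcpu_svm *svm,
						   struct vmcb *nested_vmcb,
						   u64 vmcb_gpa)
	{
		svm->nested.vmcb = vmcb_gpa;

		/* Replay L1's controls (intercepts, int_ctl, tsc_offset, ...). */
		load_nested_vmcb_control(svm, &nested_vmcb->control);

		/*
		 * Rebuild svm->vmcb.control from them.  nested_prepare_vmcb_save()
		 * is deliberately skipped: the save area was already restored
		 * through the ordinary register interfaces.
		 */
		nested_prepare_vmcb_control(svm, nested_vmcb);
		return 0;
	}
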
arch/x86/kvm/svm/nested.c

index fc0c6d1..73be7af 100644
@@ -245,21 +245,8 @@ static void load_nested_vmcb_control(struct vcpu_svm *svm,
        svm->vcpu.arch.tsc_offset += control->tsc_offset;
 }
 
-void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
-                         struct vmcb *nested_vmcb)
+static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
 {
-       bool evaluate_pending_interrupts =
-               is_intercept(svm, INTERCEPT_VINTR) ||
-               is_intercept(svm, INTERCEPT_IRET);
-
-       svm->nested.vmcb = vmcb_gpa;
-       if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
-               svm->vcpu.arch.hflags |= HF_HIF_MASK;
-       else
-               svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
-
-       load_nested_vmcb_control(svm, &nested_vmcb->control);
-
        if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE)
                nested_svm_init_mmu_context(&svm->vcpu);
 
@@ -291,7 +278,10 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
        svm->vcpu.arch.dr6  = nested_vmcb->save.dr6;
        svm->vmcb->save.cpl = nested_vmcb->save.cpl;
+}
 
+static void nested_prepare_vmcb_control(struct vcpu_svm *svm, struct vmcb *nested_vmcb)
+{
        svm_flush_tlb(&svm->vcpu);
        if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
                svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -321,6 +311,26 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
         */
        recalc_intercepts(svm);
 
+       mark_all_dirty(svm->vmcb);
+}
+
+void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+                         struct vmcb *nested_vmcb)
+{
+       bool evaluate_pending_interrupts =
+               is_intercept(svm, INTERCEPT_VINTR) ||
+               is_intercept(svm, INTERCEPT_IRET);
+
+       svm->nested.vmcb = vmcb_gpa;
+       if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
+               svm->vcpu.arch.hflags |= HF_HIF_MASK;
+       else
+               svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
+
+       load_nested_vmcb_control(svm, &nested_vmcb->control);
+       nested_prepare_vmcb_save(svm, nested_vmcb);
+       nested_prepare_vmcb_control(svm, nested_vmcb);
+
        /*
         * If L1 had a pending IRQ/NMI before executing VMRUN,
         * which wasn't delivered because it was disallowed (e.g.
@@ -336,8 +346,6 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
        enable_gif(svm);
        if (unlikely(evaluate_pending_interrupts))
                kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
-
-       mark_all_dirty(svm->vmcb);
 }
 
 int nested_svm_vmrun(struct vcpu_svm *svm)