KVM: VMX: Save HOST_CR3 in vmx_prepare_switch_to_guest()
author Lai Jiangshan <laijs@linux.alibaba.com>
Thu, 18 Nov 2021 11:08:03 +0000 (19:08 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 8 Dec 2021 09:25:06 +0000 (04:25 -0500)
The host CR3 of a vCPU thread can only change when the thread is
rescheduled. Moving the HOST_CR3 update into
vmx_prepare_switch_to_guest() therefore makes the code simpler.

Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211118110814.2568-5-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
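
The write-if-changed caching itself is unchanged by the patch: HOST_CR3 is
only rewritten when the current host CR3 differs from the copy cached in
loaded_vmcs->host_state; the patch only moves that check from every VM-enter
to vmx_prepare_switch_to_guest(). A minimal user-space sketch of the pattern
(read_host_cr3(), sync_host_cr3() and the "HOST_CR3" string are mock
stand-ins for __get_current_cr3_fast(), the real update sites and the VMCS
field, not kernel API):

#include <stdio.h>

/* Mock stand-ins for the real VMCS field write and host CR3 read. */
static void vmcs_writel(const char *field, unsigned long val)
{
	printf("VMWRITE %s = %#lx\n", field, val);
}

static unsigned long read_host_cr3(void)
{
	/* Pretend the host CR3 (including PCID bits) is stable for this thread. */
	return 0x1000aa000UL;
}

struct host_state {
	unsigned long cr3;	/* last value written to HOST_CR3 */
};

/*
 * Write HOST_CR3 only when the current host CR3 differs from the cached
 * copy; with the patch this runs once per guest-state load instead of on
 * every VM-enter.
 */
static void sync_host_cr3(struct host_state *hs)
{
	unsigned long cr3 = read_host_cr3();

	if (cr3 != hs->cr3) {
		vmcs_writel("HOST_CR3", cr3);
		hs->cr3 = cr3;
	}
}

int main(void)
{
	struct host_state hs = { .cr3 = 0 };

	sync_host_cr3(&hs);	/* first call: VMWRITE happens */
	sync_host_cr3(&hs);	/* unchanged: no VMWRITE */
	return 0;
}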
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index dc5041a..b03df82 100644
@@ -3033,7 +3033,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       unsigned long cr3, cr4;
+       unsigned long cr4;
        bool vm_fail;
 
        if (!nested_early_check)
@@ -3056,12 +3056,6 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
         */
        vmcs_writel(GUEST_RFLAGS, 0);
 
-       cr3 = __get_current_cr3_fast();
-       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
-               vmcs_writel(HOST_CR3, cr3);
-               vmx->loaded_vmcs->host_state.cr3 = cr3;
-       }
-
        cr4 = cr4_read_shadow();
        if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
                vmcs_writel(HOST_CR4, cr4);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 73cb132..1358770 100644
@@ -1103,6 +1103,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 #ifdef CONFIG_X86_64
        int cpu = raw_smp_processor_id();
 #endif
+       unsigned long cr3;
        unsigned long fs_base, gs_base;
        u16 fs_sel, gs_sel;
        int i;
@@ -1167,6 +1168,14 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 #endif
 
        vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
+
+       /* Host CR3 including its PCID is stable when guest state is loaded. */
+       cr3 = __get_current_cr3_fast();
+       if (unlikely(cr3 != host_state->cr3)) {
+               vmcs_writel(HOST_CR3, cr3);
+               host_state->cr3 = cr3;
+       }
+
        vmx->guest_state_loaded = true;
 }
 
@@ -6612,7 +6621,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       unsigned long cr3, cr4;
+       unsigned long cr4;
 
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!enable_vnmi &&
@@ -6657,12 +6666,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
        vcpu->arch.regs_dirty = 0;
 
-       cr3 = __get_current_cr3_fast();
-       if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
-               vmcs_writel(HOST_CR3, cr3);
-               vmx->loaded_vmcs->host_state.cr3 = cr3;
-       }
-
        cr4 = cr4_read_shadow();
        if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
                vmcs_writel(HOST_CR4, cr4);