Merge tag 'kvm-x86-vmx-6.6' of https://github.com/kvm-x86/linux into HEAD
author: Paolo Bonzini <pbonzini@redhat.com>
Thu, 31 Aug 2023 17:32:06 +0000 (13:32 -0400)
committer: Paolo Bonzini <pbonzini@redhat.com>
Thu, 31 Aug 2023 17:32:06 +0000 (13:32 -0400)
KVM: x86: VMX changes for 6.6:

 - Misc cleanups

 - Fix a bug where KVM reads a stale vmcs.IDT_VECTORING_INFO_FIELD when trying
   to handle NMI VM-Exits

arch/x86/kvm/vmx/vmx.c

index df461f3..c0236dd 100644 (file)
@@ -3071,13 +3071,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 
        vmx->rmode.vm86_active = 1;
 
-       /*
-        * Very old userspace does not call KVM_SET_TSS_ADDR before entering
-        * vcpu. Warn the user that an update is overdue.
-        */
-       if (!kvm_vmx->tss_addr)
-               pr_warn_once("KVM_SET_TSS_ADDR needs to be called before running vCPU\n");
-
        vmx_segment_cache_clear(vmx);
 
        vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
@@ -3350,7 +3343,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        vmx->emulation_required = vmx_emulation_required(vcpu);
 }
 
-static int vmx_get_max_tdp_level(void)
+static int vmx_get_max_ept_level(void)
 {
        if (cpu_has_vmx_ept_5levels())
                return 5;
@@ -6796,8 +6789,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
        vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
        read_unlock(&vcpu->kvm->mmu_lock);
 
-       vmx_flush_tlb_current(vcpu);
-
+       /*
+        * No need for a manual TLB flush at this point, KVM has already done a
+        * flush if there were SPTEs pointing at the previous page.
+        */
 out:
        /*
         * Do not pin apic access page in memory, the MMU notifier
@@ -7243,13 +7238,20 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
                                   flags);
 
        vcpu->arch.cr2 = native_read_cr2();
+       vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
+
+       vmx->idt_vectoring_info = 0;
 
        vmx_enable_fb_clear(vmx);
 
-       if (unlikely(vmx->fail))
+       if (unlikely(vmx->fail)) {
                vmx->exit_reason.full = 0xdead;
-       else
-               vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+               goto out;
+       }
+
+       vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+       if (likely(!vmx->exit_reason.failed_vmentry))
+               vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
        if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
            is_nmi(vmx_get_intr_info(vcpu))) {
@@ -7258,6 +7260,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
                kvm_after_interrupt(vcpu);
        }
 
+out:
        guest_state_exit_irqoff();
 }
 
@@ -7379,8 +7382,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
        loadsegment(es, __USER_DS);
 #endif
 
-       vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
-
        pt_guest_exit(vmx);
 
        kvm_load_host_xsave_state(vcpu);
@@ -7397,17 +7398,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
                vmx->nested.nested_run_pending = 0;
        }
 
-       vmx->idt_vectoring_info = 0;
-
        if (unlikely(vmx->fail))
                return EXIT_FASTPATH_NONE;
 
        if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
                kvm_machine_check();
 
-       if (likely(!vmx->exit_reason.failed_vmentry))
-               vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
-
        trace_kvm_exit(vcpu, KVM_ISA_VMX);
 
        if (unlikely(vmx->exit_reason.failed_vmentry))
@@ -8526,7 +8522,7 @@ static __init int hardware_setup(void)
         */
        vmx_setup_me_spte_mask();
 
-       kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
+       kvm_configure_mmu(enable_ept, 0, vmx_get_max_ept_level(),
                          ept_caps_to_lpage_level(vmx_capability.ept));
 
        /*