KVM: move EXIT_FASTPATH_REENTER_GUEST to common code
author Paolo Bonzini <pbonzini@redhat.com>
Tue, 2 Feb 2021 15:44:23 +0000 (10:44 -0500)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 4 Feb 2021 10:27:37 +0000 (05:27 -0500)
Now that KVM is using static calls, calling vmx_vcpu_run and
vmx_sync_pir_to_irr no longer incurs the cost of a retpoline.

Therefore there is no longer any need to handle EXIT_FASTPATH_REENTER_GUEST
in vendor code.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
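
For readers unfamiliar with the mechanism the message relies on, here is a
minimal sketch of the static_call pattern KVM adopted shortly before this
commit. DEFINE_STATIC_CALL_NULL, static_call_update and static_call are the
real kernel interfaces; the trimmed-down kvm_x86_ops structure and the
kvm_ops_update() helper below are illustrative stand-ins, not the exact
tree contents.

/* Sketch only: a cut-down kvm_x86_ops with a single hook. */
#include <linux/static_call.h>

struct kvm_vcpu;

struct kvm_x86_ops {
	int (*run)(struct kvm_vcpu *vcpu);	/* stand-in for fastpath_t */
};

/* One static-call key per hook; the call site starts out patched to NULL. */
DEFINE_STATIC_CALL_NULL(kvm_x86_run, *(((struct kvm_x86_ops *)0)->run));

static void kvm_ops_update(struct kvm_x86_ops *ops)
{
	/*
	 * Rewrites every static_call(kvm_x86_run)(...) site into a direct
	 * call to ops->run, so no indirect branch (and therefore no
	 * retpoline) is executed on the way into vmx_vcpu_run.
	 */
	static_call_update(kvm_x86_run, ops->run);
}

With the hooks patched this way, the loop added to vcpu_enter_guest() below
can invoke static_call(kvm_x86_run)(vcpu) and
static_call(kvm_x86_sync_pir_to_irr)(vcpu) on every iteration at
direct-call cost, which is why the reentry loop no longer needs to live in
vendor code.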
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 02949a6..0cd24ae 100644
@@ -6711,11 +6711,9 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
 
 static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
-       fastpath_t exit_fastpath;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long cr3, cr4;
 
-reenter_guest:
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!enable_vnmi &&
                     vmx->loaded_vmcs->soft_vnmi_blocked))
@@ -6865,22 +6863,7 @@ reenter_guest:
        if (is_guest_mode(vcpu))
                return EXIT_FASTPATH_NONE;
 
-       exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
-       if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
-               if (!kvm_vcpu_exit_request(vcpu)) {
-                       /*
-                        * FIXME: this goto should be a loop in vcpu_enter_guest,
-                        * but it would incur the cost of a retpoline for now.
-                        * Revisit once static calls are available.
-                        */
-                       if (vcpu->arch.apicv_active)
-                               vmx_sync_pir_to_irr(vcpu);
-                       goto reenter_guest;
-               }
-               exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
-       }
-
-       return exit_fastpath;
+       return vmx_exit_handlers_fastpath(vcpu);
 }
 
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4758afe..39f01a0 100644
@@ -1796,12 +1796,11 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
 {
        return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
                xfer_to_guest_mode_work_pending();
 }
-EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
 
 /*
  * The fast path for frequent and performance sensitive wrmsr emulation,
@@ -9044,7 +9043,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
        }
 
-       exit_fastpath = static_call(kvm_x86_run)(vcpu);
+       for (;;) {
+               exit_fastpath = static_call(kvm_x86_run)(vcpu);
+               if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
+                       break;
+
+               if (unlikely(kvm_vcpu_exit_request(vcpu))) {
+                       exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+                       break;
+               }
+
+               if (vcpu->arch.apicv_active)
+                       static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+       }
 
        /*
         * Do this here before restoring debug registers on the host.  And
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 5f7c224..cc652a3 100644
@@ -395,7 +395,6 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
 bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
                              struct x86_exception *e);
 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);