KVM: x86: Uninline and export hv_track_root_tdp()
author Sean Christopherson <seanjc@google.com>
Fri, 28 Jan 2022 00:51:55 +0000 (00:51 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 10 Feb 2022 18:47:19 +0000 (13:47 -0500)
Uninline and export Hyper-V's hv_track_root_tdp(), which is (somewhat
indirectly) the last remaining reference to kvm_x86_ops from vendor
modules, i.e. uninlining it will allow unexporting kvm_x86_ops.  Reloading
the TDP PGD
isn't the fastest of paths, hv_track_root_tdp() isn't exactly tiny, and
disallowing vendor code from accessing kvm_x86_ops provides nice-to-have
encapsulation of common x86 code (and of Hyper-V code for that matter).

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220128005208.4008533-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/kvm_onhyperv.c
arch/x86/kvm/kvm_onhyperv.h
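
For context, a minimal sketch of the vendor-module side is below: the VMX
and SVM .load_mmu_pgd paths call the now-exported helper after programming
the new root, so they no longer need to reference kvm_x86_ops themselves.
The surrounding function is illustrative (its name and body are assumptions,
and it presumes the vendor module's usual includes plus "kvm_onhyperv.h");
only the hv_track_root_tdp() call reflects the API touched by this commit.

static void vendor_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
				int root_level)
{
	/* ... program root_hpa into the vendor's TDP structures ... */

	/*
	 * Let common code track the root for Hyper-V's enlightened
	 * remote TLB flush.  The kvm_x86_ops.tlb_remote_flush check now
	 * runs inside kvm.ko, not in this (vendor) module.
	 */
	hv_track_root_tdp(vcpu, root_hpa);
}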

diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c
index b469f45..ee4f696 100644
@@ -92,3 +92,17 @@ int hv_remote_flush_tlb(struct kvm *kvm)
        return hv_remote_flush_tlb_with_range(kvm, NULL);
 }
 EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);
+
+void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
+{
+       struct kvm_arch *kvm_arch = &vcpu->kvm->arch;
+
+       if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
+               spin_lock(&kvm_arch->hv_root_tdp_lock);
+               vcpu->arch.hv_root_tdp = root_tdp;
+               if (root_tdp != kvm_arch->hv_root_tdp)
+                       kvm_arch->hv_root_tdp = INVALID_PAGE;
+               spin_unlock(&kvm_arch->hv_root_tdp_lock);
+       }
+}
+EXPORT_SYMBOL_GPL(hv_track_root_tdp);
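
For reference, the tracked root feeds the remote-flush path earlier in this
same file: while kvm_arch->hv_root_tdp is valid, every vCPU shares one TDP
root and a single Hyper-V flush covers the whole VM; once roots diverge
(the INVALID_PAGE case above), the flush falls back to iterating vCPUs.
The sketch below is a from-memory simplification of that logic, not code
from this patch; it assumes the includes already present in kvm_onhyperv.c,
and ranged flushes and error handling are omitted.

static int hv_flush_all_roots_sketch(struct kvm *kvm)
{
	struct kvm_arch *kvm_arch = &kvm->arch;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int ret = 0;

	spin_lock(&kvm_arch->hv_root_tdp_lock);
	if (VALID_PAGE(kvm_arch->hv_root_tdp)) {
		/* All vCPUs share hv_root_tdp: one hypercall suffices. */
		ret = hyperv_flush_guest_mapping(kvm_arch->hv_root_tdp);
	} else {
		/* Roots diverged (see hv_track_root_tdp() above). */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (VALID_PAGE(vcpu->arch.hv_root_tdp))
				ret |= hyperv_flush_guest_mapping(vcpu->arch.hv_root_tdp);
		}
	}
	spin_unlock(&kvm_arch->hv_root_tdp_lock);

	return ret;
}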
diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h
index 1c67abf..287e98e 100644
 int hv_remote_flush_tlb_with_range(struct kvm *kvm,
                struct kvm_tlb_range *range);
 int hv_remote_flush_tlb(struct kvm *kvm);
-
-static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
-{
-       struct kvm_arch *kvm_arch = &vcpu->kvm->arch;
-
-       if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
-               spin_lock(&kvm_arch->hv_root_tdp_lock);
-               vcpu->arch.hv_root_tdp = root_tdp;
-               if (root_tdp != kvm_arch->hv_root_tdp)
-                       kvm_arch->hv_root_tdp = INVALID_PAGE;
-               spin_unlock(&kvm_arch->hv_root_tdp_lock);
-       }
-}
+void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp);
 #else /* !CONFIG_HYPERV */
 static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
 {