KVM: x86/mmu: Avoid retpoline on ->page_fault() with TDP
author Sean Christopherson <sean.j.christopherson@intel.com>
Thu, 6 Feb 2020 22:14:34 +0000 (14:14 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 12 Feb 2020 19:09:42 +0000 (20:09 +0100)
Wrap calls to ->page_fault() with a small shim to directly invoke the
TDP fault handler when the kernel is using retpolines and TDP is being
used.  Single out the TDP fault handler and annotate the TDP path as
likely() to coerce the compiler into preferring it over the indirect
function call.

Rename tdp_page_fault() to kvm_tdp_page_fault(), as it's exposed outside
of mmu.c to allow inlining the shim.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
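
For reference, a minimal standalone sketch of the retpoline-avoidance
shim pattern follows (generic names such as do_fault(), tdp_fault() and
fault_handler_t are illustrative only, not KVM symbols; the real shim is
the kvm_mmu_do_page_fault() helper added to mmu.h below):

    /* As in the kernel's likely() annotation. */
    #define likely(x) __builtin_expect(!!(x), 1)

    typedef int (*fault_handler_t)(void *ctx, unsigned long addr);

    /* Stand-in for the hot handler (kvm_tdp_page_fault() in the real code). */
    static int tdp_fault(void *ctx, unsigned long addr)
    {
            (void)ctx;
            (void)addr;
            return 0;
    }

    static inline int do_fault(fault_handler_t handler, void *ctx,
                               unsigned long addr)
    {
    #ifdef CONFIG_RETPOLINE
            /* Common case: known target, so make a cheap direct call. */
            if (likely(handler == tdp_fault))
                    return tdp_fault(ctx, addr);
    #endif
            /* Fallback: indirect call, which goes through the retpoline thunk. */
            return handler(ctx, addr);
    }

The point of the pattern is that when the function pointer almost always
targets one known handler, the compare-and-direct-call path sidesteps the
retpoline thunk in the common case while preserving the indirect call as
a fallback.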
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d55674f..a647601 100644
@@ -102,6 +102,19 @@ static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
                                              kvm_get_active_pcid(vcpu));
 }
 
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+                      bool prefault);
+
+static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                                       u32 err, bool prefault)
+{
+#ifdef CONFIG_RETPOLINE
+       if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
+               return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
+#endif
+       return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
+}
+
 /*
  * Currently, we have two sorts of write-protection, a) the first one
  * write-protects guest page to sync the guest modification, b) another one is
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7011a4e..87e9ba2 100644
@@ -4219,8 +4219,8 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
 }
 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-                         bool prefault)
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+                      bool prefault)
 {
        int max_level;
 
@@ -4925,7 +4925,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
                return;
 
        context->mmu_role.as_u64 = new_role.as_u64;
-       context->page_fault = tdp_page_fault;
+       context->page_fault = kvm_tdp_page_fault;
        context->sync_page = nonpaging_sync_page;
        context->invlpg = nonpaging_invlpg;
        context->update_pte = nonpaging_update_pte;
@@ -5436,9 +5436,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
        }
 
        if (r == RET_PF_INVALID) {
-               r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa,
-                                              lower_32_bits(error_code),
-                                              false);
+               r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
+                                         lower_32_bits(error_code), false);
                WARN_ON(r == RET_PF_INVALID);
        }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5e762c8..fd9e2f6 100644
@@ -10192,7 +10192,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
              work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
                return;
 
-       vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);
+       kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
 }
 
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)