KVM: x86/mmu: Move nonpaging_page_fault() below try_async_pf()
author Sean Christopherson <sean.j.christopherson@intel.com>
Fri, 6 Dec 2019 23:57:17 +0000 (15:57 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 8 Jan 2020 17:16:03 +0000 (18:16 +0100)
Move nonpaging_page_fault() below try_async_pf() to eliminate the
forward declaration of try_async_pf() and to prepare for combining the
bulk of nonpaging_page_fault() and tdp_page_fault() into a common
helper.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index a1d1fc2..b363306 100644 (file)
@@ -3657,10 +3657,6 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, int level,
        return fault_handled;
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-                        gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
-                        bool *writable);
-
 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
                               struct list_head *invalid_list)
 {
@@ -4119,6 +4115,55 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
        walk_shadow_page_lockless_end(vcpu);
 }
 
+static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+                                  gfn_t gfn)
+{
+       struct kvm_arch_async_pf arch;
+
+       arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+       arch.gfn = gfn;
+       arch.direct_map = vcpu->arch.mmu->direct_map;
+       arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+
+       return kvm_setup_async_pf(vcpu, cr2_or_gpa,
+                                 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
+}
+
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+                        gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
+                        bool *writable)
+{
+       struct kvm_memory_slot *slot;
+       bool async;
+
+       /*
+        * Don't expose private memslots to L2.
+        */
+       if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+               *pfn = KVM_PFN_NOSLOT;
+               return false;
+       }
+
+       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+       async = false;
+       *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
+       if (!async)
+               return false; /* *pfn has correct page already */
+
+       if (!prefault && kvm_can_do_async_pf(vcpu)) {
+               trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
+               if (kvm_find_async_pf_gfn(vcpu, gfn)) {
+                       trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
+                       kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+                       return true;
+               } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
+                       return true;
+       }
+
+       *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
+       return false;
+}
+
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
                                u32 error_code, bool prefault)
 {
@@ -4188,55 +4233,6 @@ out_unlock:
        return r;
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                                  gfn_t gfn)
-{
-       struct kvm_arch_async_pf arch;
-
-       arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
-       arch.gfn = gfn;
-       arch.direct_map = vcpu->arch.mmu->direct_map;
-       arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
-
-       return kvm_setup_async_pf(vcpu, cr2_or_gpa,
-                                 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
-}
-
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
-                        gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
-                        bool *writable)
-{
-       struct kvm_memory_slot *slot;
-       bool async;
-
-       /*
-        * Don't expose private memslots to L2.
-        */
-       if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
-               *pfn = KVM_PFN_NOSLOT;
-               return false;
-       }
-
-       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-       async = false;
-       *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
-       if (!async)
-               return false; /* *pfn has correct page already */
-
-       if (!prefault && kvm_can_do_async_pf(vcpu)) {
-               trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
-               if (kvm_find_async_pf_gfn(vcpu, gfn)) {
-                       trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
-                       kvm_make_request(KVM_REQ_APF_HALT, vcpu);
-                       return true;
-               } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
-                       return true;
-       }
-
-       *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
-       return false;
-}
-
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len)
 {