From: Sean Christopherson
Date: Fri, 3 Jul 2020 02:35:30 +0000 (-0700)
Subject: KVM: x86/mmu: Move fast_page_fault() call above mmu_topup_memory_caches()
X-Git-Tag: v5.15~3185^2~54
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=832914452a9638b713a3ea9a490cbc18f3b164f2;p=platform%2Fkernel%2Flinux-starfive.git

KVM: x86/mmu: Move fast_page_fault() call above mmu_topup_memory_caches()

Avoid refilling the memory caches and potentially slow reclaim/swap when
handling a fast page fault, which does not need to allocate any new objects.

Reviewed-by: Ben Gardon
Signed-off-by: Sean Christopherson
Message-Id: <20200703023545.8771-7-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini
---

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a8eddb8..d851f8c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4120,6 +4120,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
 		return RET_PF_EMULATE;
 
+	if (fast_page_fault(vcpu, gpa, error_code))
+		return RET_PF_RETRY;
+
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
@@ -4127,9 +4130,6 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 	if (lpage_disallowed)
 		max_level = PG_LEVEL_4K;
 
-	if (fast_page_fault(vcpu, gpa, error_code))
-		return RET_PF_RETRY;
-
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
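
For readability, the relevant portion of direct_page_fault() after the reorder is sketched below. This is reconstructed only from the hunks above; the function's local declarations and the rest of the slow path are elided, so treat it as an approximation of the resulting flow rather than the exact upstream source.

	/* Inside direct_page_fault(); earlier declarations and checks elided. */

	if (page_fault_handle_page_track(vcpu, error_code, gfn))
		return RET_PF_EMULATE;

	/*
	 * Try the fast path first: it resolves the fault without allocating
	 * any new objects, so there is no need to top up the memory caches
	 * (and risk slow reclaim/swap) before attempting it.
	 */
	if (fast_page_fault(vcpu, gpa, error_code))
		return RET_PF_RETRY;

	/* Only refill the caches once the slow path is actually taken. */
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (lpage_disallowed)
		max_level = PG_LEVEL_4K;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	/* ... remainder of the slow path elided ... */

The net effect is that a fault fixable without mmu_lock never touches the caches; only faults that fall through to the slow path pay the cost of refilling them.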