KVM: x86/mmu: Rename cr2_or_gpa to gpa in fast_page_fault
Author:     David Matlack <dmatlack@google.com>
AuthorDate: Tue, 13 Jul 2021 22:09:52 +0000 (22:09 +0000)
Committer:  Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Mon, 2 Aug 2021 15:01:46 +0000 (11:01 -0400)
fast_page_fault() is only called from direct_page_fault(), where the
address is already known to be a GPA, so rename the cr2_or_gpa parameter
to plain gpa.
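
For reference, the call site looks roughly like the sketch below (a
paraphrase of the surrounding code, not a verbatim quote; the elided
parts are summarized in comments). direct_page_fault() already receives
the faulting address as a gpa_t named gpa:

	/*
	 * Sketch only: direct_page_fault() takes the faulting address as
	 * a gpa_t, so the value it forwards to fast_page_fault() is
	 * always a guest physical address.
	 */
	static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
				     u32 error_code, bool prefault,
				     int max_level, bool is_tdp)
	{
		int r;

		/* ... early checks elided ... */

		r = fast_page_fault(vcpu, gpa, error_code);

		/* ... remainder of the slow path elided ... */
	}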

Fixes: 736c291c9f36 ("KVM: x86: Use gpa_t for cr2/gpa to fix TDP support on 32-bit KVM")
Reviewed-by: Ben Gardon <bgardon@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210713220957.3493520-2-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

arch/x86/kvm/mmu/mmu.c

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0b2954b..6f5910b 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3103,8 +3103,7 @@ static bool is_access_allowed(u32 fault_err_code, u64 spte)
 /*
  * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
  */
-static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-                          u32 error_code)
+static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code)
 {
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
@@ -3120,7 +3119,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
        do {
                u64 new_spte;
 
-               for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
+               for_each_shadow_entry_lockless(vcpu, gpa, iterator, spte)
                        if (!is_shadow_present_pte(spte))
                                break;
 
@@ -3199,8 +3198,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 
        } while (true);
 
-       trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
-                             spte, ret);
+       trace_fast_page_fault(vcpu, gpa, error_code, iterator.sptep, spte, ret);
        walk_shadow_page_lockless_end(vcpu);
 
        return ret;
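
As the comment above fast_page_fault() notes, the function returns one of
RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS. A simplified sketch of
how a caller consumes that result (illustrative only, not the exact
upstream flow):

	r = fast_page_fault(vcpu, gpa, error_code);
	if (r != RET_PF_INVALID)
		/*
		 * RET_PF_FIXED or RET_PF_SPURIOUS: the lockless fast path
		 * resolved the fault (or found it already resolved), so
		 * the slow path that takes mmu_lock is skipped.
		 */
		return r;

	/* RET_PF_INVALID: fall through to the slow path. */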