KVM: MMU: change kvm_tdp_mmu_map() arguments to kvm_page_fault
author Paolo Bonzini <pbonzini@redhat.com>
Fri, 6 Aug 2021 08:35:50 +0000 (04:35 -0400)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 1 Oct 2021 07:44:50 +0000 (03:44 -0400)
Pass struct kvm_page_fault to kvm_tdp_mmu_map() instead of
extracting the individual arguments from the struct at the call site.

Suggested-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
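
For orientation, a simplified sketch of the struct kvm_page_fault fields
that kvm_tdp_mmu_map() consumes after this change. The actual definition
lives in arch/x86/kvm/mmu/mmu_internal.h and carries additional state, so
the layout below is illustrative only:

struct kvm_page_fault {
	/* Arguments to kvm_mmu_do_page_fault(), fixed at fault time. */
	const gpa_t addr;
	const u32 error_code;
	const bool prefault;

	/* Derived from error_code. */
	const bool exec;
	const bool write;

	/* addr shifted by PAGE_SHIFT (for direct faults). */
	gfn_t gfn;

	/* Outputs of the pfn lookup; hugepage logic may adjust them. */
	u8 max_level;
	kvm_pfn_t pfn;
	bool map_writable;
};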

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c84e978..b2020b4 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3988,8 +3988,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
                goto out_unlock;
 
        if (is_tdp_mmu_fault)
-               r = kvm_tdp_mmu_map(vcpu, gpa, error_code, fault->map_writable, fault->max_level,
-                                   fault->pfn, fault->prefault);
+               r = kvm_tdp_mmu_map(vcpu, fault);
        else
                r = __direct_map(vcpu, fault);
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7a5a24c..4a5bb0b 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -985,35 +985,30 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
  * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
  * page tables and SPTEs to translate the faulting guest physical address.
  */
-int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-                   int map_writable, int max_level, kvm_pfn_t pfn,
-                   bool prefault)
+int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
        bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
-       bool write = error_code & PFERR_WRITE_MASK;
-       bool exec = error_code & PFERR_FETCH_MASK;
-       bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
+       bool huge_page_disallowed = fault->exec && nx_huge_page_workaround_enabled;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
        struct tdp_iter iter;
        struct kvm_mmu_page *sp;
        u64 *child_pt;
        u64 new_spte;
        int ret;
-       gfn_t gfn = gpa >> PAGE_SHIFT;
        int level;
        int req_level;
 
-       level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
+       level = kvm_mmu_hugepage_adjust(vcpu, fault->gfn, fault->max_level, &fault->pfn,
                                        huge_page_disallowed, &req_level);
 
-       trace_kvm_mmu_spte_requested(gpa, level, pfn);
+       trace_kvm_mmu_spte_requested(fault->addr, level, fault->pfn);
 
        rcu_read_lock();
 
-       tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
+       tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
                if (nx_huge_page_workaround_enabled)
-                       disallowed_hugepage_adjust(iter.old_spte, gfn,
-                                                  iter.level, &pfn, &level);
+                       disallowed_hugepage_adjust(iter.old_spte, fault->gfn,
+                                                  iter.level, &fault->pfn, &level);
 
                if (iter.level == level)
                        break;
@@ -1069,8 +1064,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                return RET_PF_RETRY;
        }
 
-       ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
-                                             pfn, prefault);
+       ret = tdp_mmu_map_handle_target_level(vcpu, fault->write, fault->map_writable, &iter,
+                                             fault->pfn, fault->prefault);
        rcu_read_unlock();
 
        return ret;
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 358f447..ceaf7ff 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -48,9 +48,7 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
 
-int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-                   int map_writable, int max_level, kvm_pfn_t pfn,
-                   bool prefault);
+int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 
 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
                                 bool flush);
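
As a usage note, the fault struct is filled in once, further up the stack;
a hedged sketch of that construction (field list abbreviated, loosely based
on kvm_mmu_do_page_fault() in mmu_internal.h as of this series):

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu,
					gpa_t cr2_or_gpa, u32 err,
					bool prefault)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.prefault = prefault,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		/* plus present/rsvd/user, is_tdp, max_level, ... */
	};

	/* direct_page_fault() and kvm_tdp_mmu_map() then share &fault. */
	return vcpu->arch.mmu->page_fault(vcpu, &fault);
}

With this commit the final consumer on the TDP MMU path no longer needs the
exploded argument list, so adding new fault state means extending the struct
rather than every function signature in between.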