KVM: x86/mmu: Refactor THP adjust to prep for changing query
author	Sean Christopherson <sean.j.christopherson@intel.com>
Wed, 8 Jan 2020 20:24:40 +0000 (12:24 -0800)
committer	Paolo Bonzini <pbonzini@redhat.com>
Mon, 27 Jan 2020 19:00:04 +0000 (20:00 +0100)
Refactor transparent_hugepage_adjust() in preparation for walking the
host page tables to identify hugepage mappings, initially for THP pages,
and eventualy for HugeTLB and DAX-backed pages as well.  The latter
cases support 1gb pages, i.e. the adjustment logic needs access to the
max allowed level.
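
As a quick illustration (not part of the patch), the adjustment boils
down to rounding the pfn down to the hugepage boundary of the chosen
mapping level.  The sketch below is a userspace-compilable
approximation of that masking step; the PT_*_LEVEL constants,
PAGES_PER_HPAGE() and align_pfn() are illustrative stand-ins for KVM's
definitions, not the kernel code itself.

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  /*
   * Illustrative constants mirroring the x86 KVM level scheme: level 1
   * is a 4KB page, level 2 a 2MB hugepage, level 3 a 1GB hugepage,
   * with 512 entries per paging level.
   */
  #define PT_PAGE_TABLE_LEVEL     1
  #define PT_DIRECTORY_LEVEL      2
  #define PT_PDPE_LEVEL           3
  #define PAGES_PER_HPAGE(level)  (1ULL << (((level) - 1) * 9))

  /*
   * Round a pfn down to the hugepage boundary of the given level, the
   * same masking transparent_hugepage_adjust() performs; gfn and pfn
   * must share the same offset within the hugepage.
   */
  static uint64_t align_pfn(uint64_t gfn, uint64_t pfn, int level)
  {
          uint64_t mask = PAGES_PER_HPAGE(level) - 1;

          assert((gfn & mask) == (pfn & mask));
          return pfn & ~mask;
  }

  int main(void)
  {
          /* 2MB mapping: 512 4KB pages, so the low 9 bits are masked. */
          printf("%#llx\n", (unsigned long long)
                 align_pfn(0x200, 0x4a00, PT_DIRECTORY_LEVEL));
          /* 1GB mapping: 512 * 512 pages, masking the low 18 bits. */
          printf("%#llx\n", (unsigned long long)
                 align_pfn(0x40000, 0xc0000, PT_PDPE_LEVEL));
          return 0;
  }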

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e4458c9..64c28a3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3329,33 +3329,34 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
        __direct_pte_prefetch(vcpu, sp, sptep);
 }
 
-static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
-                                       gfn_t gfn, kvm_pfn_t *pfnp,
+static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
+                                       int max_level, kvm_pfn_t *pfnp,
                                        int *levelp)
 {
        kvm_pfn_t pfn = *pfnp;
        int level = *levelp;
+       kvm_pfn_t mask;
+
+       if (max_level == PT_PAGE_TABLE_LEVEL || level > PT_PAGE_TABLE_LEVEL)
+               return;
+
+       if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn) ||
+           kvm_is_zone_device_pfn(pfn))
+               return;
+
+       if (!kvm_is_transparent_hugepage(pfn))
+               return;
+
+       level = PT_DIRECTORY_LEVEL;
 
        /*
-        * Check if it's a transparent hugepage. If this would be an
-        * hugetlbfs page, level wouldn't be set to
-        * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
-        * here.
+        * mmu_notifier_retry() was successful and mmu_lock is held, so
+        * the pmd can't be split from under us.
         */
-       if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
-           !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
-           kvm_is_transparent_hugepage(pfn)) {
-               unsigned long mask;
-
-               /*
-                * mmu_notifier_retry() was successful and mmu_lock is held, so
-                * the pmd can't be split from under us.
-                */
-               *levelp = level = PT_DIRECTORY_LEVEL;
-               mask = KVM_PAGES_PER_HPAGE(level) - 1;
-               VM_BUG_ON((gfn & mask) != (pfn & mask));
-               *pfnp = pfn & ~mask;
-       }
+       *levelp = level;
+       mask = KVM_PAGES_PER_HPAGE(level) - 1;
+       VM_BUG_ON((gfn & mask) != (pfn & mask));
+       *pfnp = pfn & ~mask;
 }
 
 static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
@@ -3395,8 +3396,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
        if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
                return RET_PF_RETRY;
 
-       if (likely(max_level > PT_PAGE_TABLE_LEVEL))
-               transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
+       transparent_hugepage_adjust(vcpu, gfn, max_level, &pfn, &level);
 
        trace_kvm_mmu_spte_requested(gpa, level, pfn);
        for_each_shadow_entry(vcpu, gpa, it) {
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index eaa00c4..1ad87f0 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -688,8 +688,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
        gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
        base_gfn = gfn;
 
-       if (max_level > PT_PAGE_TABLE_LEVEL)
-               transparent_hugepage_adjust(vcpu, gw->gfn, &pfn, &hlevel);
+       transparent_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn, &hlevel);
 
        trace_kvm_mmu_spte_requested(addr, gw->level, pfn);