KVM: x86/mmu: Refactor the per-slot level calculation in mapping_level()
Author: Sean Christopherson <sean.j.christopherson@intel.com>
Fri, 6 Dec 2019 23:57:19 +0000 (15:57 -0800)
Committer: Paolo Bonzini <pbonzini@redhat.com>
Wed, 8 Jan 2020 17:16:04 +0000 (18:16 +0100)
Invert the loop which adjusts the allowed page level based on what's
compatible with the associated memslot to use a largest-to-smallest
page size walk.  This paves the way for passing around a "max level"
variable instead of having redundant checks and/or multiple booleans.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index defe94e..8db2bb0 100644 (file)
@@ -1330,7 +1330,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
                         bool *force_pt_level)
 {
-       int host_level, level, max_level;
+       int host_level, max_level;
        struct kvm_memory_slot *slot;
 
        if (unlikely(*force_pt_level))
@@ -1347,12 +1347,12 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
                return host_level;
 
        max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
-
-       for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
-               if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
+       for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
+               if (!__mmu_gfn_lpage_is_disallowed(large_gfn, max_level, slot))
                        break;
+       }
 
-       return level - 1;
+       return max_level;
 }
 
 /*