KVM: x86: Fold "write-protect large" use case into generic write-protect
author Sean Christopherson <seanjc@google.com>
Sat, 13 Feb 2021 00:50:14 +0000 (16:50 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 19 Feb 2021 08:08:35 +0000 (03:08 -0500)
Drop kvm_mmu_slot_largepage_remove_write_access() and refactor its sole
caller to use kvm_mmu_slot_remove_write_access().  Remove the now-unused
slot_handle_large_level() and slot_handle_all_level() helpers.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210213005015.1651772-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
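
For context, the fold is possible because the generic routine already takes a
minimum page level (visible in the x86.c hunk below, which passes "level" as a
third argument).  A minimal sketch of the equivalence (hypothetical stubs, not
the kernel build; the start_level parameter name and the bare struct
declarations are assumptions, and PG_LEVEL_2M == 2 per x86's enum pg_level):

enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2 };

struct kvm;
struct kvm_memory_slot;

/* Kept by this patch; declaration only, the body lives in mmu.c. */
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      int start_level);

/* What the dropped helper amounted to: a fixed 2M start level. */
static inline void
kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					   struct kvm_memory_slot *memslot)
{
	kvm_mmu_slot_remove_write_access(kvm, memslot, PG_LEVEL_2M);
}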
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 562bff7..e2178e0 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5205,22 +5205,6 @@ slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
 }
 
 static __always_inline bool
-slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                     slot_level_handler fn, bool lock_flush_tlb)
-{
-       return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
-                                KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
-}
-
-static __always_inline bool
-slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
-                       slot_level_handler fn, bool lock_flush_tlb)
-{
-       return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1,
-                                KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
-}
-
-static __always_inline bool
 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
                 slot_level_handler fn, bool lock_flush_tlb)
 {
@@ -5584,22 +5568,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
                kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 
-void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
-                                       struct kvm_memory_slot *memslot)
-{
-       bool flush;
-
-       write_lock(&kvm->mmu_lock);
-       flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
-                                       false);
-       if (is_tdp_mmu_enabled(kvm))
-               flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
-       write_unlock(&kvm->mmu_lock);
-
-       if (flush)
-               kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
-}
-
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
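
The two wrappers deleted above were thin shims that merely pinned the start
level of a slot_handle_level() walk.  Below, a standalone model of that
relationship (hypothetical: the signature is reduced to the level bounds, and
the constants mirror x86's enum pg_level, where PG_LEVEL_4K == 1):

#include <stdbool.h>
#include <stdio.h>

enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };
#define KVM_MAX_HUGEPAGE_LEVEL	PG_LEVEL_1G

/* Stand-in for slot_handle_level(): just reports its level bounds. */
static bool slot_handle_level(int start_level, int end_level)
{
	printf("walk rmaps for levels %d..%d\n", start_level, end_level);
	return true;
}

/* The deleted helpers only fixed the start level: */
static bool slot_handle_all_level(void)
{
	return slot_handle_level(PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL);
}

static bool slot_handle_large_level(void)
{
	/* PG_LEVEL_4K + 1 == PG_LEVEL_2M, i.e. huge pages only. */
	return slot_handle_level(PG_LEVEL_4K + 1, KVM_MAX_HUGEPAGE_LEVEL);
}

int main(void)
{
	slot_handle_all_level();	/* levels 1..3: 4K, 2M and 1G */
	slot_handle_large_level();	/* levels 2..3: 2M and 1G only */
	return 0;
}

With callers passing the start level directly, neither shim pays its way.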
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dca2c33..1d2bc89 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10829,25 +10829,26 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
                 */
                kvm_mmu_zap_collapsible_sptes(kvm, new);
        } else {
-               /*
-                * Large sptes are write-protected so they can be split on first
-                * write. New large sptes cannot be created for this slot until
-                * the end of the logging. See the comments in fast_page_fault().
-                *
-                * For small sptes, nothing is done if the dirty log is in the
-                * initial-all-set state.  Otherwise, depending on whether pml
-                * is enabled the D-bit or the W-bit will be cleared.
-                */
+               /* By default, write-protect everything to log writes. */
+               int level = PG_LEVEL_4K;
+
                if (kvm_x86_ops.cpu_dirty_log_size) {
+                       /*
+                        * Clear all dirty bits, unless pages are treated as
+                        * dirty from the get-go.
+                        */
                        if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
                                kvm_mmu_slot_leaf_clear_dirty(kvm, new);
-                       kvm_mmu_slot_largepage_remove_write_access(kvm, new);
-               } else {
-                       int level =
-                               kvm_dirty_log_manual_protect_and_init_set(kvm) ?
-                               PG_LEVEL_2M : PG_LEVEL_4K;
 
                        /*
+                        * Write-protect large pages on write so that dirty
+                        * logging happens at 4k granularity.  No need to
+                        * write-protect small SPTEs since write accesses are
+                        * logged by the CPU via dirty bits.
+                        */
+                       level = PG_LEVEL_2M;
+               } else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
+                       /*
                         * If we're with initial-all-set, we don't need
                         * to write protect any small page because
                         * they're reported as dirty already.  However
@@ -10855,8 +10856,9 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
                         * we still need to write-protect huge pages
                         * so that the page split can happen lazily on
                         * the first write to the huge page.
                         */
-                       kvm_mmu_slot_remove_write_access(kvm, new, level);
+                       level = PG_LEVEL_2M;
                }
+               kvm_mmu_slot_remove_write_access(kvm, new, level);
        }
 }
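
After the patch, kvm_mmu_slot_apply_flags() computes one minimum level and
ends with a single kvm_mmu_slot_remove_write_access() call.  A condensed,
runnable model of just that decision (hypothetical booleans stand in for
kvm_x86_ops.cpu_dirty_log_size != 0 and
kvm_dirty_log_manual_protect_and_init_set(kvm); the dirty-bit clearing done
in the PML case is omitted):

#include <stdbool.h>
#include <stdio.h>

enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2 };

static int wrprot_min_level(bool cpu_dirty_log, bool init_all_set)
{
	/* By default, write-protect everything to log writes. */
	int level = PG_LEVEL_4K;

	if (cpu_dirty_log) {
		/*
		 * The CPU logs 4K writes via dirty bits, so only huge
		 * pages need write protection, forcing a split on the
		 * first write.
		 */
		level = PG_LEVEL_2M;
	} else if (init_all_set) {
		/*
		 * Small pages are reported dirty from the start, so
		 * again only huge pages need write protection.
		 */
		level = PG_LEVEL_2M;
	}
	return level;	/* passed to kvm_mmu_slot_remove_write_access() */
}

int main(void)
{
	printf("PML:          min level %d\n", wrprot_min_level(true, false));
	printf("init-all-set: min level %d\n", wrprot_min_level(false, true));
	printf("neither:      min level %d\n", wrprot_min_level(false, false));
	return 0;
}

Both special cases collapse to PG_LEVEL_2M, which is what lets the tail call
be hoisted out of the branches.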