Revert "KVM: MMU: reclaim the zapped-obsolete page first"
author Sean Christopherson <sean.j.christopherson@intel.com>
Tue, 5 Feb 2019 21:01:26 +0000 (13:01 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 20 Feb 2019 21:48:42 +0000 (22:48 +0100)
Unwinding optimizations related to obsolete pages is a step towards
removing x86 KVM's fast invalidate mechanism, i.e. this is one part of
reverting all patches from the series that introduced the mechanism[1].

This reverts commit 365c886860c4ba670d245e762b23987c912c129a.

[1] https://lkml.kernel.org/r/1369960590-14138-1-git-send-email-xiaoguangrong@linux.vnet.ibm.com

Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
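
For context, a compressed userspace model of the behavior change; every
name in it is an illustrative stand-in, not the kernel's code. The
optimization being reverted parked zapped obsolete pages on a per-VM
zapped_obsolete_pages list so that the MMU shrinker could free those
first instead of zapping fresh pages; with the revert,
kvm_zap_obsolete_pages() goes back to an on-stack list that is
committed before the function returns.

    #include <stdio.h>

    /* Illustrative stand-in for the per-VM state touched by this patch. */
    struct vm {
        int zapped_obsolete_pages;  /* pages zapped but not yet freed */
    };

    /* Reverted optimization: stash zapped pages per VM, free them lazily. */
    static void zap_obsolete_deferred(struct vm *vm, int npages)
    {
        vm->zapped_obsolete_pages += npages;    /* committed later */
    }

    static int shrink_scan_deferred(struct vm *vm)
    {
        /*
         * The fast path this revert removes: reclaim already-zapped
         * pages before considering any new ones.
         */
        if (vm->zapped_obsolete_pages) {
            int freed = vm->zapped_obsolete_pages;

            vm->zapped_obsolete_pages = 0;
            return freed;
        }
        return 0;   /* would fall through to zapping the oldest page */
    }

    /* Restored behavior: zap and free in one pass via an on-stack list. */
    static int zap_obsolete_immediate(int npages)
    {
        int invalid_list = npages;  /* models LIST_HEAD(invalid_list) */

        return invalid_list;        /* committed before returning */
    }

    int main(void)
    {
        struct vm vm = { 0 };

        zap_obsolete_deferred(&vm, 8);
        printf("deferred: shrinker freed %d pages\n",
               shrink_scan_deferred(&vm));
        printf("immediate: freed %d pages inline\n",
               zap_obsolete_immediate(8));
        return 0;
    }
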
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index aeca3fb..fbe16a9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -851,7 +851,6 @@ struct kvm_arch {
         * Hash table of struct kvm_mmu_page.
         */
        struct list_head active_mmu_pages;
-       struct list_head zapped_obsolete_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6cbffc7..255b021 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5858,6 +5858,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
+       LIST_HEAD(invalid_list);
        int batch = 0;
 
 restart:
@@ -5890,8 +5891,7 @@ restart:
                        goto restart;
                }
 
-               ret = kvm_mmu_prepare_zap_page(kvm, sp,
-                               &kvm->arch.zapped_obsolete_pages);
+               ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
                batch += ret;
 
                if (ret)
@@ -5902,7 +5902,7 @@ restart:
         * Should flush tlb before free page tables since lockless-walking
         * may use the pages.
         */
-       kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
+       kvm_mmu_commit_zap_page(kvm, &invalid_list);
 }
 
 /*
@@ -5935,11 +5935,6 @@ void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
        spin_unlock(&kvm->mmu_lock);
 }
 
-static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
-{
-       return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
-}
-
 static void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
 {
        struct kvm_mmu_page *sp, *node;
@@ -6011,24 +6006,16 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                 * want to shrink a VM that only started to populate its MMU
                 * anyway.
                 */
-               if (!kvm->arch.n_used_mmu_pages &&
-                     !kvm_has_zapped_obsolete_pages(kvm))
+               if (!kvm->arch.n_used_mmu_pages)
                        continue;
 
                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);
 
-               if (kvm_has_zapped_obsolete_pages(kvm)) {
-                       kvm_mmu_commit_zap_page(kvm,
-                             &kvm->arch.zapped_obsolete_pages);
-                       goto unlock;
-               }
-
                if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
                        freed++;
                kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
-unlock:
                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);
 
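
The LIST_HEAD(invalid_list) reintroduced above declares and initializes
an empty list head on the stack in a single statement; the relevant
definitions come from include/linux/list.h and include/linux/types.h:

    struct list_head {
        struct list_head *next, *prev;
    };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    #define LIST_HEAD(name) \
        struct list_head name = LIST_HEAD_INIT(name)

With the per-VM zapped_obsolete_pages list gone, pages prepared for
zapping live only in this stack-local list, so they must be committed
(freed) before kvm_zap_obsolete_pages() returns, as the second hunk's
kvm_mmu_commit_zap_page() call does.
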
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 03d26ff..78fb13f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9113,7 +9113,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
        INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
-       INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
        atomic_set(&kvm->arch.noncoherent_dma_count, 0);
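
For completeness: the INIT_LIST_HEAD() call deleted here was the
runtime counterpart of the LIST_HEAD() declaration shown above. It is
slightly simplified below; the real helper in include/linux/list.h uses
WRITE_ONCE() for the ->next store:

    static inline void INIT_LIST_HEAD(struct list_head *list)
    {
        list->next = list;
        list->prev = list;
    }

The call goes away because the field it initialized,
kvm->arch.zapped_obsolete_pages, no longer exists.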