KVM: x86/mmu: Zap only the relevant pages when removing a memslot
author Sean Christopherson <sean.j.christopherson@intel.com>
Tue, 5 Feb 2019 21:01:21 +0000 (13:01 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 20 Feb 2019 21:48:39 +0000 (22:48 +0100)
Modify kvm_mmu_invalidate_zap_pages_in_memslot(), a.k.a. the x86 MMU's
handler for kvm_arch_flush_shadow_memslot(), to zap only the pages/PTEs
that actually belong to the memslot being removed.  This improves
performance, especially when the deleted memslot has only a few shadow
entries, or even no entries.  E.g., a microbenchmark that accesses regular
memory while concurrently reading PCI ROM (to trigger memslot deletion)
showed a 5% improvement in throughput.

Cc: Xiao Guangrong <guangrong.xiao@gmail.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
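
In the hunk below, for_each_valid_sp() walks the shadow-page hash bucket
selected by the gfn, so the loop can see pages whose gfn merely hashes to
the same bucket, and it can see several pages for one gfn (shadow pages
with different roles).  That is why the inner loop checks "sp->gfn != gfn"
and does not break after the first match.  A minimal, userspace-only sketch
of that collision-check pattern follows; every name in it (sp_node, hashfn,
zap_for_gfn, ...) is invented for illustration and is not KVM code:

#include <stdio.h>
#include <stdint.h>

#define HASH_BITS 4
#define HASH_SIZE (1u << HASH_BITS)

typedef uint64_t gfn_t;

struct sp_node {
	gfn_t gfn;
	struct sp_node *next;	/* bucket chain, analogous to hash_link */
};

static struct sp_node *buckets[HASH_SIZE];

/* Trivial stand-in for the real gfn hash function. */
static unsigned int hashfn(gfn_t gfn)
{
	return (unsigned int)(gfn & (HASH_SIZE - 1));
}

static void insert(struct sp_node *sp)
{
	unsigned int h = hashfn(sp->gfn);

	sp->next = buckets[h];
	buckets[h] = sp;
}

/* "Zap" every page for exactly @gfn, skipping same-bucket collisions. */
static int zap_for_gfn(gfn_t gfn)
{
	struct sp_node *sp;
	int zapped = 0;

	for (sp = buckets[hashfn(gfn)]; sp; sp = sp->next) {
		if (sp->gfn != gfn)	/* same bucket, different gfn: skip */
			continue;
		zapped++;		/* stand-in for kvm_mmu_prepare_zap_page() */
	}
	return zapped;
}

int main(void)
{
	/* 0x10 and 0x20 both hash to bucket 0 with this trivial hashfn. */
	struct sp_node a = { .gfn = 0x10 }, b = { .gfn = 0x20 }, c = { .gfn = 0x10 };

	insert(&a);
	insert(&b);
	insert(&c);

	printf("zapped for gfn 0x10: %d\n", zap_for_gfn(0x10)); /* 2 */
	printf("zapped for gfn 0x20: %d\n", zap_for_gfn(0x20)); /* 1 */
	return 0;
}
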
arch/x86/kvm/mmu.c

index 1cce120..b81e2ca 100644
@@ -5622,7 +5622,38 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
                        struct kvm_memory_slot *slot,
                        struct kvm_page_track_notifier_node *node)
 {
-       kvm_mmu_invalidate_zap_all_pages(kvm);
+       struct kvm_mmu_page *sp;
+       LIST_HEAD(invalid_list);
+       unsigned long i;
+       bool flush;
+       gfn_t gfn;
+
+       spin_lock(&kvm->mmu_lock);
+
+       if (list_empty(&kvm->arch.active_mmu_pages))
+               goto out_unlock;
+
+       flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
+
+       for (i = 0; i < slot->npages; i++) {
+               gfn = slot->base_gfn + i;
+
+               for_each_valid_sp(kvm, sp, gfn) {
+                       if (sp->gfn != gfn)
+                               continue;
+
+                       kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+               }
+               if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+                       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+                       flush = false;
+                       cond_resched_lock(&kvm->mmu_lock);
+               }
+       }
+       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
+
+out_unlock:
+       spin_unlock(&kvm->mmu_lock);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
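
Note how the hunk keeps mmu_lock hold times bounded: kvm_mmu_prepare_zap_page()
only queues pages on invalid_list, and kvm_mmu_remote_flush_or_zap() commits the
list (performing a remote TLB flush if the earlier rmap zap or the queued pages
require one) right before the loop yields the lock via cond_resched_lock(), and
once more after the last gfn, instead of flushing for every shadow page.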