Optimize TLB flush in kvm_mmu_slot_remove_write_access.
author	Kai Huang <kai.huang@linux.intel.com>
Mon, 12 Jan 2015 07:28:54 +0000 (15:28 +0800)
committer	Paolo Bonzini <pbonzini@redhat.com>
Mon, 19 Jan 2015 10:09:37 +0000 (11:09 +0100)
No TLB flush is needed when there's no valid rmap in the memory slot: in that
case __rmap_write_protect() never clears a writable SPTE, so no stale mapping
can be cached in any TLB. Track the return value of __rmap_write_protect(),
which reports whether at least one SPTE was actually write-protected, and call
kvm_flush_remote_tlbs() only when that happened, instead of flushing
unconditionally. A standalone sketch of the pattern follows the sign-offs.

Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
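
For illustration, here is a minimal standalone sketch of the pattern the patch
applies. It is not kernel code: write_protect_entry(), flush_remote_tlbs(), and
the rmap array are hypothetical stand-ins for __rmap_write_protect(),
kvm_flush_remote_tlbs(), and the slot's rmaps. Each step reports whether it
actually changed a mapping, the results are OR-accumulated into a flush flag,
and the expensive flush runs only when something changed.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for __rmap_write_protect(): returns true only
 * when it actually clears a writable bit, i.e. when a TLB entry could
 * now be stale. */
static bool write_protect_entry(unsigned long *entry)
{
	if (*entry & 1) {	/* pretend bit 0 is the writable bit */
		*entry &= ~1UL;
		return true;	/* a mapping really changed */
	}
	return false;		/* already read-only, nothing to flush */
}

/* Hypothetical stand-in for kvm_flush_remote_tlbs(). */
static void flush_remote_tlbs(void)
{
	puts("flushing remote TLBs");
}

int main(void)
{
	unsigned long rmap[] = { 0, 2, 3, 0 };	/* only rmap[2] is writable */
	bool flush = false;
	size_t i;

	/* OR-accumulate the per-entry results, as the patch does with
	 * flush |= __rmap_write_protect(...). */
	for (i = 0; i < sizeof(rmap) / sizeof(rmap[0]); i++)
		if (rmap[i])
			flush |= write_protect_entry(&rmap[i]);

	/* Flush once, and only if at least one mapping changed. */
	if (flush)
		flush_remote_tlbs();
	return 0;
}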
arch/x86/kvm/mmu.c

index 0d0fdd6..97898ab 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4302,6 +4302,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
        struct kvm_memory_slot *memslot;
        gfn_t last_gfn;
        int i;
+       bool flush = false;
 
        memslot = id_to_memslot(kvm->memslots, slot);
        last_gfn = memslot->base_gfn + memslot->npages - 1;
@@ -4318,7 +4319,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 
                for (index = 0; index <= last_index; ++index, ++rmapp) {
                        if (*rmapp)
-                               __rmap_write_protect(kvm, rmapp, false);
+                               flush |= __rmap_write_protect(kvm, rmapp,
+                                               false);
 
                        if (need_resched() || spin_needbreak(&kvm->mmu_lock))
                                cond_resched_lock(&kvm->mmu_lock);
@@ -4345,7 +4347,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
         * instead of PT_WRITABLE_MASK, that means it does not depend
         * on PT_WRITABLE_MASK anymore.
         */
-       kvm_flush_remote_tlbs(kvm);
+       if (flush)
+               kvm_flush_remote_tlbs(kvm);
 }
 
 #define BATCH_ZAP_PAGES        10