KVM: MMU: Keep a reverse mapping of non-writable translations
authorIzik Eidus <izike@qumranet.com>
Tue, 16 Oct 2007 12:43:46 +0000 (14:43 +0200)
committerAvi Kivity <avi@qumranet.com>
Wed, 30 Jan 2008 15:52:54 +0000 (17:52 +0200)
The current kvm mmu only reverse-maps writable translations.  This is used
to write-protect a page in case it becomes a pagetable.

But with swapping support, we need a reverse mapping of read-only pages as
well:  when we evict a page, we need to remove any mapping to it, whether
writable or not.

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
drivers/kvm/mmu.c

index 14e54e3..bbf5eb4 100644 (file)
@@ -211,8 +211,8 @@ static int is_io_pte(unsigned long pte)
 
 static int is_rmap_pte(u64 pte)
 {
-       return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
-               == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
+       return pte != shadow_trap_nonpresent_pte
+               && pte != shadow_notrap_nonpresent_pte;
 }
 
 static void set_shadow_pte(u64 *sptep, u64 spte)
@@ -488,7 +488,6 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
        unsigned long *rmapp;
        u64 *spte;
-       u64 *prev_spte;
 
        gfn = unalias_gfn(kvm, gfn);
        rmapp = gfn_to_rmap(kvm, gfn);
@@ -497,13 +496,11 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
        while (spte) {
                BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
-               BUG_ON(!(*spte & PT_WRITABLE_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-               prev_spte = spte;
-               spte = rmap_next(kvm, rmapp, spte);
-               rmap_remove(kvm, prev_spte);
-               set_shadow_pte(prev_spte, *prev_spte & ~PT_WRITABLE_MASK);
+               if (is_writeble_pte(*spte))
+                       set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
                kvm_flush_remote_tlbs(kvm);
+               spte = rmap_next(kvm, rmapp, spte);
        }
 }
 
@@ -908,14 +905,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                table = __va(table_addr);
 
                if (level == 1) {
+                       int was_rmapped;
+
                        pte = table[index];
+                       was_rmapped = is_rmap_pte(pte);
                        if (is_shadow_present_pte(pte) && is_writeble_pte(pte))
                                return 0;
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table, v);
                        table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
                                                                PT_USER_MASK;
-                       rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
+                       if (!was_rmapped)
+                               rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
                        return 0;
                }
 
@@ -1424,10 +1425,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                pt = page->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
-                       if (pt[i] & PT_WRITABLE_MASK) {
-                               rmap_remove(kvm, &pt[i]);
+                       if (pt[i] & PT_WRITABLE_MASK)
                                pt[i] &= ~PT_WRITABLE_MASK;
-                       }
        }
 }