KVM: Unify kvm_mmu_pre_write() and kvm_mmu_post_write()
author     Avi Kivity <avi@qumranet.com>    Tue, 1 May 2007 11:16:52 +0000 (14:16 +0300)
committer  Avi Kivity <avi@qumranet.com>    Mon, 16 Jul 2007 09:05:38 +0000 (12:05 +0300)
Instead of calling two functions and repeating expensive checks, call one
function and provide it with before/after information.

Signed-off-by: Avi Kivity <avi@qumranet.com>
drivers/kvm/kvm.h
drivers/kvm/kvm_main.c
drivers/kvm/mmu.c
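
The caller-side effect is easiest to see outside the kernel tree. Below is a minimal, standalone C sketch; the names pte_write_hook(), guest_write() and the buffers are illustrative only, not part of the patch. The idea it mirrors: instead of bracketing the memcpy with a pre-write hook and an empty post-write hook, the write path calls a single hook and hands it pointers to both the old and the new bytes. In the patch itself that hook is kvm_mmu_pte_write(), and the old bytes come from the kmap_atomic()-mapped guest page, as the kvm_main.c hunk below shows.

#include <stdio.h>
#include <string.h>

/* Illustrative single hook: it sees the old and the new bytes in one
 * call, so the expensive per-write checks run only once. */
static void pte_write_hook(unsigned long gpa, const unsigned char *old,
                           const unsigned char *new, int bytes)
{
        printf("write at 0x%lx: %d byte(s), first byte 0x%02x -> 0x%02x\n",
               gpa, bytes, old[0], new[0]);
}

/* Simplified stand-in for emulator_write_phys(): call the hook once
 * with before/after information, then perform the actual copy. */
static void guest_write(unsigned char *backing, unsigned long gpa,
                        const unsigned char *val, int bytes)
{
        pte_write_hook(gpa, backing + gpa, val, bytes);
        memcpy(backing + gpa, val, bytes);
}

int main(void)
{
        unsigned char guest_page[4096] = { 0 };
        unsigned char new_pte[8] = { 0x67 };

        guest_write(guest_page, 0x100, new_pte, sizeof(new_pte));
        return 0;
}
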

diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 7facebd..11c519e 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -525,8 +525,8 @@ int kvm_write_guest(struct kvm_vcpu *vcpu,
 
 unsigned long segment_base(u16 selector);
 
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+                      const u8 *old, const u8 *new, int bytes);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 7d68258..b6ad9c6 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1071,18 +1071,18 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
        struct page *page;
        void *virt;
+       unsigned offset = offset_in_page(gpa);
 
        if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
                return 0;
        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
        if (!page)
                return 0;
-       kvm_mmu_pre_write(vcpu, gpa, bytes);
        mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
        virt = kmap_atomic(page, KM_USER0);
+       kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
        memcpy(virt + offset_in_page(gpa), val, bytes);
        kunmap_atomic(virt, KM_USER0);
-       kvm_mmu_post_write(vcpu, gpa, bytes);
        return 1;
 }
 
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 2277b7c..b3a83ef 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -1118,7 +1118,7 @@ out:
        return r;
 }
 
-static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *page,
                                  u64 *spte)
 {
@@ -1137,7 +1137,8 @@ static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
        *spte = 0;
 }
 
-void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
+void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+                      const u8 *old, const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *page;
@@ -1206,16 +1207,12 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
                spte = __va(page->page_hpa);
                spte += page_offset / sizeof(*spte);
                while (npte--) {
-                       mmu_pre_write_zap_pte(vcpu, page, spte);
+                       mmu_pte_write_zap_pte(vcpu, page, spte);
                        ++spte;
                }
        }
 }
 
-void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
-{
-}
-
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);