migration: move dirty bitmap sync to ram_addr.h
author Stefan Hajnoczi <stefanha@redhat.com>
Tue, 2 Dec 2014 11:23:17 +0000 (11:23 +0000)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 5 Jun 2015 15:10:00 +0000 (17:10 +0200)
The dirty memory bitmap is managed by ram_addr.h and copied to
migration_bitmap[] periodically during live migration.

Move the bitmap sync code to ram_addr.h, where the related code lives.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <1417519399-3166-5-git-send-email-stefanha@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch_init.c
include/exec/ram_addr.h

diff --git a/arch_init.c b/arch_init.c
index b5d90a4..d294474 100644
@@ -609,52 +609,10 @@ ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
     return (next - base) << TARGET_PAGE_BITS;
 }
 
-static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
-{
-    bool ret;
-    int nr = addr >> TARGET_PAGE_BITS;
-
-    ret = test_and_set_bit(nr, migration_bitmap);
-
-    if (!ret) {
-        migration_dirty_pages++;
-    }
-    return ret;
-}
-
 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
 {
-    ram_addr_t addr;
-    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
-
-    /* start address is aligned at the start of a word? */
-    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
-        int k;
-        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
-        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
-
-        for (k = page; k < page + nr; k++) {
-            if (src[k]) {
-                unsigned long new_dirty;
-                new_dirty = ~migration_bitmap[k];
-                migration_bitmap[k] |= src[k];
-                new_dirty &= src[k];
-                migration_dirty_pages += ctpopl(new_dirty);
-                src[k] = 0;
-            }
-        }
-    } else {
-        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
-            if (cpu_physical_memory_get_dirty(start + addr,
-                                              TARGET_PAGE_SIZE,
-                                              DIRTY_MEMORY_MIGRATION)) {
-                cpu_physical_memory_reset_dirty(start + addr,
-                                                TARGET_PAGE_SIZE,
-                                                DIRTY_MEMORY_MIGRATION);
-                migration_bitmap_set_dirty(start + addr);
-            }
-        }
-    }
+    migration_dirty_pages +=
+        cpu_physical_memory_sync_dirty_bitmap(migration_bitmap, start, length);
 }
 
 
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 9f73076..63db371 100644
@@ -218,5 +218,49 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                      unsigned client);
 
+static inline
+uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
+                                               ram_addr_t start,
+                                               ram_addr_t length)
+{
+    ram_addr_t addr;
+    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+    uint64_t num_dirty = 0;
+
+    /* start address is aligned at the start of a word? */
+    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
+        int k;
+        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
+
+        for (k = page; k < page + nr; k++) {
+            if (src[k]) {
+                unsigned long new_dirty;
+                new_dirty = ~dest[k];
+                dest[k] |= src[k];
+                new_dirty &= src[k];
+                num_dirty += ctpopl(new_dirty);
+                src[k] = 0;
+            }
+        }
+    } else {
+        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+            if (cpu_physical_memory_get_dirty(start + addr,
+                                              TARGET_PAGE_SIZE,
+                                              DIRTY_MEMORY_MIGRATION)) {
+                long k = (start + addr) >> TARGET_PAGE_BITS;
+                if (!test_and_set_bit(k, dest)) {
+                    num_dirty++;
+                }
+                cpu_physical_memory_reset_dirty(start + addr,
+                                                TARGET_PAGE_SIZE,
+                                                DIRTY_MEMORY_MIGRATION);
+            }
+        }
+    }
+
+    return num_dirty;
+}
+
 #endif
 #endif
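
For readers outside the QEMU tree, the word-aligned fast path in the diff above boils down to a bitwise merge plus a population count: OR each source word into the destination bitmap, count only the bits that were newly set, then clear the source word. The standalone sketch below shows the same trick with plain C; the function and variable names (demo_sync_words, nr_words) and the use of the GCC/Clang builtin __builtin_popcountl in place of QEMU's ctpopl are illustrative choices, not QEMU code.

/*
 * Standalone sketch of the word-aligned fast path in
 * cpu_physical_memory_sync_dirty_bitmap(): merge src into dest word by
 * word, count only the newly set bits, and consume src.  Illustrative
 * only; not taken from the QEMU tree.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t demo_sync_words(unsigned long *dest, unsigned long *src,
                                int nr_words)
{
    uint64_t num_dirty = 0;
    int k;

    for (k = 0; k < nr_words; k++) {
        if (src[k]) {
            /* bits set in src[k] but not yet in dest[k] are newly dirty */
            unsigned long new_dirty = src[k] & ~dest[k];

            dest[k] |= src[k];
            num_dirty += __builtin_popcountl(new_dirty);
            src[k] = 0;                 /* clear the source bitmap word */
        }
    }
    return num_dirty;
}

int main(void)
{
    unsigned long dest[1] = { 0x5 };    /* pages 0 and 2 already dirty */
    unsigned long src[1]  = { 0x7 };    /* pages 0, 1, 2 dirty since last sync */

    /* only page 1 is newly dirty, so this prints 1 */
    printf("newly dirty pages: %llu\n",
           (unsigned long long)demo_sync_words(dest, src, 1));
    return 0;
}

Counting only the newly set bits is what lets the caller keep migration_dirty_pages accurate without rescanning the whole destination bitmap after each sync.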