
qemu-kvm: Speed up dirty-bitmap traveling

Message ID 4B6BF06D.1090909@lab.ntt.co.jp (mailing list archive)
State New, archived

Commit Message

OHMURA Kei Feb. 5, 2010, 10:18 a.m. UTC

Patch

diff --git a/qemu-kvm.c b/qemu-kvm.c
index a305907..5459cdd 100644
--- a/qemu-kvm.c
+++ b/qemu-kvm.c
@@ -2433,22 +2433,21 @@  int kvm_physical_memory_set_dirty_tracking(int enable)
 }
 
 /* get kvm's dirty pages bitmap and update qemu's */
-static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
-                                         unsigned char *bitmap,
-                                         unsigned long offset,
-                                         unsigned long mem_size)
+static void kvm_get_dirty_pages_log_range_by_byte(unsigned int start,
+                                                  unsigned int end,
+                                                  unsigned char *bitmap,
+                                                  unsigned long offset)
 {
     unsigned int i, j, n = 0;
     unsigned char c;
     unsigned long page_number, addr, addr1;
     ram_addr_t ram_addr;
-    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;
 
     /* 
      * bitmap-traveling is faster than memory-traveling (for addr...) 
      * especially when most of the memory is not dirty.
      */
-    for (i = 0; i < len; i++) {
+    for (i = start; i < end; i++) {
         c = bitmap[i];
         while (c > 0) {
             j = ffsl(c) - 1;
@@ -2461,13 +2460,49 @@  static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
             n++;
         }
     }
+}
+
+static int kvm_get_dirty_pages_log_range_by_long(unsigned long start_addr,
+                                                 unsigned char *bitmap,
+                                                 unsigned long offset,
+                                                 unsigned long mem_size)
+{
+    unsigned int i;
+    unsigned int len;
+    unsigned long *bitmap_ul = (unsigned long *)bitmap;
+
+    /* bitmap-traveling by long size is faster than by byte size
+     * especially when most of memory is not dirty.
+     * bitmap should be long-size aligned for traveling by long.
+     */
+    if (((unsigned long)bitmap & (TARGET_LONG_SIZE - 1)) == 0) {
+        len = ((mem_size / TARGET_PAGE_SIZE) + TARGET_LONG_BITS - 1) /
+            TARGET_LONG_BITS;
+        for (i = 0; i < len; i++)
+            if (bitmap_ul[i] != 0)
+                kvm_get_dirty_pages_log_range_by_byte(i * TARGET_LONG_SIZE, 
+                    (i + 1) * TARGET_LONG_SIZE, bitmap, offset);
+        /* 
+         * We will check the remaining dirty-bitmap, 
+         * when the mem_size is not a multiple of TARGET_LONG_SIZE. 
+         */ 
+        if ((mem_size & (TARGET_LONG_SIZE - 1)) != 0) {
+            len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;
+            kvm_get_dirty_pages_log_range_by_byte(i * TARGET_LONG_SIZE, 
+                len, bitmap, offset);
+        }
+    } else { /* slow path: traveling by byte. */
+        len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;
+        kvm_get_dirty_pages_log_range_by_byte(0, len, bitmap, offset);
+    }
+
     return 0;
 }
 
 static int kvm_get_dirty_bitmap_cb(unsigned long start, unsigned long len,
                                    void *bitmap, void *opaque)
 {
-    return kvm_get_dirty_pages_log_range(start, bitmap, start, len);
+    return kvm_get_dirty_pages_log_range_by_long(start, bitmap, start, len);
 }
 
 /*
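
The idea behind the patch is easiest to see in isolation. Below is a minimal, self-contained sketch (not qemu code; the names scan_dirty_bitmap, mark_page_dirty, BITS_PER_LONG and PAGE_SHIFT are invented for the example) of the same word-wise traversal: walk the dirty bitmap one unsigned long at a time, skip words that are entirely zero with a single compare, and only drop to ffsl()-based bit extraction for words that actually contain dirty bits. The patch additionally checks that the bitmap pointer is long-aligned before taking the fast path, falls back to the byte-wise loop otherwise, and handles the tail when mem_size is not a multiple of the long size; the sketch ignores all of that.

#define _GNU_SOURCE           /* ffsl() is the same helper the patch uses (glibc: string.h) */
#include <stdio.h>
#include <string.h>
#include <strings.h>          /* ffsl() lives here on BSD/macOS */

#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define PAGE_SHIFT      12    /* assume 4 KiB pages for the example */

/* Hypothetical stand-in for "update qemu's dirty state for this page". */
static void mark_page_dirty(unsigned long page_number)
{
    printf("page %lu dirty (addr 0x%lx)\n",
           page_number, page_number << PAGE_SHIFT);
}

/* Scan nr_pages bits of bitmap, reporting every set (dirty) bit. */
static void scan_dirty_bitmap(const unsigned long *bitmap, unsigned long nr_pages)
{
    unsigned long nr_words = (nr_pages + BITS_PER_LONG - 1) / BITS_PER_LONG;
    unsigned long i, word;
    int bit;

    for (i = 0; i < nr_words; i++) {
        word = bitmap[i];
        /* Clean words -- the common case -- cost one load and one compare. */
        while (word != 0) {
            bit = ffsl(word) - 1;               /* lowest set bit */
            mark_page_dirty(i * BITS_PER_LONG + bit);
            word &= ~(1UL << bit);              /* clear it and continue */
        }
    }
}

int main(void)
{
    unsigned long bitmap[8] = { 0 };
    unsigned long dirty[] = { 1, 70, 200 };
    size_t k;

    for (k = 0; k < sizeof(dirty) / sizeof(dirty[0]); k++)
        bitmap[dirty[k] / BITS_PER_LONG] |= 1UL << (dirty[k] % BITS_PER_LONG);

    scan_dirty_bitmap(bitmap, 8 * BITS_PER_LONG);
    return 0;
}

Built with gcc -std=gnu99, the example prints the three dirty pages. A fully clean word costs one load and one compare, which is where the gain over byte-wise traveling comes from when, as the patch's own comment puts it, most of the memory is not dirty.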