@@ -279,9 +279,69 @@ int kvm_set_migration_log(int enable)
return 0;
}
-static int test_le_bit(unsigned long nr, unsigned char *addr)
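+/*
+ * Scan bytes [start, end) of a KVM dirty bitmap and mark the
+ * corresponding guest pages dirty; offset is the guest physical
+ * address described by bit 0 of the bitmap.
+ */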
+static inline void kvm_get_dirty_pages_log_range_by_byte(unsigned int start,
+                                                         unsigned int end,
+                                                         unsigned char *bitmap,
+                                                         unsigned long offset)
{
-    return (addr[nr >> 3] >> (nr & 7)) & 1;
+    unsigned int i, j;
+    unsigned long page_number, addr, addr1;
+    ram_addr_t ram_addr;
+    unsigned char c;
+
+    /*
+     * Traveling the bitmap is faster than traveling the memory pages
+     * themselves, especially when most of the memory is not dirty.
+     */
+    for (i = start; i < end; i++) {
+        c = bitmap[i];
+        while (c > 0) {
+            j = ffsl(c) - 1;            /* index of the lowest set bit */
+            c &= ~(1u << j);            /* clear it so the scan terminates */
+            page_number = i * 8 + j;
+            addr1 = page_number * TARGET_PAGE_SIZE;
+            addr = offset + addr1;
+            /* look up the ram_addr_t that backs this guest physical page */
+            ram_addr = cpu_get_physical_page_desc(addr);
+            cpu_physical_memory_set_dirty(ram_addr);
+        }
+    }
+}
+
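+/*
+ * Scan the dirty bitmap covering mem_size bytes of guest memory that
+ * starts at start_addr, traveling one host long at a time whenever the
+ * bitmap is suitably aligned.
+ */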
+static int kvm_get_dirty_pages_log_range_by_long(unsigned long start_addr,
+                                                 unsigned char *bitmap,
+                                                 unsigned long mem_size)
+{
+    unsigned int i;
+    unsigned int len;
+    unsigned long *bitmap_ul = (unsigned long *)bitmap;
+
+    /*
+     * Traveling the bitmap one host long at a time is faster than one
+     * byte at a time, especially when most of the memory is not dirty.
+     * The KVM dirty bitmap is built of host longs, so the fast path
+     * needs host-long alignment.
+     */
+    if (((unsigned long)bitmap & (sizeof(unsigned long) - 1)) == 0) {
+        len = (mem_size / TARGET_PAGE_SIZE) / HOST_LONG_BITS;
+        for (i = 0; i < len; i++)
+            if (bitmap_ul[i] != 0)
+                kvm_get_dirty_pages_log_range_by_byte(i * sizeof(unsigned long),
+                    (i + 1) * sizeof(unsigned long), bitmap, start_addr);
+        /*
+         * Check the remaining bytes of the dirty bitmap when the number
+         * of pages is not a multiple of HOST_LONG_BITS.
+         */
+        if (((mem_size / TARGET_PAGE_SIZE) % HOST_LONG_BITS) != 0) {
+            len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;
+            kvm_get_dirty_pages_log_range_by_byte(i * sizeof(unsigned long),
+                                                  len, bitmap, start_addr);
+        }
+    } else {    /* slow path: traveling byte by byte */
+        len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;
+        kvm_get_dirty_pages_log_range_by_byte(0, len, bitmap, start_addr);
+    }
+
+    return 0;
}
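
The core trick in kvm_get_dirty_pages_log_range_by_byte() is using find-first-set to jump from one dirty page directly to the next instead of testing every bit. A minimal stand-alone sketch of the same scan, with mark_page_dirty() and the constants as placeholders rather than QEMU API:

    #include <stdio.h>
    #include <strings.h>                /* ffs() */

    #define PAGE_SIZE 4096UL

    /* stand-in for cpu_physical_memory_set_dirty() */
    static void mark_page_dirty(unsigned long addr)
    {
        printf("dirty page at 0x%lx\n", addr);
    }

    static void scan_dirty_bytes(const unsigned char *bitmap, unsigned int start,
                                 unsigned int end, unsigned long offset)
    {
        unsigned int i;

        for (i = start; i < end; i++) {
            unsigned int c = bitmap[i];

            while (c != 0) {
                int j = ffs(c) - 1;     /* lowest set bit = next dirty page */
                c &= ~(1u << j);
                mark_page_dirty(offset + (i * 8 + j) * PAGE_SIZE);
            }
        }
    }

    int main(void)
    {
        /* bits 0 and 2 of byte 0, bit 7 of byte 1: pages 0, 2 and 15 */
        unsigned char bitmap[2] = { 0x05, 0x80 };

        scan_dirty_bytes(bitmap, 0, sizeof(bitmap), 0x100000);
        return 0;
    }
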
/**
@@ -297,8 +357,6 @@ int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
{
KVMState *s = kvm_state;
unsigned long size, allocated_size = 0;
- target_phys_addr_t phys_addr;
- ram_addr_t addr;
KVMDirtyLog d;
KVMSlot *mem;
int ret = 0;
@@ -327,17 +385,9 @@ int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
break;
}
- for (phys_addr = mem->start_addr, addr = mem->phys_offset;
- phys_addr < mem->start_addr + mem->memory_size;
- phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
- unsigned char *bitmap = (unsigned char *)d.dirty_bitmap;
- unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;
-
- if (test_le_bit(nr, bitmap)) {
- cpu_physical_memory_set_dirty(addr);
- }
- }
- start_addr = phys_addr;
+        kvm_get_dirty_pages_log_range_by_long(mem->start_addr,
+                                              d.dirty_bitmap, mem->memory_size);
+        start_addr = mem->start_addr + mem->memory_size;
}
qemu_free(d.dirty_bitmap);
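
The long-sized fast path then amounts to skipping clean memory one host long per compare and handing only nonzero words, plus the tail past the last full word, to the byte scanner. A sketch reusing scan_dirty_bytes() from above (HOST_LONG_BITS is a local stand-in for QEMU's macro, and the bitmap is assumed host-long aligned, as the patch checks):

    #define HOST_LONG_BITS (sizeof(unsigned long) * 8)

    static void scan_dirty_bitmap(const unsigned char *bitmap,
                                  unsigned long npages, unsigned long offset)
    {
        /* the cast is safe only because the caller guarantees alignment */
        const unsigned long *words = (const unsigned long *)bitmap;
        unsigned int nwords = npages / HOST_LONG_BITS;   /* full words only */
        unsigned int nbytes = (npages + 7) / 8;          /* total bitmap bytes */
        unsigned int i;

        /* one compare per word skips HOST_LONG_BITS clean pages at a time */
        for (i = 0; i < nwords; i++) {
            if (words[i] != 0) {
                scan_dirty_bytes(bitmap, i * sizeof(unsigned long),
                                 (i + 1) * sizeof(unsigned long), offset);
            }
        }

        /* pages past the last full word are checked byte by byte */
        if (nwords * sizeof(unsigned long) < nbytes) {
            scan_dirty_bytes(bitmap, nwords * sizeof(unsigned long),
                             nbytes, offset);
        }
    }

Note that interpreting the words byte by byte matches KVM's bitmap layout only on little-endian hosts; big-endian hosts need a byte-order conversion, which QEMU later added to this code with leul_to_cpu().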