@@ -123,13 +123,13 @@ static int ram_save_block(QEMUFile *f)
     current_addr = block->offset + offset;

     do {
-        if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
+        if (migration_bitmap_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
             uint8_t *p;
             int cont = (block == last_block) ? RAM_SAVE_FLAG_CONTINUE : 0;

-            cpu_physical_memory_reset_dirty(current_addr,
-                                            current_addr + TARGET_PAGE_SIZE,
-                                            MIGRATION_DIRTY_FLAG);
+            migration_bitmap_reset_dirty(current_addr,
+                                         current_addr + TARGET_PAGE_SIZE,
+                                         MIGRATION_DIRTY_FLAG);

             p = block->host + offset;
@@ -185,7 +185,7 @@ static ram_addr_t ram_save_remaining(void)
         ram_addr_t addr;
         for (addr = block->offset; addr < block->offset + block->length;
              addr += TARGET_PAGE_SIZE) {
-            if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
+            if (migration_bitmap_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
                 count++;
             }
         }
@@ -265,6 +265,8 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         return 0;
     }

+    sync_migration_bitmap(0, TARGET_PHYS_ADDR_MAX);
+
     if (stage == 1) {
         RAMBlock *block;
         bytes_transferred = 0;
@@ -276,9 +278,9 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         QLIST_FOREACH(block, &ram_list.blocks, next) {
             for (addr = block->offset; addr < block->offset + block->length;
                  addr += TARGET_PAGE_SIZE) {
-                if (!cpu_physical_memory_get_dirty(addr,
+                if (!migration_bitmap_get_dirty(addr,
                                                    MIGRATION_DIRTY_FLAG)) {
-                    cpu_physical_memory_set_dirty(addr);
+                    migration_bitmap_set_dirty(addr);
                 }
             }
         }
@@ -932,6 +932,7 @@ typedef struct RAMBlock {
 typedef struct RAMList {
     uint8_t *phys_dirty;
+    uint8_t *migration_bitmap;
     QLIST_HEAD(ram, RAMBlock) blocks;
 } RAMList;
 extern RAMList ram_list;
@@ -1004,8 +1005,44 @@ static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
     }
 }

 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags);
+
+static inline int migration_bitmap_get_dirty(ram_addr_t addr,
+                                             int dirty_flags)
+{
+    return ram_list.migration_bitmap[addr >> TARGET_PAGE_BITS] & dirty_flags;
+}
+
+static inline void migration_bitmap_set_dirty(ram_addr_t addr)
+{
+    ram_list.migration_bitmap[addr >> TARGET_PAGE_BITS] = 0xff;
+}
+
+static inline void migration_bitmap_mask_dirty_range(ram_addr_t start,
+                                                     int length,
+                                                     int dirty_flags)
+{
+    int i, mask, len;
+    uint8_t *p;
+
+    len = length >> TARGET_PAGE_BITS;
+    mask = ~dirty_flags;
+    p = ram_list.migration_bitmap + (start >> TARGET_PAGE_BITS);
+    for (i = 0; i < len; i++) {
+        p[i] &= mask;
+    }
+}
+
+void migration_bitmap_reset_dirty(ram_addr_t start,
+                                  ram_addr_t end,
+                                  int dirty_flags);
+
+void sync_migration_bitmap(ram_addr_t start, ram_addr_t end);
+
void cpu_tlb_update_dirty(CPUState *env);
int cpu_physical_memory_set_dirty_tracking(int enable);
@@ -2106,6 +2106,10 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
         abort();
     }

+    if (kvm_enabled()) {
+        /* With KVM, dirty logging is handled by the kernel and there
+           are no TCG TLB entries to update, so skip the TLB walk. */
+        return;
+    }
+
     for(env = first_cpu; env != NULL; env = env->next_cpu) {
         int mmu_idx;
         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
@@ -2114,8 +2118,61 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                       start1, length);
         }
     }
 }

+void migration_bitmap_reset_dirty(ram_addr_t start, ram_addr_t end,
+                                  int dirty_flags)
+{
+    unsigned long length, start1;
+
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    length = end - start;
+    if (length == 0) {
+        return;
+    }
+
+    migration_bitmap_mask_dirty_range(start, length, dirty_flags);
+
+    /* Sanity check that the range does not span multiple RAM blocks;
+       the host-address arithmetic assumes one contiguous mapping. */
+    start1 = (unsigned long)qemu_safe_ram_ptr(start);
+    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
+        != (end - 1) - start) {
+        abort();
+    }
+}
+
+/* last_ram_offset() is defined later in this file; forward-declare it
+   so the clamp below compiles. */
+static ram_addr_t last_ram_offset(void);
+
+void sync_migration_bitmap(ram_addr_t start, ram_addr_t end)
+{
+    unsigned long len, i;
+    ram_addr_t addr;
+
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    /* The bitmaps only cover allocated RAM; clamp the range so a
+       caller passing a large bound cannot walk past their ends. */
+    if (end > last_ram_offset()) {
+        end = last_ram_offset();
+    }
+    if (end <= start) {
+        return;
+    }
+
+    len = (end - start) >> TARGET_PAGE_BITS;
+    for (i = 0; i < len; i++) {
+        addr = start + (i << TARGET_PAGE_BITS);
+        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
+            migration_bitmap_set_dirty(addr);
+            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
+                                            MIGRATION_DIRTY_FLAG);
+        }
+    }
+}
+
 int cpu_physical_memory_set_dirty_tracking(int enable)
 {
     int ret = 0;
@@ -2979,6 +3036,12 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
            0xff, size >> TARGET_PAGE_BITS);

+    ram_list.migration_bitmap = qemu_realloc(ram_list.migration_bitmap,
+                                             last_ram_offset() >> TARGET_PAGE_BITS);
+    memset(ram_list.migration_bitmap + (new_block->offset >> TARGET_PAGE_BITS),
+           0xff, size >> TARGET_PAGE_BITS);
+
     if (kvm_enabled())
         kvm_setup_guest_memory(new_block->host, size);

This patch creates a migration bitmap that is periodically brought in
sync with the QEMU dirty bitmap. This makes it possible to run VM
migration in a separate thread: because migration works on its own copy
of the dirty bitmap, the iohandlers and the migration thread never
access the QEMU bitmap concurrently.

Signed-off-by: Umesh Deshpande <udeshpan@redhat.com>
---
 arch_init.c |   16 ++++++++-------
 cpu-all.h   |   37 +++++++++++++++++++++++++++++++++++
 exec.c      |   63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 109 insertions(+), 7 deletions(-)
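
For readers who want to see the scheme in isolation, below is a small
self-contained sketch of the two-bitmap idea. It is not QEMU code: the
names (main_dirty, migration_dirty, sync_bitmaps, mark_page_dirty) and
the single-threaded driver in main() are illustrative assumptions. Only
the flow mirrors the patch, in which sync_migration_bitmap() moves dirty
bits into a migration-private copy that the save loop then scans and
clears.

/* Standalone sketch of the two-bitmap scheme; illustrative names only,
 * not QEMU APIs.  Writers mark pages dirty in a "main" bitmap, and the
 * migration side periodically transfers those bits into a private copy,
 * so the page-scan loop never reads a bitmap that writers are updating. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_BITS 12
#define PAGE_SIZE (1u << PAGE_BITS)
#define RAM_PAGES 16

static uint8_t main_dirty[RAM_PAGES];       /* written on guest stores   */
static uint8_t migration_dirty[RAM_PAGES];  /* private to migration code */

static void mark_page_dirty(uint32_t addr)  /* stand-in for the guest write path */
{
    main_dirty[addr >> PAGE_BITS] = 0xff;
}

/* Move dirty bits from the main bitmap into the migration copy and clear
 * them in the main bitmap (the role of sync_migration_bitmap above). */
static void sync_bitmaps(void)
{
    unsigned i;
    for (i = 0; i < RAM_PAGES; i++) {
        if (main_dirty[i]) {
            migration_dirty[i] = 0xff;
            main_dirty[i] = 0;
        }
    }
}

int main(void)
{
    unsigned i;

    mark_page_dirty(3 * PAGE_SIZE);
    mark_page_dirty(7 * PAGE_SIZE);

    sync_bitmaps();                  /* start of one migration iteration */
    mark_page_dirty(5 * PAGE_SIZE);  /* guest keeps dirtying pages ...   */

    /* ... while the scan loop consults only the private copy. */
    for (i = 0; i < RAM_PAGES; i++) {
        if (migration_dirty[i]) {
            printf("would send page %u\n", i);
            migration_dirty[i] = 0;  /* role of migration_bitmap_reset_dirty */
        }
    }
    return 0;  /* page 5 stays pending until the next sync */
}

The payoff in the patch is the same as in the sketch: only the sync step
touches the bitmap shared with the iohandlers, so that is the only step
that still needs to be serialized with them, and the potentially long
scan-and-send loop can run in its own thread.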