Message ID | 3289d007d494cb0e2f05b1cf4ae6a78d300fede3.1589193382.git.lukasstraub2@web.de (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | colo: migration related bugfixes | expand |
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com> > > If we succeed in receiving ram state, but fail receiving the device state, there > will be a mismatch between the two. > > Fix this by flushing the ram cache only after the vmstate has been received. > > Signed-off-by: Lukas Straub <lukasstraub2@web.de> > --- > migration/colo.c | 1 + > migration/ram.c | 5 +---- > migration/ram.h | 1 + > 3 files changed, 3 insertions(+), 4 deletions(-) > > diff --git a/migration/colo.c b/migration/colo.c index > 6b2ad35aa4..2947363ae5 100644 > --- a/migration/colo.c > +++ b/migration/colo.c > @@ -739,6 +739,7 @@ static void > colo_incoming_process_checkpoint(MigrationIncomingState *mis, > > qemu_mutex_lock_iothread(); > vmstate_loading = true; > + colo_flush_ram_cache(); > ret = qemu_load_device_state(fb); > if (ret < 0) { > error_setg(errp, "COLO: load device state failed"); diff --git > a/migration/ram.c b/migration/ram.c index 04f13feb2e..5baec5fce9 100644 > --- a/migration/ram.c > +++ b/migration/ram.c > @@ -3313,7 +3313,7 @@ static bool postcopy_is_running(void) > * Flush content of RAM cache into SVM's memory. > * Only flush the pages that be dirtied by PVM or SVM or both. > */ > -static void colo_flush_ram_cache(void) > +void colo_flush_ram_cache(void) > { > RAMBlock *block = NULL; > void *dst_host; > @@ -3585,9 +3585,6 @@ static int ram_load(QEMUFile *f, void *opaque, > int version_id) > } > trace_ram_load_complete(ret, seq_iter); > > - if (!ret && migration_incoming_in_colo_state()) { > - colo_flush_ram_cache(); > - } > return ret; > } > > diff --git a/migration/ram.h b/migration/ram.h index 5ceaff7cb4..2eeaacfa13 > 100644 > --- a/migration/ram.h > +++ b/migration/ram.h > @@ -65,6 +65,7 @@ int ram_dirty_bitmap_reload(MigrationState *s, > RAMBlock *rb); > > /* ram cache */ > int colo_init_ram_cache(void); > +void colo_flush_ram_cache(void); > void colo_release_ram_cache(void); > void colo_incoming_start_dirty_log(void); > > -- > 2.20.1
diff --git a/migration/colo.c b/migration/colo.c index 6b2ad35aa4..2947363ae5 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -739,6 +739,7 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, qemu_mutex_lock_iothread(); vmstate_loading = true; + colo_flush_ram_cache(); ret = qemu_load_device_state(fb); if (ret < 0) { error_setg(errp, "COLO: load device state failed"); diff --git a/migration/ram.c b/migration/ram.c index 04f13feb2e..5baec5fce9 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -3313,7 +3313,7 @@ static bool postcopy_is_running(void) * Flush content of RAM cache into SVM's memory. * Only flush the pages that be dirtied by PVM or SVM or both. */ -static void colo_flush_ram_cache(void) +void colo_flush_ram_cache(void) { RAMBlock *block = NULL; void *dst_host; @@ -3585,9 +3585,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id) } trace_ram_load_complete(ret, seq_iter); - if (!ret && migration_incoming_in_colo_state()) { - colo_flush_ram_cache(); - } return ret; } diff --git a/migration/ram.h b/migration/ram.h index 5ceaff7cb4..2eeaacfa13 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -65,6 +65,7 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb); /* ram cache */ int colo_init_ram_cache(void); +void colo_flush_ram_cache(void); void colo_release_ram_cache(void); void colo_incoming_start_dirty_log(void);
If we succeed in receiving ram state, but fail receiving the device state, there will be a mismatch between the two. Fix this by flushing the ram cache only after the vmstate has been received. Signed-off-by: Lukas Straub <lukasstraub2@web.de> --- migration/colo.c | 1 + migration/ram.c | 5 +---- migration/ram.h | 1 + 3 files changed, 3 insertions(+), 4 deletions(-)