@@ -345,6 +345,8 @@ retry:
int multifd_ram_flush_and_sync(void)
{
+ MultiFDSyncReq req;
+
if (!migrate_multifd()) {
return 0;
}
@@ -356,7 +358,10 @@ int multifd_ram_flush_and_sync(void)
}
}
- return multifd_send_sync_main();
+ /* File migrations only need to sync with threads */
+ req = migrate_mapped_ram() ? MULTIFD_SYNC_LOCAL : MULTIFD_SYNC_ALL;
+
+ return multifd_send_sync_main(req);
}
bool multifd_send_prepare_common(MultiFDSendParams *p)
@@ -523,11 +523,13 @@ static int multifd_zero_copy_flush(QIOChannel *c)
return ret;
}
-int multifd_send_sync_main(void)
+int multifd_send_sync_main(MultiFDSyncReq req)
{
int i;
bool flush_zero_copy;
+ assert(req != MULTIFD_SYNC_NONE);
+
flush_zero_copy = migrate_zero_copy_send();
for (i = 0; i < migrate_multifd_channels(); i++) {
@@ -543,8 +545,8 @@ int multifd_send_sync_main(void)
* We should be the only user so far, so not possible to be set by
* others concurrently.
*/
- assert(qatomic_read(&p->pending_sync) == false);
- qatomic_set(&p->pending_sync, true);
+ assert(qatomic_read(&p->pending_sync) == MULTIFD_SYNC_NONE);
+ qatomic_set(&p->pending_sync, req);
qemu_sem_post(&p->sem);
}
for (i = 0; i < migrate_multifd_channels(); i++) {
@@ -635,14 +637,17 @@ static void *multifd_send_thread(void *opaque)
*/
qatomic_store_release(&p->pending_job, false);
} else {
+ MultiFDSyncReq req = qatomic_read(&p->pending_sync);
+
/*
* If not a normal job, must be a sync request. Note that
* pending_sync is a standalone flag (unlike pending_job), so
* it doesn't require explicit memory barriers.
*/
- assert(qatomic_read(&p->pending_sync));
+ assert(req != MULTIFD_SYNC_NONE);
- if (use_packets) {
+ /* Only push the SYNC message if it involves a remote sync */
+ if (req == MULTIFD_SYNC_ALL) {
p->flags = MULTIFD_FLAG_SYNC;
multifd_send_fill_packet(p);
ret = qio_channel_write_all(p->c, (void *)p->packet,
@@ -654,7 +659,7 @@ static void *multifd_send_thread(void *opaque)
stat64_add(&mig_stats.multifd_bytes, p->packet_len);
}
- qatomic_set(&p->pending_sync, false);
+ qatomic_set(&p->pending_sync, MULTIFD_SYNC_NONE);
qemu_sem_post(&p->sem_sync);
}
}
@@ -19,6 +19,22 @@
typedef struct MultiFDRecvData MultiFDRecvData;
typedef struct MultiFDSendData MultiFDSendData;
+typedef enum {
+ /* No sync request */
+ MULTIFD_SYNC_NONE = 0,
+ /* Sync locally on the sender threads without pushing messages */
+ MULTIFD_SYNC_LOCAL,
+ /*
+ * Sync not only on the sender threads, but also push a MULTIFD_FLAG_SYNC
+ * message to the wire for each iochannel (requesting a remote sync).
+ *
+ * When a remote sync is used, it must be paired with a follow-up
+ * RAM_SAVE_FLAG_EOS / RAM_SAVE_FLAG_MULTIFD_FLUSH message on the main
+ * channel.
+ */
+ MULTIFD_SYNC_ALL,
+} MultiFDSyncReq;
+
bool multifd_send_setup(void);
void multifd_send_shutdown(void);
void multifd_send_channel_created(void);
@@ -28,7 +44,7 @@ void multifd_recv_shutdown(void);
bool multifd_recv_all_channels_created(void);
void multifd_recv_new_channel(QIOChannel *ioc, Error **errp);
void multifd_recv_sync_main(void);
-int multifd_send_sync_main(void);
+int multifd_send_sync_main(MultiFDSyncReq req);
bool multifd_queue_page(RAMBlock *block, ram_addr_t offset);
bool multifd_recv(void);
MultiFDRecvData *multifd_get_recv_data(void);
@@ -143,7 +159,7 @@ typedef struct {
/* multifd flags for each packet */
uint32_t flags;
/*
- * The sender thread has work to do if either of below boolean is set.
+ * The sender thread has work to do if either of the fields below is set.
*
* @pending_job: a job is pending
* @pending_sync: a sync request is pending
@@ -152,7 +168,8 @@ typedef struct {
* cleared by the multifd sender threads.
*/
bool pending_job;
- bool pending_sync;
+ MultiFDSyncReq pending_sync;
+
MultiFDSendData *data;
/* thread local variables. No locking required */