| Message ID | 1612339311-114805-14-git-send-email-zhengchuan@huawei.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Support Multifd for RDMA migration |
* Chuan Zheng (zhengchuan@huawei.com) wrote:
> Add the 'qemu_rdma_registration' function, multifd send threads
> call it to register memory.

This function is a copy of the code out of qemu_rdma_registration_stop;
with some of the comments removed.
It's OK to split this code out so you can use it as well; but then why
not make qemu_rdma_registration_stop use this function as well to stop
having two copies ? And keep the comments!

> Signed-off-by: Zhimin Feng <fengzhimin1@huawei.com>
> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> ---
>  migration/rdma.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 51 insertions(+)
>
> diff --git a/migration/rdma.c b/migration/rdma.c
> index cff9446..1095a8f 100644
> --- a/migration/rdma.c
> +++ b/migration/rdma.c
> @@ -3739,6 +3739,57 @@ out:
>      return ret;
>  }
>
> +/*
> + * Dynamic page registrations for multifd RDMA threads.
> + */
> +static int qemu_rdma_registration(void *opaque)
> +{
> +    RDMAContext *rdma = opaque;

Can't you keep that as qemu_rdma_registration(RDMAContext *rdma) ?

> +    RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
> +    RDMALocalBlocks *local = &rdma->local_ram_blocks;
> +    int reg_result_idx, i, nb_dest_blocks;
> +    RDMAControlHeader head = { .len = 0, .repeat = 1 };
> +    int ret = 0;
> +
> +    head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
> +
> +    ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
> +                                  &reg_result_idx, rdma->pin_all ?
> +                                  qemu_rdma_reg_whole_ram_blocks : NULL);
> +    if (ret < 0) {
> +        goto out;
> +    }
> +
> +    nb_dest_blocks = resp.len / sizeof(RDMADestBlock);
> +
> +    if (local->nb_blocks != nb_dest_blocks) {
> +        rdma->error_state = -EINVAL;
> +        ret = -1;
> +        goto out;
> +    }
> +
> +    qemu_rdma_move_header(rdma, reg_result_idx, &resp);
> +    memcpy(rdma->dest_blocks,
> +           rdma->wr_data[reg_result_idx].control_curr, resp.len);
> +
> +    for (i = 0; i < nb_dest_blocks; i++) {
> +        network_to_dest_block(&rdma->dest_blocks[i]);
> +
> +        /* We require that the blocks are in the same order */
> +        if (rdma->dest_blocks[i].length != local->block[i].length) {
> +            rdma->error_state = -EINVAL;
> +            ret = -1;
> +            goto out;
> +        }
> +        local->block[i].remote_host_addr =
> +            rdma->dest_blocks[i].remote_host_addr;
> +        local->block[i].remote_rkey = rdma->dest_blocks[i].remote_rkey;
> +    }
> +
> +out:
> +    return ret;
> +}
> +
>  /* Destination:
>   * Called via a ram_control_load_hook during the initial RAM load section which
>   * lists the RAMBlocks by name. This lets us know the order of the RAMBlocks
> --
> 1.8.3.1
>
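
On the `void *opaque` question: one plausible reason for that signature is a thread entry point that requires `void *(*)(void *)`. That is an assumption for illustration only, not something stated in the patch. Below is a minimal, self-contained sketch of how a thin adapter could confine the untyped pointer to the thread boundary while keeping the typed `qemu_rdma_registration(RDMAContext *)` signature the review asks for; `RDMAContext` and `multifd_rdma_send_thread` here are simplified stand-ins, not the real QEMU definitions.

```c
#include <pthread.h>
#include <stdio.h>

/* Stand-in for the real RDMAContext defined in migration/rdma.c. */
typedef struct RDMAContext {
    int error_state;
} RDMAContext;

/* Typed helper, keeping the signature suggested in the review. */
static int qemu_rdma_registration(RDMAContext *rdma)
{
    printf("registering RAM blocks for context %p\n", (void *)rdma);
    return 0;
}

/* Thin adapter: only the thread entry point needs the void * signature. */
static void *multifd_rdma_send_thread(void *opaque)
{
    RDMAContext *rdma = opaque;

    if (qemu_rdma_registration(rdma) < 0) {
        rdma->error_state = -1;
    }
    return NULL;
}

int main(void)
{
    RDMAContext rdma = { 0 };
    pthread_t tid;

    pthread_create(&tid, NULL, multifd_rdma_send_thread, &rdma);
    pthread_join(tid, NULL);
    return rdma.error_state ? 1 : 0;
}
```

With this shape, only the adapter ever sees an untyped pointer, and the helper itself stays type-checked by the compiler.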
On 2021/2/4 4:06, Dr. David Alan Gilbert wrote:
> * Chuan Zheng (zhengchuan@huawei.com) wrote:
>> Add the 'qemu_rdma_registration' function, multifd send threads
>> call it to register memory.
>
> This function is a copy of the code out of qemu_rdma_registration_stop;
> with some of the comments removed.
> It's OK to split this code out so you can use it as well; but then why
> not make qemu_rdma_registration_stop use this function as well to stop
> having two copies ? And keep the comments!
>
OK, That's good to me. Will do that in V5.

>> Signed-off-by: Zhimin Feng <fengzhimin1@huawei.com>
>> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
>> ---
>>  migration/rdma.c | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 51 insertions(+)
>>
>> diff --git a/migration/rdma.c b/migration/rdma.c
>> index cff9446..1095a8f 100644
>> --- a/migration/rdma.c
>> +++ b/migration/rdma.c
>> @@ -3739,6 +3739,57 @@ out:
>>      return ret;
>>  }
>>
>> +/*
>> + * Dynamic page registrations for multifd RDMA threads.
>> + */
>> +static int qemu_rdma_registration(void *opaque)
>> +{
>> +    RDMAContext *rdma = opaque;
>
> Can't you keep that as qemu_rdma_registration(RDMAContext *rdma) ?
>
>> +    RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
>> +    RDMALocalBlocks *local = &rdma->local_ram_blocks;
>> +    int reg_result_idx, i, nb_dest_blocks;
>> +    RDMAControlHeader head = { .len = 0, .repeat = 1 };
>> +    int ret = 0;
>> +
>> +    head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
>> +
>> +    ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
>> +                                  &reg_result_idx, rdma->pin_all ?
>> +                                  qemu_rdma_reg_whole_ram_blocks : NULL);
>> +    if (ret < 0) {
>> +        goto out;
>> +    }
>> +
>> +    nb_dest_blocks = resp.len / sizeof(RDMADestBlock);
>> +
>> +    if (local->nb_blocks != nb_dest_blocks) {
>> +        rdma->error_state = -EINVAL;
>> +        ret = -1;
>> +        goto out;
>> +    }
>> +
>> +    qemu_rdma_move_header(rdma, reg_result_idx, &resp);
>> +    memcpy(rdma->dest_blocks,
>> +           rdma->wr_data[reg_result_idx].control_curr, resp.len);
>> +
>> +    for (i = 0; i < nb_dest_blocks; i++) {
>> +        network_to_dest_block(&rdma->dest_blocks[i]);
>> +
>> +        /* We require that the blocks are in the same order */
>> +        if (rdma->dest_blocks[i].length != local->block[i].length) {
>> +            rdma->error_state = -EINVAL;
>> +            ret = -1;
>> +            goto out;
>> +        }
>> +        local->block[i].remote_host_addr =
>> +            rdma->dest_blocks[i].remote_host_addr;
>> +        local->block[i].remote_rkey = rdma->dest_blocks[i].remote_rkey;
>> +    }
>> +
>> +out:
>> +    return ret;
>> +}
>> +
>>  /* Destination:
>>   * Called via a ram_control_load_hook during the initial RAM load section which
>>   * lists the RAMBlocks by name. This lets us know the order of the RAMBlocks
>> --
>> 1.8.3.1
>>
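
For reference, a minimal sketch of the deduplication agreed for V5: one shared registration helper called from both the multifd send path and the qemu_rdma_registration_stop path, so the exchange code exists only once. The types and caller names below are simplified stand-ins for illustration, not the real migration/rdma.c code; in the actual series the helper body would be the block-registration exchange shown in the patch below, with the original comments from qemu_rdma_registration_stop kept.

```c
#include <stdio.h>

/* Stand-in for the real RDMAContext defined in migration/rdma.c. */
typedef struct RDMAContext {
    int error_state;
} RDMAContext;

/* Shared helper, one copy of the RAM-block registration exchange. */
static int qemu_rdma_registration(RDMAContext *rdma)
{
    /* Real code: exchange RAM block info and record remote keys/addresses. */
    printf("registering RAM blocks\n");
    return 0;
}

/* New caller added by the patch: a multifd send thread registers memory. */
static int multifd_send_path(RDMAContext *rdma)
{
    return qemu_rdma_registration(rdma);
}

/* Existing setup path reuses the same helper instead of a second copy. */
static int registration_stop_path(RDMAContext *rdma)
{
    int ret = qemu_rdma_registration(rdma);
    if (ret < 0) {
        rdma->error_state = ret;
    }
    return ret;
}

int main(void)
{
    RDMAContext rdma = { 0 };
    return multifd_send_path(&rdma) || registration_stop_path(&rdma);
}
```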
diff --git a/migration/rdma.c b/migration/rdma.c
index cff9446..1095a8f 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3739,6 +3739,57 @@ out:
     return ret;
 }

+/*
+ * Dynamic page registrations for multifd RDMA threads.
+ */
+static int qemu_rdma_registration(void *opaque)
+{
+    RDMAContext *rdma = opaque;
+    RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
+    RDMALocalBlocks *local = &rdma->local_ram_blocks;
+    int reg_result_idx, i, nb_dest_blocks;
+    RDMAControlHeader head = { .len = 0, .repeat = 1 };
+    int ret = 0;
+
+    head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
+
+    ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
+                                  &reg_result_idx, rdma->pin_all ?
+                                  qemu_rdma_reg_whole_ram_blocks : NULL);
+    if (ret < 0) {
+        goto out;
+    }
+
+    nb_dest_blocks = resp.len / sizeof(RDMADestBlock);
+
+    if (local->nb_blocks != nb_dest_blocks) {
+        rdma->error_state = -EINVAL;
+        ret = -1;
+        goto out;
+    }
+
+    qemu_rdma_move_header(rdma, reg_result_idx, &resp);
+    memcpy(rdma->dest_blocks,
+           rdma->wr_data[reg_result_idx].control_curr, resp.len);
+
+    for (i = 0; i < nb_dest_blocks; i++) {
+        network_to_dest_block(&rdma->dest_blocks[i]);
+
+        /* We require that the blocks are in the same order */
+        if (rdma->dest_blocks[i].length != local->block[i].length) {
+            rdma->error_state = -EINVAL;
+            ret = -1;
+            goto out;
+        }
+        local->block[i].remote_host_addr =
+            rdma->dest_blocks[i].remote_host_addr;
+        local->block[i].remote_rkey = rdma->dest_blocks[i].remote_rkey;
+    }
+
+out:
+    return ret;
+}
+
 /* Destination:
  * Called via a ram_control_load_hook during the initial RAM load section which
  * lists the RAMBlocks by name. This lets us know the order of the RAMBlocks