Message ID | 1701970793-6865-18-git-send-email-si-wei.liu@oracle.com (mailing list archive)
---|---
State | New, archived
Series | vdpa-net: improve migration downtime through descriptor ASID and persistent IOTLB
On Thu, Dec 7, 2023 at 7:50 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> The descriptor group for SVQ ASID allows the guest memory mapping
> to retain across SVQ switching, same as how isolated CVQ can do
> with a different ASID than the guest GPA space. Introduce an
> evaluation function to judge whether to flush or keep iotlb maps
> based on virtqueue's descriptor group and cvq isolation capability.
>
> Have to hook the evaluation function to NetClient's .poll op as
> .vhost_reset_status runs ahead of .stop, and .vhost_dev_start
> don't have access to the vhost-vdpa net's information.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---

[...]

> +static void vhost_vdpa_net_data_eval_flush(NetClientState *nc, bool stop)
> +{
> +    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> +    struct vhost_vdpa *v = &s->vhost_vdpa;
> +
> +    if (!stop) {
> +        return;
> +    }
> +
> +    if (s->vhost_vdpa.index == 0) {
> +        if (s->always_svq) {
> +            v->shared->flush_map = true;

Why do we need to reset the map in the case of always_svq?

> +        } else if (!v->shared->svq_switching || v->desc_group >= 0) {
> +            v->shared->flush_map = false;
> +        } else {
> +            v->shared->flush_map = true;
> +        }
> +    } else if (!s->always_svq && v->shared->svq_switching &&
> +               v->desc_group < 0) {
> +        v->shared->flush_map = true;
> +    }
> +}
> +

I'm wondering, since we already have the reference count for the memory
listener, why not add one refcnt if _start detects it can keep the
memory maps?

[...]
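For readers following the refcount idea raised above, a minimal sketch of what it might look like, assuming a hypothetical listener_ref counter on the shared vhost-vdpa state; the field and helper names are illustrative only and not from QEMU or this series:

/*
 * Illustrative sketch only: take an extra reference on the shared
 * listener state when .start determines the maps can outlive the SVQ
 * switch, so the normal stop path's unref does not tear the maps down.
 * "listener_ref" and this helper are hypothetical names.
 */
static void vhost_vdpa_pin_maps_if_reusable(struct vhost_vdpa *v,
                                            bool can_keep_maps)
{
    if (can_keep_maps) {
        /* Extra ref keeps the iotlb maps alive across the restart. */
        v->shared->listener_ref++;
    }
}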
On Fri, Dec 8, 2023 at 2:50 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> The descriptor group for SVQ ASID allows the guest memory mapping
> to retain across SVQ switching, same as how isolated CVQ can do
> with a different ASID than the guest GPA space. Introduce an
> evaluation function to judge whether to flush or keep iotlb maps
> based on virtqueue's descriptor group and cvq isolation capability.

I may be missing something, but is there any reason we can't judge
during initialization? We know the device capability, so it should not
depend on any runtime configuration.

Thanks

[...]
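As context for the question above, a rough sketch of what an init-time policy check might look like, assuming desc_group and cvq_isolated are already probed at setup; "keep_maps" is a hypothetical flag, not part of the posted series. Note that the posted code also consults v->shared->svq_switching, which looks like runtime state, which may be why the evaluation happens at stop time instead:

/*
 * Hypothetical init-time variant: decide once, from static device
 * capabilities, whether the guest GPA maps can be kept. "keep_maps"
 * is an illustrative field name only.
 */
static void vhost_vdpa_net_init_map_policy(VhostVDPAState *s)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;

    /* Descriptors in their own ASID, or CVQ isolated: GPA maps survive. */
    v->shared->keep_maps = (v->desc_group >= 0) || s->cvq_isolated;
}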
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 04718b2..e9b96ed 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -504,12 +504,36 @@ static int vhost_vdpa_net_load_cleanup(NetClientState *nc, NICState *nic)
             n->parent_obj.status & VIRTIO_CONFIG_S_DRIVER_OK);
 }

+static void vhost_vdpa_net_data_eval_flush(NetClientState *nc, bool stop)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+
+    if (!stop) {
+        return;
+    }
+
+    if (s->vhost_vdpa.index == 0) {
+        if (s->always_svq) {
+            v->shared->flush_map = true;
+        } else if (!v->shared->svq_switching || v->desc_group >= 0) {
+            v->shared->flush_map = false;
+        } else {
+            v->shared->flush_map = true;
+        }
+    } else if (!s->always_svq && v->shared->svq_switching &&
+               v->desc_group < 0) {
+        v->shared->flush_map = true;
+    }
+}
+
 static NetClientInfo net_vhost_vdpa_info = {
         .type = NET_CLIENT_DRIVER_VHOST_VDPA,
         .size = sizeof(VhostVDPAState),
         .receive = vhost_vdpa_receive,
         .start = vhost_vdpa_net_data_start,
         .load = vhost_vdpa_net_data_load,
+        .poll = vhost_vdpa_net_data_eval_flush,
         .stop = vhost_vdpa_net_client_stop,
         .cleanup = vhost_vdpa_cleanup,
         .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
@@ -1368,12 +1392,28 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
     return 0;
 }

+static void vhost_vdpa_net_cvq_eval_flush(NetClientState *nc, bool stop)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+
+    if (!stop) {
+        return;
+    }
+
+    if (!v->shared->flush_map && !v->shared->svq_switching &&
+        !s->cvq_isolated && v->desc_group < 0) {
+        v->shared->flush_map = true;
+    }
+}
+
 static NetClientInfo net_vhost_vdpa_cvq_info = {
         .type = NET_CLIENT_DRIVER_VHOST_VDPA,
         .size = sizeof(VhostVDPAState),
         .receive = vhost_vdpa_receive,
         .start = vhost_vdpa_net_cvq_start,
         .load = vhost_vdpa_net_cvq_load,
+        .poll = vhost_vdpa_net_cvq_eval_flush,
         .stop = vhost_vdpa_net_cvq_stop,
         .cleanup = vhost_vdpa_cleanup,
         .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
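For quick reference, the first-queue-pair decision implemented in the data hunk above can be condensed into a single predicate. This is a reading aid using the same fields as the patch, not part of the series; the desc_group parameter type is assumed:

/*
 * Condensed restatement of the index-0 data-queue logic above: maps
 * are kept only when no SVQ switch is in flight, or when the vring
 * descriptors live in their own ASID (desc_group >= 0), so the guest
 * GPA mappings are untouched by the switch. Sketch only.
 */
static bool vhost_vdpa_keep_maps(bool always_svq, bool svq_switching,
                                 int desc_group)
{
    return !always_svq && (!svq_switching || desc_group >= 0);
}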
The descriptor group for SVQ ASID allows the guest memory mapping
to be retained across SVQ switching, the same as what an isolated
CVQ can do with a different ASID than the guest GPA space. Introduce
an evaluation function to judge whether to flush or keep the iotlb
maps based on the virtqueue's descriptor group and the CVQ isolation
capability.

The evaluation function has to be hooked to NetClient's .poll op, as
.vhost_reset_status runs ahead of .stop, and .vhost_dev_start
doesn't have access to the vhost-vdpa net's information.

Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 net/vhost-vdpa.c | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)