Message ID: 1701970793-6865-5-git-send-email-si-wei.liu@oracle.com (mailing list archive)
State: New, archived
Series: vdpa-net: improve migration downtime through descriptor ASID and persistent IOTLB
On Fri, Dec 8, 2023 at 2:50 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> Same as the previous commit, but do it for cvq instead of data vqs.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  net/vhost-vdpa.c | 21 +++++++++++++++++----
>  1 file changed, 17 insertions(+), 4 deletions(-)
>
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 0cf3147..cb5705d 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -1601,16 +1601,19 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
>  };
>
>  /**
> - * Probe if CVQ is isolated
> + * Probe if CVQ is isolated, and piggyback its descriptor group
> + * index if supported
>   *
>   * @device_fd The vdpa device fd
>   * @features Features offered by the device.
>   * @cvq_index The control vq pair index
> + * @desc_grpidx The CVQ's descriptor group index to return
>   *
> - * Returns <0 in case of failure, 0 if false and 1 if true.
> + * Returns <0 in case of failure, 0 if false and 1 if true (isolated).
>   */
>  static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
> -                                          int cvq_index, Error **errp)
> +                                          int cvq_index, int64_t *desc_grpidx,
> +                                          Error **errp)
>  {
>      uint64_t backend_features;
>      int64_t cvq_group;
> @@ -1667,6 +1670,13 @@ static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
>          goto out;
>      }
>
> +    if (backend_features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) {
> +        int64_t desc_group = vhost_vdpa_get_vring_desc_group(device_fd,
> +                                                             cvq_index, errp);
> +        if (likely(desc_group >= 0) && desc_group != cvq_group)
> +            *desc_grpidx = desc_group;
> +    }
> +
>      for (int i = 0; i < cvq_index; ++i) {
>          int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
>          if (unlikely(group < 0)) {
> @@ -1685,6 +1695,8 @@ static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
>  out:
>      status = 0;
>      ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
> +    status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;

Is this a bug fix? I don't see the connection with the descriptor group.

Thanks

> +    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
>      return r;
>  }
>
> @@ -1791,6 +1803,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>                                             Error **errp)
>  {
>      NetClientState *nc = NULL;
> +    int64_t desc_group = -1;
>      VhostVDPAState *s;
>      int ret = 0;
>      assert(name);
> @@ -1802,7 +1815,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>      } else {
>          cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
>                                                        queue_pair_index * 2,
> -                                                      errp);
> +                                                      &desc_group, errp);
>          if (unlikely(cvq_isolated < 0)) {
>              return NULL;
>          }
> --
> 1.8.3.1
>
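For readers following the exchange above, here is a minimal stand-alone sketch (not QEMU code; a bare vhost-vdpa fd is assumed and error handling is elided) of the status transition performed by the hunk the reviewer questions: a full device reset followed by re-entering the ACKNOWLEDGE | DRIVER state. Whether re-setting these bits after the probe's reset is a fix or merely leaves the device ready for later feature negotiation is exactly what the review question asks; the sketch only shows the mechanics.

/*
 * Stand-alone sketch (not the QEMU implementation, error handling elided):
 * the two ioctls issued at the end of the probe.  The reset clears
 * everything programmed while probing; the second call moves the device
 * back into the early ACKNOWLEDGE | DRIVER driver states.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/virtio_config.h>

static void reset_and_reack(int device_fd)
{
    uint8_t status = 0;

    /* Full device reset, as the existing code already did. */
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);

    /* The lines added by the patch: re-enter ACKNOWLEDGE | DRIVER. */
    status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
}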
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 0cf3147..cb5705d 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -1601,16 +1601,19 @@ static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
 };
 
 /**
- * Probe if CVQ is isolated
+ * Probe if CVQ is isolated, and piggyback its descriptor group
+ * index if supported
  *
  * @device_fd The vdpa device fd
  * @features Features offered by the device.
  * @cvq_index The control vq pair index
+ * @desc_grpidx The CVQ's descriptor group index to return
  *
- * Returns <0 in case of failure, 0 if false and 1 if true.
+ * Returns <0 in case of failure, 0 if false and 1 if true (isolated).
  */
 static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
-                                          int cvq_index, Error **errp)
+                                          int cvq_index, int64_t *desc_grpidx,
+                                          Error **errp)
 {
     uint64_t backend_features;
     int64_t cvq_group;
@@ -1667,6 +1670,13 @@ static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
         goto out;
     }
 
+    if (backend_features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) {
+        int64_t desc_group = vhost_vdpa_get_vring_desc_group(device_fd,
+                                                             cvq_index, errp);
+        if (likely(desc_group >= 0) && desc_group != cvq_group)
+            *desc_grpidx = desc_group;
+    }
+
     for (int i = 0; i < cvq_index; ++i) {
         int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
         if (unlikely(group < 0)) {
@@ -1685,6 +1695,8 @@ static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
 out:
     status = 0;
     ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
+    status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
+    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
     return r;
 }
 
@@ -1791,6 +1803,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                            Error **errp)
 {
     NetClientState *nc = NULL;
+    int64_t desc_group = -1;
     VhostVDPAState *s;
     int ret = 0;
     assert(name);
@@ -1802,7 +1815,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     } else {
         cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
                                                       queue_pair_index * 2,
-                                                      errp);
+                                                      &desc_group, errp);
         if (unlikely(cvq_isolated < 0)) {
             return NULL;
         }
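To make the new branch in the hunk above easier to follow, below is a hypothetical, self-contained user-space sketch of the probe it relies on: read the backend features, and only if VHOST_BACKEND_F_DESC_ASID is advertised ask which group the vq's descriptor table belongs to. The helper name probe_desc_group is invented here; the patch uses its own vhost_vdpa_get_vring_desc_group() wrapper inside QEMU, and the sketch assumes kernel headers new enough to define VHOST_VDPA_GET_VRING_DESC_GROUP.

/*
 * Hypothetical sketch, not the QEMU code: probe the descriptor group index
 * of one virtqueue on a vhost-vdpa fd.  Assumes kernel headers that define
 * VHOST_BACKEND_F_DESC_ASID and VHOST_VDPA_GET_VRING_DESC_GROUP.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Returns the descriptor group index of @vq_index, or -1 on error/no support. */
static int64_t probe_desc_group(int device_fd, unsigned int vq_index)
{
    uint64_t backend_features = 0;
    struct vhost_vring_state state = { .index = vq_index };

    if (ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features) < 0) {
        return -1;
    }
    if (!(backend_features & (1ULL << VHOST_BACKEND_F_DESC_ASID))) {
        /* Device cannot place descriptors in a group of their own. */
        return -1;
    }
    if (ioctl(device_fd, VHOST_VDPA_GET_VRING_DESC_GROUP, &state) < 0) {
        return -1;
    }
    /*
     * state.num now holds the group backing this vq's descriptors; when it
     * differs from the buffer group, a caller could bind it to a dedicated
     * ASID with VHOST_VDPA_SET_GROUP_ASID, which is presumably what the
     * rest of the series uses the returned index for.
     */
    return state.num;
}

This mirrors the shape of the patch's check (feature bit first, then the per-vq query, and only publishing the index when it differs from the CVQ's buffer group) without attempting to reproduce QEMU's error propagation through Error **errp.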
Same as the previous commit, but do it for cvq instead of data vqs.

Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 net/vhost-vdpa.c | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)