
[33/40] vdpa: batch multiple dma_unmap to a single call for vm stop

Message ID 1701970793-6865-34-git-send-email-si-wei.liu@oracle.com (mailing list archive)
State New, archived
Series vdpa-net: improve migration downtime through descriptor ASID and persistent IOTLB

Commit Message

Si-Wei Liu Dec. 7, 2023, 5:39 p.m. UTC
Should help reduce live migration downtime on the source host. Below is the
coalesced dma_unmap time series on a 2 queue pair config (no
dedicated descriptor group ASID for SVQ).

109531@1693367276.853503:vhost_vdpa_reset_device dev: 0x55c933926890
109531@1693367276.853513:vhost_vdpa_add_status dev: 0x55c933926890 status: 0x3
109531@1693367276.853520:vhost_vdpa_flush_map dev: 0x55c933926890 doit: 1 svq_flush: 0 persist: 1
109531@1693367276.853524:vhost_vdpa_set_config_call dev: 0x55c933926890 fd: -1
109531@1693367276.853579:vhost_vdpa_iotlb_begin_batch vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 type: 5
109531@1693367276.853586:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x1000 size: 0x2000 type: 3
109531@1693367276.853600:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x3000 size: 0x1000 type: 3
109531@1693367276.853618:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x4000 size: 0x2000 type: 3
109531@1693367276.853625:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x6000 size: 0x1000 type: 3
109531@1693367276.853630:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0x7000 size: 0x2000 type: 3
109531@1693367276.853636:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0x9000 size: 0x1000 type: 3
109531@1693367276.853642:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0xa000 size: 0x2000 type: 3
109531@1693367276.853648:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0xc000 size: 0x1000 type: 3
109531@1693367276.853654:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0xf000 size: 0x1000 type: 3
109531@1693367276.853660:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0x10000 size: 0x1000 type: 3
109531@1693367276.853666:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0xd000 size: 0x1000 type: 3
109531@1693367276.853670:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0xe000 size: 0x1000 type: 3
109531@1693367276.853675:vhost_vdpa_iotlb_end_batch vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 type: 6
109531@1693367277.014697:vhost_vdpa_get_vq_index dev: 0x55c933925de0 idx: 0 vq idx: 0
109531@1693367277.014747:vhost_vdpa_get_vq_index dev: 0x55c933925de0 idx: 1 vq idx: 1
109531@1693367277.014753:vhost_vdpa_get_vq_index dev: 0x55c9339262e0 idx: 2 vq idx: 2
109531@1693367277.014756:vhost_vdpa_get_vq_index dev: 0x55c9339262e0 idx: 3 vq idx: 3

Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 hw/virtio/vhost-vdpa.c         |   7 +--
 include/hw/virtio/vhost-vdpa.h |   3 ++
 net/vhost-vdpa.c               | 112 +++++++++++++++++++++++++++--------------
 3 files changed, 80 insertions(+), 42 deletions(-)
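
The gist of the change: instead of opening and closing an IOTLB batch around
each virtqueue's unmaps, the net client stop path now walks every queue pair
of the device and keeps a single batch open per address space while all SVQ
rings (and the CVQ buffers) are unmapped. A condensed sketch of that flow,
simplified from vhost_vdpa_net_client_stop in this patch (the wrapper name is
illustrative only; CVQ buffers and error handling omitted):

static void vhost_vdpa_net_unmap_all_batched(VhostVDPAState *s, int nr_qps)
{
    struct vhost_vdpa *last = NULL;

    for (int i = 0; i < nr_qps; ++i) {
        struct vhost_vdpa *v = &vhost_vdpa_net_get_nc_vdpa(s, i)->vhost_vdpa;

        if (!v->shadow_vqs_enabled) {
            continue;
        }
        /* One batch per address space; reopen only when the ASID changes. */
        if (!last || last->address_space_id != v->address_space_id) {
            if (last) {
                vhost_vdpa_dma_batch_end_once(last->shared,
                                              last->address_space_id);
            }
            vhost_vdpa_dma_batch_begin_once(v->shared, v->address_space_id);
            last = v;
        }
        /* Ring unmaps for this queue pair land in the open batch. */
        for (unsigned j = 0; j < v->shadow_vqs->len; ++j) {
            VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);

            vhost_vdpa_svq_unmap_rings(v->dev, svq);
        }
    }
    if (last) {
        /* A single end-batch message flushes all unmaps to the device. */
        vhost_vdpa_dma_batch_end_once(last->shared, last->address_space_id);
    }
}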

Comments

Eugenio Perez Martin Dec. 13, 2023, 4:46 p.m. UTC | #1
On Thu, Dec 7, 2023 at 7:51 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> Should help reduce live migration downtime on the source host. Below is the
> coalesced dma_unmap time series on a 2 queue pair config (no
> dedicated descriptor group ASID for SVQ).
>
> 109531@1693367276.853503:vhost_vdpa_reset_device dev: 0x55c933926890
> 109531@1693367276.853513:vhost_vdpa_add_status dev: 0x55c933926890 status: 0x3
> 109531@1693367276.853520:vhost_vdpa_flush_map dev: 0x55c933926890 doit: 1 svq_flush: 0 persist: 1
> 109531@1693367276.853524:vhost_vdpa_set_config_call dev: 0x55c933926890 fd: -1
> 109531@1693367276.853579:vhost_vdpa_iotlb_begin_batch vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 type: 5
> 109531@1693367276.853586:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x1000 size: 0x2000 type: 3
> 109531@1693367276.853600:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x3000 size: 0x1000 type: 3
> 109531@1693367276.853618:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x4000 size: 0x2000 type: 3
> 109531@1693367276.853625:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x6000 size: 0x1000 type: 3
> 109531@1693367276.853630:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0x7000 size: 0x2000 type: 3
> 109531@1693367276.853636:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0x9000 size: 0x1000 type: 3
> 109531@1693367276.853642:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0xa000 size: 0x2000 type: 3
> 109531@1693367276.853648:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0xc000 size: 0x1000 type: 3
> 109531@1693367276.853654:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0xf000 size: 0x1000 type: 3
> 109531@1693367276.853660:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0x10000 size: 0x1000 type: 3
> 109531@1693367276.853666:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0xd000 size: 0x1000 type: 3
> 109531@1693367276.853670:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0xe000 size: 0x1000 type: 3
> 109531@1693367276.853675:vhost_vdpa_iotlb_end_batch vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 type: 6
> 109531@1693367277.014697:vhost_vdpa_get_vq_index dev: 0x55c933925de0 idx: 0 vq idx: 0
> 109531@1693367277.014747:vhost_vdpa_get_vq_index dev: 0x55c933925de0 idx: 1 vq idx: 1
> 109531@1693367277.014753:vhost_vdpa_get_vq_index dev: 0x55c9339262e0 idx: 2 vq idx: 2
> 109531@1693367277.014756:vhost_vdpa_get_vq_index dev: 0x55c9339262e0 idx: 3 vq idx: 3
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  hw/virtio/vhost-vdpa.c         |   7 +--
>  include/hw/virtio/vhost-vdpa.h |   3 ++
>  net/vhost-vdpa.c               | 112 +++++++++++++++++++++++++++--------------
>  3 files changed, 80 insertions(+), 42 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index d98704a..4010fd9 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -1162,8 +1162,8 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
>      vhost_iova_tree_remove(v->shared->iova_tree, *result);
>  }
>
> -static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
> -                                       const VhostShadowVirtqueue *svq)
> +void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
> +                                const VhostShadowVirtqueue *svq)
>  {
>      struct vhost_vdpa *v = dev->opaque;
>      struct vhost_vring_addr svq_addr;
> @@ -1346,17 +1346,14 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
>          return;
>      }
>
> -    vhost_vdpa_dma_batch_begin_once(v->shared, v->address_space_id);
>      for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
>          VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
>
>          vhost_svq_stop(svq);
> -        vhost_vdpa_svq_unmap_rings(dev, svq);
>
>          event_notifier_cleanup(&svq->hdev_kick);
>          event_notifier_cleanup(&svq->hdev_call);
>      }
> -    vhost_vdpa_dma_batch_end_once(v->shared, v->address_space_id);
>  }
>
>  static void vhost_vdpa_suspend(struct vhost_dev *dev)
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index aa13679..f426e2c 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -112,6 +112,9 @@ int vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s, uint32_t asid);
>  int vhost_vdpa_load_setup(VhostVDPAShared *s, AddressSpace *dma_as);
>  int vhost_vdpa_load_cleanup(VhostVDPAShared *s, bool vhost_will_start);
>
> +void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
> +                                const VhostShadowVirtqueue *svq);
> +
>  typedef struct vdpa_iommu {
>      VhostVDPAShared *dev_shared;
>      IOMMUMemoryRegion *iommu_mr;
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 683619f..41714d1 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -29,6 +29,7 @@
>  #include "migration/migration.h"
>  #include "migration/misc.h"
>  #include "hw/virtio/vhost.h"
> +#include "hw/virtio/vhost-vdpa.h"
>
>  /* Todo:need to add the multiqueue support here */
>  typedef struct VhostVDPAState {
> @@ -467,15 +468,89 @@ static int vhost_vdpa_net_data_load(NetClientState *nc)
>      return 0;
>  }
>
> +static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
> +{
> +    VhostIOVATree *tree = v->shared->iova_tree;
> +    DMAMap needle = {
> +        /*
> +         * No need to specify size or to look for more translations since
> +         * this contiguous chunk was allocated by us.
> +         */
> +        .translated_addr = (hwaddr)(uintptr_t)addr,
> +    };
> +    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
> +    int r;
> +
> +    if (unlikely(!map)) {
> +        error_report("Cannot locate expected map");
> +        return;
> +    }
> +
> +    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
> +                             map->size + 1);
> +    if (unlikely(r != 0)) {
> +        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
> +    }
> +
> +    vhost_iova_tree_remove(tree, *map);
> +}
> +
>  static void vhost_vdpa_net_client_stop(NetClientState *nc)
>  {
>      VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> +    struct vhost_vdpa *v = &s->vhost_vdpa;
> +    struct vhost_vdpa *last_vi = NULL;
> +    bool has_cvq = v->dev->vq_index_end % 2;
> +    int nvqp;
>
>      assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
>
>      if (s->vhost_vdpa.index == 0) {
>          migration_remove_notifier(&s->migration_state);
>      }
> +
> +    if (v->dev->vq_index + v->dev->nvqs != v->dev->vq_index_end) {
> +        return;
> +    }
> +
> +    nvqp = (v->dev->vq_index_end + 1) / 2;
> +    for (int i = 0; i < nvqp; ++i) {
> +        VhostVDPAState *s_i = vhost_vdpa_net_get_nc_vdpa(s, i);
> +        struct vhost_vdpa *v_i = &s_i->vhost_vdpa;
> +
> +        if (!v_i->shadow_vqs_enabled) {
> +            continue;
> +        }
> +        if (!last_vi) {
> +            vhost_vdpa_dma_batch_begin_once(v_i->shared,
> +                                            v_i->address_space_id);
> +            last_vi = v_i;
> +        } else if (last_vi->address_space_id != v_i->address_space_id) {
> +            vhost_vdpa_dma_batch_end_once(last_vi->shared,
> +                                          last_vi->address_space_id);
> +            vhost_vdpa_dma_batch_begin_once(v_i->shared,
> +                                            v_i->address_space_id);
> +            last_vi = v_i;
> +        }
> +
> +        for (unsigned j = 0; j < v_i->shadow_vqs->len; ++j) {
> +            VhostShadowVirtqueue *svq = g_ptr_array_index(v_i->shadow_vqs, j);
> +
> +            vhost_vdpa_svq_unmap_rings(v_i->dev, svq);
> +        }
> +    }
> +    if (has_cvq) {
> +        if (last_vi) {
> +            assert(last_vi->address_space_id == v->address_space_id);
> +        }
> +        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
> +        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
> +    }
> +    if (last_vi) {
> +        vhost_vdpa_dma_batch_end_once(last_vi->shared,
> +                                      last_vi->address_space_id);
> +        last_vi = NULL;
> +    }

Since we've delayed unmapping the guest memory until _cleanup, why not delay
these unmaps to _cleanup too?

>  }
>
>  static int vhost_vdpa_net_load_setup(NetClientState *nc, NICState *nic)
> @@ -585,33 +660,6 @@ static int64_t vhost_vdpa_get_vring_desc_group(int device_fd,
>      return state.num;
>  }
>
> -static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
> -{
> -    VhostIOVATree *tree = v->shared->iova_tree;
> -    DMAMap needle = {
> -        /*
> -         * No need to specify size or to look for more translations since
> -         * this contiguous chunk was allocated by us.
> -         */
> -        .translated_addr = (hwaddr)(uintptr_t)addr,
> -    };
> -    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
> -    int r;
> -
> -    if (unlikely(!map)) {
> -        error_report("Cannot locate expected map");
> -        return;
> -    }
> -
> -    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
> -                             map->size + 1);
> -    if (unlikely(r != 0)) {
> -        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
> -    }
> -
> -    vhost_iova_tree_remove(tree, *map);
> -}
> -
>  /** Map CVQ buffer. */
>  static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
>                                    bool write)
> @@ -740,18 +788,8 @@ err:
>
>  static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
>  {
> -    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> -    struct vhost_vdpa *v = &s->vhost_vdpa;
> -
>      assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
>
> -    if (s->vhost_vdpa.shadow_vqs_enabled) {
> -        vhost_vdpa_dma_batch_begin_once(v->shared, v->address_space_id);
> -        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
> -        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
> -        vhost_vdpa_dma_batch_end_once(v->shared, v->address_space_id);
> -    }
> -
>      vhost_vdpa_net_client_stop(nc);
>  }
>
> --
> 1.8.3.1
>
Jason Wang Jan. 15, 2024, 3:47 a.m. UTC | #2
On Fri, Dec 8, 2023 at 2:51 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> Should help reduce live migration downtime on the source host. Below is the
> coalesced dma_unmap time series on a 2 queue pair config (no
> dedicated descriptor group ASID for SVQ).

It would be better to explain how we can batch into a single call (e.g., do we
batch at the level of the whole device?)

>
> 109531@1693367276.853503:vhost_vdpa_reset_device dev: 0x55c933926890
> 109531@1693367276.853513:vhost_vdpa_add_status dev: 0x55c933926890 status: 0x3
> 109531@1693367276.853520:vhost_vdpa_flush_map dev: 0x55c933926890 doit: 1 svq_flush: 0 persist: 1
> 109531@1693367276.853524:vhost_vdpa_set_config_call dev: 0x55c933926890 fd: -1
> 109531@1693367276.853579:vhost_vdpa_iotlb_begin_batch vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 type: 5
> 109531@1693367276.853586:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x1000 size: 0x2000 type: 3
> 109531@1693367276.853600:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x3000 size: 0x1000 type: 3
> 109531@1693367276.853618:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x4000 size: 0x2000 type: 3
> 109531@1693367276.853625:vhost_vdpa_dma_unmap vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 asid: 0 iova: 0x6000 size: 0x1000 type: 3
> 109531@1693367276.853630:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0x7000 size: 0x2000 type: 3
> 109531@1693367276.853636:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0x9000 size: 0x1000 type: 3
> 109531@1693367276.853642:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0xa000 size: 0x2000 type: 3
> 109531@1693367276.853648:vhost_vdpa_dma_unmap vdpa:0x7fa2aa84c190 fd: 16 msg_type: 2 asid: 0 iova: 0xc000 size: 0x1000 type: 3
> 109531@1693367276.853654:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0xf000 size: 0x1000 type: 3
> 109531@1693367276.853660:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0x10000 size: 0x1000 type: 3
> 109531@1693367276.853666:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0xd000 size: 0x1000 type: 3
> 109531@1693367276.853670:vhost_vdpa_dma_unmap vdpa:0x7fa2aa6b6190 fd: 16 msg_type: 2 asid: 0 iova: 0xe000 size: 0x1000 type: 3
> 109531@1693367276.853675:vhost_vdpa_iotlb_end_batch vdpa:0x7fa2aa895190 fd: 16 msg_type: 2 type: 6
> 109531@1693367277.014697:vhost_vdpa_get_vq_index dev: 0x55c933925de0 idx: 0 vq idx: 0
> 109531@1693367277.014747:vhost_vdpa_get_vq_index dev: 0x55c933925de0 idx: 1 vq idx: 1
> 109531@1693367277.014753:vhost_vdpa_get_vq_index dev: 0x55c9339262e0 idx: 2 vq idx: 2
> 109531@1693367277.014756:vhost_vdpa_get_vq_index dev: 0x55c9339262e0 idx: 3 vq idx: 3
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  hw/virtio/vhost-vdpa.c         |   7 +--
>  include/hw/virtio/vhost-vdpa.h |   3 ++
>  net/vhost-vdpa.c               | 112 +++++++++++++++++++++++++++--------------
>  3 files changed, 80 insertions(+), 42 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index d98704a..4010fd9 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -1162,8 +1162,8 @@ static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
>      vhost_iova_tree_remove(v->shared->iova_tree, *result);
>  }
>
> -static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
> -                                       const VhostShadowVirtqueue *svq)
> +void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
> +                                const VhostShadowVirtqueue *svq)
>  {
>      struct vhost_vdpa *v = dev->opaque;
>      struct vhost_vring_addr svq_addr;
> @@ -1346,17 +1346,14 @@ static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
>          return;
>      }
>
> -    vhost_vdpa_dma_batch_begin_once(v->shared, v->address_space_id);
>      for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
>          VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
>
>          vhost_svq_stop(svq);
> -        vhost_vdpa_svq_unmap_rings(dev, svq);
>
>          event_notifier_cleanup(&svq->hdev_kick);
>          event_notifier_cleanup(&svq->hdev_call);
>      }
> -    vhost_vdpa_dma_batch_end_once(v->shared, v->address_space_id);
>  }
>
>  static void vhost_vdpa_suspend(struct vhost_dev *dev)
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index aa13679..f426e2c 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -112,6 +112,9 @@ int vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s, uint32_t asid);
>  int vhost_vdpa_load_setup(VhostVDPAShared *s, AddressSpace *dma_as);
>  int vhost_vdpa_load_cleanup(VhostVDPAShared *s, bool vhost_will_start);
>
> +void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
> +                                const VhostShadowVirtqueue *svq);
> +
>  typedef struct vdpa_iommu {
>      VhostVDPAShared *dev_shared;
>      IOMMUMemoryRegion *iommu_mr;
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index 683619f..41714d1 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -29,6 +29,7 @@
>  #include "migration/migration.h"
>  #include "migration/misc.h"
>  #include "hw/virtio/vhost.h"
> +#include "hw/virtio/vhost-vdpa.h"
>
>  /* Todo:need to add the multiqueue support here */
>  typedef struct VhostVDPAState {
> @@ -467,15 +468,89 @@ static int vhost_vdpa_net_data_load(NetClientState *nc)
>      return 0;
>  }
>
> +static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
> +{
> +    VhostIOVATree *tree = v->shared->iova_tree;
> +    DMAMap needle = {
> +        /*
> +         * No need to specify size or to look for more translations since
> +         * this contiguous chunk was allocated by us.
> +         */
> +        .translated_addr = (hwaddr)(uintptr_t)addr,
> +    };
> +    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
> +    int r;
> +
> +    if (unlikely(!map)) {
> +        error_report("Cannot locate expected map");
> +        return;
> +    }
> +
> +    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
> +                             map->size + 1);
> +    if (unlikely(r != 0)) {
> +        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
> +    }
> +
> +    vhost_iova_tree_remove(tree, *map);
> +}
> +
>  static void vhost_vdpa_net_client_stop(NetClientState *nc)
>  {
>      VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> +    struct vhost_vdpa *v = &s->vhost_vdpa;
> +    struct vhost_vdpa *last_vi = NULL;

Nit: just curious, what does "vi" stand for here?

> +    bool has_cvq = v->dev->vq_index_end % 2;
> +    int nvqp;
>
>      assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
>
>      if (s->vhost_vdpa.index == 0) {
>          migration_remove_notifier(&s->migration_state);
>      }
> +
> +    if (v->dev->vq_index + v->dev->nvqs != v->dev->vq_index_end) {
> +        return;
> +    }
> +
> +    nvqp = (v->dev->vq_index_end + 1) / 2;
> +    for (int i = 0; i < nvqp; ++i) {
> +        VhostVDPAState *s_i = vhost_vdpa_net_get_nc_vdpa(s, i);
> +        struct vhost_vdpa *v_i = &s_i->vhost_vdpa;
> +
> +        if (!v_i->shadow_vqs_enabled) {
> +            continue;
> +        }
> +        if (!last_vi) {
> +            vhost_vdpa_dma_batch_begin_once(v_i->shared,
> +                                            v_i->address_space_id);
> +            last_vi = v_i;
> +        } else if (last_vi->address_space_id != v_i->address_space_id) {
> +            vhost_vdpa_dma_batch_end_once(last_vi->shared,
> +                                          last_vi->address_space_id);
> +            vhost_vdpa_dma_batch_begin_once(v_i->shared,
> +                                            v_i->address_space_id);
> +            last_vi = v_i;
> +        }
> +
> +        for (unsigned j = 0; j < v_i->shadow_vqs->len; ++j) {
> +            VhostShadowVirtqueue *svq = g_ptr_array_index(v_i->shadow_vqs, j);
> +
> +            vhost_vdpa_svq_unmap_rings(v_i->dev, svq);
> +        }
> +    }
> +    if (has_cvq) {
> +        if (last_vi) {
> +            assert(last_vi->address_space_id == v->address_space_id);
> +        }
> +        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
> +        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
> +    }
> +    if (last_vi) {
> +        vhost_vdpa_dma_batch_end_once(last_vi->shared,
> +                                      last_vi->address_space_id);
> +        last_vi = NULL;
> +    }

The logic looks rather complicated. Can we simplify it to:

batch_begin_once()
unmap()
batch_end_once()

?

Thanks
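
For concreteness, the flattened shape being suggested would look roughly like
the loop below, reusing the names from vhost_vdpa_net_client_stop in this
patch. It only keeps the single-batch behaviour if the *_once helpers are
assumed to de-duplicate repeated begin/end messages for an already-open
batch, which is exactly what the suggestion hinges on (a sketch, not code
from this series):

/*
 * Sketch only: assumes begin/end_once collapse nested calls so the
 * per-queue-pair pairs below still result in one IOTLB batch.
 */
for (int i = 0; i < nvqp; ++i) {
    VhostVDPAState *s_i = vhost_vdpa_net_get_nc_vdpa(s, i);
    struct vhost_vdpa *v_i = &s_i->vhost_vdpa;

    if (!v_i->shadow_vqs_enabled) {
        continue;
    }
    vhost_vdpa_dma_batch_begin_once(v_i->shared, v_i->address_space_id);
    for (unsigned j = 0; j < v_i->shadow_vqs->len; ++j) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v_i->shadow_vqs, j);

        vhost_vdpa_svq_unmap_rings(v_i->dev, svq);
    }
    vhost_vdpa_dma_batch_end_once(v_i->shared, v_i->address_space_id);
}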


>  }
>
>  static int vhost_vdpa_net_load_setup(NetClientState *nc, NICState *nic)
> @@ -585,33 +660,6 @@ static int64_t vhost_vdpa_get_vring_desc_group(int device_fd,
>      return state.num;
>  }
>
> -static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
> -{
> -    VhostIOVATree *tree = v->shared->iova_tree;
> -    DMAMap needle = {
> -        /*
> -         * No need to specify size or to look for more translations since
> -         * this contiguous chunk was allocated by us.
> -         */
> -        .translated_addr = (hwaddr)(uintptr_t)addr,
> -    };
> -    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
> -    int r;
> -
> -    if (unlikely(!map)) {
> -        error_report("Cannot locate expected map");
> -        return;
> -    }
> -
> -    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
> -                             map->size + 1);
> -    if (unlikely(r != 0)) {
> -        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
> -    }
> -
> -    vhost_iova_tree_remove(tree, *map);
> -}
> -
>  /** Map CVQ buffer. */
>  static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
>                                    bool write)
> @@ -740,18 +788,8 @@ err:
>
>  static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
>  {
> -    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> -    struct vhost_vdpa *v = &s->vhost_vdpa;
> -
>      assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
>
> -    if (s->vhost_vdpa.shadow_vqs_enabled) {
> -        vhost_vdpa_dma_batch_begin_once(v->shared, v->address_space_id);
> -        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
> -        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
> -        vhost_vdpa_dma_batch_end_once(v->shared, v->address_space_id);
> -    }
> -
>      vhost_vdpa_net_client_stop(nc);
>  }
>
> --
> 1.8.3.1
>

Patch

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index d98704a..4010fd9 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -1162,8 +1162,8 @@  static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
     vhost_iova_tree_remove(v->shared->iova_tree, *result);
 }
 
-static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
-                                       const VhostShadowVirtqueue *svq)
+void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
+                                const VhostShadowVirtqueue *svq)
 {
     struct vhost_vdpa *v = dev->opaque;
     struct vhost_vring_addr svq_addr;
@@ -1346,17 +1346,14 @@  static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
         return;
     }
 
-    vhost_vdpa_dma_batch_begin_once(v->shared, v->address_space_id);
     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
 
         vhost_svq_stop(svq);
-        vhost_vdpa_svq_unmap_rings(dev, svq);
 
         event_notifier_cleanup(&svq->hdev_kick);
         event_notifier_cleanup(&svq->hdev_call);
     }
-    vhost_vdpa_dma_batch_end_once(v->shared, v->address_space_id);
 }
 
 static void vhost_vdpa_suspend(struct vhost_dev *dev)
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index aa13679..f426e2c 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -112,6 +112,9 @@  int vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s, uint32_t asid);
 int vhost_vdpa_load_setup(VhostVDPAShared *s, AddressSpace *dma_as);
 int vhost_vdpa_load_cleanup(VhostVDPAShared *s, bool vhost_will_start);
 
+void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
+                                const VhostShadowVirtqueue *svq);
+
 typedef struct vdpa_iommu {
     VhostVDPAShared *dev_shared;
     IOMMUMemoryRegion *iommu_mr;
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 683619f..41714d1 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -29,6 +29,7 @@ 
 #include "migration/migration.h"
 #include "migration/misc.h"
 #include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-vdpa.h"
 
 /* Todo:need to add the multiqueue support here */
 typedef struct VhostVDPAState {
@@ -467,15 +468,89 @@  static int vhost_vdpa_net_data_load(NetClientState *nc)
     return 0;
 }
 
+static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
+{
+    VhostIOVATree *tree = v->shared->iova_tree;
+    DMAMap needle = {
+        /*
+         * No need to specify size or to look for more translations since
+         * this contiguous chunk was allocated by us.
+         */
+        .translated_addr = (hwaddr)(uintptr_t)addr,
+    };
+    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
+    int r;
+
+    if (unlikely(!map)) {
+        error_report("Cannot locate expected map");
+        return;
+    }
+
+    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
+                             map->size + 1);
+    if (unlikely(r != 0)) {
+        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
+    }
+
+    vhost_iova_tree_remove(tree, *map);
+}
+
 static void vhost_vdpa_net_client_stop(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+    struct vhost_vdpa *last_vi = NULL;
+    bool has_cvq = v->dev->vq_index_end % 2;
+    int nvqp;
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
     if (s->vhost_vdpa.index == 0) {
         migration_remove_notifier(&s->migration_state);
     }
+
+    if (v->dev->vq_index + v->dev->nvqs != v->dev->vq_index_end) {
+        return;
+    }
+
+    nvqp = (v->dev->vq_index_end + 1) / 2;
+    for (int i = 0; i < nvqp; ++i) {
+        VhostVDPAState *s_i = vhost_vdpa_net_get_nc_vdpa(s, i);
+        struct vhost_vdpa *v_i = &s_i->vhost_vdpa;
+
+        if (!v_i->shadow_vqs_enabled) {
+            continue;
+        }
+        if (!last_vi) {
+            vhost_vdpa_dma_batch_begin_once(v_i->shared,
+                                            v_i->address_space_id);
+            last_vi = v_i;
+        } else if (last_vi->address_space_id != v_i->address_space_id) {
+            vhost_vdpa_dma_batch_end_once(last_vi->shared,
+                                          last_vi->address_space_id);
+            vhost_vdpa_dma_batch_begin_once(v_i->shared,
+                                            v_i->address_space_id);
+            last_vi = v_i;
+        }
+
+        for (unsigned j = 0; j < v_i->shadow_vqs->len; ++j) {
+            VhostShadowVirtqueue *svq = g_ptr_array_index(v_i->shadow_vqs, j);
+
+            vhost_vdpa_svq_unmap_rings(v_i->dev, svq);
+        }
+    }
+    if (has_cvq) {
+        if (last_vi) {
+            assert(last_vi->address_space_id == v->address_space_id);
+        }
+        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
+        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
+    }
+    if (last_vi) {
+        vhost_vdpa_dma_batch_end_once(last_vi->shared,
+                                      last_vi->address_space_id);
+        last_vi = NULL;
+    }
 }
 
 static int vhost_vdpa_net_load_setup(NetClientState *nc, NICState *nic)
@@ -585,33 +660,6 @@  static int64_t vhost_vdpa_get_vring_desc_group(int device_fd,
     return state.num;
 }
 
-static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
-{
-    VhostIOVATree *tree = v->shared->iova_tree;
-    DMAMap needle = {
-        /*
-         * No need to specify size or to look for more translations since
-         * this contiguous chunk was allocated by us.
-         */
-        .translated_addr = (hwaddr)(uintptr_t)addr,
-    };
-    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
-    int r;
-
-    if (unlikely(!map)) {
-        error_report("Cannot locate expected map");
-        return;
-    }
-
-    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
-                             map->size + 1);
-    if (unlikely(r != 0)) {
-        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
-    }
-
-    vhost_iova_tree_remove(tree, *map);
-}
-
 /** Map CVQ buffer. */
 static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                   bool write)
@@ -740,18 +788,8 @@  err:
 
 static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
 {
-    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
-    struct vhost_vdpa *v = &s->vhost_vdpa;
-
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
-    if (s->vhost_vdpa.shadow_vqs_enabled) {
-        vhost_vdpa_dma_batch_begin_once(v->shared, v->address_space_id);
-        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
-        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
-        vhost_vdpa_dma_batch_end_once(v->shared, v->address_space_id);
-    }
-
     vhost_vdpa_net_client_stop(nc);
 }