diff mbox series

[28/40] vdpa: support iotlb_batch_asid

Message ID 1701970793-6865-29-git-send-email-si-wei.liu@oracle.com (mailing list archive)
State New, archived
Headers show
Series vdpa-net: improve migration downtime through descriptor ASID and persistent IOTLB | expand

Commit Message

Si-Wei Liu Dec. 7, 2023, 5:39 p.m. UTC
Then it's possible to specify ASID when calling the DMA
batching API. If the ASID to work on doesn't align with
the ASID for ongoing transaction, the API will fail the
request and return negative, and the transaction will
remain intact as if no failed request had ever occurred.

Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 hw/virtio/vhost-vdpa.c         | 25 +++++++++++++++++++------
 include/hw/virtio/vhost-vdpa.h |  1 +
 net/vhost-vdpa.c               |  1 +
 3 files changed, 21 insertions(+), 6 deletions(-)

Comments

Eugenio Perez Martin Dec. 13, 2023, 3:42 p.m. UTC | #1
On Thu, Dec 7, 2023 at 7:51 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> Then it's possible to specify ASID when calling the DMA
> batching API. If the ASID to work on doesn't align with
> the ASID for ongoing transaction, the API will fail the
> request and return negative, and the transaction will
> remain intact as if no failed request had ever occurred.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  hw/virtio/vhost-vdpa.c         | 25 +++++++++++++++++++------
>  include/hw/virtio/vhost-vdpa.h |  1 +
>  net/vhost-vdpa.c               |  1 +
>  3 files changed, 21 insertions(+), 6 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index d3f5721..b7896a8 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -189,15 +189,25 @@ static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s, uint32_t asid)
>
>  static int vhost_vdpa_dma_batch_begin_once(VhostVDPAShared *s, uint32_t asid)
>  {
> -    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) ||
> -        s->iotlb_batch_begin_sent) {
> +    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
>          return 0;
>      }
>
> -    if (vhost_vdpa_map_batch_begin(s, asid)) {
> -        s->iotlb_batch_begin_sent = true;
> +    if (s->iotlb_batch_begin_sent && s->iotlb_batch_asid != asid) {
> +        return -1;
> +    }
> +
> +    if (s->iotlb_batch_begin_sent) {
> +        return 0;
>      }
>
> +    if (!vhost_vdpa_map_batch_begin(s, asid)) {
> +        return 0;
> +    }
> +
> +    s->iotlb_batch_begin_sent = true;
> +    s->iotlb_batch_asid = asid;
> +
>      return 0;
>  }
>
> @@ -237,10 +247,13 @@ static int vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s, uint32_t asid)
>          return 0;
>      }
>
> -    if (vhost_vdpa_dma_batch_end(s, asid)) {
> -        s->iotlb_batch_begin_sent = false;
> +    if (!vhost_vdpa_dma_batch_end(s, asid)) {
> +        return 0;
>      }
>
> +    s->iotlb_batch_begin_sent = false;
> +    s->iotlb_batch_asid = -1;

If we define -1 as "not in batch", iotlb_batch_begin_sent is
redundant. Can we "#define IOTLB_NOT_IN_BATCH -1" and remove
iotlb_batch_begin_sent?

> +
>      return 0;
>  }
>
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index 0fe0f60..219316f 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -61,6 +61,7 @@ typedef struct vhost_vdpa_shared {
>      bool map_thread_enabled;
>
>      bool iotlb_batch_begin_sent;
> +    uint32_t iotlb_batch_asid;
>
>      /*
>       * The memory listener has been registered, so DMA maps have been sent to
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index e9b96ed..bc72345 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -1933,6 +1933,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>          s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
>          s->vhost_vdpa.shared->iova_range = iova_range;
>          s->vhost_vdpa.shared->shadow_data = svq;
> +        s->vhost_vdpa.shared->iotlb_batch_asid = -1;
>          s->vhost_vdpa.shared->refcnt++;
>      } else if (!is_datapath) {
>          s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
> --
> 1.8.3.1
>
Jason Wang Jan. 15, 2024, 3:19 a.m. UTC | #2
On Fri, Dec 8, 2023 at 2:51 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> Then it's possible to specify ASID when calling the DMA
> batching API. If the ASID to work on doesn't align with
> the ASID for ongoing transaction, the API will fail the
> request and return negative, and the transaction will
> remain intact as if no failed request had ever occurred.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  hw/virtio/vhost-vdpa.c         | 25 +++++++++++++++++++------
>  include/hw/virtio/vhost-vdpa.h |  1 +
>  net/vhost-vdpa.c               |  1 +
>  3 files changed, 21 insertions(+), 6 deletions(-)
>
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index d3f5721..b7896a8 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -189,15 +189,25 @@ static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s, uint32_t asid)
>
>  static int vhost_vdpa_dma_batch_begin_once(VhostVDPAShared *s, uint32_t asid)
>  {
> -    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) ||
> -        s->iotlb_batch_begin_sent) {
> +    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
>          return 0;
>      }
>
> -    if (vhost_vdpa_map_batch_begin(s, asid)) {
> -        s->iotlb_batch_begin_sent = true;
> +    if (s->iotlb_batch_begin_sent && s->iotlb_batch_asid != asid) {
> +        return -1;
> +    }
> +
> +    if (s->iotlb_batch_begin_sent) {
> +        return 0;
>      }
>
> +    if (!vhost_vdpa_map_batch_begin(s, asid)) {
> +        return 0;
> +    }
> +
> +    s->iotlb_batch_begin_sent = true;
> +    s->iotlb_batch_asid = asid;
> +
>      return 0;
>  }
>
> @@ -237,10 +247,13 @@ static int vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s, uint32_t asid)
>          return 0;
>      }
>
> -    if (vhost_vdpa_dma_batch_end(s, asid)) {
> -        s->iotlb_batch_begin_sent = false;
> +    if (!vhost_vdpa_dma_batch_end(s, asid)) {
> +        return 0;
>      }
>
> +    s->iotlb_batch_begin_sent = false;
> +    s->iotlb_batch_asid = -1;
> +
>      return 0;
>  }
>
> diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
> index 0fe0f60..219316f 100644
> --- a/include/hw/virtio/vhost-vdpa.h
> +++ b/include/hw/virtio/vhost-vdpa.h
> @@ -61,6 +61,7 @@ typedef struct vhost_vdpa_shared {
>      bool map_thread_enabled;
>
>      bool iotlb_batch_begin_sent;
> +    uint32_t iotlb_batch_asid;
>
>      /*
>       * The memory listener has been registered, so DMA maps have been sent to
> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> index e9b96ed..bc72345 100644
> --- a/net/vhost-vdpa.c
> +++ b/net/vhost-vdpa.c
> @@ -1933,6 +1933,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
>          s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
>          s->vhost_vdpa.shared->iova_range = iova_range;
>          s->vhost_vdpa.shared->shadow_data = svq;
> +        s->vhost_vdpa.shared->iotlb_batch_asid = -1;

This seems a trick, uAPI defines asid as:

        __u32 asid;

So technically -1U is a legal value.

Thanks

>          s->vhost_vdpa.shared->refcnt++;
>      } else if (!is_datapath) {
>          s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
> --
> 1.8.3.1
>
diff mbox series

Patch

diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index d3f5721..b7896a8 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -189,15 +189,25 @@  static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s, uint32_t asid)
 
 static int vhost_vdpa_dma_batch_begin_once(VhostVDPAShared *s, uint32_t asid)
 {
-    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH)) ||
-        s->iotlb_batch_begin_sent) {
+    if (!(s->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
         return 0;
     }
 
-    if (vhost_vdpa_map_batch_begin(s, asid)) {
-        s->iotlb_batch_begin_sent = true;
+    if (s->iotlb_batch_begin_sent && s->iotlb_batch_asid != asid) {
+        return -1;
+    }
+
+    if (s->iotlb_batch_begin_sent) {
+        return 0;
     }
 
+    if (!vhost_vdpa_map_batch_begin(s, asid)) {
+        return 0;
+    }
+
+    s->iotlb_batch_begin_sent = true;
+    s->iotlb_batch_asid = asid;
+
     return 0;
 }
 
@@ -237,10 +247,13 @@  static int vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s, uint32_t asid)
         return 0;
     }
 
-    if (vhost_vdpa_dma_batch_end(s, asid)) {
-        s->iotlb_batch_begin_sent = false;
+    if (!vhost_vdpa_dma_batch_end(s, asid)) {
+        return 0;
     }
 
+    s->iotlb_batch_begin_sent = false;
+    s->iotlb_batch_asid = -1;
+
     return 0;
 }
 
diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h
index 0fe0f60..219316f 100644
--- a/include/hw/virtio/vhost-vdpa.h
+++ b/include/hw/virtio/vhost-vdpa.h
@@ -61,6 +61,7 @@  typedef struct vhost_vdpa_shared {
     bool map_thread_enabled;
 
     bool iotlb_batch_begin_sent;
+    uint32_t iotlb_batch_asid;
 
     /*
      * The memory listener has been registered, so DMA maps have been sent to
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index e9b96ed..bc72345 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -1933,6 +1933,7 @@  static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
         s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
         s->vhost_vdpa.shared->iova_range = iova_range;
         s->vhost_vdpa.shared->shadow_data = svq;
+        s->vhost_vdpa.shared->iotlb_batch_asid = -1;
         s->vhost_vdpa.shared->refcnt++;
     } else if (!is_datapath) {
         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),