diff mbox series

[RFC,20/27] vhost: Return used buffers

Message ID 20201120185105.279030-21-eperezma@redhat.com (mailing list archive)
State New, archived
Headers show
Series vDPA software assisted live migration | expand

Commit Message

Eugenio Perez Martin Nov. 20, 2020, 6:50 p.m. UTC
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 hw/virtio/vhost-sw-lm-ring.h |  3 +++
 hw/virtio/vhost-sw-lm-ring.c | 14 +++++++----
 hw/virtio/vhost.c            | 46 +++++++++++++++++++++++++++++++++---
 3 files changed, 56 insertions(+), 7 deletions(-)

Comments

Stefan Hajnoczi Dec. 8, 2020, 8:50 a.m. UTC | #1
On Fri, Nov 20, 2020 at 07:50:58PM +0100, Eugenio Pérez wrote:
> @@ -1028,6 +1061,7 @@ static int vhost_sw_live_migration_start(struct vhost_dev *dev)
>  
>      for (idx = 0; idx < dev->nvqs; ++idx) {
>          struct vhost_virtqueue *vq = &dev->vqs[idx];
> +        unsigned num = virtio_queue_get_num(dev->vdev, idx);
>          struct vhost_vring_addr addr = {
>              .index = idx,
>          };
> @@ -1044,6 +1078,12 @@ static int vhost_sw_live_migration_start(struct vhost_dev *dev)
>          r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
>          assert(r == 0);
>  
> +        r = vhost_backend_update_device_iotlb(dev, addr.used_user_addr,
> +                                              addr.used_user_addr,
> +                                              sizeof(vring_used_elem_t) * num,
> +                                              IOMMU_RW);

I don't remember seeing iotlb setup for the rest of the vring or guest
memory. Maybe this should go into a single patch so it's easy to review
the iova space layout.
diff mbox series

Patch

diff --git a/hw/virtio/vhost-sw-lm-ring.h b/hw/virtio/vhost-sw-lm-ring.h
index 429a125558..0c4fa772c7 100644
--- a/hw/virtio/vhost-sw-lm-ring.h
+++ b/hw/virtio/vhost-sw-lm-ring.h
@@ -17,6 +17,9 @@ 
 
 typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
 
+VirtIODevice *vhost_vring_vdev(VhostShadowVirtqueue *svq);
+VirtQueue *vhost_vring_vdev_vq(VhostShadowVirtqueue *svq);
+
 bool vhost_vring_kick(VhostShadowVirtqueue *vq);
 int vhost_vring_add(VhostShadowVirtqueue *vq, VirtQueueElement *elem);
 VirtQueueElement *vhost_vring_get_buf_rcu(VhostShadowVirtqueue *vq, size_t sz);
diff --git a/hw/virtio/vhost-sw-lm-ring.c b/hw/virtio/vhost-sw-lm-ring.c
index 4fafd1b278..244c722910 100644
--- a/hw/virtio/vhost-sw-lm-ring.c
+++ b/hw/virtio/vhost-sw-lm-ring.c
@@ -46,6 +46,16 @@  typedef struct VhostShadowVirtqueue {
     vring_desc_t descs[];
 } VhostShadowVirtqueue;
 
+VirtIODevice *vhost_vring_vdev(VhostShadowVirtqueue *svq)
+{
+    return svq->vdev;
+}
+
+VirtQueue *vhost_vring_vdev_vq(VhostShadowVirtqueue *svq)
+{
+    return svq->vq;
+}
+
 static bool vhost_vring_should_kick_rcu(VhostShadowVirtqueue *vq)
 {
     VirtIODevice *vdev = vq->vdev;
@@ -179,10 +189,6 @@  static int vhost_vring_add_split(VhostShadowVirtqueue *vq,
 int vhost_vring_add(VhostShadowVirtqueue *vq, VirtQueueElement *elem)
 {
     int host_head = vhost_vring_add_split(vq, elem);
-    if (vq->ring_id_maps[host_head]) {
-        g_free(vq->ring_id_maps[host_head]);
-    }
-
     vq->ring_id_maps[host_head] = elem;
     return 0;
 }
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index ac2bc14190..9a3c580dcf 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -986,17 +986,50 @@  static void handle_sw_lm_vq(VirtIODevice *vdev, VirtQueue *vq)
     } while(!virtio_queue_empty(vq));
 }
 
+static void handle_sw_lm_vq_call(struct vhost_dev *hdev,
+                                 VhostShadowVirtqueue *svq)
+{
+    VirtQueueElement *elem;
+    VirtIODevice *vdev = vhost_vring_vdev(svq);
+    VirtQueue *vq = vhost_vring_vdev_vq(svq);
+    uint16_t idx = virtio_get_queue_index(vq);
+
+    RCU_READ_LOCK_GUARD();
+    /*
+     * Forward as many used buffers as possible.
+     */
+    do {
+        unsigned i = 0;
+
+        vhost_vring_set_notification_rcu(svq, false);
+        while (true) {
+            elem = vhost_vring_get_buf_rcu(svq, sizeof(*elem));
+            if (!elem) {
+                break;
+            }
+
+            assert(i < virtio_queue_get_num(vdev, idx));
+            virtqueue_fill(vq, elem, elem->len, i++);
+        }
+
+        virtqueue_flush(vq, i);
+        virtio_notify_irqfd(vdev, vq);
+
+        vhost_vring_set_notification_rcu(svq, true);
+    } while (vhost_vring_poll_rcu(svq));
+}
+
 static void vhost_handle_call(EventNotifier *n)
 {
     struct vhost_virtqueue *hvq = container_of(n,
                                               struct vhost_virtqueue,
                                               masked_notifier);
     struct vhost_dev *vdev = hvq->dev;
-    int idx = vdev->vq_index + (hvq == &vdev->vqs[0] ? 0 : 1);
-    VirtQueue *vq = virtio_get_queue(vdev->vdev, idx);
+    int idx = hvq == &vdev->vqs[0] ? 0 : 1;
+    VhostShadowVirtqueue *vq = vdev->sw_lm_shadow_vq[idx];
 
     if (event_notifier_test_and_clear(n)) {
-        virtio_notify_irqfd(vdev->vdev, vq);
+        handle_sw_lm_vq_call(vdev, vq);
     }
 }
 
@@ -1028,6 +1061,7 @@  static int vhost_sw_live_migration_start(struct vhost_dev *dev)
 
     for (idx = 0; idx < dev->nvqs; ++idx) {
         struct vhost_virtqueue *vq = &dev->vqs[idx];
+        unsigned num = virtio_queue_get_num(dev->vdev, idx);
         struct vhost_vring_addr addr = {
             .index = idx,
         };
@@ -1044,6 +1078,12 @@  static int vhost_sw_live_migration_start(struct vhost_dev *dev)
         r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr);
         assert(r == 0);
 
+        r = vhost_backend_update_device_iotlb(dev, addr.used_user_addr,
+                                              addr.used_user_addr,
+                                              sizeof(vring_used_elem_t) * num,
+                                              IOMMU_RW);
+        assert(r == 0);
+
         r = dev->vhost_ops->vhost_set_vring_base(dev, &s);
         assert(r == 0);
     }