
[RFC,05/27] vhost: Add hdev->dev.sw_lm_vq_handler

Message ID: 20201120185105.279030-6-eperezma@redhat.com
State: New, archived
Series: vDPA software assisted live migration

Commit Message

Eugenio Perez Martin Nov. 20, 2020, 6:50 p.m. UTC
Only virtio-net honors it.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
 include/hw/virtio/vhost.h |  1 +
 hw/net/virtio-net.c       | 39 ++++++++++++++++++++++++++++-----------
 2 files changed, 29 insertions(+), 11 deletions(-)

Comments

Stefan Hajnoczi Dec. 7, 2020, 4:52 p.m. UTC | #1
On Fri, Nov 20, 2020 at 07:50:43PM +0100, Eugenio Pérez wrote:
> Only virtio-net honors it.
> 
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
>  include/hw/virtio/vhost.h |  1 +
>  hw/net/virtio-net.c       | 39 ++++++++++++++++++++++++++++-----------
>  2 files changed, 29 insertions(+), 11 deletions(-)
> 
> diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> index 4a8bc75415..b5b7496537 100644
> --- a/include/hw/virtio/vhost.h
> +++ b/include/hw/virtio/vhost.h
> @@ -83,6 +83,7 @@ struct vhost_dev {
>      bool started;
>      bool log_enabled;
>      uint64_t log_size;
> +    VirtIOHandleOutput sw_lm_vq_handler;

sw == software?
lm == live migration?

Maybe there is a name that is clearer. What are these virtqueues called?
Shadow vqs? Logged vqs?

Live migration is a feature that uses dirty memory logging, but other
features may use dirty memory logging too. The name should probably not
be associated with live migration.

>      Error *migration_blocker;
>      const VhostOps *vhost_ops;
>      void *opaque;
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 9179013ac4..9a69ae3598 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -2628,24 +2628,32 @@ static void virtio_net_tx_bh(void *opaque)
>      }
>  }
>  
> -static void virtio_net_add_queue(VirtIONet *n, int index)
> +static void virtio_net_add_queue(VirtIONet *n, int index,
> +                                 VirtIOHandleOutput custom_handler)
>  {

We talked about the possibility of moving this into the generic vhost
code so that devices don't need to be modified. It would be nice to hide
this feature inside vhost.
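
Something along these lines is what I mean. Just a sketch:
virtio_queue_set_handler() is made up, but the vhost_dev fields and
virtio_get_queue() exist today:

#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio.h"

static void vhost_shadow_vq_handle_kick(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Relay available buffers through a shadow virtqueue so QEMU
     * can log dirty memory before the device processes them. */
}

/* Swap the kick handlers from inside generic vhost code so the
 * device models stay untouched. */
static void vhost_dev_enable_shadow_vqs(struct vhost_dev *hdev)
{
    int i;

    for (i = 0; i < hdev->nvqs; i++) {
        VirtQueue *vq = virtio_get_queue(hdev->vdev, hdev->vq_index + i);

        /* Hypothetical setter: replace the device's handler behind
         * its back. */
        virtio_queue_set_handler(vq, vhost_shadow_vq_handle_kick);
    }
}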
Eugenio Perez Martin Dec. 9, 2020, 3:02 p.m. UTC | #2
On Mon, Dec 7, 2020 at 5:52 PM Stefan Hajnoczi <stefanha@gmail.com> wrote:
>
> On Fri, Nov 20, 2020 at 07:50:43PM +0100, Eugenio Pérez wrote:
> > Only virtio-net honors it.
> >
> > Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> > ---
> >  include/hw/virtio/vhost.h |  1 +
> >  hw/net/virtio-net.c       | 39 ++++++++++++++++++++++++++++-----------
> >  2 files changed, 29 insertions(+), 11 deletions(-)
> >
> > diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
> > index 4a8bc75415..b5b7496537 100644
> > --- a/include/hw/virtio/vhost.h
> > +++ b/include/hw/virtio/vhost.h
> > @@ -83,6 +83,7 @@ struct vhost_dev {
> >      bool started;
> >      bool log_enabled;
> >      uint64_t log_size;
> > +    VirtIOHandleOutput sw_lm_vq_handler;
>
> sw == software?
> lm == live migration?
>
> Maybe there is a name that is clearer. What are these virtqueues called?
> Shadow vqs? Logged vqs?
>
> Live migration is a feature that uses dirty memory logging, but other
> features may use dirty memory logging too. The name should probably not
> be associated with live migration.
>

I totally agree; I find shadow_vq a better name for it.

> >      Error *migration_blocker;
> >      const VhostOps *vhost_ops;
> >      void *opaque;
> > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > index 9179013ac4..9a69ae3598 100644
> > --- a/hw/net/virtio-net.c
> > +++ b/hw/net/virtio-net.c
> > @@ -2628,24 +2628,32 @@ static void virtio_net_tx_bh(void *opaque)
> >      }
> >  }
> >
> > -static void virtio_net_add_queue(VirtIONet *n, int index)
> > +static void virtio_net_add_queue(VirtIONet *n, int index,
> > +                                 VirtIOHandleOutput custom_handler)
> >  {
>
> We talked about the possibility of moving this into the generic vhost
> code so that devices don't need to be modified. It would be nice to hide
> this feature inside vhost.

I'm thinking of tying it to VirtQueue, allowing the caller to override
the handler when it knows the old one cannot be called (that is, without
offering race-condition protection, as in the window after calling
vhost_dev_disable_notifiers and before QEMU starts processing
notifications itself).
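
Roughly like this (a sketch only: the setter name is made up, and it
would have to live in hw/virtio/virtio.c since VirtQueue is opaque
outside of it):

/* Deliberately no locking: the caller guarantees the previous
 * handler cannot run concurrently, e.g. by overriding only after
 * vhost_dev_disable_notifiers() and before QEMU starts processing
 * notifications itself. */
void virtio_queue_set_handler(VirtQueue *vq, VirtIOHandleOutput handler)
{
    vq->handle_output = handler;
}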

Thanks!
Stefan Hajnoczi Dec. 10, 2020, 11:30 a.m. UTC | #3
On Wed, Dec 09, 2020 at 04:02:56PM +0100, Eugenio Perez Martin wrote:
> On Mon, Dec 7, 2020 at 5:52 PM Stefan Hajnoczi <stefanha@gmail.com> wrote:
> > On Fri, Nov 20, 2020 at 07:50:43PM +0100, Eugenio Pérez wrote:
> > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > index 9179013ac4..9a69ae3598 100644
> > > --- a/hw/net/virtio-net.c
> > > +++ b/hw/net/virtio-net.c
> > > @@ -2628,24 +2628,32 @@ static void virtio_net_tx_bh(void *opaque)
> > >      }
> > >  }
> > >
> > > -static void virtio_net_add_queue(VirtIONet *n, int index)
> > > +static void virtio_net_add_queue(VirtIONet *n, int index,
> > > +                                 VirtIOHandleOutput custom_handler)
> > >  {
> >
> > We talked about the possibility of moving this into the generic vhost
> > code so that devices don't need to be modified. It would be nice to hide
> > this feature inside vhost.
> 
> I'm thinking of tying it to VirtQueue, allowing the caller to override
> the handler when it knows the old one cannot be called (that is, without
> offering race-condition protection, as in the window after calling
> vhost_dev_disable_notifiers and before QEMU starts processing
> notifications itself).

Yes, I can see how at least part of this belongs to VirtQueue.

Stefan

Patch

diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 4a8bc75415..b5b7496537 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -83,6 +83,7 @@ struct vhost_dev {
     bool started;
     bool log_enabled;
     uint64_t log_size;
+    VirtIOHandleOutput sw_lm_vq_handler;
     Error *migration_blocker;
     const VhostOps *vhost_ops;
     void *opaque;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 9179013ac4..9a69ae3598 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -2628,24 +2628,32 @@ static void virtio_net_tx_bh(void *opaque)
     }
 }
 
-static void virtio_net_add_queue(VirtIONet *n, int index)
+static void virtio_net_add_queue(VirtIONet *n, int index,
+                                 VirtIOHandleOutput custom_handler)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    VirtIOHandleOutput rx_vq_handler = virtio_net_handle_rx;
+    VirtIOHandleOutput tx_vq_handler;
+    bool tx_timer = n->net_conf.tx && !strcmp(n->net_conf.tx, "timer");
+
+    if (custom_handler) {
+        rx_vq_handler = tx_vq_handler = custom_handler;
+    } else if (tx_timer) {
+        tx_vq_handler = virtio_net_handle_tx_timer;
+    } else {
+        tx_vq_handler = virtio_net_handle_tx_bh;
+    }
 
     n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
-                                           virtio_net_handle_rx);
+                                           rx_vq_handler);
+    n->vqs[index].tx_vq = virtio_add_queue(vdev, n->net_conf.tx_queue_size,
+                                           tx_vq_handler);
 
-    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
-        n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
-                             virtio_net_handle_tx_timer);
+    if (tx_timer) {
         n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                               virtio_net_tx_timer,
                                               &n->vqs[index]);
     } else {
-        n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
-                             virtio_net_handle_tx_bh);
         n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
     }
 
@@ -2677,6 +2685,10 @@ static void virtio_net_del_queue(VirtIONet *n, int index)
 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    NetClientState *nc = n->nic->conf->peers.ncs[0];
+    struct vhost_net *hdev = get_vhost_net(nc);
+    VirtIOHandleOutput custom_handler = hdev ? hdev->dev.sw_lm_vq_handler
+                                             : NULL;
     int old_num_queues = virtio_get_num_queues(vdev);
     int new_num_queues = new_max_queues * 2 + 1;
     int i;
@@ -2702,7 +2714,7 @@ static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
 
     for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
         /* new_num_queues > old_num_queues */
-        virtio_net_add_queue(n, i / 2);
+        virtio_net_add_queue(n, i / 2, custom_handler);
     }
 
     /* add ctrl_vq last */
@@ -3256,6 +3268,8 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIONet *n = VIRTIO_NET(dev);
     NetClientState *nc;
+    struct vhost_net *hdev;
+    VirtIOHandleOutput custom_handler;
     int i;
 
     if (n->net_conf.mtu) {
@@ -3347,8 +3361,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
                                     n->net_conf.tx_queue_size);
 
+    nc = n->nic_conf.peers.ncs[0];
+    hdev = get_vhost_net(nc);
+    custom_handler = hdev ? hdev->dev.sw_lm_vq_handler : NULL;
     for (i = 0; i < n->max_queues; i++) {
-        virtio_net_add_queue(n, i);
+        virtio_net_add_queue(n, i, custom_handler);
     }
 
     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
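
For illustration, a minimal sketch of how a vhost backend could consume
the new field (shadow_vq_handler() and the setup function below are
hypothetical; this patch only adds the field and makes virtio-net honor
it):

#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio.h"

static void shadow_vq_handler(VirtIODevice *vdev, VirtQueue *vq)
{
    /* Process the guest's kicks through a shadow virtqueue,
     * logging dirty memory along the way. */
}

static void example_enable_sw_lm(struct vhost_dev *hdev)
{
    /* Must be set before virtio-net adds its queues: virtio-net reads
     * the field via get_vhost_net() in virtio_net_device_realize()
     * and virtio_net_change_num_queues(). */
    hdev->sw_lm_vq_handler = shadow_vq_handler;
}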