
[v4,07/20] block/export: stop using is_external in vhost-user-blk server

Message ID 20230425172716.1033562-8-stefanha@redhat.com (mailing list archive)
State Superseded
Series block: remove aio_disable_external() API

Commit Message

Stefan Hajnoczi April 25, 2023, 5:27 p.m. UTC
vhost-user activity must be suspended during bdrv_drained_begin/end().
This prevents new requests from interfering with whatever is happening
in the drained section.

Previously this was done using aio_set_fd_handler()'s is_external
argument. In a multi-queue block layer world the aio_disable_external()
API cannot be used since multiple AioContexts may be processing I/O, not
just one.

Switch to BlockDevOps->drained_begin/end() callbacks.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 block/export/vhost-user-blk-server.c | 43 ++++++++++++++--------------
 util/vhost-user-server.c             | 10 +++----
 2 files changed, 26 insertions(+), 27 deletions(-)
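
As a rough sketch (simplified, not the exact QEMU signatures), the two
approaches compare as follows:

    /* Old: fd handlers registered with is_external=true can be paused,
     * but only globally per AioContext: */
    aio_set_fd_handler(ctx, fd, true, kick_handler, NULL, NULL, NULL, opaque);
    aio_disable_external(ctx);  /* pauses every "external" handler in ctx */

    /* New: the export quiesces itself via per-device drain callbacks: */
    static const BlockDevOps ops = {
        .drained_begin = vu_blk_drained_begin, /* stop taking guest kicks */
        .drained_end   = vu_blk_drained_end,   /* resume on current context */
    };
    blk_set_dev_ops(blk, &ops, vexp);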

Comments

Kevin Wolf May 2, 2023, 4:04 p.m. UTC | #1
Am 25.04.2023 um 19:27 hat Stefan Hajnoczi geschrieben:
> vhost-user activity must be suspended during bdrv_drained_begin/end().
> This prevents new requests from interfering with whatever is happening
> in the drained section.
> 
> Previously this was done using aio_set_fd_handler()'s is_external
> argument. In a multi-queue block layer world the aio_disable_external()
> API cannot be used since multiple AioContext may be processing I/O, not
> just one.
> 
> Switch to BlockDevOps->drained_begin/end() callbacks.
> 
> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
>  block/export/vhost-user-blk-server.c | 43 ++++++++++++++--------------
>  util/vhost-user-server.c             | 10 +++----
>  2 files changed, 26 insertions(+), 27 deletions(-)
> 
> diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
> index 092b86aae4..d20f69cd74 100644
> --- a/block/export/vhost-user-blk-server.c
> +++ b/block/export/vhost-user-blk-server.c
> @@ -208,22 +208,6 @@ static const VuDevIface vu_blk_iface = {
>      .process_msg           = vu_blk_process_msg,
>  };
>  
> -static void blk_aio_attached(AioContext *ctx, void *opaque)
> -{
> -    VuBlkExport *vexp = opaque;
> -
> -    vexp->export.ctx = ctx;
> -    vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
> -}
> -
> -static void blk_aio_detach(void *opaque)
> -{
> -    VuBlkExport *vexp = opaque;
> -
> -    vhost_user_server_detach_aio_context(&vexp->vu_server);
> -    vexp->export.ctx = NULL;
> -}

So for changing the AioContext, we now rely on the fact that the node to
be changed is always drained, so the drain callbacks implicitly cover
this case, too?
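
If so, the sequence being relied on would be, schematically:

    bdrv_drained_begin(bs);  /* -> .drained_begin(): server detaches */
    /* the node's AioContext is switched while it is quiesced */
    bdrv_drained_end(bs);    /* -> .drained_end(): server re-attaches to
                              * whatever blk_get_aio_context() now returns */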

>  static void
>  vu_blk_initialize_config(BlockDriverState *bs,
>                           struct virtio_blk_config *config,
> @@ -272,6 +256,25 @@ static void vu_blk_exp_resize(void *opaque)
>      vu_config_change_msg(&vexp->vu_server.vu_dev);
>  }
>  
> +/* Called with vexp->export.ctx acquired */
> +static void vu_blk_drained_begin(void *opaque)
> +{
> +    VuBlkExport *vexp = opaque;
> +
> +    vhost_user_server_detach_aio_context(&vexp->vu_server);
> +}

Compared to the old code, we're losing the vexp->export.ctx = NULL. This
is correct at this point because after drained_begin we still keep
processing requests until we arrive at a quiescent state.

However, if we detach the AioContext because we're deleting the
iothread, won't we end up with a dangling pointer in vexp->export.ctx?
Or can we be certain that nothing interesting happens before drained_end
updates it with a new valid pointer again?
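
Schematically, the window being asked about:

    vu_blk_drained_begin(vexp);  /* server detaches; export.ctx keeps its
                                  * old value */
    /* ... iothread deleted here -> export.ctx dangles ... */
    vu_blk_drained_end(vexp);    /* export.ctx refreshed, valid again */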

Kevin
Stefan Hajnoczi May 2, 2023, 8:06 p.m. UTC | #2
On Tue, May 02, 2023 at 06:04:24PM +0200, Kevin Wolf wrote:
> Am 25.04.2023 um 19:27 hat Stefan Hajnoczi geschrieben:
> > vhost-user activity must be suspended during bdrv_drained_begin/end().
> > This prevents new requests from interfering with whatever is happening
> > in the drained section.
> > 
> > Previously this was done using aio_set_fd_handler()'s is_external
> > argument. In a multi-queue block layer world the aio_disable_external()
> > API cannot be used since multiple AioContext may be processing I/O, not
> > just one.
> > 
> > Switch to BlockDevOps->drained_begin/end() callbacks.
> > 
> > Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> > ---
> >  block/export/vhost-user-blk-server.c | 43 ++++++++++++++--------------
> >  util/vhost-user-server.c             | 10 +++----
> >  2 files changed, 26 insertions(+), 27 deletions(-)
> > 
> > diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
> > index 092b86aae4..d20f69cd74 100644
> > --- a/block/export/vhost-user-blk-server.c
> > +++ b/block/export/vhost-user-blk-server.c
> > @@ -208,22 +208,6 @@ static const VuDevIface vu_blk_iface = {
> >      .process_msg           = vu_blk_process_msg,
> >  };
> >  
> > -static void blk_aio_attached(AioContext *ctx, void *opaque)
> > -{
> > -    VuBlkExport *vexp = opaque;
> > -
> > -    vexp->export.ctx = ctx;
> > -    vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
> > -}
> > -
> > -static void blk_aio_detach(void *opaque)
> > -{
> > -    VuBlkExport *vexp = opaque;
> > -
> > -    vhost_user_server_detach_aio_context(&vexp->vu_server);
> > -    vexp->export.ctx = NULL;
> > -}
> 
> So for changing the AioContext, we now rely on the fact that the node to
> be changed is always drained, so the drain callbacks implicitly cover
> this case, too?

Yes.

> >  static void
> >  vu_blk_initialize_config(BlockDriverState *bs,
> >                           struct virtio_blk_config *config,
> > @@ -272,6 +256,25 @@ static void vu_blk_exp_resize(void *opaque)
> >      vu_config_change_msg(&vexp->vu_server.vu_dev);
> >  }
> >  
> > +/* Called with vexp->export.ctx acquired */
> > +static void vu_blk_drained_begin(void *opaque)
> > +{
> > +    VuBlkExport *vexp = opaque;
> > +
> > +    vhost_user_server_detach_aio_context(&vexp->vu_server);
> > +}
> 
> Compared to the old code, we're losing the vexp->export.ctx = NULL. This
> is correct at this point because after drained_begin we still keep
> processing requests until we arrive at a quiescent state.
> 
> However, if we detach the AioContext because we're deleting the
> iothread, won't we end up with a dangling pointer in vexp->export.ctx?
> Or can we be certain that nothing interesting happens before drained_end
> updates it with a new valid pointer again?

If you want I can add the detach() callback back again and set ctx to
NULL there?

Stefan
Kevin Wolf May 3, 2023, 8:08 a.m. UTC | #3
Am 02.05.2023 um 22:06 hat Stefan Hajnoczi geschrieben:
> On Tue, May 02, 2023 at 06:04:24PM +0200, Kevin Wolf wrote:
> > Am 25.04.2023 um 19:27 hat Stefan Hajnoczi geschrieben:
> > > vhost-user activity must be suspended during bdrv_drained_begin/end().
> > > This prevents new requests from interfering with whatever is happening
> > > in the drained section.
> > > 
> > > Previously this was done using aio_set_fd_handler()'s is_external
> > > argument. In a multi-queue block layer world the aio_disable_external()
> > > API cannot be used since multiple AioContext may be processing I/O, not
> > > just one.
> > > 
> > > Switch to BlockDevOps->drained_begin/end() callbacks.
> > > 
> > > Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> > > ---
> > >  block/export/vhost-user-blk-server.c | 43 ++++++++++++++--------------
> > >  util/vhost-user-server.c             | 10 +++----
> > >  2 files changed, 26 insertions(+), 27 deletions(-)
> > > 
> > > diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
> > > index 092b86aae4..d20f69cd74 100644
> > > --- a/block/export/vhost-user-blk-server.c
> > > +++ b/block/export/vhost-user-blk-server.c
> > > @@ -208,22 +208,6 @@ static const VuDevIface vu_blk_iface = {
> > >      .process_msg           = vu_blk_process_msg,
> > >  };
> > >  
> > > -static void blk_aio_attached(AioContext *ctx, void *opaque)
> > > -{
> > > -    VuBlkExport *vexp = opaque;
> > > -
> > > -    vexp->export.ctx = ctx;
> > > -    vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
> > > -}
> > > -
> > > -static void blk_aio_detach(void *opaque)
> > > -{
> > > -    VuBlkExport *vexp = opaque;
> > > -
> > > -    vhost_user_server_detach_aio_context(&vexp->vu_server);
> > > -    vexp->export.ctx = NULL;
> > > -}
> > 
> > So for changing the AioContext, we now rely on the fact that the node to
> > be changed is always drained, so the drain callbacks implicitly cover
> > this case, too?
> 
> Yes.

Ok. This surprised me a bit at first, but I think it's fine.

We just need to remember it if we ever decide that once we have
multiqueue, we can actually change the default AioContext without
draining the node. But maybe at that point, we have to do more
fundamental changes anyway.

> > >  static void
> > >  vu_blk_initialize_config(BlockDriverState *bs,
> > >                           struct virtio_blk_config *config,
> > > @@ -272,6 +256,25 @@ static void vu_blk_exp_resize(void *opaque)
> > >      vu_config_change_msg(&vexp->vu_server.vu_dev);
> > >  }
> > >  
> > > +/* Called with vexp->export.ctx acquired */
> > > +static void vu_blk_drained_begin(void *opaque)
> > > +{
> > > +    VuBlkExport *vexp = opaque;
> > > +
> > > +    vhost_user_server_detach_aio_context(&vexp->vu_server);
> > > +}
> > 
> > Compared to the old code, we're losing the vexp->export.ctx = NULL. This
> > is correct at this point because after drained_begin we still keep
> > processing requests until we arrive at a quiescent state.
> > 
> > However, if we detach the AioContext because we're deleting the
> > iothread, won't we end up with a dangling pointer in vexp->export.ctx?
> > Or can we be certain that nothing interesting happens before drained_end
> > updates it with a new valid pointer again?
> 
> If you want I can add the detach() callback back again and set ctx to
> NULL there?

I haven't thought enough about it to say if it's a problem. If you have
and are confident that it's correct the way it is, I'm happy with it.

But bringing the callback back is the minimal change compared to the old
state. It's just unnecessary code if we don't actually need it.

Kevin
Stefan Hajnoczi May 3, 2023, 1:11 p.m. UTC | #4
On Wed, May 03, 2023 at 10:08:46AM +0200, Kevin Wolf wrote:
> Am 02.05.2023 um 22:06 hat Stefan Hajnoczi geschrieben:
> > On Tue, May 02, 2023 at 06:04:24PM +0200, Kevin Wolf wrote:
> > > Am 25.04.2023 um 19:27 hat Stefan Hajnoczi geschrieben:
> > > > vhost-user activity must be suspended during bdrv_drained_begin/end().
> > > > This prevents new requests from interfering with whatever is happening
> > > > in the drained section.
> > > > 
> > > > Previously this was done using aio_set_fd_handler()'s is_external
> > > > argument. In a multi-queue block layer world the aio_disable_external()
> > > > API cannot be used since multiple AioContext may be processing I/O, not
> > > > just one.
> > > > 
> > > > Switch to BlockDevOps->drained_begin/end() callbacks.
> > > > 
> > > > Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> > > > ---
> > > >  block/export/vhost-user-blk-server.c | 43 ++++++++++++++--------------
> > > >  util/vhost-user-server.c             | 10 +++----
> > > >  2 files changed, 26 insertions(+), 27 deletions(-)
> > > > 
> > > > diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
> > > > index 092b86aae4..d20f69cd74 100644
> > > > --- a/block/export/vhost-user-blk-server.c
> > > > +++ b/block/export/vhost-user-blk-server.c
> > > > @@ -208,22 +208,6 @@ static const VuDevIface vu_blk_iface = {
> > > >      .process_msg           = vu_blk_process_msg,
> > > >  };
> > > >  
> > > > -static void blk_aio_attached(AioContext *ctx, void *opaque)
> > > > -{
> > > > -    VuBlkExport *vexp = opaque;
> > > > -
> > > > -    vexp->export.ctx = ctx;
> > > > -    vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
> > > > -}
> > > > -
> > > > -static void blk_aio_detach(void *opaque)
> > > > -{
> > > > -    VuBlkExport *vexp = opaque;
> > > > -
> > > > -    vhost_user_server_detach_aio_context(&vexp->vu_server);
> > > > -    vexp->export.ctx = NULL;
> > > > -}
> > > 
> > > So for changing the AioContext, we now rely on the fact that the node to
> > > be changed is always drained, so the drain callbacks implicitly cover
> > > this case, too?
> > 
> > Yes.
> 
> Ok. This surprised me a bit at first, but I think it's fine.
> 
> We just need to remember it if we ever decide that once we have
> multiqueue, we can actually change the default AioContext without
> draining the node. But maybe at that point, we have to do more
> fundamental changes anyway.
> 
> > > >  static void
> > > >  vu_blk_initialize_config(BlockDriverState *bs,
> > > >                           struct virtio_blk_config *config,
> > > > @@ -272,6 +256,25 @@ static void vu_blk_exp_resize(void *opaque)
> > > >      vu_config_change_msg(&vexp->vu_server.vu_dev);
> > > >  }
> > > >  
> > > > +/* Called with vexp->export.ctx acquired */
> > > > +static void vu_blk_drained_begin(void *opaque)
> > > > +{
> > > > +    VuBlkExport *vexp = opaque;
> > > > +
> > > > +    vhost_user_server_detach_aio_context(&vexp->vu_server);
> > > > +}
> > > 
> > > Compared to the old code, we're losing the vexp->export.ctx = NULL. This
> > > is correct at this point because after drained_begin we still keep
> > > processing requests until we arrive at a quiescent state.
> > > 
> > > However, if we detach the AioContext because we're deleting the
> > > iothread, won't we end up with a dangling pointer in vexp->export.ctx?
> > > Or can we be certain that nothing interesting happens before drained_end
> > > updates it with a new valid pointer again?
> > 
> > If you want I can add the detach() callback back again and set ctx to
> > NULL there?
> 
> I haven't thought enough about it to say if it's a problem. If you have
> and are confident that it's correct the way it is, I'm happy with it.
>
> But bringing the callback back is the minimal change compared to the old
> state. It's just unnecessary code if we don't actually need it.

The reasoning behind my patch is that detach() sets NULL today and we
would see crashes if ctx was accessed between detach() and attach().
Therefore, I'm assuming there are no ctx accesses in the code today and
removing the ctx = NULL assignment doesn't break anything.

However, my approach is not very defensive. If the code is changed in a
way that accesses ctx when it's not supposed to, then a dangling pointer
will be accessed.

I think leaving the detach() callback there can be justified because it
will make it easier to detect bugs in the future. I'll add it back in
the next revision.

Stefan
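
For reference, a hypothetical sketch of the re-added callback, based on
the code this patch removed (the next revision may differ):

    static void blk_aio_detach(void *opaque)
    {
        VuBlkExport *vexp = opaque;

        /* Turn any stale access into an obvious NULL-pointer crash
         * instead of a silent use of a dangling AioContext. */
        vexp->export.ctx = NULL;
    }
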
Kevin Wolf May 3, 2023, 1:43 p.m. UTC | #5
Am 03.05.2023 um 15:11 hat Stefan Hajnoczi geschrieben:
> On Wed, May 03, 2023 at 10:08:46AM +0200, Kevin Wolf wrote:
> > Am 02.05.2023 um 22:06 hat Stefan Hajnoczi geschrieben:
> > > On Tue, May 02, 2023 at 06:04:24PM +0200, Kevin Wolf wrote:
> > > > Am 25.04.2023 um 19:27 hat Stefan Hajnoczi geschrieben:
> > > > > vhost-user activity must be suspended during bdrv_drained_begin/end().
> > > > > This prevents new requests from interfering with whatever is happening
> > > > > in the drained section.
> > > > > 
> > > > > Previously this was done using aio_set_fd_handler()'s is_external
> > > > > argument. In a multi-queue block layer world the aio_disable_external()
> > > > > API cannot be used since multiple AioContext may be processing I/O, not
> > > > > just one.
> > > > > 
> > > > > Switch to BlockDevOps->drained_begin/end() callbacks.
> > > > > 
> > > > > Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> > > > > ---
> > > > >  block/export/vhost-user-blk-server.c | 43 ++++++++++++++--------------
> > > > >  util/vhost-user-server.c             | 10 +++----
> > > > >  2 files changed, 26 insertions(+), 27 deletions(-)
> > > > > 
> > > > > diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
> > > > > index 092b86aae4..d20f69cd74 100644
> > > > > --- a/block/export/vhost-user-blk-server.c
> > > > > +++ b/block/export/vhost-user-blk-server.c
> > > > > @@ -208,22 +208,6 @@ static const VuDevIface vu_blk_iface = {
> > > > >      .process_msg           = vu_blk_process_msg,
> > > > >  };
> > > > >  
> > > > > -static void blk_aio_attached(AioContext *ctx, void *opaque)
> > > > > -{
> > > > > -    VuBlkExport *vexp = opaque;
> > > > > -
> > > > > -    vexp->export.ctx = ctx;
> > > > > -    vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
> > > > > -}
> > > > > -
> > > > > -static void blk_aio_detach(void *opaque)
> > > > > -{
> > > > > -    VuBlkExport *vexp = opaque;
> > > > > -
> > > > > -    vhost_user_server_detach_aio_context(&vexp->vu_server);
> > > > > -    vexp->export.ctx = NULL;
> > > > > -}
> > > > 
> > > > So for changing the AioContext, we now rely on the fact that the node to
> > > > be changed is always drained, so the drain callbacks implicitly cover
> > > > this case, too?
> > > 
> > > Yes.
> > 
> > Ok. This surprised me a bit at first, but I think it's fine.
> > 
> > We just need to remember it if we ever decide that once we have
> > multiqueue, we can actually change the default AioContext without
> > draining the node. But maybe at that point, we have to do more
> > fundamental changes anyway.
> > 
> > > > >  static void
> > > > >  vu_blk_initialize_config(BlockDriverState *bs,
> > > > >                           struct virtio_blk_config *config,
> > > > > @@ -272,6 +256,25 @@ static void vu_blk_exp_resize(void *opaque)
> > > > >      vu_config_change_msg(&vexp->vu_server.vu_dev);
> > > > >  }
> > > > >  
> > > > > +/* Called with vexp->export.ctx acquired */
> > > > > +static void vu_blk_drained_begin(void *opaque)
> > > > > +{
> > > > > +    VuBlkExport *vexp = opaque;
> > > > > +
> > > > > +    vhost_user_server_detach_aio_context(&vexp->vu_server);
> > > > > +}
> > > > 
> > > > Compared to the old code, we're losing the vexp->export.ctx = NULL. This
> > > > is correct at this point because after drained_begin we still keep
> > > > processing requests until we arrive at a quiescent state.
> > > > 
> > > > However, if we detach the AioContext because we're deleting the
> > > > iothread, won't we end up with a dangling pointer in vexp->export.ctx?
> > > > Or can we be certain that nothing interesting happens before drained_end
> > > > updates it with a new valid pointer again?
> > > 
> > > If you want I can add the detach() callback back again and set ctx to
> > > NULL there?
> > 
> > I haven't thought enough about it to say if it's a problem. If you have
> > and are confident that it's correct the way it is, I'm happy with it.
> >
> > But bringing the callback back is the minimal change compared to the old
> > state. It's just unnecessary code if we don't actually need it.
> 
> The reasoning behind my patch is that detach() sets NULL today and we
> would see crashes if ctx was accessed between detach() -> attach().
> Therefore, I'm assuming there are no ctx accesses in the code today and
> removing the ctx = NULL assignment doesn't break anything.

Sometimes ctx = NULL defaults to qemu_get_aio_context(), so in theory
there could be cases where NULL works, but a dangling pointer wouldn't.
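
(Illustrative pattern only, not a specific call site:

    AioContext *ctx = vexp->export.ctx ?: qemu_get_aio_context();

With a NULL ctx this falls back to the main loop context; with a
dangling pointer it does not.)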

> However, my approach is not very defensive. If the code is changed in
> a way that accesses ctx when it's not supposed to, then a dangling
> pointer will be accessed.
> 
> I think leaving the detach() callback there can be justified because it
> will make it easier to detect bugs in the future. I'll add it back in
> the next revision.

Ok, sounds good to me.

Kevin

Patch

diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
index 092b86aae4..d20f69cd74 100644
--- a/block/export/vhost-user-blk-server.c
+++ b/block/export/vhost-user-blk-server.c
@@ -208,22 +208,6 @@  static const VuDevIface vu_blk_iface = {
     .process_msg           = vu_blk_process_msg,
 };
 
-static void blk_aio_attached(AioContext *ctx, void *opaque)
-{
-    VuBlkExport *vexp = opaque;
-
-    vexp->export.ctx = ctx;
-    vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
-}
-
-static void blk_aio_detach(void *opaque)
-{
-    VuBlkExport *vexp = opaque;
-
-    vhost_user_server_detach_aio_context(&vexp->vu_server);
-    vexp->export.ctx = NULL;
-}
-
 static void
 vu_blk_initialize_config(BlockDriverState *bs,
                          struct virtio_blk_config *config,
@@ -272,6 +256,25 @@  static void vu_blk_exp_resize(void *opaque)
     vu_config_change_msg(&vexp->vu_server.vu_dev);
 }
 
+/* Called with vexp->export.ctx acquired */
+static void vu_blk_drained_begin(void *opaque)
+{
+    VuBlkExport *vexp = opaque;
+
+    vhost_user_server_detach_aio_context(&vexp->vu_server);
+}
+
+/* Called with vexp->export.blk AioContext acquired */
+static void vu_blk_drained_end(void *opaque)
+{
+    VuBlkExport *vexp = opaque;
+
+    /* Refresh AioContext in case it changed */
+    vexp->export.ctx = blk_get_aio_context(vexp->export.blk);
+
+    vhost_user_server_attach_aio_context(&vexp->vu_server, vexp->export.ctx);
+}
+
 /*
  * Ensures that bdrv_drained_begin() waits until in-flight requests complete.
  *
@@ -285,6 +288,8 @@  static bool vu_blk_drained_poll(void *opaque)
 }
 
 static const BlockDevOps vu_blk_dev_ops = {
+    .drained_begin = vu_blk_drained_begin,
+    .drained_end   = vu_blk_drained_end,
     .drained_poll  = vu_blk_drained_poll,
     .resize_cb = vu_blk_exp_resize,
 };
@@ -328,15 +333,11 @@  static int vu_blk_exp_create(BlockExport *exp, BlockExportOptions *opts,
                              logical_block_size, num_queues);
 
     blk_set_dev_ops(exp->blk, &vu_blk_dev_ops, vexp);
-    blk_add_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
-                                 vexp);
 
     blk_set_dev_ops(exp->blk, &vu_blk_dev_ops, vexp);
 
     if (!vhost_user_server_start(&vexp->vu_server, vu_opts->addr, exp->ctx,
                                  num_queues, &vu_blk_iface, errp)) {
-        blk_remove_aio_context_notifier(exp->blk, blk_aio_attached,
-                                        blk_aio_detach, vexp);
         blk_set_dev_ops(exp->blk, NULL, NULL);
         g_free(vexp->handler.serial);
         return -EADDRNOTAVAIL;
@@ -349,8 +350,6 @@  static void vu_blk_exp_delete(BlockExport *exp)
 {
     VuBlkExport *vexp = container_of(exp, VuBlkExport, export);
 
-    blk_remove_aio_context_notifier(exp->blk, blk_aio_attached, blk_aio_detach,
-                                    vexp);
     blk_set_dev_ops(exp->blk, NULL, NULL);
     g_free(vexp->handler.serial);
 }
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index 2e6b640050..332aea9306 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -278,7 +278,7 @@  set_watch(VuDev *vu_dev, int fd, int vu_evt,
         vu_fd_watch->fd = fd;
         vu_fd_watch->cb = cb;
         qemu_socket_set_nonblock(fd);
-        aio_set_fd_handler(server->ioc->ctx, fd, true, kick_handler,
+        aio_set_fd_handler(server->ioc->ctx, fd, false, kick_handler,
                            NULL, NULL, NULL, vu_fd_watch);
         vu_fd_watch->vu_dev = vu_dev;
         vu_fd_watch->pvt = pvt;
@@ -299,7 +299,7 @@  static void remove_watch(VuDev *vu_dev, int fd)
     if (!vu_fd_watch) {
         return;
     }
-    aio_set_fd_handler(server->ioc->ctx, fd, true,
+    aio_set_fd_handler(server->ioc->ctx, fd, false,
                        NULL, NULL, NULL, NULL, NULL);
 
     QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
@@ -362,7 +362,7 @@  void vhost_user_server_stop(VuServer *server)
         VuFdWatch *vu_fd_watch;
 
         QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
-            aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+            aio_set_fd_handler(server->ctx, vu_fd_watch->fd, false,
                                NULL, NULL, NULL, NULL, vu_fd_watch);
         }
 
@@ -403,7 +403,7 @@  void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)
     qio_channel_attach_aio_context(server->ioc, ctx);
 
     QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
-        aio_set_fd_handler(ctx, vu_fd_watch->fd, true, kick_handler, NULL,
+        aio_set_fd_handler(ctx, vu_fd_watch->fd, false, kick_handler, NULL,
                            NULL, NULL, vu_fd_watch);
     }
 
@@ -417,7 +417,7 @@  void vhost_user_server_detach_aio_context(VuServer *server)
         VuFdWatch *vu_fd_watch;
 
         QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
-            aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+            aio_set_fd_handler(server->ctx, vu_fd_watch->fd, false,
                                NULL, NULL, NULL, NULL, vu_fd_watch);
         }