@@ -212,15 +212,21 @@ static void blk_aio_attached(AioContext *ctx, void *opaque)
{
VuBlkExport *vexp = opaque;
+ /*
+ * The actual attach will happen in vu_blk_drained_end() and we just
+ * restore ctx here.
+ */
vexp->export.ctx = ctx;
- vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
}
static void blk_aio_detach(void *opaque)
{
VuBlkExport *vexp = opaque;
- vhost_user_server_detach_aio_context(&vexp->vu_server);
+ /*
+ * The actual detach already happened in vu_blk_drained_begin() but from
+ * this point on we must not access ctx anymore.
+ */
vexp->export.ctx = NULL;
}
@@ -272,6 +278,22 @@ static void vu_blk_exp_resize(void *opaque)
vu_config_change_msg(&vexp->vu_server.vu_dev);
}
+/* Called with vexp->export.ctx acquired */
+static void vu_blk_drained_begin(void *opaque)
+{
+ VuBlkExport *vexp = opaque;
+
+ vhost_user_server_detach_aio_context(&vexp->vu_server);
+}
+
+/* Called with vexp->export.blk AioContext acquired */
+static void vu_blk_drained_end(void *opaque)
+{
+ VuBlkExport *vexp = opaque;
+
+ vhost_user_server_attach_aio_context(&vexp->vu_server, vexp->export.ctx);
+}
+
/*
* Ensures that bdrv_drained_begin() waits until in-flight requests complete.
*
@@ -285,6 +307,8 @@ static bool vu_blk_drained_poll(void *opaque)
}
static const BlockDevOps vu_blk_dev_ops = {
+ .drained_begin = vu_blk_drained_begin,
+ .drained_end = vu_blk_drained_end,
.drained_poll = vu_blk_drained_poll,
.resize_cb = vu_blk_exp_resize,
};
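
The two hunks above work as a pair: the drained callbacks now perform the real detach/attach of the vhost-user server, while blk_aio_attached()/blk_aio_detach() are reduced to bookkeeping on vexp->export.ctx. Per the comments, drained_begin fires before blk_aio_detach() and drained_end fires after blk_aio_attached(), so during an AioContext switch the kick fd handlers stay unregistered for the whole drained section and are re-registered only once the new context has been recorded. The stand-alone sketch below models only that call ordering; every fake_* name is hypothetical and none of it is QEMU API.

/*
 * Stand-alone model of the ordering the comments above rely on:
 * drained_begin -> aio detach notifier -> aio attach notifier -> drained_end
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    void *ctx;       /* mirrors vexp->export.ctx */
    bool attached;   /* true while the kick fd handlers are registered */
} FakeExport;

static void fake_drained_begin(FakeExport *e)           /* vu_blk_drained_begin() */
{
    e->attached = false;                                 /* real detach happens here */
}

static void fake_aio_detach(FakeExport *e)               /* blk_aio_detach() */
{
    e->ctx = NULL;                                       /* bookkeeping only */
}

static void fake_aio_attach(FakeExport *e, void *ctx)    /* blk_aio_attached() */
{
    e->ctx = ctx;                                        /* bookkeeping only */
}

static void fake_drained_end(FakeExport *e)              /* vu_blk_drained_end() */
{
    assert(e->ctx != NULL);                              /* ctx was restored above */
    e->attached = true;                                  /* real attach happens here */
}

int main(void)
{
    int old_ctx, new_ctx;
    FakeExport e = { .ctx = &old_ctx, .attached = true };

    fake_drained_begin(&e);
    fake_aio_detach(&e);
    fake_aio_attach(&e, &new_ctx);
    fake_drained_end(&e);

    printf("attached=%d, ctx moved to new context: %s\n",
           e.attached, e.ctx == &new_ctx ? "yes" : "no");
    return 0;
}
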
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -278,7 +278,7 @@ set_watch(VuDev *vu_dev, int fd, int vu_evt,
vu_fd_watch->fd = fd;
vu_fd_watch->cb = cb;
qemu_socket_set_nonblock(fd);
- aio_set_fd_handler(server->ioc->ctx, fd, true, kick_handler,
+ aio_set_fd_handler(server->ioc->ctx, fd, false, kick_handler,
NULL, NULL, NULL, vu_fd_watch);
vu_fd_watch->vu_dev = vu_dev;
vu_fd_watch->pvt = pvt;
@@ -299,7 +299,7 @@ static void remove_watch(VuDev *vu_dev, int fd)
if (!vu_fd_watch) {
return;
}
- aio_set_fd_handler(server->ioc->ctx, fd, true,
+ aio_set_fd_handler(server->ioc->ctx, fd, false,
NULL, NULL, NULL, NULL, NULL);
QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
@@ -362,7 +362,7 @@ void vhost_user_server_stop(VuServer *server)
VuFdWatch *vu_fd_watch;
QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
- aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+ aio_set_fd_handler(server->ctx, vu_fd_watch->fd, false,
NULL, NULL, NULL, NULL, vu_fd_watch);
}
@@ -403,7 +403,7 @@ void vhost_user_server_attach_aio_context(VuServer *server, AioContext *ctx)
qio_channel_attach_aio_context(server->ioc, ctx);
QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
- aio_set_fd_handler(ctx, vu_fd_watch->fd, true, kick_handler, NULL,
+ aio_set_fd_handler(ctx, vu_fd_watch->fd, false, kick_handler, NULL,
NULL, NULL, vu_fd_watch);
}
@@ -417,7 +417,7 @@ void vhost_user_server_detach_aio_context(VuServer *server)
VuFdWatch *vu_fd_watch;
QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
- aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+ aio_set_fd_handler(server->ctx, vu_fd_watch->fd, false,
NULL, NULL, NULL, NULL, vu_fd_watch);
}
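
The remaining hunks all flip the third argument of aio_set_fd_handler() from true to false; in the QEMU version this diff targets that parameter is the is_external flag. Assuming the usual semantics of that flag, where handlers marked external were suppressed while a drained section was active, it becomes redundant here: vu_blk_drained_begin()/vu_blk_drained_end() now remove and re-add the handlers explicitly, so the kick fds can be registered as internal. The stand-alone sketch below models that trade-off under those assumptions; the fake_* names are hypothetical, not QEMU API.

#include <stdbool.h>
#include <stdio.h>

/* Models a registered fd handler and the old is_external gate. */
typedef struct {
    bool registered;
    bool is_external;
} FakeHandler;

static int drain_depth;   /* models suppression of external handlers while drained */

static void fake_dispatch(const FakeHandler *h, const char *label)
{
    bool runs = h->registered && !(h->is_external && drain_depth > 0);
    printf("%-22s handler %s\n", label, runs ? "runs" : "is suppressed");
}

int main(void)
{
    /* Old scheme: the handler stays registered; the external flag gates it. */
    FakeHandler old_style = { .registered = true, .is_external = true };
    drain_depth++;                       /* drained section begins */
    fake_dispatch(&old_style, "old, while drained:");
    drain_depth--;                       /* drained section ends */

    /*
     * New scheme: drained_begin removes the handler and drained_end re-adds
     * it, so it can be registered as internal (is_external == false).
     */
    FakeHandler new_style = { .registered = true, .is_external = false };
    drain_depth++;
    new_style.registered = false;        /* vhost_user_server_detach_aio_context() */
    fake_dispatch(&new_style, "new, while drained:");
    new_style.registered = true;         /* vhost_user_server_attach_aio_context() */
    drain_depth--;
    fake_dispatch(&new_style, "new, after drain:");

    return 0;
}
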