@@ -317,7 +317,9 @@ static void xen_block_complete_aio(void *opaque, int ret)
}
xen_block_release_request(request);
- qemu_bh_schedule(dataplane->bh);
+ if (dataplane->more_work) {
+ qemu_bh_schedule(dataplane->bh);
+ }
done:
aio_context_release(dataplane->ctx);
@@ -514,12 +516,13 @@ static int xen_block_get_request(XenBlockDataPlane *dataplane,
*/
#define IO_PLUG_THRESHOLD 1
-static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
+static bool xen_block_handle_requests(XenBlockDataPlane *dataplane)
{
RING_IDX rc, rp;
XenBlockRequest *request;
int inflight_atstart = dataplane->requests_inflight;
int batched = 0;
+ bool done_something = false;
dataplane->more_work = 0;
@@ -551,6 +554,7 @@ static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
}
xen_block_get_request(dataplane, request, rc);
dataplane->rings.common.req_cons = ++rc;
+ done_something = true;
/* parse them */
if (xen_block_parse_request(request) != 0) {
@@ -602,10 +606,7 @@ static void xen_block_handle_requests(XenBlockDataPlane *dataplane)
blk_io_unplug(dataplane->blk);
}
- if (dataplane->more_work &&
- dataplane->requests_inflight < dataplane->max_requests) {
- qemu_bh_schedule(dataplane->bh);
- }
+ return done_something;
}
static void xen_block_dataplane_bh(void *opaque)
@@ -617,11 +618,11 @@ static void xen_block_dataplane_bh(void *opaque)
aio_context_release(dataplane->ctx);
}
-static void xen_block_dataplane_event(void *opaque)
+static bool xen_block_dataplane_event(void *opaque)
{
XenBlockDataPlane *dataplane = opaque;
- qemu_bh_schedule(dataplane->bh);
+ return xen_block_handle_requests(dataplane);
}
XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c
--- a/hw/xen/xen-bus.c
+++ b/hw/xen/xen-bus.c
@@ -931,13 +931,20 @@ struct XenEventChannel {
void *opaque;
};
+static bool xen_device_poll(void *opaque)
+{
+ XenEventChannel *channel = opaque;
+
+ return channel->handler(channel->opaque);
+}
+
static void xen_device_event(void *opaque)
{
XenEventChannel *channel = opaque;
unsigned long port = xenevtchn_pending(channel->xeh);
if (port == channel->local_port) {
- channel->handler(channel->opaque);
+ xen_device_poll(channel);
xenevtchn_unmask(channel->xeh, port);
}
@@ -972,7 +979,7 @@ XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
channel->ctx = ctx;
aio_set_fd_handler(channel->ctx, xenevtchn_fd(channel->xeh), false,
- xen_device_event, NULL, NULL, channel);
+ xen_device_event, NULL, xen_device_poll, channel);
QLIST_INSERT_HEAD(&xendev->event_channels, channel, list);
diff --git a/include/hw/xen/xen-bus.h b/include/hw/xen/xen-bus.h
--- a/include/hw/xen/xen-bus.h
+++ b/include/hw/xen/xen-bus.h
@@ -119,7 +119,7 @@ void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain,
XenDeviceGrantCopySegment segs[],
unsigned int nr_segs, Error **errp);
-typedef void (*XenEventHandler)(void *opaque);
+typedef bool (*XenEventHandler)(void *opaque);
XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
AioContext *ctx,
This patch introduces a poll callback for event channel fds and uses it to
invoke the channel callback function. To properly support polling, the event
channel callback function must return a boolean saying whether it did any
useful work. Thus xen_block_dataplane_event() is modified to invoke
xen_block_handle_requests() directly, and the latter returns true only if it
actually processes any requests. This also means that the call to
qemu_bh_schedule() moves into xen_block_complete_aio(), which is more
intuitive: the only reason to defer a poll of the shared ring should be that
insufficient resources prevented a previous poll from completing fully.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Anthony Perard <anthony.perard@citrix.com>
Cc: Kevin Wolf <kwolf@redhat.com>
Cc: Max Reitz <mreitz@redhat.com>
---
 hw/block/dataplane/xen-block.c | 17 +++++++++--------
 hw/xen/xen-bus.c               | 11 +++++++++--
 include/hw/xen/xen-bus.h       |  2 +-
 3 files changed, 19 insertions(+), 11 deletions(-)
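
For illustration only, not part of the patch: a minimal sketch of the handler
contract that the new XenEventHandler typedef expresses. ToyRing and
toy_ring_handler are hypothetical stand-ins for the blkif shared ring and
xen_block_dataplane_event(); the point is that the handler consumes whatever
is pending and reports via its return value whether it made any progress,
which is exactly what a poll callback needs to communicate.

#include <stdbool.h>

/* Hypothetical stand-in for the blkif shared ring. */
typedef struct ToyRing {
    unsigned int cons;  /* consumer index, advanced by the handler */
    unsigned int prod;  /* producer index, advanced by the frontend */
} ToyRing;

/* Matches the new XenEventHandler shape: bool (*)(void *opaque). */
static bool toy_ring_handler(void *opaque)
{
    ToyRing *ring = opaque;
    bool done_something = false;

    while (ring->cons != ring->prod) {
        ring->cons++;               /* process one request */
        done_something = true;
    }

    return done_something;          /* true only if work was done */
}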
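
Why the return value matters: a poll callback registered via
aio_set_fd_handler() is busy-waited by the event loop before it falls back to
blocking on the fd. Below is a simplified sketch of that poll phase;
poll_phase and max_spins are hypothetical names, and the real AioContext
logic in util/aio-posix.c is time-bounded rather than iteration-bounded.

#include <stdbool.h>

typedef bool AioPollFn(void *opaque);

/*
 * Spin on the poll callback for a bounded number of attempts and only
 * fall back to blocking on the fd if no handler reports progress.
 */
static bool poll_phase(AioPollFn *poll, void *opaque, int max_spins)
{
    for (int i = 0; i < max_spins; i++) {
        if (poll(opaque)) {
            return true;    /* progress made without a syscall */
        }
    }
    return false;           /* nothing pending; block on the fd */
}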