@@ -1428,21 +1428,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
 		 */
 		if (wq_list_empty(&ctx->iopoll_list) ||
 		    io_task_work_pending(ctx)) {
+			u32 tail = ctx->cached_cq_tail;
+
 			if (!llist_empty(&ctx->work_llist))
 				__io_run_local_work(ctx, true);
+
 			if (task_work_pending(current) ||
 			    wq_list_empty(&ctx->iopoll_list)) {
-				u32 tail = ctx->cached_cq_tail;
-
 				mutex_unlock(&ctx->uring_lock);
 				io_run_task_work();
 				mutex_lock(&ctx->uring_lock);
-
-				/* some requests don't go through iopoll_list */
-				if (tail != ctx->cached_cq_tail ||
-				    wq_list_empty(&ctx->iopoll_list))
-					break;
 			}
+			/* some requests don't go through iopoll_list */
+			if (tail != ctx->cached_cq_tail ||
+			    wq_list_empty(&ctx->iopoll_list))
+				break;
 		}
 		ret = io_do_iopoll(ctx, !min);
 		if (ret < 0)
Keep the checks for whether or not to break out of the iopoll loop the
same for normal and deferred task work; this includes the
->cached_cq_tail checks guarding against polling for more completions
than were asked for.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
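
To illustrate why the shared tail snapshot matters, below is a minimal
userspace sketch of the unified break condition. It is a simplified model
of the control flow after this patch, not the kernel source: struct
ring_ctx, run_local_work(), run_task_work() and iopoll_should_break() are
illustrative stand-ins for the real io_uring types and helpers.

/*
 * Minimal userspace sketch of the unified iopoll break check.
 * Kernel helpers are replaced with stubs so the control flow can be
 * compiled and run standalone.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ring_ctx {
	uint32_t cached_cq_tail;	/* completions posted so far */
	bool iopoll_list_empty;		/* no requests queued for polling */
	bool local_work_pending;	/* deferred (local) task work */
	bool task_work_pending;		/* normal task work */
};

/* Stub: running task work may post completions that bypass iopoll_list. */
static void run_task_work(struct ring_ctx *ctx)
{
	if (ctx->task_work_pending) {
		ctx->task_work_pending = false;
		ctx->cached_cq_tail++;	/* e.g. a punted request completed */
	}
}

static void run_local_work(struct ring_ctx *ctx)
{
	if (ctx->local_work_pending) {
		ctx->local_work_pending = false;
		ctx->cached_cq_tail++;
	}
}

/*
 * One iteration of the polling loop: returns true when the caller
 * should stop polling.  Both task work flavours now share the same
 * exit condition, keyed off a tail snapshot taken before any task
 * work runs.
 */
static bool iopoll_should_break(struct ring_ctx *ctx)
{
	uint32_t tail = ctx->cached_cq_tail;	/* snapshot first */

	if (ctx->local_work_pending)
		run_local_work(ctx);
	if (ctx->task_work_pending || ctx->iopoll_list_empty)
		run_task_work(ctx);

	/* some requests don't go through iopoll_list */
	return tail != ctx->cached_cq_tail || ctx->iopoll_list_empty;
}

int main(void)
{
	struct ring_ctx ctx = {
		.cached_cq_tail = 0,
		.iopoll_list_empty = false,
		.local_work_pending = true,	/* local tw posts a CQE */
		.task_work_pending = false,
	};

	/*
	 * The tail moved while running local task work, so we break:
	 * polling on would risk reaping more CQEs than asked for.
	 */
	printf("break: %s\n", iopoll_should_break(&ctx) ? "yes" : "no");
	return 0;
}

In the pre-patch code only the normal task work path took the tail
snapshot and checked it, so the deferred path could keep polling past
the requested count; hoisting the snapshot and the break check out of
the inner branch, as the hunk above does, applies the guard to both.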