@@ -1214,13 +1214,16 @@ static inline void io_req_local_work_add(struct io_kiocb *req,
wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
}
-static void io_req_normal_work_add(struct io_kiocb *req)
+void io_req_normal_work_add(struct io_kiocb *first_req,
+ struct io_kiocb *last_req)
{
- struct io_uring_task *tctx = req->tctx;
- struct io_ring_ctx *ctx = req->ctx;
+ struct io_uring_task *tctx = first_req->tctx;
+ struct io_ring_ctx *ctx = first_req->ctx;
/* task_work already pending, we're done */
- if (!llist_add(&req->io_task_work.node, &tctx->task_list))
+ if (!llist_add_batch(&first_req->io_task_work.node,
+ &last_req->io_task_work.node,
+ &tctx->task_list))
return;
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
@@ -1243,7 +1246,7 @@ void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
io_req_local_work_add(req, req->ctx, flags);
else
- io_req_normal_work_add(req);
+ io_req_normal_work_add(req, req);
}
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
@@ -88,6 +88,8 @@ struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
unsigned flags);
+void io_req_normal_work_add(struct io_kiocb *first_req,
+ struct io_kiocb *last_req);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
Make io_req_normal_work_add accept a pre-linked range of requests (first_req..last_req) so that multiple requests can be added to tctx->task_list in a single llist_add_batch() call, reducing contention on the list. All requests in the range must belong to the same task context. Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com> --- io_uring/io_uring.c | 13 ++++++++----- io_uring/io_uring.h | 2 ++ 2 files changed, 10 insertions(+), 5 deletions(-)