diff mbox series

[3/9] io_uring: add helper for filling cqes in __io_submit_flush_completions()

Message ID 20240408010322.4104395-4-ming.lei@redhat.com (mailing list archive)
State New, archived
Headers show
Series io_uring: support sqe group and provide group kbuf | expand

Commit Message

Ming Lei April 8, 2024, 1:03 a.m. UTC
No functional change; this prepares for supporting SQE group.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 io_uring/io_uring.c | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
diff mbox series

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6d4def11aebf..c73819c04c0b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1458,16 +1458,14 @@  static void io_free_batch_list(struct io_ring_ctx *ctx,
 	} while (node);
 }
 
-void __io_submit_flush_completions(struct io_ring_ctx *ctx)
-	__must_hold(&ctx->uring_lock)
+static inline void io_fill_cqe_lists(struct io_ring_ctx *ctx,
+				     struct io_wq_work_list *list)
 {
-	struct io_submit_state *state = &ctx->submit_state;
 	struct io_wq_work_node *node;
 
-	__io_cq_lock(ctx);
-	__wq_list_for_each(node, &state->compl_reqs) {
+	__wq_list_for_each(node, list) {
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
-					    comp_list);
+						    comp_list);
 
 		if (!(req->flags & REQ_F_CQE_SKIP) &&
 		    unlikely(!io_fill_cqe_req(ctx, req))) {
@@ -1480,6 +1478,15 @@  void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 			}
 		}
 	}
+}
+
+void __io_submit_flush_completions(struct io_ring_ctx *ctx)
+	__must_hold(&ctx->uring_lock)
+{
+	struct io_submit_state *state = &ctx->submit_state;
+
+	__io_cq_lock(ctx);
+	io_fill_cqe_lists(ctx, &state->compl_reqs);
 	__io_cq_unlock_post(ctx);
 
 	if (!wq_list_empty(&ctx->submit_state.compl_reqs)) {