
[V3,3/9] io_uring: add helper of io_req_commit_cqe()

Message ID: 20240511001214.173711-4-ming.lei@redhat.com
State: New
Series: io_uring: support sqe group and provide group kbuf

Commit Message

Ming Lei May 11, 2024, 12:12 a.m. UTC
Add a helper, io_req_commit_cqe(), which can be used for posting CQEs
from both __io_submit_flush_completions() and io_req_complete_post().

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 io_uring/io_uring.c | 34 ++++++++++++++++++++--------------
 1 file changed, 20 insertions(+), 14 deletions(-)

Comments

Pavel Begunkov June 10, 2024, 1:18 a.m. UTC | #1
On 5/11/24 01:12, Ming Lei wrote:
> Add a helper, io_req_commit_cqe(), which can be used for posting CQEs
> from both __io_submit_flush_completions() and io_req_complete_post().

Please drop this patch and inline the further changes into the two
callers. They have different locking rules and different hotness, so
the code is better left duplicated until it can be cleaned up properly.
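
For reference, the two call sites being discussed differ roughly as
below (a condensed sketch of the pre-patch code, taken from the removed
hunks of this patch; the note on when lockless_cq is set is an
assumption, not something stated in this thread):

/* io_req_complete_post(): the CQ lock is already taken via
 * io_cq_lock(), so an overflowed CQE can be queued directly. */
io_cq_lock(ctx);
if (!(req->flags & REQ_F_CQE_SKIP)) {
	if (!io_fill_cqe_req(ctx, req))
		io_req_cqe_overflow(req);
}
io_cq_unlock_post(ctx);

/* __io_submit_flush_completions(): hot path; with a lockless CQ
 * (e.g. DEFER_TASKRUN rings) completion_lock is not held here, so it
 * is taken only around the unlikely overflow case. */
if (!(req->flags & REQ_F_CQE_SKIP) &&
    unlikely(!io_fill_cqe_req(ctx, req))) {
	if (ctx->lockless_cq) {
		spin_lock(&ctx->completion_lock);
		io_req_cqe_overflow(req);
		spin_unlock(&ctx->completion_lock);
	} else {
		io_req_cqe_overflow(req);
	}
}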


Ming Lei June 11, 2024, 1:21 p.m. UTC | #2
On Mon, Jun 10, 2024 at 02:18:34AM +0100, Pavel Begunkov wrote:
> On 5/11/24 01:12, Ming Lei wrote:
> > Add a helper, io_req_commit_cqe(), which can be used for posting CQEs
> > from both __io_submit_flush_completions() and io_req_complete_post().
> 
> Please drop this patch and inline the further changes into the two
> callers. They have different locking rules and different hotness, so
> the code is better left duplicated until it can be cleaned up properly.

Yes, the helper is just meant to make the following code cleaner and
more readable.

Actually it changes nothing for __io_submit_flush_completions(), and
io_req_complete_post() can be regarded as a non-fast path. So we can
keep the helper friendly to __io_submit_flush_completions() while
still covering io_req_complete_post().
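
To illustrate the point, here is a minimal, self-contained sketch (not
the kernel code; all names are made up) of why the helper stays
friendly to the fast path: with __always_inline, the compile-time
constant 'false' passed by the slow path folds the lockless branch
away, while the fast path keeps the runtime check it had before.

#include <stdbool.h>
#include <stdio.h>

#define __always_inline inline __attribute__((__always_inline__))

struct ctx { bool lockless; };

static void lock(struct ctx *c)     { (void)c; puts("lock"); }
static void unlock(struct ctx *c)   { (void)c; puts("unlock"); }
static void overflow(struct ctx *c) { (void)c; puts("overflow"); }

static __always_inline void commit(struct ctx *c, bool lockless)
{
	if (lockless) {
		lock(c);
		overflow(c);
		unlock(c);
	} else {
		overflow(c);
	}
}

/* constant 'false': the branch is eliminated at compile time */
void slow_path(struct ctx *c) { commit(c, false); }

/* runtime flag: the check stays, as in the open-coded loop */
void fast_path(struct ctx *c) { commit(c, c->lockless); }

int main(void)
{
	struct ctx c = { .lockless = true };
	slow_path(&c);	/* prints: overflow */
	fast_path(&c);	/* prints: lock, overflow, unlock */
	return 0;
}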


Thanks, 
Ming

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d3b9988cdae4..e4be930e0f1e 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -910,6 +910,22 @@ bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
 	return posted;
 }
 
+static __always_inline void io_req_commit_cqe(struct io_kiocb *req,
+		bool lockless_cq)
+{
+	struct io_ring_ctx *ctx = req->ctx;
+
+	if (unlikely(!io_fill_cqe_req(ctx, req))) {
+		if (lockless_cq) {
+			spin_lock(&ctx->completion_lock);
+			io_req_cqe_overflow(req);
+			spin_unlock(&ctx->completion_lock);
+		} else {
+			io_req_cqe_overflow(req);
+		}
+	}
+}
+
 static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -932,10 +948,8 @@ static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
 	}
 
 	io_cq_lock(ctx);
-	if (!(req->flags & REQ_F_CQE_SKIP)) {
-		if (!io_fill_cqe_req(ctx, req))
-			io_req_cqe_overflow(req);
-	}
+	if (!(req->flags & REQ_F_CQE_SKIP))
+		io_req_commit_cqe(req, false);
 	io_cq_unlock_post(ctx);
 
 	/*
@@ -1454,16 +1468,8 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 					    comp_list);
 
-		if (!(req->flags & REQ_F_CQE_SKIP) &&
-		    unlikely(!io_fill_cqe_req(ctx, req))) {
-			if (ctx->lockless_cq) {
-				spin_lock(&ctx->completion_lock);
-				io_req_cqe_overflow(req);
-				spin_unlock(&ctx->completion_lock);
-			} else {
-				io_req_cqe_overflow(req);
-			}
-		}
+		if (!(req->flags & REQ_F_CQE_SKIP))
+			io_req_commit_cqe(req, ctx->lockless_cq);
 	}
 	__io_cq_unlock_post(ctx);