--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -978,8 +978,10 @@ static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
struct io_rsrc_node *rsrc_node = NULL;
io_cq_lock(ctx);
- if (!(req->flags & REQ_F_CQE_SKIP))
- io_fill_cqe_req(ctx, req);
+ if (!(req->flags & REQ_F_CQE_SKIP)) {
+ if (!io_fill_cqe_req(ctx, req))
+ io_req_cqe_overflow(req);
+ }
/*
* If we're the last reference to this request, add to our locked
@@ -1556,7 +1558,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
comp_list);
if (!(req->flags & REQ_F_CQE_SKIP) &&
- unlikely(!__io_fill_cqe_req(ctx, req))) {
+ unlikely(!io_fill_cqe_req(ctx, req))) {
if (ctx->task_complete) {
spin_lock(&ctx->completion_lock);
io_req_cqe_overflow(req);
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -133,8 +133,7 @@ static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
return io_get_cqe_overflow(ctx, false);
}
-static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
+static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
struct io_uring_cqe *cqe;
@@ -168,14 +167,6 @@ static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
return true;
}
-static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
- struct io_kiocb *req)
-{
- if (likely(__io_fill_cqe_req(ctx, req)))
- return true;
- return io_req_cqe_overflow(req);
-}
-
static inline void req_set_fail(struct io_kiocb *req)
{
req->flags |= REQ_F_FAIL;
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1064,7 +1064,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
continue;
req->cqe.flags = io_put_kbuf(req, 0);
- if (unlikely(!__io_fill_cqe_req(ctx, req))) {
+ if (unlikely(!io_fill_cqe_req(ctx, req))) {
spin_lock(&ctx->completion_lock);
io_req_cqe_overflow(req);
spin_unlock(&ctx->completion_lock);
io_fill_cqe_req() is only called from one place, open code it, and rename
__io_fill_cqe_req().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c |  8 +++++---
 io_uring/io_uring.h | 11 +----------
 io_uring/rw.c       |  2 +-
 3 files changed, 7 insertions(+), 14 deletions(-)
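
For readers following the change: after this patch, io_fill_cqe_req() is the
fast path that simply returns false when the CQ ring has no free slot, and
each remaining caller falls back to io_req_cqe_overflow() itself, taking the
completion lock where its context requires. The snippet below is a minimal,
self-contained sketch of that call pattern only; the ring, the lock, and the
fill_cqe()/cqe_overflow()/complete_request() helpers are simplified stand-ins
invented for illustration, not the real io_uring structures.

/*
 * Minimal sketch of the post-patch completion pattern, NOT the real
 * io_uring code: the ring, lock and helper names below are simplified
 * stand-ins invented for illustration.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define CQ_ENTRIES 4			/* tiny CQ so overflow is easy to hit */

struct cqe { long user_data; int res; };

struct ring {
	struct cqe cq[CQ_ENTRIES];
	unsigned int head, tail;
	pthread_mutex_t completion_lock;
	unsigned int overflowed;	/* entries that missed the CQ */
};

/* Fast path, playing the role of the renamed io_fill_cqe_req(): post a
 * CQE only if there is room, and report failure instead of handling
 * overflow internally. */
static bool fill_cqe(struct ring *r, long user_data, int res)
{
	if (r->tail - r->head == CQ_ENTRIES)
		return false;
	r->cq[r->tail++ % CQ_ENTRIES] = (struct cqe){ user_data, res };
	return true;
}

/* Slow path, standing in for io_req_cqe_overflow(): the caller invokes
 * it explicitly, under whatever lock its context requires. */
static void cqe_overflow(struct ring *r)
{
	r->overflowed++;
}

/* Caller pattern after the patch: try the fast path, fall back by hand. */
static void complete_request(struct ring *r, long user_data, int res)
{
	if (!fill_cqe(r, user_data, res)) {
		pthread_mutex_lock(&r->completion_lock);
		cqe_overflow(r);
		pthread_mutex_unlock(&r->completion_lock);
	}
}

int main(void)
{
	struct ring r = { .completion_lock = PTHREAD_MUTEX_INITIALIZER };
	long i;

	for (i = 0; i < 6; i++)
		complete_request(&r, i, 0);
	printf("posted %u, overflowed %u\n", r.tail - r.head, r.overflowed);
	return 0;
}

The point of dropping the wrapper is visible in the hunks above: with a
single caller left per site, the overflow fallback and its locking read more
clearly open-coded at the call site than hidden behind an extra helper.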