
[for-next,1/7] io_uring: add completion locking for iopoll

Message ID 84d86b5c117feda075471c5c9e65208e0dccf5d0.1669203009.git.asml.silence@gmail.com (mailing list archive)
State New
Series iopoll cqe posting fixes

Commit Message

Pavel Begunkov Nov. 23, 2022, 11:33 a.m. UTC
There are pieces of code that may allow iopoll to race while filling
CQEs; temporarily add spinlocking around posting events.

Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/rw.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
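
For context, a minimal userspace sketch of the pattern this patch enforces
(illustrative only, not kernel code; the ring, lock and thread names below
are made up): storing the entry and advancing the tail under one lock keeps
two posters from clobbering each other, which is what taking
ctx->completion_lock around __io_fill_cqe_req() and io_commit_cqring()
does in the hunk below.

/* Two threads post completions into a shared ring. The lock makes the
 * entry store plus the tail bump atomic as a pair, mirroring
 * completion_lock around CQE filling and io_commit_cqring().
 * All names here are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

#define RING_SZ 1024

static struct {
	unsigned tail;
	int entries[RING_SZ];
} cq;

static pthread_mutex_t cq_lock = PTHREAD_MUTEX_INITIALIZER;

static void post_cqe(int res)
{
	pthread_mutex_lock(&cq_lock);			/* completion_lock */
	cq.entries[cq.tail & (RING_SZ - 1)] = res;	/* fill the CQE */
	cq.tail++;					/* commit the ring tail */
	pthread_mutex_unlock(&cq_lock);
}

static void *poller(void *arg)
{
	for (int i = 0; i < RING_SZ / 2; i++)
		post_cqe((int)(long)arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, poller, (void *)1L);
	pthread_create(&b, NULL, poller, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("tail=%u (expect %d)\n", cq.tail, RING_SZ);
	return 0;
}

Build with gcc -pthread; removing the two lock calls typically leaves the
final tail short of 1024 under contention, i.e. lost completions.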

Patch

diff --git a/io_uring/rw.c b/io_uring/rw.c
index 1ce065709724..61c326831949 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -1049,6 +1049,7 @@  int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 	else if (!pos)
 		return 0;
 
+	spin_lock(&ctx->completion_lock);
 	prev = start;
 	wq_list_for_each_resume(pos, prev) {
 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
@@ -1063,11 +1064,11 @@  int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		req->cqe.flags = io_put_kbuf(req, 0);
 		__io_fill_cqe_req(req->ctx, req);
 	}
-
+	io_commit_cqring(ctx);
+	spin_unlock(&ctx->completion_lock);
 	if (unlikely(!nr_events))
 		return 0;
 
-	io_commit_cqring(ctx);
 	io_cqring_ev_posted_iopoll(ctx);
 	pos = start ? start->next : ctx->iopoll_list.first;
 	wq_list_cut(&ctx->iopoll_list, prev, start);