[3/5] io_uring/poll: get rid of io_poll_tw_hash_eject()

Message ID 20240930204018.109617-4-axboe@kernel.dk (mailing list archive)
State New
Series Poll cleanups and unlocked table removal

Commit Message

Jens Axboe Sept. 30, 2024, 8:37 p.m. UTC
It serves no purpose anymore; all it does is delete the hash list
entry. task_work always has the ring locked.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/poll.c | 17 ++---------------
 1 file changed, 2 insertions(+), 15 deletions(-)
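
The commit's reasoning is that the removed helper only existed to take
->uring_lock for callers that might not already hold it; once task_work is
guaranteed to run with the ring locked, it collapses to a bare hash_del().
Purely as an illustration of that pattern, here is a minimal user-space
sketch: ring_ctx, request, eject_entry_maybe_unlocked() and
task_work_handler() are hypothetical stand-ins (not kernel APIs), and a
pthread mutex plays the role of ->uring_lock.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins; not io_uring structures. */
struct request {
	struct request *prev, *next;	/* hash-bucket linkage */
};

struct ring_ctx {
	pthread_mutex_t lock;		/* plays the role of ->uring_lock */
	struct request *head;		/* a single hash bucket, for brevity */
};

/* Old shape: the helper takes the lock in case the caller did not. */
static void eject_entry_maybe_unlocked(struct ring_ctx *ctx,
				       struct request *req,
				       int caller_holds_lock)
{
	if (!caller_holds_lock)
		pthread_mutex_lock(&ctx->lock);
	if (req->prev)
		req->prev->next = req->next;
	else
		ctx->head = req->next;
	if (req->next)
		req->next->prev = req->prev;
	if (!caller_holds_lock)
		pthread_mutex_unlock(&ctx->lock);
}

/*
 * New shape: the task_work-like caller always holds ctx->lock, so no
 * conditional locking helper is needed and the unlink is done inline.
 */
static void task_work_handler(struct ring_ctx *ctx, struct request *req)
{
	pthread_mutex_lock(&ctx->lock);		/* always held here */
	/* ... completion processing ... */
	if (req->prev)
		req->prev->next = req->next;
	else
		ctx->head = req->next;
	if (req->next)
		req->next->prev = req->prev;
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct ring_ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct request a = { 0 }, b = { 0 };

	/* bucket: head -> b -> a */
	ctx.head = &a;
	a.prev = &b;
	b.next = &a;
	ctx.head = &b;

	eject_entry_maybe_unlocked(&ctx, &a, 0);	/* old-style helper */
	task_work_handler(&ctx, &b);			/* new inline removal */
	printf("bucket empty: %s\n", ctx.head ? "no" : "yes");
	return 0;
}

In the actual patch below, the "new shape" corresponds to the inline
hash_del() under the comment noting that task_work always holds ->uring_lock.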

Patch

diff --git a/io_uring/poll.c b/io_uring/poll.c
index 69382da48c00..a7d7fa844729 100644
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -128,20 +128,6 @@  static void io_poll_req_insert(struct io_kiocb *req)
 	hlist_add_head(&req->hash_node, &table->hbs[index].list);
 }
 
-static void io_poll_tw_hash_eject(struct io_kiocb *req, struct io_tw_state *ts)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-
-	/*
-	 * ->cancel_table_locked is protected by ->uring_lock in
-	 * contrast to per bucket spinlocks. Likely, tctx_task_work()
-	 * already grabbed the mutex for us, but there is a chance it
-	 * failed.
-	 */
-	io_tw_lock(ctx, ts);
-	hash_del(&req->hash_node);
-}
-
 static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
 {
 	poll->head = NULL;
@@ -336,7 +322,8 @@  void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts)
 		return;
 	}
 	io_poll_remove_entries(req);
-	io_poll_tw_hash_eject(req, ts);
+	/* task_work always has ->uring_lock held */
+	hash_del(&req->hash_node);
 
 	if (req->opcode == IORING_OP_POLL_ADD) {
 		if (ret == IOU_POLL_DONE) {