
[1/1] io_uring: avoid normal tw intermediate fallback

Message ID d1cd472cec2230c66bd1c8d412a5833f0af75384.1730772720.git.asml.silence@gmail.com (mailing list archive)
State New
Series [1/1] io_uring: avoid normal tw intermediate fallback

Commit Message

Pavel Begunkov Nov. 5, 2024, 2:12 a.m. UTC
When a DEFER_TASKRUN io_uring is terminating, it requeues deferred task
work items as normal tw, which can in turn fall back to kthread
execution. Avoid this extra step and always push them to the fallback
kthread.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/io_uring.c | 21 ++++++++++-----------
 io_uring/io_uring.h |  2 +-
 2 files changed, 11 insertions(+), 12 deletions(-)
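
The refactor is easiest to read with the pieces side by side. Below is a
condensed sketch of the post-patch shape, assembled from the hunks in this
patch: the walk over a detached request list moves into __io_fallback_tw(),
io_fallback_tw() becomes a thin wrapper for the normal-tw case, and
io_move_task_work_from_local() feeds the DEFER_TASKRUN work_llist into the
same helper instead of requeueing each request as normal task work. Note this
is a sketch, not verbatim kernel source: the queueing onto the per-ctx
fallback_llist/fallback_work is taken from the pre-existing fallback logic and
is not visible in this hunk, and the extra flushing the real helper does when
sync is true is elided.

/* Walk an already-detached llist and hand each request to its ring's
 * fallback worker (sync flushing elided in this sketch). */
static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
{
	while (node) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		node = node->next;
		/* Queue on the ring's fallback list and kick its worker
		 * (reproduced from the pre-existing fallback code). */
		if (llist_add(&req->io_task_work.node,
			      &req->ctx->fallback_llist))
			schedule_delayed_work(&req->ctx->fallback_work, 1);
	}
}

/* Normal-tw fallback: detach this task's pending list, then reuse the helper. */
static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
	struct llist_node *node = llist_del_all(&tctx->task_list);

	__io_fallback_tw(node, sync);
}

/* DEFER_TASKRUN teardown: previously each request was requeued as normal
 * task work (which could still end up in the fallback); after the patch
 * the detached work_llist goes to the fallback path directly. */
static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
	struct llist_node *node = llist_del_all(&ctx->work_llist);

	__io_fallback_tw(node, false);
}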

Comments

Jens Axboe Nov. 5, 2024, 12:53 p.m. UTC | #1
On Tue, 05 Nov 2024 02:12:33 +0000, Pavel Begunkov wrote:
> When a DEFER_TASKRUN io_uring is terminating, it requeues deferred task
> work items as normal tw, which can in turn fall back to kthread
> execution. Avoid this extra step and always push them to the fallback
> kthread.
> 
> 

Applied, thanks!

[1/1] io_uring: avoid normal tw intermediate fallback
      commit: 1e891bb8c4d0fe2d8c008d9d96d7e29d7f86f5e2

Best regards,
Jens Axboe

Patch

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f34fa1ead2cf..219977f8f844 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1199,9 +1199,8 @@  struct llist_node *io_handle_tw_list(struct llist_node *node,
 	return node;
 }
 
-static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+static __cold void __io_fallback_tw(struct llist_node *node, bool sync)
 {
-	struct llist_node *node = llist_del_all(&tctx->task_list);
 	struct io_ring_ctx *last_ctx = NULL;
 	struct io_kiocb *req;
 
@@ -1227,6 +1226,13 @@  static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
 	}
 }
 
+static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
+{
+	struct llist_node *node = llist_del_all(&tctx->task_list);
+
+	__io_fallback_tw(node, sync);
+}
+
 struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
 				      unsigned int max_entries,
 				      unsigned int *count)
@@ -1380,16 +1386,9 @@  void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
 
 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
 {
-	struct llist_node *node;
+	struct llist_node *node = llist_del_all(&ctx->work_llist);
 
-	node = llist_del_all(&ctx->work_llist);
-	while (node) {
-		struct io_kiocb *req = container_of(node, struct io_kiocb,
-						    io_task_work.node);
-
-		node = node->next;
-		io_req_normal_work_add(req);
-	}
+	__io_fallback_tw(node, false);
 }
 
 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index 00409505bf07..57b0d0209097 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -137,7 +137,7 @@  static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
 		 * Not from an SQE, as those cannot be submitted, but via
 		 * updating tagged resources.
 		 */
-		if (ctx->submitter_task->flags & PF_EXITING)
+		if (percpu_ref_is_dying(&ctx->refs))
 			lockdep_assert(current_work());
 		else
 			lockdep_assert(current == ctx->submitter_task);
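
The io_uring.h hunk changes what the CQ-lock lockdep helper keys on: instead
of checking whether the submitter task has PF_EXITING set, it checks whether
the ring's refs are already marked dying. Presumably this is needed because,
with this patch, deferred work on a dying ring is handed straight to the
fallback kworker, so completions can be posted from workqueue context before
the submitter task itself is exiting. A condensed sketch of the assertion
after the patch, reconstructed from the context lines above (the real helper
in io_uring.h has additional compile-time guards and branches not shown
here):

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
	/*
	 * Not from an SQE, as those cannot be submitted, but via
	 * updating tagged resources.
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		/* Ring teardown: completions may come from a fallback kworker. */
		lockdep_assert(current_work());
	else
		lockdep_assert(current == ctx->submitter_task);
}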