--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -489,7 +489,11 @@ void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use)
* procedure rather than attempt to run this request (or create a new
* worker for it).
*/
- if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
+ WARN_ON_ONCE(!io_ring_ref_is_dying(req->ctx) &&
+ !same_thread_group(req->task, current));
+
+ if (!same_thread_group(req->task, current) ||
+ io_ring_ref_is_dying(req->ctx))
req->work.flags |= IO_WQ_WORK_CANCEL;
trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
@@ -1354,8 +1358,8 @@ static void io_req_normal_work_add(struct io_kiocb *req)
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
-
- if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
+ if (!io_ring_ref_is_dying(ctx) &&
+ !task_work_add(req->task, &tctx->task_work, ctx->notify_method))
return;
io_fallback_tw(tctx, false);
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -10,6 +10,7 @@
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"
+#include "refs.h"
#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -94,7 +95,7 @@ bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
lockdep_assert_held(&ctx->uring_lock); \
} else if (!ctx->task_complete) { \
lockdep_assert_held(&ctx->completion_lock); \
- } else if (ctx->submitter_task->flags & PF_EXITING) { \
+ } else if (io_ring_ref_is_dying(ctx)) { \
lockdep_assert(current_work()); \
} else { \
lockdep_assert(current == ctx->submitter_task); \
Don't gate this on the task exiting (PF_EXITING) flag; it's generally not
a good idea to key cancelation off that anyway. Once the ring starts going
through teardown, its ref is marked as dying. Use that as the
fallback/cancel mechanism instead.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/io_uring.c | 10 +++++++---
 io_uring/io_uring.h |  3 ++-
 2 files changed, 9 insertions(+), 4 deletions(-)
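
[Editor's note: io_ring_ref_is_dying() lives in "refs.h" (now included by
io_uring.h) and is not part of this two-file diff, so its exact body is an
assumption here. A minimal sketch of what it is presumed to look like,
keying off the ring's percpu reference that is killed when teardown starts:

	/*
	 * Sketch only -- assumed shape of the helper provided by refs.h.
	 * The ring context owns a percpu_ref (ctx->refs); once ring
	 * teardown kills that ref, percpu_ref_is_dying() reports true,
	 * which is what the new cancel/fallback checks key off instead
	 * of the task's PF_EXITING flag.
	 */
	static inline bool io_ring_ref_is_dying(struct io_ring_ctx *ctx)
	{
		return percpu_ref_is_dying(&ctx->refs);
	}

If that assumption holds, all three call sites above observe the dying
state as soon as the ring ref has been killed at the start of teardown,
independent of which task happens to be exiting.]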