@@ -528,7 +528,11 @@ static void io_queue_iowq(struct io_kiocb *req)
 	 * procedure rather than attempt to run this request (or create a new
 	 * worker for it).
 	 */
-	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
+	WARN_ON_ONCE(!io_ring_ref_is_dying(req->ctx) &&
+		     !same_thread_group(req->task, current));
+
+	if (!same_thread_group(req->task, current) ||
+	    io_ring_ref_is_dying(req->ctx))
 		req->work.flags |= IO_WQ_WORK_CANCEL;
 
 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
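Both new checks rely on io_ring_ref_is_dying(), which the patch makes visible here by including "refs.h" (see the io_uring.h hunk below). The helper's definition is not part of this patch; assuming the ring's lifetime is tracked by the percpu ref in ctx->refs, which is killed when teardown begins, a minimal sketch would be:

static inline bool io_ring_ref_is_dying(struct io_ring_ctx *ctx)
{
	/* true once ring teardown has killed ctx->refs */
	return percpu_ref_is_dying(&ctx->refs);
}

With that in place, io_queue_iowq() still warns if a request is queued from a foreign thread group on a live ring, but on a dying ring it quietly marks the work for cancelation instead of tripping the WARN.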
@@ -1196,7 +1200,8 @@ static void io_req_normal_work_add(struct io_kiocb *req)
 		return;
 	}
 
-	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
+	if (!io_ring_ref_is_dying(ctx) &&
+	    !task_work_add(req->task, &tctx->task_work, ctx->notify_method))
 		return;
 
 	io_fallback_tw(tctx, false);
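If the ring is dying, task_work_add() is no longer attempted at all and the request goes straight to the fallback path; previously the fallback was only taken when task_work_add() itself failed, e.g. because the task was already exiting. For reference, here is a simplified sketch of what mainline io_fallback_tw() does; the "sync" flush handling is elided, and details may differ across kernel versions:

static void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
	struct llist_node *node = llist_del_all(&tctx->task_list);
	struct io_kiocb *req;

	while (node) {
		req = container_of(node, struct io_kiocb, io_task_work.node);
		node = node->next;
		/*
		 * Punt each pending request to its ring's fallback
		 * workqueue, so the work still runs (and gets canceled)
		 * even though the task will never process task_work again.
		 */
		if (llist_add(&req->io_task_work.node,
			      &req->ctx->fallback_llist))
			schedule_delayed_work(&req->ctx->fallback_work, 1);
	}
}

The remaining two hunks are against io_uring/io_uring.h.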
@@ -11,6 +11,7 @@
 #include "io-wq.h"
 #include "slist.h"
 #include "filetable.h"
+#include "refs.h"
 
 #ifndef CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -122,7 +123,7 @@ static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
 		 * Not from an SQE, as those cannot be submitted, but via
 		 * updating tagged resources.
 		 */
-		if (ctx->submitter_task->flags & PF_EXITING)
+		if (io_ring_ref_is_dying(ctx))
 			lockdep_assert(current_work());
 		else
 			lockdep_assert(current == ctx->submitter_task);
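Here the PF_EXITING check on the submitter task is replaced by the same ring-dying check used in the cancel paths above. Reconstructed for context from the mainline shape of this helper (not part of the patch, and subject to version drift), the full assertion after this change looks roughly like:

static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
#if defined(CONFIG_PROVE_LOCKING)
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been setup with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (io_ring_ref_is_dying(ctx))
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
#endif
}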
Don't gate this on the task's PF_EXITING flag; it's generally not a good
idea to rely on that anyway. Once the ring starts going through teardown,
its ref is marked as dying. Use that as the fallback/cancel mechanism
instead.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/io_uring.c | 9 +++++++--
 io_uring/io_uring.h | 3 ++-
 2 files changed, 9 insertions(+), 3 deletions(-)