diff --git a/io_uring/cancel.c b/io_uring/cancel.c
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -78,7 +78,8 @@ static int io_async_cancel_one(struct io_uring_task *tctx,
return ret;
}
-int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
+int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd,
+ unsigned issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
@@ -93,7 +94,7 @@ int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd)
if (!ret)
return 0;
- ret = io_poll_cancel(ctx, cd);
+ ret = io_poll_cancel(ctx, cd, issue_flags);
if (ret != -ENOENT)
return ret;
@@ -136,7 +137,7 @@ static int __io_async_cancel(struct io_cancel_data *cd, struct io_kiocb *req,
int ret, nr = 0;
do {
- ret = io_try_cancel(req, cd);
+ ret = io_try_cancel(req, cd, issue_flags);
if (ret == -ENOENT)
break;
if (!all)
diff --git a/io_uring/cancel.h b/io_uring/cancel.h
--- a/io_uring/cancel.h
+++ b/io_uring/cancel.h
@@ -3,5 +3,6 @@
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags);
-int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd);
+int io_try_cancel(struct io_kiocb *req, struct io_cancel_data *cd,
+ unsigned int issue_flags);
void init_hash_table(struct io_hash_table *table, unsigned size);
diff --git a/io_uring/poll.c b/io_uring/poll.c
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -643,7 +643,8 @@ static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
return req ? 0 : -ENOENT;
}
-int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
+int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+ unsigned issue_flags)
{
return __io_poll_cancel(ctx, cd, &ctx->cancel_table);
}
diff --git a/io_uring/poll.h b/io_uring/poll.h
--- a/io_uring/poll.h
+++ b/io_uring/poll.h
@@ -24,7 +24,8 @@ int io_poll_add(struct io_kiocb *req, unsigned int issue_flags);
int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags);
-int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
+int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
+ unsigned issue_flags);
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
bool cancel_all);
diff --git a/io_uring/timeout.c b/io_uring/timeout.c
--- a/io_uring/timeout.c
+++ b/io_uring/timeout.c
@@ -262,6 +262,7 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
{
+ unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
struct io_timeout *timeout = io_kiocb_to_cmd(req);
struct io_kiocb *prev = timeout->prev;
int ret = -ENOENT;
@@ -273,7 +274,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
.data = prev->cqe.user_data,
};
- ret = io_try_cancel(req, &cd);
+ ret = io_try_cancel(req, &cd, issue_flags);
}
io_req_set_res(req, ret ?: -ETIME, 0);
io_req_complete_post(req);
Poll cancellation will soon need to grab ->uring_lock internally, so pass
the locking state, i.e. issue_flags, into the cancellation functions.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/cancel.c  | 7 ++++---
 io_uring/cancel.h  | 3 ++-
 io_uring/poll.c    | 3 ++-
 io_uring/poll.h    | 3 ++-
 io_uring/timeout.c | 3 ++-
 5 files changed, 12 insertions(+), 7 deletions(-)
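
Not part of this patch, but for context: issue_flags carries the caller's
locking state (IO_URING_F_UNLOCKED set when ->uring_lock is not held, e.g.
when a task-work callback runs with *locked == false, as in
io_req_task_link_timeout() above). Below is a minimal sketch of how a later
change could consume it inside poll cancellation, following the
conditional-lock pattern io_uring already uses elsewhere; the function name
is hypothetical and only illustrates the intended use of issue_flags:

/*
 * Hypothetical sketch only: take ->uring_lock when the caller does not
 * already hold it, as indicated by IO_URING_F_UNLOCKED in issue_flags.
 */
static int io_poll_cancel_sketch(struct io_ring_ctx *ctx,
				 struct io_cancel_data *cd,
				 unsigned issue_flags)
{
	int ret;

	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);

	/* walk the cancellation hash and try to cancel a matching request */
	ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);

	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
	return ret;
}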