@@ -44,9 +44,9 @@ static bool io_cancel_cb(struct io_wq_work *work, void *data)
return false;
}
if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
- if (cd->seq == req->work.cancel_seq)
+ if (cd->seq == req->cancel_seq)
return false;
- req->work.cancel_seq = cd->seq;
+ req->cancel_seq = cd->seq;
}
return true;
}
@@ -155,7 +155,6 @@ struct io_wq_work_node *wq_stack_extract(struct io_wq_work_node *stack)
struct io_wq_work {
struct io_wq_work_node list;
unsigned flags;
- int cancel_seq;
};
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
@@ -849,7 +849,7 @@ static void io_prep_async_work(struct io_kiocb *req)
req->work.list.next = NULL;
req->work.flags = 0;
- req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
+ req->cancel_seq = atomic_read(&ctx->cancel_seq);
if (req->flags & REQ_F_FORCE_ASYNC)
req->work.flags |= IO_WQ_WORK_CONCURRENT;
@@ -486,6 +486,7 @@ struct io_kiocb {
/* custom credentials, valid IFF REQ_F_CREDS is set */
const struct cred *creds;
struct io_wq_work work;
+ int cancel_seq;
};
struct io_cancel_data {
@@ -405,7 +405,7 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
int v;
INIT_HLIST_NODE(&req->hash_node);
- req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
+ req->cancel_seq = atomic_read(&ctx->cancel_seq);
io_init_poll_iocb(poll, mask, io_poll_wake);
poll->file = req->file;
@@ -565,9 +565,9 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
if (poll_only && req->opcode != IORING_OP_POLL_ADD)
continue;
if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
- if (cd->seq == req->work.cancel_seq)
+ if (cd->seq == req->cancel_seq)
continue;
- req->work.cancel_seq = cd->seq;
+ req->cancel_seq = cd->seq;
}
return req;
}
@@ -589,9 +589,9 @@ static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
req->file != cd->file)
continue;
- if (cd->seq == req->work.cancel_seq)
+ if (cd->seq == req->cancel_seq)
continue;
- req->work.cancel_seq = cd->seq;
+ req->cancel_seq = cd->seq;
return req;
}
}
@@ -227,9 +227,9 @@ static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
cd->data != tmp->cqe.user_data)
continue;
if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
- if (cd->seq == tmp->work.cancel_seq)
+ if (cd->seq == tmp->cancel_seq)
continue;
- tmp->work.cancel_seq = cd->seq;
+ tmp->cancel_seq = cd->seq;
}
req = tmp;
break;
io-wq doesn't use ->cancel_seq; it's only important to io_uring and should be stored there. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> --- io_uring/cancel.c | 4 ++-- io_uring/io-wq.h | 1 - io_uring/io_uring.c | 2 +- io_uring/io_uring_types.h | 1 + io_uring/poll.c | 10 +++++----- io_uring/timeout.c | 4 ++-- 6 files changed, 11 insertions(+), 11 deletions(-)