@@ -1261,7 +1261,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 			return -EFAULT;
 		idx = array_index_nospec(idx, ctx->nr_user_bufs);
 		req->imu = READ_ONCE(ctx->user_bufs[idx]);
-		io_req_set_rsrc_node(notif, ctx, 0);
+		io_req_set_rsrc_node(notif, ctx);
 	}
 
 	if (req->opcode == IORING_OP_SEND_ZC) {
@@ -107,14 +107,10 @@ static inline void __io_req_set_rsrc_node(struct io_kiocb *req,
 }
 
 static inline void io_req_set_rsrc_node(struct io_kiocb *req,
-					struct io_ring_ctx *ctx,
-					unsigned int issue_flags)
+					struct io_ring_ctx *ctx)
 {
-	if (!req->rsrc_node) {
-		io_ring_submit_lock(ctx, issue_flags);
+	if (!req->rsrc_node)
 		__io_req_set_rsrc_node(req, ctx);
-		io_ring_submit_unlock(ctx, issue_flags);
-	}
 }
 
 static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
@@ -343,7 +343,7 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
 		return -EFAULT;
 	index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
 	imu = ctx->user_bufs[index];
-	io_req_set_rsrc_node(req, ctx, 0);
+	io_req_set_rsrc_node(req, ctx);
 
 	io = req->async_data;
 	ret = io_import_fixed(ddir, &io->iter, imu, rw->addr, rw->len);
@@ -220,7 +220,7 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
		 * being called. This prevents destruction of the mapped buffer
		 * we'll need at actual import time.
		 */
-		io_req_set_rsrc_node(req, ctx, 0);
+		io_req_set_rsrc_node(req, ctx);
 	}
 
 	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
All callers already hold the ring lock and hence are passing '0';
remove the argument and the conditional locking that it controlled.

Suggested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---

Took a second look at this, and it does indeed fall out nicely if done
prior to the (buggy) net conversion. Fixed that one up too, fwiw.
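
For reference, the conditional locking that the dropped issue_flags argument
controlled only ever mattered on the unlocked (io-wq offload) issue path.
Below is a minimal paraphrase of the two helpers the old io_req_set_rsrc_node()
called, assuming the usual IO_URING_F_UNLOCKED semantics; it is a sketch from
memory, not the verbatim io_uring source:

/*
 * Paraphrased sketch of the conditional-lock helpers whose calls this
 * patch removes; not the exact kernel implementation.
 */
static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned int issue_flags)
{
	/* Only an unlocked (io-wq) issue path needs to take the mutex. */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned int issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

With issue_flags hard-coded to 0, neither branch is taken, so the lock/unlock
pair was already a no-op for every caller converted above; removing the
argument simply encodes the "ring lock must already be held at prep time"
requirement in the signature, and a lockdep annotation in
__io_req_set_rsrc_node() (if present in this tree) still catches misuse.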