@@ -240,10 +240,12 @@ static int __io_sync_cancel(struct io_uring_task *tctx,
/* fixed must be grabbed every time since we drop the uring_lock */
if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
(cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
- if (unlikely(fd >= ctx->file_table.data.nr))
+ struct io_rsrc_node *node;
+
+ node = io_rsrc_node_lookup(&ctx->file_table.data, &fd);
+ if (unlikely(!node))
return -EBADF;
- fd = array_index_nospec(fd, ctx->file_table.data.nr);
- cd->file = io_file_from_index(&ctx->file_table, fd);
+ cd->file = io_slot_file(node);
if (!cd->file)
return -EBADF;
}
@@ -55,10 +55,10 @@ void io_free_file_tables(struct io_file_table *table)
}
static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
- u32 slot_index)
+ int slot_index)
__must_hold(&req->ctx->uring_lock)
{
- struct io_rsrc_node *node;
+ struct io_rsrc_node *node, *old_node;
if (io_is_uring_fops(file))
return -EBADF;
@@ -72,9 +72,9 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
if (IS_ERR(node))
return -ENOMEM;
- slot_index = array_index_nospec(slot_index, ctx->file_table.data.nr);
- if (ctx->file_table.data.nodes[slot_index])
- io_put_rsrc_node(ctx->file_table.data.nodes[slot_index]);
+ old_node = io_rsrc_node_lookup(&ctx->file_table.data, &slot_index);
+ if (old_node)
+ io_put_rsrc_node(old_node);
else
io_file_bitmap_set(&ctx->file_table, slot_index);
@@ -84,7 +84,7 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file,
}
int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file,
- unsigned int file_slot)
+ int file_slot)
{
bool alloc_slot = file_slot == IORING_FILE_INDEX_ALLOC;
int ret;
@@ -108,7 +108,7 @@ int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file,
* fput() is called correspondingly.
*/
int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
- struct file *file, unsigned int file_slot)
+ struct file *file, int file_slot)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
@@ -122,17 +122,19 @@ int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
return ret;
}
-int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset)
+int io_fixed_fd_remove(struct io_ring_ctx *ctx, int offset)
{
+ struct io_rsrc_node *node;
+
if (unlikely(!ctx->file_table.data.nr))
return -ENXIO;
if (offset >= ctx->file_table.data.nr)
return -EINVAL;
- offset = array_index_nospec(offset, ctx->file_table.data.nr);
- if (!ctx->file_table.data.nodes[offset])
+ node = io_rsrc_node_lookup(&ctx->file_table.data, &offset);
+ if (!node)
return -EBADF;
- io_put_rsrc_node(ctx->file_table.data.nodes[offset]);
+ io_put_rsrc_node(node);
ctx->file_table.data.nodes[offset] = NULL;
io_file_bitmap_clear(&ctx->file_table, offset);
return 0;
@@ -10,10 +10,10 @@ bool io_alloc_file_tables(struct io_file_table *table, unsigned nr_files);
void io_free_file_tables(struct io_file_table *table);
int io_fixed_fd_install(struct io_kiocb *req, unsigned int issue_flags,
- struct file *file, unsigned int file_slot);
+ struct file *file, int file_slot);
int __io_fixed_fd_install(struct io_ring_ctx *ctx, struct file *file,
- unsigned int file_slot);
-int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset);
+ int file_slot);
+int io_fixed_fd_remove(struct io_ring_ctx *ctx, int offset);
int io_register_file_alloc_range(struct io_ring_ctx *ctx,
struct io_uring_file_index_range __user *arg);
@@ -52,8 +52,9 @@ static inline struct file *io_slot_file(struct io_rsrc_node *node)
static inline struct file *io_file_from_index(struct io_file_table *table,
int index)
{
- struct io_rsrc_node *node = table->data.nodes[index];
+ struct io_rsrc_node *node;
+ node = io_rsrc_node_lookup(&table->data, &index);
if (node)
return io_slot_file(node);
return NULL;
@@ -1879,7 +1879,7 @@ inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
struct file *file = NULL;
io_ring_submit_lock(ctx, issue_flags);
- node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
+ node = io_rsrc_node_lookup(&ctx->file_table.data, &fd);
if (node) {
io_req_assign_rsrc_node(req, node);
req->flags |= io_slot_flags(node);
@@ -25,7 +25,7 @@ struct io_msg {
u64 user_data;
u32 len;
u32 cmd;
- u32 src_fd;
+ int src_fd;
union {
u32 dst_fd;
u32 cqe_flags;
@@ -172,22 +172,24 @@ static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
return __io_msg_ring_data(target_ctx, msg, issue_flags);
}
-static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
+static int io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
struct io_ring_ctx *ctx = req->ctx;
- struct file *file = NULL;
- int idx = msg->src_fd;
+ struct io_rsrc_node *node;
+ int ret = -EBADF;
io_ring_submit_lock(ctx, issue_flags);
- if (likely(idx < ctx->file_table.data.nr)) {
- idx = array_index_nospec(idx, ctx->file_table.data.nr);
- file = io_file_from_index(&ctx->file_table, idx);
- if (file)
- get_file(file);
+ node = io_rsrc_node_lookup(&ctx->file_table.data, &msg->src_fd);
+ if (node) {
+ msg->src_file = io_slot_file(node);
+ if (msg->src_file)
+ get_file(msg->src_file);
+ req->flags |= REQ_F_NEED_CLEANUP;
+ ret = 0;
}
io_ring_submit_unlock(ctx, issue_flags);
- return file;
+ return ret;
}
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
@@ -256,7 +258,6 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
struct io_ring_ctx *target_ctx = req->file->private_data;
struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
struct io_ring_ctx *ctx = req->ctx;
- struct file *src_file = msg->src_file;
if (msg->len)
return -EINVAL;
@@ -264,12 +265,10 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
return -EINVAL;
if (target_ctx->flags & IORING_SETUP_R_DISABLED)
return -EBADFD;
- if (!src_file) {
- src_file = io_msg_grab_file(req, issue_flags);
- if (!src_file)
- return -EBADF;
- msg->src_file = src_file;
- req->flags |= REQ_F_NEED_CLEANUP;
+ if (!msg->src_file) {
+ int ret = io_msg_grab_file(req, issue_flags);
+ if (unlikely(ret))
+ return ret;
}
if (io_msg_need_remote(target_ctx))
@@ -1343,13 +1343,12 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
struct io_ring_ctx *ctx = req->ctx;
struct io_rsrc_node *node;
- int idx;
+ int idx = sr->buf_index;
ret = -EFAULT;
io_ring_submit_lock(ctx, issue_flags);
- if (sr->buf_index < ctx->buf_table.nr) {
- idx = array_index_nospec(sr->buf_index, ctx->buf_table.nr);
- node = ctx->buf_table.nodes[idx];
+ node = io_rsrc_node_lookup(&ctx->buf_table, &idx);
+ if (node) {
io_req_assign_rsrc_node(sr->notif, node);
ret = 0;
}
@@ -62,13 +62,11 @@ int io_nop(struct io_kiocb *req, unsigned int issue_flags)
if (nop->flags & IORING_NOP_FIXED_BUFFER) {
struct io_ring_ctx *ctx = req->ctx;
struct io_rsrc_node *node;
- int idx;
ret = -EFAULT;
io_ring_submit_lock(ctx, issue_flags);
- if (nop->buffer < ctx->buf_table.nr) {
- idx = array_index_nospec(nop->buffer, ctx->buf_table.nr);
- node = READ_ONCE(ctx->buf_table.nodes[idx]);
+ node = io_rsrc_node_lookup(&ctx->buf_table, &nop->buffer);
+ if (node) {
io_req_assign_rsrc_node(req, node);
ret = 0;
}
@@ -186,6 +186,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
return -EINVAL;
for (done = 0; done < nr_args; done++) {
+ struct io_rsrc_node *node;
u64 tag = 0;
if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
@@ -200,9 +201,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
if (fd == IORING_REGISTER_FILES_SKIP)
continue;
- i = array_index_nospec(up->offset + done, ctx->file_table.data.nr);
- if (ctx->file_table.data.nodes[i]) {
- io_put_rsrc_node(ctx->file_table.data.nodes[i]);
+ i = up->offset + done;
+ node = io_rsrc_node_lookup(&ctx->file_table.data, &i);
+ if (node) {
+ io_put_rsrc_node(node);
ctx->file_table.data.nodes[i] = NULL;
io_file_bitmap_clear(&ctx->file_table, i);
}
@@ -965,13 +967,14 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
goto out_unlock;
for (i = 0; i < nbufs; i++) {
- struct io_rsrc_node *src_node = src_ctx->buf_table.nodes[i];
- struct io_rsrc_node *dst_node;
+ struct io_rsrc_node *dst_node, *src_node;
+ int index = i;
+ src_node = io_rsrc_node_lookup(&src_ctx->buf_table, &index);
if (src_node == rsrc_empty_node) {
dst_node = rsrc_empty_node;
} else {
- dst_node = io_rsrc_node_alloc(ctx, &data, i, IORING_RSRC_BUFFER);
+ dst_node = io_rsrc_node_alloc(ctx, &data, index, IORING_RSRC_BUFFER);
if (!dst_node)
goto out_put_free;
@@ -75,11 +75,11 @@ extern const struct io_rsrc_node empty_node;
#define rsrc_empty_node (struct io_rsrc_node *) &empty_node
static inline struct io_rsrc_node *io_rsrc_node_lookup(struct io_rsrc_data *data,
- int index)
+ int *index)
{
- if (index < data->nr) {
- index = array_index_nospec(index, data->nr);
- return data->nodes[index];
+ if (*index < data->nr) {
+ *index = array_index_nospec(*index, data->nr);
+ return data->nodes[*index];
}
return NULL;
}
@@ -332,17 +332,16 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
struct io_ring_ctx *ctx = req->ctx;
struct io_rsrc_node *node;
struct io_async_rw *io;
- u16 index;
- int ret;
+ int index, ret;
ret = io_prep_rw(req, sqe, ddir, false);
if (unlikely(ret))
return ret;
- if (unlikely(req->buf_index >= ctx->buf_table.nr))
+ index = req->buf_index;
+ node = io_rsrc_node_lookup(&ctx->buf_table, &index);
+ if (!node)
return -EFAULT;
- index = array_index_nospec(req->buf_index, ctx->buf_table.nr);
- node = ctx->buf_table.nodes[index];
io_req_assign_rsrc_node(req, node);
io = req->async_data;
@@ -66,7 +66,7 @@ static struct file *io_splice_get_file(struct io_kiocb *req,
return io_file_get_normal(req, sp->splice_fd_in);
io_ring_submit_lock(ctx, issue_flags);
- node = io_rsrc_node_lookup(&ctx->file_table.data, sp->splice_fd_in);
+ node = io_rsrc_node_lookup(&ctx->file_table.data, &sp->splice_fd_in);
if (node) {
node->refs++;
sp->rsrc_node = node;
@@ -209,18 +209,18 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (ioucmd->flags & IORING_URING_CMD_FIXED) {
struct io_ring_ctx *ctx = req->ctx;
- u16 index;
+ int index = req->buf_index;
+ struct io_rsrc_node *node;
- index = READ_ONCE(sqe->buf_index);
- if (unlikely(index >= ctx->buf_table.nr))
+ node = io_rsrc_node_lookup(&ctx->buf_table, &index);
+ if (unlikely(!node))
return -EFAULT;
- req->buf_index = array_index_nospec(index, ctx->buf_table.nr);
/*
* Pi node upfront, prior to io_uring_cmd_import_fixed()
* being called. This prevents destruction of the mapped buffer
* we'll need at actual import time.
*/
- io_req_assign_rsrc_node(req, ctx->buf_table.nodes[req->buf_index]);
+ io_req_assign_rsrc_node(req, node);
}
ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
Various spots open-code this helper; use the provided one instead. The helper now returns the index into the array, adjusted (clamped with array_index_nospec) if needed, for the cases where the caller needs that. Signed-off-by: Jens Axboe <axboe@kernel.dk> --- io_uring/cancel.c | 8 +++++--- io_uring/filetable.c | 24 +++++++++++++----------- io_uring/filetable.h | 9 +++++---- io_uring/io_uring.c | 2 +- io_uring/msg_ring.c | 33 ++++++++++++++++----------------- io_uring/net.c | 7 +++---- io_uring/nop.c | 6 ++---- io_uring/rsrc.c | 15 +++++++++------ io_uring/rsrc.h | 8 ++++---- io_uring/rw.c | 9 ++++----- io_uring/splice.c | 2 +- io_uring/uring_cmd.c | 10 +++++----- 12 files changed, 68 insertions(+), 65 deletions(-)