@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/xarray.h>
+#include <linux/uio.h>
#include <uapi/linux/io_uring.h>
enum io_uring_cmd_flags {
@@ -36,6 +37,10 @@ struct io_uring_cmd {
u8 pdu[32]; /* available inline for free use */
};
+/*
+ * Wraps a caller-provided iov_iter describing kernel memory, so in-kernel
+ * users (per the commit message, e.g. a userspace block device driver path)
+ * can submit SQEs whose data buffer comes from this iter instead of a
+ * userspace address.
+ */
+struct io_fixed_iter {
+	struct iov_iter iter;
+};
+
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd);
@@ -65,6 +70,8 @@ static inline void io_uring_free(struct task_struct *tsk)
if (tsk->io_uring)
__io_uring_free(tsk);
}
+int io_uring_submit_sqe(int fd, const struct io_uring_sqe *sqe, u32 sqe_len,
+ const struct io_fixed_iter *iter);
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd)
@@ -96,6 +103,11 @@ static inline const char *io_uring_get_opcode(u8 opcode)
{
return "";
}
+/*
+ * Stub for !CONFIG_IO_URING builds.
+ *
+ * Must be 'static inline': this lives in a header inside the #else branch
+ * (matching the other stubs there), so a plain external definition would
+ * cause multiple-definition link errors in every translation unit that
+ * includes it.  Report -EOPNOTSUPP rather than claiming success for an
+ * SQE that was never submitted.
+ */
+static inline int io_uring_submit_sqe(int fd, const struct io_uring_sqe *sqe,
+				      u32 sqe_len,
+				      const struct io_fixed_iter *iter)
+{
+	return -EOPNOTSUPP;
+}
#endif
#endif
@@ -398,6 +398,7 @@ enum {
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
+ REQ_F_ITER_BIT,
/* not a real bit, just to check we're not overflowing the space */
__REQ_F_LAST_BIT,
@@ -467,6 +468,8 @@ enum {
REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
REQ_F_HASH_LOCKED = BIT(REQ_F_HASH_LOCKED_BIT),
+ /* buffer comes from fixed iter */
+ REQ_F_ITER = BIT(REQ_F_ITER_BIT),
};
typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
@@ -527,7 +530,7 @@ struct io_kiocb {
* and after selection it points to the buffer ID itself.
*/
u16 buf_index;
- unsigned int flags;
+ u64 flags;
struct io_cqe cqe;
@@ -540,6 +543,9 @@ struct io_kiocb {
/* store used ubuf, so we can prevent reloading */
struct io_mapped_ubuf *imu;
+ /* store fixed iter */
+ const struct io_fixed_iter *iter;
+
/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
struct io_buffer *kbuf;
@@ -2232,7 +2232,8 @@ static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
}
static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- const struct io_uring_sqe *sqe)
+ const struct io_uring_sqe *sqe,
+ const struct io_fixed_iter *iter)
__must_hold(&ctx->uring_lock)
{
struct io_submit_link *link = &ctx->submit_state.link;
@@ -2241,6 +2242,10 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
ret = io_init_req(ctx, req, sqe);
if (unlikely(ret))
return io_submit_fail_init(sqe, req, ret);
+ if (unlikely(iter)) {
+ req->iter = iter;
+ req->flags |= REQ_F_ITER;
+ }
/* don't need @sqe from now on */
trace_io_uring_submit_sqe(req, true);
@@ -2392,7 +2397,7 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
* Continue submitting even for sqe failure if the
* ring was setup with IORING_SETUP_SUBMIT_ALL
*/
- if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
+ if (unlikely(io_submit_sqe(ctx, req, sqe, NULL)) &&
!(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
left--;
break;
@@ -3272,6 +3277,54 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
return 0;
}
+/*
+ * Submit a single caller-built SQE on the io_uring instance behind @fd,
+ * optionally attaching a fixed iter (@iter) as the data buffer.
+ *
+ * Returns the number of SQEs submitted (1) on success, or a negative
+ * error code.
+ *
+ * Fixes relative to the previous version:
+ *  - the !io_is_uring_fops() failure path jumped to a label that ran
+ *    mutex_unlock() on a mutex that was never locked;
+ *  - a refill failure leaked the stale -EOPNOTSUPP placeholder instead of
+ *    a meaningful error;
+ *  - '&current' had been corrupted to HTML-entity mojibake ('¤t');
+ *  - 'ret = !io_submit_sqe(...)' returned 0 on failure and discarded the
+ *    actual error code, contradicting the stated contract.
+ *
+ * NOTE(review): the inflight counter / ctx ref taken here are presumably
+ * dropped by the request's normal completion path — confirm the failure
+ * path of io_submit_sqe() releases them.
+ */
+int io_uring_submit_sqe(int fd, const struct io_uring_sqe *sqe, u32 sqe_len,
+			const struct io_fixed_iter *iter)
+{
+	struct io_ring_ctx *ctx;
+	struct io_kiocb *req;
+	struct fd f;
+	int ret;
+
+	f = fdget(fd);
+	if (unlikely(!f.file))
+		return -EBADF;
+
+	if (unlikely(!io_is_uring_fops(f.file))) {
+		ret = -EBADF;
+		goto out_fput;	/* lock not taken yet: only drop the fd ref */
+	}
+	ctx = f.file->private_data;
+
+	mutex_lock(&ctx->uring_lock);
+	if (unlikely(!io_alloc_req_refill(ctx))) {
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
+	req = io_alloc_req(ctx);
+	if (unlikely(!req)) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+	if (!percpu_ref_tryget_many(&ctx->refs, 1)) {
+		kmem_cache_free(req_cachep, req);
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
+	percpu_counter_add(&current->io_uring->inflight, 1);
+	refcount_add(1, &current->usage);
+
+	/* 0 from io_submit_sqe() means one SQE went in; else propagate. */
+	ret = io_submit_sqe(ctx, req, sqe, iter);
+	if (!ret)
+		ret = 1;
+
+out_unlock:
+	mutex_unlock(&ctx->uring_lock);
+out_fput:
+	fdput(f);
+	return ret;
+}
+EXPORT_SYMBOL(io_uring_submit_sqe);
+
SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
u32, min_complete, u32, flags, const void __user *, argp,
size_t, argsz)
@@ -4270,7 +4323,7 @@ static int __init io_uring_init(void)
BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
- BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
+ BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(u64));
BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
@@ -1380,3 +1380,21 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
return 0;
}
+
+/*
+ * Validate that [@offset, @offset + @len) lies within @fixed_iter and hand
+ * the caller a copy of the underlying iov_iter.
+ *
+ * Returns 0 on success, -EFAULT if @fixed_iter is missing or the requested
+ * range does not fit.
+ *
+ * Fix: the old check '(offset + len) > count' could wrap around on u64
+ * overflow and accept an out-of-range request; 'len > count - offset' is
+ * overflow-safe because offset < count is established first.
+ */
+int io_import_iter(int ddir, struct iov_iter *iter,
+		   const struct io_fixed_iter *fixed_iter,
+		   u64 offset, size_t len)
+{
+	size_t count;
+
+	if (WARN_ON_ONCE(!fixed_iter))
+		return -EFAULT;
+
+	count = iov_iter_count(&fixed_iter->iter);
+	if (offset >= count || len > count - offset)
+		return -EFAULT;
+
+	/*
+	 * NOTE(review): the copied iter is neither advanced by @offset nor
+	 * truncated to @len — only the bounds are checked.  Confirm callers
+	 * really expect the full iter back.
+	 */
+	*iter = fixed_iter->iter;
+	return 0;
+}
+
+
@@ -69,6 +69,10 @@ int io_import_fixed(int ddir, struct iov_iter *iter,
struct io_mapped_ubuf *imu,
u64 buf_addr, size_t len);
+int io_import_iter(int ddir, struct iov_iter *iter,
+ const struct io_fixed_iter *fixed_iter,
+ u64 buf_addr, size_t len);
+
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
@@ -378,6 +378,13 @@ static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
return NULL;
}
+ if (unlikely(req->flags & REQ_F_ITER)) {
+ ret = io_import_iter(ddir, iter, req->iter, rw->addr, rw->len);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
+
buf = u64_to_user_ptr(rw->addr);
sqe_len = rw->len;
Currently this feature can be used by userspace block device to reduce kernel & userspace memory copy overhead. With this feature, userspace block device driver can submit and complete io requests using kernel block layer io requests's memory data, and further, by using ebpf, we can customize how sqe is initialized, how io is submitted and completed. Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com> --- include/linux/io_uring.h | 12 +++++++ include/linux/io_uring_types.h | 8 ++++- io_uring/io_uring.c | 59 ++++++++++++++++++++++++++++++++-- io_uring/rsrc.c | 18 +++++++++++ io_uring/rsrc.h | 4 +++ io_uring/rw.c | 7 ++++ 6 files changed, 104 insertions(+), 4 deletions(-)