@@ -352,13 +352,13 @@ enum io_uring_op {
* IORING_NOTIF_USAGE_ZC_COPIED if data was copied
* (at least partially).
*
- * IORING_RECVSEND_BUNDLE Used with IOSQE_BUFFER_SELECT. If set, send will
- * grab as many buffers from the buffer group ID
- * given and send them all. The completion result
- * will be the number of buffers send, with the
- * starting buffer ID in cqe->flags as per usual
- * for provided buffer usage. The buffers will be
- * contigious from the starting buffer ID.
+ * IORING_RECVSEND_BUNDLE Used with IOSQE_BUFFER_SELECT. If set, send or
+ * recv will grab as many buffers as it can from the
+ * given buffer group ID and send or receive them all.
+ * The completion result will be the number of buffers
+ * sent or received, with the starting buffer ID in
+ * cqe->flags as per usual for provided buffer usage.
+ * The buffers will be contiguous from the starting buffer ID.
*/
#define IORING_RECVSEND_POLL_FIRST (1U << 0)
#define IORING_RECV_MULTISHOT (1U << 1)
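
From userspace, the new flag travels in sqe->ioprio alongside IOSQE_BUFFER_SELECT
(the prep code below reads it via READ_ONCE(sqe->ioprio)). A minimal submission
sketch, assuming liburing-style helpers and that 'ring', 'sockfd' and the buffer
group 'bgid' have already been set up (none of this is part of the patch):

/* Sketch: arm a bundled recv against provided buffer group 'bgid' */
struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
sqe->flags |= IOSQE_BUFFER_SELECT;     /* pick from a provided buffer group */
sqe->buf_group = bgid;                 /* which group to pick from */
sqe->ioprio |= IORING_RECVSEND_BUNDLE; /* one completion may span buffers */
io_uring_submit(&ring);

IORING_RECV_MULTISHOT may be OR'ed in as well; the prep changes below accept
the combination.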
@@ -531,6 +531,7 @@ struct io_uring_params {
#define IORING_FEAT_CQE_SKIP (1U << 11)
#define IORING_FEAT_LINKED_FILE (1U << 12)
#define IORING_FEAT_REG_REG_RING (1U << 13)
+#define IORING_FEAT_RECVSEND_BUNDLE (1U << 14)
/*
* io_uring_register(2) opcodes and arguments
@@ -3982,7 +3982,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
- IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING;
+ IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
+ IORING_FEAT_RECVSEND_BUNDLE;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
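
Applications can probe for the new bit at ring setup time. A sketch, assuming
liburing's io_uring_queue_init_params() and a hypothetical use_bundles flag:

/* Sketch: detect recv/send bundle support when creating the ring */
struct io_uring_params p = { };
struct io_uring ring;
bool use_bundles = false;

if (!io_uring_queue_init_params(64, &ring, &p) &&
    (p.features & IORING_FEAT_RECVSEND_BUNDLE))
	use_bundles = true;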
@@ -760,7 +760,8 @@ int io_recvmsg_prep_async(struct io_kiocb *req)
return ret;
}
-#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
+#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
+ IORING_RECVSEND_BUNDLE)
int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
@@ -774,21 +775,14 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
sr->flags = READ_ONCE(sqe->ioprio);
- if (sr->flags & ~(RECVMSG_FLAGS))
+ if (sr->flags & ~RECVMSG_FLAGS)
return -EINVAL;
sr->msg_flags = READ_ONCE(sqe->msg_flags);
if (sr->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
if (sr->msg_flags & MSG_ERRQUEUE)
req->flags |= REQ_F_CLEAR_POLLIN;
- if (sr->flags & IORING_RECV_MULTISHOT) {
- if (!(req->flags & REQ_F_BUFFER_SELECT))
- return -EINVAL;
- if (sr->msg_flags & MSG_WAITALL)
- return -EINVAL;
- if (req->opcode == IORING_OP_RECV && sr->len)
- return -EINVAL;
- req->flags |= REQ_F_APOLL_MULTISHOT;
+ if (req->flags & REQ_F_BUFFER_SELECT) {
/*
* Store the buffer group for this multishot receive separately,
* as if we end up doing an io-wq based issue that selects a
@@ -798,6 +792,20 @@ int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
* restore it.
*/
sr->buf_group = req->buf_index;
+ req->buf_list = NULL;
+ }
+ if (sr->flags & IORING_RECV_MULTISHOT) {
+ if (!(req->flags & REQ_F_BUFFER_SELECT))
+ return -EINVAL;
+ if (sr->msg_flags & MSG_WAITALL)
+ return -EINVAL;
+ if (req->opcode == IORING_OP_RECV && sr->len)
+ return -EINVAL;
+ req->flags |= REQ_F_APOLL_MULTISHOT;
+ }
+ if (sr->flags & IORING_RECVSEND_BUNDLE) {
+ if (req->opcode == IORING_OP_RECVMSG)
+ return -EINVAL;
}
#ifdef CONFIG_COMPAT
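
The buffers a bundle consumes come from a provided buffer group, so the group
has to be populated before the recv is armed. A sketch using the liburing
buffer ring helpers (liburing >= 2.4 assumed; NBUFS, BUF_LEN, bufs[] and bgid
are placeholders, not part of this patch):

/*
 * Sketch: populate buffer group 'bgid' before arming a bundled recv.
 * NBUFS must be a power of two.
 */
int err, i;
struct io_uring_buf_ring *br;

br = io_uring_setup_buf_ring(&ring, NBUFS, bgid, 0, &err);
if (!br)
	return err;
for (i = 0; i < NBUFS; i++)
	io_uring_buf_ring_add(br, bufs[i], BUF_LEN, i,
			      io_uring_buf_ring_mask(NBUFS), i);
io_uring_buf_ring_advance(br, NBUFS);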
@@ -818,12 +826,22 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
struct io_async_msghdr *kmsg,
bool mshot_finished, unsigned issue_flags)
{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
unsigned int cflags;
- cflags = io_put_kbuf(req, issue_flags);
+ if (sr->flags & IORING_RECVSEND_BUNDLE)
+ cflags = io_put_kbufs(req, io_bundle_nbufs(kmsg, *ret),
+ issue_flags);
+ else
+ cflags = io_put_kbuf(req, issue_flags);
+
if (kmsg->msg.msg_inq > 0)
cflags |= IORING_CQE_F_SOCK_NONEMPTY;
+ /* bundle with no more immediate buffers, we're done */
+ if (sr->flags & IORING_RECVSEND_BUNDLE && req->flags & REQ_F_BL_EMPTY)
+ goto finish;
+
/*
* Fill CQE for this receive and see if we should keep trying to
* receive from this socket.
@@ -831,14 +849,18 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
io_fill_cqe_req_aux(req, issue_flags & IO_URING_F_COMPLETE_DEFER,
*ret, cflags | IORING_CQE_F_MORE)) {
- struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
io_mshot_prep_retry(req);
/* Known not-empty or unknown state, retry */
if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
- if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
+ if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY) {
+ if (kmsg->free_iov) {
+ kfree(kmsg->free_iov);
+ kmsg->free_iov = NULL;
+ }
return false;
+ }
/* mshot retries exceeded, force a requeue */
sr->nr_multishot_loops = 0;
mshot_retry_ret = IOU_REQUEUE;
@@ -851,6 +873,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
}
/* Finish the request / stop multishot. */
+finish:
io_req_set_res(req, *ret, cflags);
if (issue_flags & IO_URING_F_MULTISHOT)
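
io_bundle_nbufs(), used above to size the io_put_kbufs() call, is not shown in
this excerpt. Conceptually it counts how many iovec segments the transferred
byte count covers. A rough standalone sketch of that arithmetic (assumed shape
only; the in-tree helper works on the request's msg_iter state):

/*
 * Given the iovec array the recv was issued with and the number of bytes
 * transferred, count how many buffers were filled (fully or partially),
 * front to back.
 */
static int bundle_nbufs(const struct iovec *iov, int nsegs, int xfered)
{
	int nbufs = 0;

	while (xfered > 0 && nbufs < nsegs) {
		int this_len = min_t(int, iov[nbufs].iov_len, xfered);

		xfered -= this_len;
		nbufs++;
	}
	return nbufs;
}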
@@ -1048,6 +1071,58 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
return ret;
}
+static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
+ size_t *len, unsigned int issue_flags)
+{
+ struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+ int ret;
+
+ /*
+ * If the ring isn't locked, then don't use the peek interface
+ * to grab multiple buffers as we will lock/unlock between
+ * this selection and posting the buffers.
+ */
+ if (!(issue_flags & IO_URING_F_UNLOCKED) &&
+ sr->flags & IORING_RECVSEND_BUNDLE) {
+ struct iovec *iov = kmsg->fast_iov;
+
+ *len = 0;
+ if (kmsg->msg.msg_inq > 0) {
+ *len = kmsg->msg.msg_inq;
+ if (sr->len && *len > sr->len)
+ *len = sr->len;
+ }
+ ret = io_buffers_peek(req, &iov, ARRAY_SIZE(kmsg->fast_iov), len);
+ if (unlikely(ret < 0))
+ return ret;
+
+ if (ret == 1) {
+ sr->buf = iov->iov_base;
+ sr->len = iov->iov_len;
+ goto ubuf;
+ }
+ iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, iov, ret, *len);
+ if (iov != kmsg->fast_iov)
+ kmsg->free_iov = iov;
+ } else {
+ void __user *buf;
+
+ *len = sr->len;
+ buf = io_buffer_select(req, len, issue_flags);
+ if (!buf)
+ return -ENOBUFS;
+ sr->buf = buf;
+ sr->len = *len;
+ubuf:
+ ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
+ &kmsg->msg.msg_iter);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return 0;
+}
+
int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -1093,17 +1168,10 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
retry_multishot:
if (io_do_buffer_select(req)) {
- void __user *buf;
-
- buf = io_buffer_select(req, &len, issue_flags);
- if (!buf)
- return -ENOBUFS;
- sr->buf = buf;
- sr->len = len;
- ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
- &kmsg->msg.msg_iter);
+ ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
if (unlikely(ret))
goto out_free;
+ sr->buf = NULL;
}
kmsg->msg.msg_inq = -1;
@@ -1143,13 +1211,8 @@ int io_recv(struct io_kiocb *req, unsigned int issue_flags)
else
io_kbuf_recycle(req, issue_flags);
- if (!io_recv_finish(req, &ret, kmsg, ret <= 0, issue_flags)) {
- if (kmsg->free_iov) {
- kfree(kmsg->free_iov);
- kmsg->free_iov = NULL;
- }
+ if (!io_recv_finish(req, &ret, kmsg, ret <= 0, issue_flags))
goto retry_multishot;
- }
io_req_msg_cleanup(req, kmsg, issue_flags);
return ret;
If IORING_OP_RECV is used with provided buffers, the caller may also set
IORING_RECVSEND_BUNDLE to turn it into a multi-buffer recv. This grabs
buffers available and receives into them, posting a single completion for
all of it.

This can be used with multishot receive as well, or without it.

Now that both send and receive support bundles, add a feature flag for it
as well. If IORING_FEAT_RECVSEND_BUNDLE is set after registering the ring,
then the kernel supports bundles for recv and send.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/uapi/linux/io_uring.h |  15 +++--
 io_uring/io_uring.c           |   3 +-
 io_uring/net.c                | 119 ++++++++++++++++++++++++++--------
 3 files changed, 101 insertions(+), 36 deletions(-)
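
On the completion side, a bundled recv posts one CQE whose res is the total
byte count and whose flags carry the starting buffer ID. Because the buffers
are contiguous from that ID, equal-sized buffers let the consumer recover the
span with simple arithmetic. A hedged sketch (BUF_LEN, bufs[] and consume()
are hypothetical):

/* Reap one bundled recv completion; assumes all buffers are BUF_LEN bytes */
struct io_uring_cqe *cqe;

if (io_uring_wait_cqe(&ring, &cqe) == 0) {
	if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
		unsigned int first = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
		unsigned int nbufs = (cqe->res + BUF_LEN - 1) / BUF_LEN;

		/* cqe->res bytes span buffers first .. first + nbufs - 1 */
		consume(&bufs[first], nbufs, cqe->res);
	}
	io_uring_cqe_seen(&ring, cqe);
}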