
[RFC,2/3] io_uring: add support for ignoring inline completions for waits

Message ID: 90bc3070b66b2a9f832716fd149184309ea6277d.1731205010.git.asml.silence@gmail.com (mailing list archive)
State: New
Series: request parameter set api and wait termination tuning

Commit Message

Pavel Begunkov Nov. 10, 2024, 2:56 p.m. UTC
From: Jens Axboe <axboe@kernel.dk>

io_uring treats all completions the same - they post one or more
completion events, and anyone waiting on event completions will see
each event as it gets posted.

However, some events may be more interesting than others. For a request
and response type model, it's not uncommon to have send/write requests
submitted alongside recv/read requests. While the app does want to see
a successful send/write completion eventually, it need not handle it as
promptly as a recv/read, as it isn't time sensitive. Generally, a
send/write completion just means that a buffer can get recycled/reused,
whereas a recv/read completion needs acting upon (and a response sent).

This can be tricky to manage when many requests and responses are in
flight, as the app then needs to track the number of pending
sends/writes to be able to sanely wait on just new incoming recv/read
completions. And even with that, an application would still like to
see a completion for a short/failed send/write immediately.

Add infrastructure to account inline completions, such that they can
be deducted from the 'wait_nr' being passed in via a submit_and_wait()
type of situation. Inline completions are ones that complete directly
inline from submission, such as a send to a socket where there's
enough space to accommodate the data being sent.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
[pavel: rebased onto iosets]
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 include/linux/io_uring_types.h |  1 +
 include/uapi/linux/io_uring.h  |  4 ++++
 io_uring/io_uring.c            | 12 ++++++++++--
 io_uring/register.c            |  2 +-
 4 files changed, 16 insertions(+), 3 deletions(-)
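
To make the intended userspace semantics concrete, here is a minimal
sketch of the request/response pattern this targets. The ioset
registration and the step that attaches sends to that ioset come from
patch 1/3 of the series and are not shown, so the comments about them
are assumptions; the liburing calls themselves are standard.

#include <liburing.h>

/*
 * One request/response round trip.  Assumes the send requests are
 * already attached to an ioset registered with
 * IOSQE_SET_F_HINT_IGNORE_INLINE (registration API is in patch 1/3).
 */
static void serve_once(struct io_uring *ring, int sock,
		       void *rx, void *tx, unsigned len)
{
	struct io_uring_sqe *sqe;

	/* The response: often completes inline when the socket
	 * buffer has room for the data. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_send(sqe, sock, tx, len, 0);

	/* The next incoming request: what we actually want to wait on. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sock, rx, len, 0);

	/* Without the hint, an inline send completion would satisfy
	 * wait_nr=1 and wake us before any data arrives.  With it, the
	 * kernel bumps min_complete by the number of inline completions
	 * from this submit, so the wait is effectively for the recv -
	 * or for a short/failed send, which still posts right away. */
	io_uring_submit_and_wait(ring, 1);
}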
diff mbox series

Patch

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 79f38c07642d..f04444f9356a 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -213,6 +213,7 @@  struct io_submit_state {
 	bool			need_plug;
 	bool			cq_flush;
 	unsigned short		submit_nr;
+	unsigned short		inline_completions;
 	struct blk_plug		plug;
 };
 
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 6a432383e7c3..e6d10fba8ae2 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -899,6 +899,10 @@  struct io_uring_recvmsg_out {
 	__u32 flags;
 };
 
+enum {
+	IOSQE_SET_F_HINT_IGNORE_INLINE		= 1,
+};
+
 struct io_uring_ioset_reg {
 	__u64 flags;
 	__u64 __resv[3];
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cf688a9ff737..6e89435c243d 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1575,6 +1575,9 @@  void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		struct io_kiocb *req = container_of(node, struct io_kiocb,
 					    comp_list);
 
+		if (req->ioset->flags & IOSQE_SET_F_HINT_IGNORE_INLINE)
+			state->inline_completions++;
+
 		if (unlikely(req->flags & (REQ_F_CQE_SKIP | REQ_F_GROUP))) {
 			if (req->flags & REQ_F_GROUP) {
 				io_complete_group_req(req);
@@ -2511,6 +2514,7 @@  static void io_submit_state_start(struct io_submit_state *state,
 	state->plug_started = false;
 	state->need_plug = max_ios > 2;
 	state->submit_nr = max_ios;
+	state->inline_completions = 0;
 	/* set only head, no need to init link_last in advance */
 	state->link.head = NULL;
 	state->group.head = NULL;
@@ -3611,6 +3615,7 @@  SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 		size_t, argsz)
 {
 	struct io_ring_ctx *ctx;
+	int inline_complete = 0;
 	struct file *file;
 	long ret;
 
@@ -3676,6 +3681,7 @@  SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 			mutex_unlock(&ctx->uring_lock);
 			goto out;
 		}
+		inline_complete = ctx->submit_state.inline_completions;
 		if (flags & IORING_ENTER_GETEVENTS) {
 			if (ctx->syscall_iopoll)
 				goto iopoll_locked;
@@ -3713,8 +3719,10 @@  SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 
 			ret2 = io_get_ext_arg(ctx, flags, argp, &ext_arg);
 			if (likely(!ret2)) {
-				min_complete = min(min_complete,
-						   ctx->cq_entries);
+				if (min_complete > ctx->cq_entries)
+					min_complete = ctx->cq_entries;
+				else
+					min_complete += inline_complete;
 				ret2 = io_cqring_wait(ctx, min_complete, flags,
 						      &ext_arg);
 			}
diff --git a/io_uring/register.c b/io_uring/register.c
index e7571dc46da5..f87ec7b773bd 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -92,7 +92,7 @@  static int io_update_ioset(struct io_ring_ctx *ctx,
 {
 	if (!(ctx->flags & IORING_SETUP_IOSET))
 		return -EINVAL;
-	if (reg->flags)
+	if (reg->flags & ~IOSQE_SET_F_HINT_IGNORE_INLINE)
 		return -EINVAL;
 	if (reg->__resv[0] || reg->__resv[1] || reg->__resv[2])
 		return -EINVAL;
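
For completeness, a rough sketch of setting the flag at ioset
registration time. Only struct io_uring_ioset_reg and the flag itself
are defined in this patch; the register opcode below is a hypothetical
placeholder for the interface introduced in patch 1/3.

#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

/* Hypothetical: the real opcode comes from patch 1/3 of the series. */
#ifndef IORING_REGISTER_IOSET_UPDATE
#define IORING_REGISTER_IOSET_UPDATE	99	/* placeholder value */
#endif

static int ioset_hint_ignore_inline(int ring_fd)
{
	struct io_uring_ioset_reg reg = {
		/* the only flag io_update_ioset() accepts; any other
		 * bit, or a non-zero __resv[], returns -EINVAL */
		.flags = IOSQE_SET_F_HINT_IGNORE_INLINE,
	};

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOSET_UPDATE, &reg, 1);
}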