[3/3] io_uring/msg_ring: avoid double indirection task_work for fd passing

Message ID 20240524230501.20178-4-axboe@kernel.dk (mailing list archive)
State New
Series Improve MSG_RING SINGLE_ISSUER performance

Commit Message

Jens Axboe May 24, 2024, 10:58 p.m. UTC
Like what was done for MSG_RING data passing, which avoids a double task_work
roundtrip for IORING_SETUP_SINGLE_ISSUER, implement the same model for fd
passing: install the file directly into the target ring's file table and post
the completion via the target's overflow list. File descriptor passing is
separately locked anyway, so the only remaining issue is CQE posting, just as
it was for data passing. And for that, we can use the same approach.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/msg_ring.c | 48 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 46 insertions(+), 2 deletions(-)
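
For context, the sketch below shows the userspace operation this series speeds
up: sending a file descriptor to another ring with IORING_OP_MSG_RING. It is a
minimal, untested illustration that assumes the liburing helpers
io_uring_prep_msg_ring_fd() and io_uring_register_files_sparse(); note that the
remote path this patch optimizes only triggers when the target ring is owned by
a different task than the sender, which a single-task example like this will
not exercise.

#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring src, dst;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2], ret;

	if (io_uring_queue_init(8, &src, 0))
		return 1;
	/* Target flags mirror the case the commit message targets. */
	if (io_uring_queue_init(8, &dst, IORING_SETUP_SINGLE_ISSUER |
					 IORING_SETUP_DEFER_TASKRUN))
		return 1;
	/* The target needs a fixed file table for the fd to land in. */
	if (io_uring_register_files_sparse(&dst, 8))
		return 1;
	if (pipe(fds))
		return 1;

	/* Ask the kernel to install fds[0] into slot 0 of dst's table. */
	sqe = io_uring_get_sqe(&src);
	io_uring_prep_msg_ring_fd(sqe, dst.ring_fd, fds[0], 0, 0x1234, 0);
	io_uring_submit(&src);

	ret = io_uring_wait_cqe(&src, &cqe);
	if (!ret) {
		printf("msg_ring fd send: res=%d\n", cqe->res);
		io_uring_cqe_seen(&src, cqe);
	}
	return 0;
}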

Patch

diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 3f89ff3a40ad..499702425711 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -299,6 +299,41 @@ static void io_msg_tw_fd_complete(struct callback_head *head)
 	io_req_queue_tw_complete(req, ret);
 }
 
+static int io_msg_install_remote(struct io_msg *msg, unsigned int issue_flags,
+				 struct io_ring_ctx *target_ctx)
+{
+	bool skip_cqe = msg->flags & IORING_MSG_RING_CQE_SKIP;
+	struct io_overflow_cqe *ocqe = NULL;
+	int ret;
+
+	if (!skip_cqe) {
+		ocqe = io_alloc_overflow(target_ctx);
+		if (!ocqe)
+			return -ENOMEM;
+	}
+
+	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) {
+		kfree(ocqe);
+		return -EAGAIN;
+	}
+
+	ret = __io_fixed_fd_install(target_ctx, msg->src_file, msg->dst_fd);
+	if (ret < 0)
+		goto out;
+
+	msg->src_file = NULL;
+
+	if (!skip_cqe) {
+		ocqe->cqe.flags = 0;
+		io_msg_add_overflow(msg, target_ctx, ocqe, ret);
+		ocqe = NULL;	/* ownership passed to the overflow list */
+	}
+out:
+	mutex_unlock(&target_ctx->uring_lock);
+	kfree(ocqe);
+	return ret;
+}
+
 static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_ring_ctx *target_ctx = req->file->private_data;
@@ -320,8 +355,17 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 		req->flags |= REQ_F_NEED_CLEANUP;
 	}
 
-	if (io_msg_need_remote(target_ctx))
-		return io_msg_exec_remote(req, io_msg_tw_fd_complete);
+	if (io_msg_need_remote(target_ctx)) {
+		int ret;
+
+		ret = io_msg_install_remote(msg, issue_flags, target_ctx);
+		if (ret == -EAGAIN)
+			return io_msg_exec_remote(req, io_msg_tw_fd_complete);
+		else if (ret < 0)
+			return ret;
+		req->flags &= ~REQ_F_NEED_CLEANUP;
+		return 0;
+	}
 	return io_msg_install_complete(req, issue_flags);
 }