
[4/7] io_uring/net: move send zc fixed buffer import into helper

Message ID 20241023161522.1126423-5-axboe@kernel.dk (mailing list archive)
State: New
Series: Add support for provided registered buffers

Commit Message

Jens Axboe Oct. 23, 2024, 4:07 p.m. UTC
In preparation for making the fixed buffer import a bit more elaborate
in terms of what it supports, move the send zc fixed buffer import into
its own helper.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/net.c | 77 ++++++++++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 33 deletions(-)
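
For context on what this import path serves from userspace, here is a
minimal sketch (not part of the patch): a zero-copy send from a
registered (fixed) buffer via liburing, which exercises the
IORING_RECVSEND_FIXED_BUF branch that this patch moves into
io_send_zc_import_single(). The socket is assumed to be already
connected, and error handling is abbreviated.

#include <liburing.h>
#include <string.h>

static int send_zc_fixed_example(int sockfd)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	static char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int i, ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	/* Register the buffer; the SQE refers to it by index (0 here) */
	ret = io_uring_register_buffers(&ring, &iov, 1);
	if (ret < 0)
		goto out;

	memset(buf, 0xaa, sizeof(buf));
	sqe = io_uring_get_sqe(&ring);
	/* Sets IORING_RECVSEND_FIXED_BUF and sqe->buf_index internally */
	io_uring_prep_send_zc_fixed(sqe, sockfd, buf, sizeof(buf), 0, 0, 0);

	ret = io_uring_submit(&ring);
	if (ret < 0)
		goto out;

	/*
	 * A successful zero-copy send posts two CQEs: the send result
	 * (flagged IORING_CQE_F_MORE) and, once the kernel is done with
	 * the buffer, a notification flagged IORING_CQE_F_NOTIF.
	 */
	for (i = 0; i < 2; i++) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret < 0)
			break;
		io_uring_cqe_seen(&ring, cqe);
	}
out:
	io_uring_queue_exit(&ring);
	return ret;
}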

Patch

diff --git a/io_uring/net.c b/io_uring/net.c
index 13b807c729f9..dbef14aa50f9 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -81,6 +81,9 @@ struct io_sr_msg {
 	struct io_kiocb 		*notif;
 };
 
+static int io_sg_from_iter(struct sk_buff *skb, struct iov_iter *from,
+			   size_t length);
+
 /*
  * Number of times we'll try and do receives if there's more data. If we
  * exceed this limit, then add us to the back of the queue and retry from
@@ -578,6 +581,37 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	return IOU_OK;
 }
 
+static int io_send_zc_import_single(struct io_kiocb *req,
+				    unsigned int issue_flags)
+{
+	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+	struct io_async_msghdr *kmsg = req->async_data;
+	struct io_ring_ctx *ctx = req->ctx;
+	struct io_mapped_ubuf *imu;
+	int ret;
+	u16 idx;
+
+	ret = -EFAULT;
+	io_ring_submit_lock(ctx, issue_flags);
+	if (sr->buf_index < ctx->nr_user_bufs) {
+		idx = array_index_nospec(sr->buf_index, ctx->nr_user_bufs);
+		imu = READ_ONCE(ctx->user_bufs[idx]);
+		io_req_set_rsrc_node(sr->notif, ctx);
+		ret = 0;
+	}
+	io_ring_submit_unlock(ctx, issue_flags);
+
+	if (unlikely(ret))
+		return ret;
+
+	ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, imu,
+				(u64)(uintptr_t)sr->buf, sr->len);
+	if (unlikely(ret))
+		return ret;
+	kmsg->msg.sg_from_iter = io_sg_from_iter;
+	return 0;
+}
+
 static int __io_send_import(struct io_kiocb *req, struct buf_sel_arg *arg,
 			    int nsegs, unsigned int issue_flags)
 {
@@ -1365,40 +1399,17 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_async_msghdr *kmsg = req->async_data;
 	int ret;
 
-	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
-		struct io_ring_ctx *ctx = req->ctx;
-		struct io_mapped_ubuf *imu;
-		int idx;
-
-		ret = -EFAULT;
-		io_ring_submit_lock(ctx, issue_flags);
-		if (sr->buf_index < ctx->nr_user_bufs) {
-			idx = array_index_nospec(sr->buf_index, ctx->nr_user_bufs);
-			imu = READ_ONCE(ctx->user_bufs[idx]);
-			io_req_set_rsrc_node(sr->notif, ctx);
-			ret = 0;
-		}
-		io_ring_submit_unlock(ctx, issue_flags);
+	if (sr->flags & IORING_RECVSEND_FIXED_BUF)
+		return io_send_zc_import_single(req, issue_flags);
 
-		if (unlikely(ret))
-			return ret;
-
-		ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, imu,
-					(u64)(uintptr_t)sr->buf, sr->len);
-		if (unlikely(ret))
-			return ret;
-		kmsg->msg.sg_from_iter = io_sg_from_iter;
-	} else {
-		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
-		if (unlikely(ret))
-			return ret;
-		ret = io_notif_account_mem(sr->notif, sr->len);
-		if (unlikely(ret))
-			return ret;
-		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
-	}
-
-	return ret;
+	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
+	if (unlikely(ret))
+		return ret;
+	ret = io_notif_account_mem(sr->notif, sr->len);
+	if (unlikely(ret))
+		return ret;
+	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
+	return 0;
 }
 
 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)