[3/7] io_uring/net: isolate msghdr copying code

Message ID d3eb1f81c8cfbea9f1aa57dab90c472d2aa6e371.1740569495.git.asml.silence@gmail.com (mailing list archive)
State: New
Series: improve net msghdr / iovec handling

Commit Message

Pavel Begunkov Feb. 26, 2025, 11:41 a.m. UTC
The user access section in io_msg_copy_hdr() is overextended by also
covering selected buffers. That makes it hard to work with and prone to
errors. Limit the section to the msghdr import only and move it into its
own function; selected buffers will do a separate copy_from_user() call
instead. This should be fine: single shots with selected buffers are not
performance critical, for multishots the overhead should be non-existent,
and the extra copy is not that expensive overall.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 io_uring/net.c | 45 +++++++++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 20 deletions(-)
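
For context, the helper factored out below follows the standard
user_access_begin() / unsafe_get_user() / user_access_end() pattern: the
begin call opens a user-access window (lifting SMAP on x86), each
unsafe_get_user() jumps to the supplied label on a fault, and the window
is closed on both the success and failure paths. The selected-buffer path
instead falls back to a plain, bounded copy_from_user(). A minimal sketch
of that pattern, using a made-up struct and helper name rather than
anything from this patch:

#include <linux/uaccess.h>

struct foo {
	void __user *ptr;
	size_t len;
};

static int copy_foo_from_user(struct foo *dst, const struct foo __user *src)
{
	/* keep the unsafe section as small as possible */
	if (!user_access_begin(src, sizeof(*src)))
		return -EFAULT;
	unsafe_get_user(dst->ptr, &src->ptr, efault); /* jumps to efault on fault */
	unsafe_get_user(dst->len, &src->len, efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}

Factoring the window into its own function also keeps the caller free to
use ordinary helpers such as copy_from_user() or __import_iovec() without
worrying about what is allowed inside the unsafe section.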

Patch

diff --git a/io_uring/net.c b/io_uring/net.c
index 0013a7169d10..67d768e6ecdd 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -243,6 +243,24 @@  static int io_compat_msg_copy_hdr(struct io_kiocb *req,
 }
 #endif
 
+static int io_copy_msghdr_from_user(struct user_msghdr *msg,
+				    struct user_msghdr __user *umsg)
+{
+	if (!user_access_begin(umsg, sizeof(*umsg)))
+		return -EFAULT;
+	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
+	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
+	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
+	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
+	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
+	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
+	user_access_end();
+	return 0;
+ua_end:
+	user_access_end();
+	return -EFAULT;
+}
+
 static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
 			   struct user_msghdr *msg, int ddir)
 {
@@ -259,16 +277,10 @@  static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
 		nr_segs = 1;
 	}
 
-	if (!user_access_begin(umsg, sizeof(*umsg)))
-		return -EFAULT;
+	ret = io_copy_msghdr_from_user(msg, umsg);
+	if (unlikely(ret))
+		return ret;
 
-	ret = -EFAULT;
-	unsafe_get_user(msg->msg_name, &umsg->msg_name, ua_end);
-	unsafe_get_user(msg->msg_namelen, &umsg->msg_namelen, ua_end);
-	unsafe_get_user(msg->msg_iov, &umsg->msg_iov, ua_end);
-	unsafe_get_user(msg->msg_iovlen, &umsg->msg_iovlen, ua_end);
-	unsafe_get_user(msg->msg_control, &umsg->msg_control, ua_end);
-	unsafe_get_user(msg->msg_controllen, &umsg->msg_controllen, ua_end);
 	msg->msg_flags = 0;
 
 	if (req->flags & REQ_F_BUFFER_SELECT) {
@@ -276,24 +288,17 @@  static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
 			sr->len = iov->iov_len = 0;
 			iov->iov_base = NULL;
 		} else if (msg->msg_iovlen > 1) {
-			ret = -EINVAL;
-			goto ua_end;
+			return -EINVAL;
 		} else {
 			struct iovec __user *uiov = msg->msg_iov;
 
-			/* we only need the length for provided buffers */
-			if (!access_ok(&uiov->iov_len, sizeof(uiov->iov_len)))
-				goto ua_end;
-			unsafe_get_user(iov->iov_len, &uiov->iov_len, ua_end);
+			if (copy_from_user(iov, uiov, sizeof(*iov)))
+				return -EFAULT;
 			sr->len = iov->iov_len;
 		}
-		ret = 0;
-ua_end:
-		user_access_end();
-		return ret;
+		return 0;
 	}
 
-	user_access_end();
 	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
 				&iov, &iomsg->msg.msg_iter, false);
 	if (unlikely(ret < 0))