@@ -282,12 +282,17 @@ static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg
 	struct user_msghdr msg;
 	int ret;
 
-	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
+	if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
 		return -EFAULT;
 
-	ret = __copy_msghdr(&iomsg->msg, &msg, addr);
-	if (ret)
-		return ret;
+	ret = -EFAULT;
+	unsafe_get_user(msg.msg_name, &sr->umsg->msg_name, uaccess_end);
+	unsafe_get_user(msg.msg_namelen, &sr->umsg->msg_namelen, uaccess_end);
+	unsafe_get_user(msg.msg_iov, &sr->umsg->msg_iov, uaccess_end);
+	unsafe_get_user(msg.msg_iovlen, &sr->umsg->msg_iovlen, uaccess_end);
+	unsafe_get_user(msg.msg_control, &sr->umsg->msg_control, uaccess_end);
+	unsafe_get_user(msg.msg_controllen, &sr->umsg->msg_controllen, uaccess_end);
+	msg.msg_flags = 0;
 
 	if (req->flags & REQ_F_BUFFER_SELECT) {
 		if (msg.msg_iovlen == 0) {
@@ -295,11 +300,14 @@ static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg
 			iomsg->fast_iov[0].iov_base = NULL;
 			iomsg->free_iov = NULL;
 		} else if (msg.msg_iovlen > 1) {
-			return -EINVAL;
+			ret = -EINVAL;
+			goto uaccess_end;
 		} else {
-			if (copy_from_user(iomsg->fast_iov, msg.msg_iov,
-					   sizeof(*msg.msg_iov)))
-				return -EFAULT;
+			/* we only need the length for provided buffers */
+			if (!access_ok(&msg.msg_iov[0].iov_len, sizeof(__kernel_size_t)))
+				goto uaccess_end;
+			unsafe_get_user(iomsg->fast_iov[0].iov_len,
+					&msg.msg_iov[0].iov_len, uaccess_end);
 			sr->len = iomsg->fast_iov[0].iov_len;
 			iomsg->free_iov = NULL;
 		}
@@ -307,10 +315,16 @@ static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg
 		if (ddir == ITER_DEST && req->flags & REQ_F_APOLL_MULTISHOT) {
 			iomsg->namelen = msg.msg_namelen;
 			iomsg->controllen = msg.msg_controllen;
-			if (io_recvmsg_multishot_overflow(iomsg))
-				return -EOVERFLOW;
+			if (io_recvmsg_multishot_overflow(iomsg)) {
+				ret = -EOVERFLOW;
+uaccess_end:
+				user_access_end();
+				return ret;
+			}
 		}
+		user_access_end();
 	} else {
+		user_access_end();
 		iomsg->free_iov = iomsg->fast_iov;
 		ret = __import_iovec(ddir, msg.msg_iov, msg.msg_iovlen,
 				     UIO_FASTIOV, &iomsg->free_iov,
@@ -319,6 +333,12 @@ static int __io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg
 			ret = 0;
 	}
 
+	ret = __copy_msghdr(&iomsg->msg, &msg, addr);
+	if (!ret)
+		return 0;
+
+	kfree(iomsg->free_iov);
+	iomsg->free_iov = NULL;
 	return ret;
 }
 
We're spending a considerable amount of the sendmsg/recvmsg time just
copying in the message header, and, for provided buffers, the known
single-entry iovec.

Be a bit smarter about it and enable/disable user access around our
copying. In a test case that does both sendmsg and recvmsg, the runtimes
before and after this change (averaged over multiple runs, with very
stable times):

Kernel		Time		Diff
====================================
-git		4720 usec
-git+commit	4311 usec	-8.7%

and looking at a profile diff, we see the following:

    0.25%   +9.33%  [kernel.kallsyms]  [k] _copy_from_user
    4.47%   -3.32%  [kernel.kallsyms]  [k] __io_msg_copy_hdr.constprop.0

where we drop more than 9% of _copy_from_user() time, and consequently
add time to __io_msg_copy_hdr() where the copies are now attributed to,
but with a net win of 6%.

In comparison, the same test case with send/recv runs in 3745 usec,
which is (expectedly) still quite a bit faster. But at least
sendmsg/recvmsg is now only ~13% slower, where it was ~21% slower
before.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
v2:
- add missing access_ok() for the iovec copy
- only copy iov->iov_len, we never use iov->iov_base
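
For readers less familiar with the uaccess helpers used above, here is a
minimal, self-contained sketch of the pattern the patch applies. The names
in it (demo_hdr, demo_copy_hdr, lenp, count, first_len) are made up purely
for illustration and are not part of io_uring; only user_access_begin(),
unsafe_get_user(), access_ok() and user_access_end() are the real kernel
interfaces:

/*
 * Sketch only, not io_uring code: validate the user struct once with
 * user_access_begin(), pull its fields out with unsafe_get_user(), and
 * re-check ranges only for pointers that were themselves loaded from
 * user memory.
 */
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_hdr {
	size_t __user	*lenp;	/* user pointer stored in user memory */
	size_t		count;
};

static int demo_copy_hdr(struct demo_hdr __user *uhdr, struct demo_hdr *hdr,
			 size_t *first_len)
{
	int ret = -EFAULT;

	/* one range/permission check covers every direct field below */
	if (!user_access_begin(uhdr, sizeof(*uhdr)))
		return -EFAULT;
	/* plain loads, no per-field access_ok() or per-call fault setup */
	unsafe_get_user(hdr->lenp, &uhdr->lenp, out);
	unsafe_get_user(hdr->count, &uhdr->count, out);
	/*
	 * hdr->lenp came from user memory and lies outside the range
	 * validated above, so it needs its own access_ok() before it may
	 * be dereferenced; this mirrors the v2 access_ok() added for
	 * msg.msg_iov[0].iov_len.
	 */
	if (!access_ok(hdr->lenp, sizeof(*hdr->lenp)))
		goto out;
	unsafe_get_user(*first_len, hdr->lenp, out);
	ret = 0;
out:
	user_access_end();
	return ret;
}

The win measured above comes from the same place as in this sketch: the
range check and, on x86 with SMAP, the user-access enable/disable happen
once per header rather than once per copy_from_user() call, while any
pointer fetched out of user memory still gets its own access_ok() before
being dereferenced.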