@@ -391,7 +391,6 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
if (req_has_async_data(req)) {
kmsg = req->async_data;
- kmsg->msg.msg_control_user = sr->msg_control;
} else {
kmsg = io_msg_alloc_async(req, issue_flags);
if (unlikely(!kmsg))
@@ -411,6 +410,8 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+ kmsg->msg.msg_control_user = sr->msg_control;
+
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
if (ret < min_ret) {
@@ -1271,7 +1272,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (req_has_async_data(req)) {
kmsg = req->async_data;
- kmsg->msg.msg_control_user = sr->msg_control;
} else {
kmsg = io_msg_alloc_async(req, issue_flags);
if (unlikely(!kmsg))
@@ -1291,6 +1291,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
if (flags & MSG_WAITALL)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+ kmsg->msg.msg_control_user = sr->msg_control;
kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
We currently set this separately for the async/sync entry paths, but let's just move it to a generic pre-issue spot and eliminate the difference between the two.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
This is for the 6.10 branch. I was looking into dealing with the msg_control_user fix for SENDMSG_ZC in 6.9, and I think this is a good prep cleanup that makes it easier to handle when merging the sync/async paths.
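For reference, here is a rough sketch of the shape of the io_sendmsg() issue path with this applied, pieced together from the hunks above. The local variable declarations and elided context are an approximation of the tree, not a verbatim copy; only the lines touched by the diff are taken directly from it.

```c
int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *kmsg;
	struct socket *sock;
	unsigned int flags;
	size_t min_ret = 0;
	int ret;

	/* ... */

	/* async and sync entry no longer differ in msghdr setup */
	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		kmsg = io_msg_alloc_async(req, issue_flags);
		if (unlikely(!kmsg))
			return -ENOMEM;
		/* ... copy in the msghdr for the sync case ... */
	}

	/* ... socket lookup, flags setup ... */

	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	/* set once in a generic pre-issue spot, regardless of entry path */
	kmsg->msg.msg_control_user = sr->msg_control;

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
	if (ret < min_ret) {
		/* ... retry / error handling ... */
	}
	/* ... completion ... */
}
```

io_sendmsg_zc() follows the same pattern, with the assignment landing just before msg_ubuf and sg_from_iter are set up for the zerocopy notification.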