Message ID | 20240510155900.1825946-3-zijianzhang@bytedance.com
---|---
State | Superseded
Delegated to | Netdev Maintainers
Series | net: A lightweight zero-copy notification

Hi,
Hi,
kernel test robot noticed the following build warnings:
[auto build test WARNING on net-next/main]
url: https://github.com/intel-lab-lkp/linux/commits/zijianzhang-bytedance-com/selftests-fix-OOM-problem-in-msg_zerocopy-selftest/20240511-000153
base: net-next/main
patch link: https://lore.kernel.org/r/20240510155900.1825946-3-zijianzhang%40bytedance.com
patch subject: [PATCH net-next v3 2/3] sock: add MSG_ZEROCOPY notification mechanism based on msg_control
config: arm-defconfig (https://download.01.org/0day-ci/archive/20240511/202405111306.MOClscNA-lkp@intel.com/config)
compiler: clang version 14.0.6 (https://github.com/llvm/llvm-project f28c006a5895fc0e329fe15fead81e37457cb1d1)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240511/202405111306.MOClscNA-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202405111306.MOClscNA-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> net/core/sock.c:2808:5: warning: stack frame size (1608) exceeds limit (1024) in '__sock_cmsg_send' [-Wframe-larger-than]
int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
^
1 warning generated.
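The arithmetic behind the warning is visible in the listing below: the new
handler places a SOCK_ZC_INFO_MAX-sized array of struct zc_info_elem on the
stack. A minimal userspace sketch (an illustration, assuming the 4-byte
alignment clang uses on arm) reproduces the footprint:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the UAPI struct the patch adds: two __u32 plus one __u8,
 * padded to 12 bytes under 4-byte alignment. */
struct zc_info_elem {
	uint32_t lo;
	uint32_t hi;
	uint8_t zerocopy;
};

#define SOCK_ZC_INFO_MAX 128

int main(void)
{
	/* 128 * 12 = 1536 bytes, past the 1024-byte limit of a W=1
	 * build before any other locals or spill slots are counted. */
	printf("on-stack array: %zu bytes\n",
	       SOCK_ZC_INFO_MAX * sizeof(struct zc_info_elem));
	return 0;
}

The reported 1608-byte frame is this 1536-byte array plus the remaining
locals.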
vim +/__sock_cmsg_send +2808 net/core/sock.c
^1da177e4c3f41 Linus Torvalds 2005-04-16 2807
233baf9a1bc46f xu xin 2022-10-20 @2808 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
f28ea365cdefc3 Edward Jee 2015-10-08 2809 struct sockcm_cookie *sockc)
f28ea365cdefc3 Edward Jee 2015-10-08 2810 {
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2811 u32 tsflags;
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2812
f28ea365cdefc3 Edward Jee 2015-10-08 2813 switch (cmsg->cmsg_type) {
f28ea365cdefc3 Edward Jee 2015-10-08 2814 case SO_MARK:
91f0d8a4813a9a Jakub Kicinski 2022-01-31 2815 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
91f0d8a4813a9a Jakub Kicinski 2022-01-31 2816 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
f28ea365cdefc3 Edward Jee 2015-10-08 2817 return -EPERM;
f28ea365cdefc3 Edward Jee 2015-10-08 2818 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
f28ea365cdefc3 Edward Jee 2015-10-08 2819 return -EINVAL;
f28ea365cdefc3 Edward Jee 2015-10-08 2820 sockc->mark = *(u32 *)CMSG_DATA(cmsg);
f28ea365cdefc3 Edward Jee 2015-10-08 2821 break;
7f1bc6e95d7840 Deepa Dinamani 2019-02-02 2822 case SO_TIMESTAMPING_OLD:
382a32018b74f4 Thomas Lange 2024-01-04 2823 case SO_TIMESTAMPING_NEW:
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2824 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32)))
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2825 return -EINVAL;
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2826
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2827 tsflags = *(u32 *)CMSG_DATA(cmsg);
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2828 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK)
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2829 return -EINVAL;
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2830
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2831 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2832 sockc->tsflags |= tsflags;
3dd17e63f5131b Soheil Hassas Yeganeh 2016-04-02 2833 break;
80b14dee2bea12 Richard Cochran 2018-07-03 2834 case SCM_TXTIME:
80b14dee2bea12 Richard Cochran 2018-07-03 2835 if (!sock_flag(sk, SOCK_TXTIME))
80b14dee2bea12 Richard Cochran 2018-07-03 2836 return -EINVAL;
80b14dee2bea12 Richard Cochran 2018-07-03 2837 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64)))
80b14dee2bea12 Richard Cochran 2018-07-03 2838 return -EINVAL;
80b14dee2bea12 Richard Cochran 2018-07-03 2839 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg));
80b14dee2bea12 Richard Cochran 2018-07-03 2840 break;
779f1edec664a7 Soheil Hassas Yeganeh 2016-07-11 2841 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
779f1edec664a7 Soheil Hassas Yeganeh 2016-07-11 2842 case SCM_RIGHTS:
779f1edec664a7 Soheil Hassas Yeganeh 2016-07-11 2843 case SCM_CREDENTIALS:
779f1edec664a7 Soheil Hassas Yeganeh 2016-07-11 2844 break;
274b6fd4a3053e Zijian Zhang 2024-05-10 2845 case SCM_ZC_NOTIFICATION: {
274b6fd4a3053e Zijian Zhang 2024-05-10 2846 int ret, i = 0;
274b6fd4a3053e Zijian Zhang 2024-05-10 2847 int cmsg_data_len, zc_info_elem_num;
274b6fd4a3053e Zijian Zhang 2024-05-10 2848 void __user *usr_addr;
274b6fd4a3053e Zijian Zhang 2024-05-10 2849 struct zc_info_elem zc_info_kern[SOCK_ZC_INFO_MAX];
274b6fd4a3053e Zijian Zhang 2024-05-10 2850 unsigned long flags;
274b6fd4a3053e Zijian Zhang 2024-05-10 2851 struct sk_buff_head *q, local_q;
274b6fd4a3053e Zijian Zhang 2024-05-10 2852 struct sk_buff *skb, *tmp;
274b6fd4a3053e Zijian Zhang 2024-05-10 2853 struct sock_exterr_skb *serr;
274b6fd4a3053e Zijian Zhang 2024-05-10 2854
274b6fd4a3053e Zijian Zhang 2024-05-10 2855 if (!sock_flag(sk, SOCK_ZEROCOPY) || sk->sk_family == PF_RDS)
274b6fd4a3053e Zijian Zhang 2024-05-10 2856 return -EINVAL;
274b6fd4a3053e Zijian Zhang 2024-05-10 2857
274b6fd4a3053e Zijian Zhang 2024-05-10 2858 cmsg_data_len = cmsg->cmsg_len - sizeof(struct cmsghdr);
274b6fd4a3053e Zijian Zhang 2024-05-10 2859 if (cmsg_data_len % sizeof(struct zc_info_elem))
274b6fd4a3053e Zijian Zhang 2024-05-10 2860 return -EINVAL;
274b6fd4a3053e Zijian Zhang 2024-05-10 2861
274b6fd4a3053e Zijian Zhang 2024-05-10 2862 zc_info_elem_num = cmsg_data_len / sizeof(struct zc_info_elem);
274b6fd4a3053e Zijian Zhang 2024-05-10 2863 if (!zc_info_elem_num || zc_info_elem_num > SOCK_ZC_INFO_MAX)
274b6fd4a3053e Zijian Zhang 2024-05-10 2864 return -EINVAL;
274b6fd4a3053e Zijian Zhang 2024-05-10 2865
274b6fd4a3053e Zijian Zhang 2024-05-10 2866 if (in_compat_syscall())
274b6fd4a3053e Zijian Zhang 2024-05-10 2867 usr_addr = compat_ptr(*(compat_uptr_t *)CMSG_DATA(cmsg));
274b6fd4a3053e Zijian Zhang 2024-05-10 2868 else
274b6fd4a3053e Zijian Zhang 2024-05-10 2869 usr_addr = (void __user *)*(void **)CMSG_DATA(cmsg);
274b6fd4a3053e Zijian Zhang 2024-05-10 2870 if (!access_ok(usr_addr, cmsg_data_len))
274b6fd4a3053e Zijian Zhang 2024-05-10 2871 return -EFAULT;
274b6fd4a3053e Zijian Zhang 2024-05-10 2872
274b6fd4a3053e Zijian Zhang 2024-05-10 2873 q = &sk->sk_error_queue;
274b6fd4a3053e Zijian Zhang 2024-05-10 2874 skb_queue_head_init(&local_q);
274b6fd4a3053e Zijian Zhang 2024-05-10 2875 spin_lock_irqsave(&q->lock, flags);
274b6fd4a3053e Zijian Zhang 2024-05-10 2876 skb = skb_peek(q);
274b6fd4a3053e Zijian Zhang 2024-05-10 2877 while (skb && i < zc_info_elem_num) {
274b6fd4a3053e Zijian Zhang 2024-05-10 2878 struct sk_buff *skb_next = skb_peek_next(skb, q);
274b6fd4a3053e Zijian Zhang 2024-05-10 2879
274b6fd4a3053e Zijian Zhang 2024-05-10 2880 serr = SKB_EXT_ERR(skb);
274b6fd4a3053e Zijian Zhang 2024-05-10 2881 if (serr->ee.ee_errno == 0 &&
274b6fd4a3053e Zijian Zhang 2024-05-10 2882 serr->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY) {
274b6fd4a3053e Zijian Zhang 2024-05-10 2883 zc_info_kern[i].hi = serr->ee.ee_data;
274b6fd4a3053e Zijian Zhang 2024-05-10 2884 zc_info_kern[i].lo = serr->ee.ee_info;
274b6fd4a3053e Zijian Zhang 2024-05-10 2885 zc_info_kern[i].zerocopy = !(serr->ee.ee_code
274b6fd4a3053e Zijian Zhang 2024-05-10 2886 & SO_EE_CODE_ZEROCOPY_COPIED);
274b6fd4a3053e Zijian Zhang 2024-05-10 2887 __skb_unlink(skb, q);
274b6fd4a3053e Zijian Zhang 2024-05-10 2888 __skb_queue_tail(&local_q, skb);
274b6fd4a3053e Zijian Zhang 2024-05-10 2889 i++;
274b6fd4a3053e Zijian Zhang 2024-05-10 2890 }
274b6fd4a3053e Zijian Zhang 2024-05-10 2891 skb = skb_next;
274b6fd4a3053e Zijian Zhang 2024-05-10 2892 }
274b6fd4a3053e Zijian Zhang 2024-05-10 2893 spin_unlock_irqrestore(&q->lock, flags);
274b6fd4a3053e Zijian Zhang 2024-05-10 2894
274b6fd4a3053e Zijian Zhang 2024-05-10 2895 ret = copy_to_user(usr_addr,
274b6fd4a3053e Zijian Zhang 2024-05-10 2896 zc_info_kern,
274b6fd4a3053e Zijian Zhang 2024-05-10 2897 i * sizeof(struct zc_info_elem));
274b6fd4a3053e Zijian Zhang 2024-05-10 2898
274b6fd4a3053e Zijian Zhang 2024-05-10 2899 if (unlikely(ret)) {
274b6fd4a3053e Zijian Zhang 2024-05-10 2900 spin_lock_irqsave(&q->lock, flags);
274b6fd4a3053e Zijian Zhang 2024-05-10 2901 skb_queue_reverse_walk_safe(&local_q, skb, tmp) {
274b6fd4a3053e Zijian Zhang 2024-05-10 2902 __skb_unlink(skb, &local_q);
274b6fd4a3053e Zijian Zhang 2024-05-10 2903 __skb_queue_head(q, skb);
274b6fd4a3053e Zijian Zhang 2024-05-10 2904 }
274b6fd4a3053e Zijian Zhang 2024-05-10 2905 spin_unlock_irqrestore(&q->lock, flags);
274b6fd4a3053e Zijian Zhang 2024-05-10 2906 return -EFAULT;
274b6fd4a3053e Zijian Zhang 2024-05-10 2907 }
274b6fd4a3053e Zijian Zhang 2024-05-10 2908
274b6fd4a3053e Zijian Zhang 2024-05-10 2909 while ((skb = __skb_dequeue(&local_q)))
274b6fd4a3053e Zijian Zhang 2024-05-10 2910 consume_skb(skb);
274b6fd4a3053e Zijian Zhang 2024-05-10 2911 break;
274b6fd4a3053e Zijian Zhang 2024-05-10 2912 }
f28ea365cdefc3 Edward Jee 2015-10-08 2913 default:
f28ea365cdefc3 Edward Jee 2015-10-08 2914 return -EINVAL;
f28ea365cdefc3 Edward Jee 2015-10-08 2915 }
39771b127b4123 Willem de Bruijn 2016-04-02 2916 return 0;
39771b127b4123 Willem de Bruijn 2016-04-02 2917 }
39771b127b4123 Willem de Bruijn 2016-04-02 2918 EXPORT_SYMBOL(__sock_cmsg_send);
39771b127b4123 Willem de Bruijn 2016-04-02 2919
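An obvious way a respin could shrink the frame flagged above (an assumption
about one possible fix, not something this version of the series does) is to
size the bounce buffer to the request and move it off the stack:

	struct zc_info_elem *zc_info_kern;

	zc_info_kern = kcalloc(zc_info_elem_num, sizeof(*zc_info_kern),
			       GFP_KERNEL);
	if (!zc_info_kern)
		return -ENOMEM;

	/* ... fill zc_info_kern and copy_to_user() as in the listing
	 * above, then: */
	kfree(zc_info_kern);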
zijianzhang@ wrote:
> From: Zijian Zhang <zijianzhang@bytedance.com>
>
> The MSG_ZEROCOPY flag enables copy avoidance for socket send calls.
> However, zerocopy is not a free lunch. Apart from the management of user
> pages, the combination of poll + recvmsg to receive notifications incurs
> non-negligible overhead in applications. That overhead can sometimes be
> more than the CPU savings from zerocopy. We try to solve this problem
> with a new notification mechanism based on msg_control.
> This new mechanism aims to reduce the overhead associated with receiving
> notifications by embedding them directly into user arguments passed with
> each sendmsg control message. By doing so, we can significantly reduce
> the complexity and overhead of managing notifications. In an ideal
> pattern, the user will keep calling sendmsg with SCM_ZC_NOTIFICATION
> msg_control, and the notification will be delivered as soon as possible.
>
> Signed-off-by: Zijian Zhang <zijianzhang@bytedance.com>
> Signed-off-by: Xiaochun Lu <xiaochun.lu@bytedance.com>

> +#include <linux/types.h>
> +
>  /*
>   * Desired design of maximum size and alignment (see RFC2553)
>   */
> @@ -35,4 +37,12 @@ struct __kernel_sockaddr_storage {
>  #define SOCK_TXREHASH_DISABLED	0
>  #define SOCK_TXREHASH_ENABLED	1
>
> +#define SOCK_ZC_INFO_MAX 128
> +
> +struct zc_info_elem {
> +	__u32 lo;
> +	__u32 hi;
> +	__u8 zerocopy;
> +};
> +
>  #endif /* _UAPI_LINUX_SOCKET_H */
> diff --git a/net/core/sock.c b/net/core/sock.c
> index 8d6e638b5426..15da609be026 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -2842,6 +2842,74 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
>  	case SCM_RIGHTS:
>  	case SCM_CREDENTIALS:
>  		break;
> +	case SCM_ZC_NOTIFICATION: {
> +		int ret, i = 0;
> +		int cmsg_data_len, zc_info_elem_num;
> +		void __user *usr_addr;
> +		struct zc_info_elem zc_info_kern[SOCK_ZC_INFO_MAX];
> +		unsigned long flags;
> +		struct sk_buff_head *q, local_q;
> +		struct sk_buff *skb, *tmp;
> +		struct sock_exterr_skb *serr;

minor: reverse xmas tree

> +
> +		if (!sock_flag(sk, SOCK_ZEROCOPY) || sk->sk_family == PF_RDS)
> +			return -EINVAL;

Is this mechanism supported for PF_RDS?
The next patch fails on PF_RDS + '-n'

> +
> +		cmsg_data_len = cmsg->cmsg_len - sizeof(struct cmsghdr);
> +		if (cmsg_data_len % sizeof(struct zc_info_elem))
> +			return -EINVAL;
> +
> +		zc_info_elem_num = cmsg_data_len / sizeof(struct zc_info_elem);
> +		if (!zc_info_elem_num || zc_info_elem_num > SOCK_ZC_INFO_MAX)
> +			return -EINVAL;
> +
> +		if (in_compat_syscall())
> +			usr_addr = compat_ptr(*(compat_uptr_t *)CMSG_DATA(cmsg));
> +		else
> +			usr_addr = (void __user *)*(void **)CMSG_DATA(cmsg);

The main design issue with this series is this indirection, rather
than passing the array of notifications as cmsg.

This trick circumvents having to deal with compat issues and having to
figure out copy_to_user in ____sys_sendmsg (as msg_control is an
in-kernel copy).

This is quite hacky, from an API design PoV.

As is passing a pointer, but expecting msg_controllen to hold the
length not of the pointer, but of the pointed-to user buffer.

I had also hoped for more significant savings. Especially with the
higher syscall overhead due to meltdown and spectre mitigations vs
when MSG_ZEROCOPY was introduced and I last tried this optimization.

> +		if (!access_ok(usr_addr, cmsg_data_len))
> +			return -EFAULT;
> +
> +		q = &sk->sk_error_queue;
> +		skb_queue_head_init(&local_q);
> +		spin_lock_irqsave(&q->lock, flags);
> +		skb = skb_peek(q);
> +		while (skb && i < zc_info_elem_num) {
> +			struct sk_buff *skb_next = skb_peek_next(skb, q);
> +
> +			serr = SKB_EXT_ERR(skb);
> +			if (serr->ee.ee_errno == 0 &&
> +			    serr->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY) {
> +				zc_info_kern[i].hi = serr->ee.ee_data;
> +				zc_info_kern[i].lo = serr->ee.ee_info;
> +				zc_info_kern[i].zerocopy = !(serr->ee.ee_code
> +							     & SO_EE_CODE_ZEROCOPY_COPIED);
> +				__skb_unlink(skb, q);
> +				__skb_queue_tail(&local_q, skb);
> +				i++;
> +			}
> +			skb = skb_next;
> +		}
> +		spin_unlock_irqrestore(&q->lock, flags);
> +
> +		ret = copy_to_user(usr_addr,
> +				   zc_info_kern,
> +				   i * sizeof(struct zc_info_elem));
> +
> +		if (unlikely(ret)) {
> +			spin_lock_irqsave(&q->lock, flags);
> +			skb_queue_reverse_walk_safe(&local_q, skb, tmp) {
> +				__skb_unlink(skb, &local_q);
> +				__skb_queue_head(q, skb);
> +			}

Can just list_splice_init?

> +			spin_unlock_irqrestore(&q->lock, flags);
> +			return -EFAULT;
> +		}
> +
> +		while ((skb = __skb_dequeue(&local_q)))
> +			consume_skb(skb);
> +		break;
> +	}
>  	default:
>  		return -EINVAL;
>  	}
> --
> 2.20.1
>
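To make the layout under discussion concrete, here is a hedged sketch of what
a caller of this proposed API might look like, modeled on the selftest
pattern. The helper name send_with_zc_notification and the batch size are
illustrative; struct zc_info_elem and SCM_ZC_NOTIFICATION exist only with
this patch applied, and the socket is assumed to already have SO_ZEROCOPY
enabled. Note how the control buffer and cmsg_len are sized to the pointed-to
array even though only a pointer is stored in it, which is exactly the point
being questioned above:

#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <linux/socket.h>	/* struct zc_info_elem, SCM_ZC_NOTIFICATION
				 * (patched headers only) */

#define ZC_NOTIF_BATCH 32	/* illustrative; must be <= SOCK_ZC_INFO_MAX */

static ssize_t send_with_zc_notification(int fd, const void *buf, size_t len,
					 struct zc_info_elem *zc_info)
{
	/* The control buffer is sized to the *array*, although only a
	 * pointer is stored in the cmsg data area. */
	union {
		char buf[CMSG_SPACE(ZC_NOTIF_BATCH * sizeof(struct zc_info_elem))];
		struct cmsghdr align;
	} control = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = control.buf,
		.msg_controllen = sizeof(control.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_ZC_NOTIFICATION;
	/* The kernel derives the element count from cmsg_len, not from
	 * the size of the stored pointer. */
	cmsg->cmsg_len = CMSG_LEN(ZC_NOTIF_BATCH * sizeof(struct zc_info_elem));
	memcpy(CMSG_DATA(cmsg), &zc_info, sizeof(zc_info));

	return sendmsg(fd, &msg, MSG_ZEROCOPY);
}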
On 5/12/24 5:58 PM, Willem de Bruijn wrote:
> zijianzhang@ wrote:
>> From: Zijian Zhang <zijianzhang@bytedance.com>
>>
>> [...]
>
>> +	case SCM_ZC_NOTIFICATION: {
>> +		int ret, i = 0;
>> +		int cmsg_data_len, zc_info_elem_num;
>> +		void __user *usr_addr;
>> +		struct zc_info_elem zc_info_kern[SOCK_ZC_INFO_MAX];
>> +		unsigned long flags;
>> +		struct sk_buff_head *q, local_q;
>> +		struct sk_buff *skb, *tmp;
>> +		struct sock_exterr_skb *serr;
>
> minor: reverse xmas tree
>

Ack.

>> +
>> +		if (!sock_flag(sk, SOCK_ZEROCOPY) || sk->sk_family == PF_RDS)
>> +			return -EINVAL;
>
> Is this mechanism supported for PF_RDS?
> The next patch fails on PF_RDS + '-n'
>

Nice catch! This mechanism does not support PF_RDS, I will update the
selftest code.

>> [...]
>> +		if (in_compat_syscall())
>> +			usr_addr = compat_ptr(*(compat_uptr_t *)CMSG_DATA(cmsg));
>> +		else
>> +			usr_addr = (void __user *)*(void **)CMSG_DATA(cmsg);
>
> The main design issue with this series is this indirection, rather
> than passing the array of notifications as cmsg.
>
> This trick circumvents having to deal with compat issues and having to
> figure out copy_to_user in ____sys_sendmsg (as msg_control is an
> in-kernel copy).
>
> This is quite hacky, from an API design PoV.
>
> As is passing a pointer, but expecting msg_controllen to hold the
> length not of the pointer, but of the pointed-to user buffer.
>
> I had also hoped for more significant savings. Especially with the
> higher syscall overhead due to meltdown and spectre mitigations vs
> when MSG_ZEROCOPY was introduced and I last tried this optimization.
>

Thanks for the summary, totally agree! It's a hard choice to design the
API like this.

>> [...]
>> +		if (unlikely(ret)) {
>> +			spin_lock_irqsave(&q->lock, flags);
>> +			skb_queue_reverse_walk_safe(&local_q, skb, tmp) {
>> +				__skb_unlink(skb, &local_q);
>> +				__skb_queue_head(q, skb);
>> +			}
>
> Can just list_splice_init?
>

Ack.

>> [...]
On 5/13/24 12:47 PM, Zijian Zhang wrote:
> On 5/12/24 5:58 PM, Willem de Bruijn wrote:
>> zijianzhang@ wrote:
>>> [...]
>>
>>> +		if (!sock_flag(sk, SOCK_ZEROCOPY) || sk->sk_family == PF_RDS)
>>> +			return -EINVAL;
>>
>> Is this mechanism supported for PF_RDS?
>> The next patch fails on PF_RDS + '-n'
>>
>
> Nice catch! This mechanism does not support PF_RDS, I will update the
> selftest code.
>

PF_RDS does not use the error queue (MSG_ERRQUEUE) to store the info,
thus it is not supported by this patch. I will leave it as "unsupported"
in the "selftest -n" for now. If possible, I may add support for PF_RDS
in another patch set in the future.

> [...]
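For reference, the rollback that both replies agree to simplify would
collapse to a single splice. A minimal sketch against the patch context above
(an assumption about the eventual respin, using skb_queue_splice_init(), the
sk_buff_head analogue of the suggested list_splice_init(); it moves the whole
local list to the head of @q in O(1), preserving order, and re-initializes
@local_q):

	if (unlikely(ret)) {
		spin_lock_irqsave(&q->lock, flags);
		/* Return every collected skb to the head of the error
		 * queue in one operation instead of walking the list. */
		skb_queue_splice_init(&local_q, q);
		spin_unlock_irqrestore(&q->lock, flags);
		return -EFAULT;
	}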
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index e94f621903fe..7761a4e0ea2c 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -140,6 +140,8 @@
 #define SO_PASSPIDFD		76
 #define SO_PEERPIDFD		77
 
+#define SCM_ZC_NOTIFICATION	78
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 60ebaed28a4c..89edc51380f0 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -151,6 +151,8 @@
 #define SO_PASSPIDFD		76
 #define SO_PEERPIDFD		77
 
+#define SCM_ZC_NOTIFICATION	78
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index be264c2b1a11..2911b43e6a9d 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -132,6 +132,8 @@
 #define SO_PASSPIDFD		0x404A
 #define SO_PEERPIDFD		0x404B
 
+#define SCM_ZC_NOTIFICATION	0x404C
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 682da3714686..dc045e87cc8e 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -133,6 +133,8 @@
 #define SO_PASSPIDFD		0x0055
 #define SO_PEERPIDFD		0x0056
 
+#define SCM_ZC_NOTIFICATION	0x0057
+
 #if !defined(__KERNEL__)
 
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 8ce8a39a1e5f..7474c8a244bc 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -135,6 +135,8 @@
 #define SO_PASSPIDFD		76
 #define SO_PEERPIDFD		77
 
+#define SCM_ZC_NOTIFICATION	78
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h
index d3fcd3b5ec53..15cec8819f34 100644
--- a/include/uapi/linux/socket.h
+++ b/include/uapi/linux/socket.h
@@ -2,6 +2,8 @@
 #ifndef _UAPI_LINUX_SOCKET_H
 #define _UAPI_LINUX_SOCKET_H
 
+#include <linux/types.h>
+
 /*
  * Desired design of maximum size and alignment (see RFC2553)
  */
@@ -35,4 +37,12 @@ struct __kernel_sockaddr_storage {
 #define SOCK_TXREHASH_DISABLED	0
 #define SOCK_TXREHASH_ENABLED	1
 
+#define SOCK_ZC_INFO_MAX 128
+
+struct zc_info_elem {
+	__u32 lo;
+	__u32 hi;
+	__u8 zerocopy;
+};
+
 #endif /* _UAPI_LINUX_SOCKET_H */
diff --git a/net/core/sock.c b/net/core/sock.c
index 8d6e638b5426..15da609be026 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2842,6 +2842,74 @@ int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
 	case SCM_RIGHTS:
 	case SCM_CREDENTIALS:
 		break;
+	case SCM_ZC_NOTIFICATION: {
+		int ret, i = 0;
+		int cmsg_data_len, zc_info_elem_num;
+		void __user *usr_addr;
+		struct zc_info_elem zc_info_kern[SOCK_ZC_INFO_MAX];
+		unsigned long flags;
+		struct sk_buff_head *q, local_q;
+		struct sk_buff *skb, *tmp;
+		struct sock_exterr_skb *serr;
+
+		if (!sock_flag(sk, SOCK_ZEROCOPY) || sk->sk_family == PF_RDS)
+			return -EINVAL;
+
+		cmsg_data_len = cmsg->cmsg_len - sizeof(struct cmsghdr);
+		if (cmsg_data_len % sizeof(struct zc_info_elem))
+			return -EINVAL;
+
+		zc_info_elem_num = cmsg_data_len / sizeof(struct zc_info_elem);
+		if (!zc_info_elem_num || zc_info_elem_num > SOCK_ZC_INFO_MAX)
+			return -EINVAL;
+
+		if (in_compat_syscall())
+			usr_addr = compat_ptr(*(compat_uptr_t *)CMSG_DATA(cmsg));
+		else
+			usr_addr = (void __user *)*(void **)CMSG_DATA(cmsg);
+		if (!access_ok(usr_addr, cmsg_data_len))
+			return -EFAULT;
+
+		q = &sk->sk_error_queue;
+		skb_queue_head_init(&local_q);
+		spin_lock_irqsave(&q->lock, flags);
+		skb = skb_peek(q);
+		while (skb && i < zc_info_elem_num) {
+			struct sk_buff *skb_next = skb_peek_next(skb, q);
+
+			serr = SKB_EXT_ERR(skb);
+			if (serr->ee.ee_errno == 0 &&
+			    serr->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY) {
+				zc_info_kern[i].hi = serr->ee.ee_data;
+				zc_info_kern[i].lo = serr->ee.ee_info;
+				zc_info_kern[i].zerocopy = !(serr->ee.ee_code
+							     & SO_EE_CODE_ZEROCOPY_COPIED);
+				__skb_unlink(skb, q);
+				__skb_queue_tail(&local_q, skb);
+				i++;
+			}
+			skb = skb_next;
+		}
+		spin_unlock_irqrestore(&q->lock, flags);
+
+		ret = copy_to_user(usr_addr,
+				   zc_info_kern,
+				   i * sizeof(struct zc_info_elem));
+
+		if (unlikely(ret)) {
+			spin_lock_irqsave(&q->lock, flags);
+			skb_queue_reverse_walk_safe(&local_q, skb, tmp) {
+				__skb_unlink(skb, &local_q);
+				__skb_queue_head(q, skb);
+			}
+			spin_unlock_irqrestore(&q->lock, flags);
+			return -EFAULT;
+		}
+
+		while ((skb = __skb_dequeue(&local_q)))
+			consume_skb(skb);
+		break;
+	}
 	default:
 		return -EINVAL;
 	}
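After sendmsg() returns, the caller consumes the array it passed in. A hedged
userspace sketch of that side, following the documented MSG_ZEROCOPY
semantics the patch reuses: [lo, hi] is an inclusive range of zerocopy send
counters whose buffers the kernel has released, and a cleared zerocopy flag
means the kernel fell back to copying. Note this v3 does not report back how
many elements were filled, so the count n and the buffer_release() helper are
assumptions about how an application might track completions (e.g. via a
sentinel value pre-filled in the array):

#include <stdint.h>
#include <stdio.h>

struct zc_info_elem {		/* mirrors the UAPI struct above */
	uint32_t lo;
	uint32_t hi;
	uint8_t zerocopy;
};

/* Hypothetical application hook: the buffer backing send number
 * send_idx may be reused from here on. */
extern void buffer_release(uint32_t send_idx);

static void handle_zc_info(const struct zc_info_elem *zc, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		uint32_t idx;

		for (idx = zc[i].lo; idx <= zc[i].hi; idx++)
			buffer_release(idx);
		if (!zc[i].zerocopy)
			/* The kernel copied this range, so zerocopy
			 * brought no benefit for these sends. */
			fprintf(stderr, "range [%u, %u] was copied\n",
				zc[i].lo, zc[i].hi);
	}
}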