Message ID | 1728532691-20044-1-git-send-email-alibuda@linux.alibaba.com (mailing list archive) |
---|---|
State | Rejected |
Delegated to: | Netdev Maintainers |
Headers | show |
Series | [net-next] net/smc: Introduce a hook to modify syn_smc at runtime | expand |
On Wed, Oct 9, 2024 at 8:58 PM D. Wythe <alibuda@linux.alibaba.com> wrote: > > > +__bpf_hook_start(); > + > +__weak noinline int select_syn_smc(const struct sock *sk, struct sockaddr *peer) > +{ > + return 1; > +} > + > +__bpf_hook_end(); > + > int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb) > { > struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); > @@ -156,19 +165,43 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk, > return NULL; > } > > -static bool smc_hs_congested(const struct sock *sk) > +static void smc_openreq_init(struct request_sock *req, > + const struct tcp_options_received *rx_opt, > + struct sk_buff *skb, const struct sock *sk) > { > + struct inet_request_sock *ireq = inet_rsk(req); > + struct sockaddr_storage rmt_sockaddr = {}; > const struct smc_sock *smc; > > smc = smc_clcsock_user_data(sk); > > if (!smc) > - return true; > + return; > > - if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) > - return true; > + if (smc->limit_smc_hs && workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) > + goto out_no_smc; > > - return false; > + rmt_sockaddr.ss_family = sk->sk_family; > + > + if (rmt_sockaddr.ss_family == AF_INET) { > + struct sockaddr_in *rmt4_sockaddr = (struct sockaddr_in *)&rmt_sockaddr; > + > + rmt4_sockaddr->sin_addr.s_addr = ireq->ir_rmt_addr; > + rmt4_sockaddr->sin_port = ireq->ir_rmt_port; > +#if IS_ENABLED(CONFIG_IPV6) > + } else { > + struct sockaddr_in6 *rmt6_sockaddr = (struct sockaddr_in6 *)&rmt_sockaddr; > + > + rmt6_sockaddr->sin6_addr = ireq->ir_v6_rmt_addr; > + rmt6_sockaddr->sin6_port = ireq->ir_rmt_port; > +#endif /* CONFIG_IPV6 */ > + } > + > + ireq->smc_ok = select_syn_smc(sk, (struct sockaddr *)&rmt_sockaddr); > + return; > +out_no_smc: > + ireq->smc_ok = 0; > + return; > } > > struct smc_hashinfo smc_v4_hashinfo = { > @@ -1671,7 +1704,7 @@ int smc_connect(struct socket *sock, struct sockaddr *addr, > } > > smc_copy_sock_settings_to_clc(smc); > - 
tcp_sk(smc->clcsock->sk)->syn_smc = 1; > + tcp_sk(smc->clcsock->sk)->syn_smc = select_syn_smc(sk, addr); > if (smc->connect_nonblock) { > rc = -EALREADY; > goto out; > @@ -2650,8 +2683,7 @@ int smc_listen(struct socket *sock, int backlog) > > inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops; > > - if (smc->limit_smc_hs) > - tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; > + tcp_sk(smc->clcsock->sk)->smc_openreq_init = smc_openreq_init; > > rc = kernel_listen(smc->clcsock, backlog); > if (rc) { > @@ -3475,6 +3507,24 @@ static void __net_exit smc_net_stat_exit(struct net *net) > .exit = smc_net_stat_exit, > }; > > +#if IS_ENABLED(CONFIG_BPF_SYSCALL) > +BTF_SET8_START(bpf_smc_fmodret_ids) > +BTF_ID_FLAGS(func, select_syn_smc) > +BTF_SET8_END(bpf_smc_fmodret_ids) > + > +static const struct btf_kfunc_id_set bpf_smc_fmodret_set = { > + .owner = THIS_MODULE, > + .set = &bpf_smc_fmodret_ids, > +}; > + > +static int bpf_smc_kfunc_init(void) > +{ > + return register_btf_fmodret_id_set(&bpf_smc_fmodret_set); > +} fmodret was an approach that hid-bpf took initially, but eventually they removed it all and switched to struct-ops approach. Please learn that lesson. Use struct_ops from the beginning. I did a presentation recently explaining the motivation behind struct_ops and tips on how to extend the kernel. TLDR: the step one is to design the extension _without_ bpf. The interface should be usable for kernel modules. And then when you have *_ops style api in place the bpf progs will plug-in without extra work. Slides: https://github.com/4ast/docs/blob/main/BPF%20struct-ops.pdf
On 10/11/24 12:21 AM, Alexei Starovoitov wrote: > On Wed, Oct 9, 2024 at 8:58 PM D. Wythe <alibuda@linux.alibaba.com> wrote: >> >> >> +__bpf_hook_start(); >> + >> +__weak noinline int select_syn_smc(const struct sock *sk, struct sockaddr *peer) >> +{ >> + return 1; >> +} >> + >> +__bpf_hook_end(); >> + >> int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb) >> { >> struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); >> @@ -156,19 +165,43 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk, >> return NULL; >> } >> >> -static bool smc_hs_congested(const struct sock *sk) >> +static void smc_openreq_init(struct request_sock *req, >> + const struct tcp_options_received *rx_opt, >> + struct sk_buff *skb, const struct sock *sk) >> { >> + struct inet_request_sock *ireq = inet_rsk(req); >> + struct sockaddr_storage rmt_sockaddr = {}; >> const struct smc_sock *smc; >> >> smc = smc_clcsock_user_data(sk); >> >> if (!smc) >> - return true; >> + return; >> >> - if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) >> - return true; >> + if (smc->limit_smc_hs && workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) >> + goto out_no_smc; >> >> - return false; >> + rmt_sockaddr.ss_family = sk->sk_family; >> + >> + if (rmt_sockaddr.ss_family == AF_INET) { >> + struct sockaddr_in *rmt4_sockaddr = (struct sockaddr_in *)&rmt_sockaddr; >> + >> + rmt4_sockaddr->sin_addr.s_addr = ireq->ir_rmt_addr; >> + rmt4_sockaddr->sin_port = ireq->ir_rmt_port; >> +#if IS_ENABLED(CONFIG_IPV6) >> + } else { >> + struct sockaddr_in6 *rmt6_sockaddr = (struct sockaddr_in6 *)&rmt_sockaddr; >> + >> + rmt6_sockaddr->sin6_addr = ireq->ir_v6_rmt_addr; >> + rmt6_sockaddr->sin6_port = ireq->ir_rmt_port; >> +#endif /* CONFIG_IPV6 */ >> + } >> + >> + ireq->smc_ok = select_syn_smc(sk, (struct sockaddr *)&rmt_sockaddr); >> + return; >> +out_no_smc: >> + ireq->smc_ok = 0; >> + return; >> } >> >> struct smc_hashinfo smc_v4_hashinfo = { >> @@ -1671,7 +1704,7 @@ int 
smc_connect(struct socket *sock, struct sockaddr *addr, >> } >> >> smc_copy_sock_settings_to_clc(smc); >> - tcp_sk(smc->clcsock->sk)->syn_smc = 1; >> + tcp_sk(smc->clcsock->sk)->syn_smc = select_syn_smc(sk, addr); >> if (smc->connect_nonblock) { >> rc = -EALREADY; >> goto out; >> @@ -2650,8 +2683,7 @@ int smc_listen(struct socket *sock, int backlog) >> >> inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops; >> >> - if (smc->limit_smc_hs) >> - tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; >> + tcp_sk(smc->clcsock->sk)->smc_openreq_init = smc_openreq_init; >> >> rc = kernel_listen(smc->clcsock, backlog); >> if (rc) { >> @@ -3475,6 +3507,24 @@ static void __net_exit smc_net_stat_exit(struct net *net) >> .exit = smc_net_stat_exit, >> }; >> >> +#if IS_ENABLED(CONFIG_BPF_SYSCALL) >> +BTF_SET8_START(bpf_smc_fmodret_ids) >> +BTF_ID_FLAGS(func, select_syn_smc) >> +BTF_SET8_END(bpf_smc_fmodret_ids) >> + >> +static const struct btf_kfunc_id_set bpf_smc_fmodret_set = { >> + .owner = THIS_MODULE, >> + .set = &bpf_smc_fmodret_ids, >> +}; >> + >> +static int bpf_smc_kfunc_init(void) >> +{ >> + return register_btf_fmodret_id_set(&bpf_smc_fmodret_set); >> +} > > fmodret was an approach that hid-bpf took initially, > but eventually they removed it all and switched to struct-ops approach. > Please learn that lesson. > Use struct_ops from the beginning. > > I did a presentation recently explaining the motivation behind > struct_ops and tips on how to extend the kernel. > TLDR: the step one is to design the extension _without_ bpf. > The interface should be usable for kernel modules. > And then when you have *_ops style api in place > the bpf progs will plug-in without extra work. > > Slides: > https://github.com/4ast/docs/blob/main/BPF%20struct-ops.pdf Hi Alexei, Thanks very much for your suggestion. In fact, I tried struct_ops in SMC about a year ago. 
Unfortunately, at that time struct_ops did not support registration from modules, and I had to move some smc dependencies into bpf, which met with community opposition. However, I noticed that this feature is now supported, so perhaps this is an opportunity. On the other hand, given the current functionality, I wonder if struct_ops might be overkill. I haven't been able to come up with a suitable abstraction to define this ops, and in the future, it might only ever contain this one callback (select_syn_smc). Looking forward to your advice. Thanks, D. Wythe
On Thu, Oct 10, 2024 at 11:44 PM D. Wythe <alibuda@linux.alibaba.com> wrote: > > > > On 10/11/24 12:21 AM, Alexei Starovoitov wrote: > > On Wed, Oct 9, 2024 at 8:58 PM D. Wythe <alibuda@linux.alibaba.com> wrote: > >> > >> > >> +__bpf_hook_start(); > >> + > >> +__weak noinline int select_syn_smc(const struct sock *sk, struct sockaddr *peer) > >> +{ > >> + return 1; > >> +} > >> + > >> +__bpf_hook_end(); > >> + > >> int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb) > >> { > >> struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); > >> @@ -156,19 +165,43 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk, > >> return NULL; > >> } > >> > >> -static bool smc_hs_congested(const struct sock *sk) > >> +static void smc_openreq_init(struct request_sock *req, > >> + const struct tcp_options_received *rx_opt, > >> + struct sk_buff *skb, const struct sock *sk) > >> { > >> + struct inet_request_sock *ireq = inet_rsk(req); > >> + struct sockaddr_storage rmt_sockaddr = {}; > >> const struct smc_sock *smc; > >> > >> smc = smc_clcsock_user_data(sk); > >> > >> if (!smc) > >> - return true; > >> + return; > >> > >> - if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) > >> - return true; > >> + if (smc->limit_smc_hs && workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) > >> + goto out_no_smc; > >> > >> - return false; > >> + rmt_sockaddr.ss_family = sk->sk_family; > >> + > >> + if (rmt_sockaddr.ss_family == AF_INET) { > >> + struct sockaddr_in *rmt4_sockaddr = (struct sockaddr_in *)&rmt_sockaddr; > >> + > >> + rmt4_sockaddr->sin_addr.s_addr = ireq->ir_rmt_addr; > >> + rmt4_sockaddr->sin_port = ireq->ir_rmt_port; > >> +#if IS_ENABLED(CONFIG_IPV6) > >> + } else { > >> + struct sockaddr_in6 *rmt6_sockaddr = (struct sockaddr_in6 *)&rmt_sockaddr; > >> + > >> + rmt6_sockaddr->sin6_addr = ireq->ir_v6_rmt_addr; > >> + rmt6_sockaddr->sin6_port = ireq->ir_rmt_port; > >> +#endif /* CONFIG_IPV6 */ > >> + } > >> + > >> + ireq->smc_ok = 
select_syn_smc(sk, (struct sockaddr *)&rmt_sockaddr); > >> + return; > >> +out_no_smc: > >> + ireq->smc_ok = 0; > >> + return; > >> } > >> > >> struct smc_hashinfo smc_v4_hashinfo = { > >> @@ -1671,7 +1704,7 @@ int smc_connect(struct socket *sock, struct sockaddr *addr, > >> } > >> > >> smc_copy_sock_settings_to_clc(smc); > >> - tcp_sk(smc->clcsock->sk)->syn_smc = 1; > >> + tcp_sk(smc->clcsock->sk)->syn_smc = select_syn_smc(sk, addr); > >> if (smc->connect_nonblock) { > >> rc = -EALREADY; > >> goto out; > >> @@ -2650,8 +2683,7 @@ int smc_listen(struct socket *sock, int backlog) > >> > >> inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops; > >> > >> - if (smc->limit_smc_hs) > >> - tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; > >> + tcp_sk(smc->clcsock->sk)->smc_openreq_init = smc_openreq_init; > >> > >> rc = kernel_listen(smc->clcsock, backlog); > >> if (rc) { > >> @@ -3475,6 +3507,24 @@ static void __net_exit smc_net_stat_exit(struct net *net) > >> .exit = smc_net_stat_exit, > >> }; > >> > >> +#if IS_ENABLED(CONFIG_BPF_SYSCALL) > >> +BTF_SET8_START(bpf_smc_fmodret_ids) > >> +BTF_ID_FLAGS(func, select_syn_smc) > >> +BTF_SET8_END(bpf_smc_fmodret_ids) > >> + > >> +static const struct btf_kfunc_id_set bpf_smc_fmodret_set = { > >> + .owner = THIS_MODULE, > >> + .set = &bpf_smc_fmodret_ids, > >> +}; > >> + > >> +static int bpf_smc_kfunc_init(void) > >> +{ > >> + return register_btf_fmodret_id_set(&bpf_smc_fmodret_set); > >> +} > > > > fmodret was an approach that hid-bpf took initially, > > but eventually they removed it all and switched to struct-ops approach. > > Please learn that lesson. > > Use struct_ops from the beginning. > > > > I did a presentation recently explaining the motivation behind > > struct_ops and tips on how to extend the kernel. > > TLDR: the step one is to design the extension _without_ bpf. > > The interface should be usable for kernel modules. 
> > And then when you have *_ops style api in place > > the bpf progs will plug-in without extra work. > > > > Slides: > > https://github.com/4ast/docs/blob/main/BPF%20struct-ops.pdf > > > Hi Alexei, > > Thanks very much for your suggestion. > > In fact, I tried struct_ops in SMC about a year ago. Unfortunately, at that time struct_ops did not > support registration from modules, and I had to move some smc dependencies into bpf, which met with > community opposition. However, I noticed that this feature is now supported, so perhaps this is an > opportunity. > > But on the other hand, given the current functionality, I wonder if struct_ops might be an overkill. > I haven't been able to come up with a suitable abstraction to define this ops, and in the future, > this ops might only contain the very one callback (select_syn_smc). > > Looking forward for your advises. I guess I wasn't clear. It's a Nack to the current fmodret approach.
On 10/11/24 11:37 PM, Alexei Starovoitov wrote: > On Thu, Oct 10, 2024 at 11:44 PM D. Wythe <alibuda@linux.alibaba.com> wrote: >> >> >> >> On 10/11/24 12:21 AM, Alexei Starovoitov wrote: >>> On Wed, Oct 9, 2024 at 8:58 PM D. Wythe <alibuda@linux.alibaba.com> wrote: >>>> >>>> >>>> +__bpf_hook_start(); >>>> + >>>> +__weak noinline int select_syn_smc(const struct sock *sk, struct sockaddr *peer) >>>> +{ >>>> + return 1; >>>> +} >>>> + >>>> +__bpf_hook_end(); >>>> + >>>> int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb) >>>> { >>>> struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); >>>> @@ -156,19 +165,43 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk, >>>> return NULL; >>>> } >>>> >>>> -static bool smc_hs_congested(const struct sock *sk) >>>> +static void smc_openreq_init(struct request_sock *req, >>>> + const struct tcp_options_received *rx_opt, >>>> + struct sk_buff *skb, const struct sock *sk) >>>> { >>>> + struct inet_request_sock *ireq = inet_rsk(req); >>>> + struct sockaddr_storage rmt_sockaddr = {}; >>>> const struct smc_sock *smc; >>>> >>>> smc = smc_clcsock_user_data(sk); >>>> >>>> if (!smc) >>>> - return true; >>>> + return; >>>> >>>> - if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) >>>> - return true; >>>> + if (smc->limit_smc_hs && workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) >>>> + goto out_no_smc; >>>> >>>> - return false; >>>> + rmt_sockaddr.ss_family = sk->sk_family; >>>> + >>>> + if (rmt_sockaddr.ss_family == AF_INET) { >>>> + struct sockaddr_in *rmt4_sockaddr = (struct sockaddr_in *)&rmt_sockaddr; >>>> + >>>> + rmt4_sockaddr->sin_addr.s_addr = ireq->ir_rmt_addr; >>>> + rmt4_sockaddr->sin_port = ireq->ir_rmt_port; >>>> +#if IS_ENABLED(CONFIG_IPV6) >>>> + } else { >>>> + struct sockaddr_in6 *rmt6_sockaddr = (struct sockaddr_in6 *)&rmt_sockaddr; >>>> + >>>> + rmt6_sockaddr->sin6_addr = ireq->ir_v6_rmt_addr; >>>> + rmt6_sockaddr->sin6_port = ireq->ir_rmt_port; >>>> +#endif /* CONFIG_IPV6 */ 
>>>> + } >>>> + >>>> + ireq->smc_ok = select_syn_smc(sk, (struct sockaddr *)&rmt_sockaddr); >>>> + return; >>>> +out_no_smc: >>>> + ireq->smc_ok = 0; >>>> + return; >>>> } >>>> >>>> struct smc_hashinfo smc_v4_hashinfo = { >>>> @@ -1671,7 +1704,7 @@ int smc_connect(struct socket *sock, struct sockaddr *addr, >>>> } >>>> >>>> smc_copy_sock_settings_to_clc(smc); >>>> - tcp_sk(smc->clcsock->sk)->syn_smc = 1; >>>> + tcp_sk(smc->clcsock->sk)->syn_smc = select_syn_smc(sk, addr); >>>> if (smc->connect_nonblock) { >>>> rc = -EALREADY; >>>> goto out; >>>> @@ -2650,8 +2683,7 @@ int smc_listen(struct socket *sock, int backlog) >>>> >>>> inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops; >>>> >>>> - if (smc->limit_smc_hs) >>>> - tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; >>>> + tcp_sk(smc->clcsock->sk)->smc_openreq_init = smc_openreq_init; >>>> >>>> rc = kernel_listen(smc->clcsock, backlog); >>>> if (rc) { >>>> @@ -3475,6 +3507,24 @@ static void __net_exit smc_net_stat_exit(struct net *net) >>>> .exit = smc_net_stat_exit, >>>> }; >>>> >>>> +#if IS_ENABLED(CONFIG_BPF_SYSCALL) >>>> +BTF_SET8_START(bpf_smc_fmodret_ids) >>>> +BTF_ID_FLAGS(func, select_syn_smc) >>>> +BTF_SET8_END(bpf_smc_fmodret_ids) >>>> + >>>> +static const struct btf_kfunc_id_set bpf_smc_fmodret_set = { >>>> + .owner = THIS_MODULE, >>>> + .set = &bpf_smc_fmodret_ids, >>>> +}; >>>> + >>>> +static int bpf_smc_kfunc_init(void) >>>> +{ >>>> + return register_btf_fmodret_id_set(&bpf_smc_fmodret_set); >>>> +} >>> >>> fmodret was an approach that hid-bpf took initially, >>> but eventually they removed it all and switched to struct-ops approach. >>> Please learn that lesson. >>> Use struct_ops from the beginning. >>> >>> I did a presentation recently explaining the motivation behind >>> struct_ops and tips on how to extend the kernel. >>> TLDR: the step one is to design the extension _without_ bpf. >>> The interface should be usable for kernel modules. 
>>> And then when you have *_ops style api in place >>> the bpf progs will plug-in without extra work. >>> >>> Slides: >>> https://github.com/4ast/docs/blob/main/BPF%20struct-ops.pdf >> >> >> Hi Alexei, >> >> Thanks very much for your suggestion. >> >> In fact, I tried struct_ops in SMC about a year ago. Unfortunately, at that time struct_ops did not >> support registration from modules, and I had to move some smc dependencies into bpf, which met with >> community opposition. However, I noticed that this feature is now supported, so perhaps this is an >> opportunity. >> >> But on the other hand, given the current functionality, I wonder if struct_ops might be an overkill. >> I haven't been able to come up with a suitable abstraction to define this ops, and in the future, >> this ops might only contain the very one callback (select_syn_smc). >> >> Looking forward for your advises. > > I guess I wasn't clear. It's a Nack to the current fmodret approach. Understood, we do not oppose the use of struct_ops, especially when modules registration was already supported.
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 6a5e08b..d028d76 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -478,7 +478,9 @@ struct tcp_sock { #endif #if IS_ENABLED(CONFIG_SMC) bool syn_smc; /* SYN includes SMC */ - bool (*smc_hs_congested)(const struct sock *sk); + void (*smc_openreq_init)(struct request_sock *req, + const struct tcp_options_received *rx_opt, + struct sk_buff *skb, const struct sock *sk); #endif #if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index cc05ec1..15fe8b9 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -7036,8 +7036,8 @@ static void tcp_openreq_init(struct request_sock *req, ireq->ir_num = ntohs(tcp_hdr(skb)->dest); ireq->ir_mark = inet_request_mark(sk, skb); #if IS_ENABLED(CONFIG_SMC) - ireq->smc_ok = rx_opt->smc_ok && !(tcp_sk(sk)->smc_hs_congested && - tcp_sk(sk)->smc_hs_congested(sk)); + if (rx_opt->smc_ok && tcp_sk(sk)->smc_openreq_init) + tcp_sk(sk)->smc_openreq_init(req, rx_opt, skb, sk); #endif } diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 0316217..550799c 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -70,6 +70,15 @@ static void smc_tcp_listen_work(struct work_struct *); static void smc_connect_work(struct work_struct *); +__bpf_hook_start(); + +__weak noinline int select_syn_smc(const struct sock *sk, struct sockaddr *peer) +{ + return 1; +} + +__bpf_hook_end(); + int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb) { struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb); @@ -156,19 +165,43 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk, return NULL; } -static bool smc_hs_congested(const struct sock *sk) +static void smc_openreq_init(struct request_sock *req, + const struct tcp_options_received *rx_opt, + struct sk_buff *skb, const struct sock *sk) { + struct inet_request_sock *ireq = inet_rsk(req); + struct sockaddr_storage rmt_sockaddr = {}; const 
struct smc_sock *smc; smc = smc_clcsock_user_data(sk); if (!smc) - return true; + return; - if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) - return true; + if (smc->limit_smc_hs && workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq)) + goto out_no_smc; - return false; + rmt_sockaddr.ss_family = sk->sk_family; + + if (rmt_sockaddr.ss_family == AF_INET) { + struct sockaddr_in *rmt4_sockaddr = (struct sockaddr_in *)&rmt_sockaddr; + + rmt4_sockaddr->sin_addr.s_addr = ireq->ir_rmt_addr; + rmt4_sockaddr->sin_port = ireq->ir_rmt_port; +#if IS_ENABLED(CONFIG_IPV6) + } else { + struct sockaddr_in6 *rmt6_sockaddr = (struct sockaddr_in6 *)&rmt_sockaddr; + + rmt6_sockaddr->sin6_addr = ireq->ir_v6_rmt_addr; + rmt6_sockaddr->sin6_port = ireq->ir_rmt_port; +#endif /* CONFIG_IPV6 */ + } + + ireq->smc_ok = select_syn_smc(sk, (struct sockaddr *)&rmt_sockaddr); + return; +out_no_smc: + ireq->smc_ok = 0; + return; } struct smc_hashinfo smc_v4_hashinfo = { @@ -1671,7 +1704,7 @@ int smc_connect(struct socket *sock, struct sockaddr *addr, } smc_copy_sock_settings_to_clc(smc); - tcp_sk(smc->clcsock->sk)->syn_smc = 1; + tcp_sk(smc->clcsock->sk)->syn_smc = select_syn_smc(sk, addr); if (smc->connect_nonblock) { rc = -EALREADY; goto out; @@ -2650,8 +2683,7 @@ int smc_listen(struct socket *sock, int backlog) inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops; - if (smc->limit_smc_hs) - tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested; + tcp_sk(smc->clcsock->sk)->smc_openreq_init = smc_openreq_init; rc = kernel_listen(smc->clcsock, backlog); if (rc) { @@ -3475,6 +3507,24 @@ static void __net_exit smc_net_stat_exit(struct net *net) .exit = smc_net_stat_exit, }; +#if IS_ENABLED(CONFIG_BPF_SYSCALL) +BTF_SET8_START(bpf_smc_fmodret_ids) +BTF_ID_FLAGS(func, select_syn_smc) +BTF_SET8_END(bpf_smc_fmodret_ids) + +static const struct btf_kfunc_id_set bpf_smc_fmodret_set = { + .owner = THIS_MODULE, + .set = &bpf_smc_fmodret_ids, +}; + +static int bpf_smc_kfunc_init(void) +{ + return 
register_btf_fmodret_id_set(&bpf_smc_fmodret_set); +} +#else +static inline int bpf_smc_kfunc_init(void) { return 0; } +#endif /* CONFIG_BPF_SYSCALL */ + static int __init smc_init(void) { int rc; @@ -3574,8 +3624,17 @@ static int __init smc_init(void) pr_err("%s: smc_inet_init fails with %d\n", __func__, rc); goto out_ulp; } + + rc = bpf_smc_kfunc_init(); + if (rc) { + pr_err("%s: bpf_smc_kfunc_init fails with %d\n", __func__, rc); + goto out_inet; + } + static_branch_enable(&tcp_have_smc); return 0; +out_inet: + smc_inet_exit(); out_ulp: tcp_unregister_ulp(&smc_ulp_ops); out_lo: