Message ID | 20240426065143.4667-7-nbd@nbd.name (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | Netdev Maintainers |
Headers | show |
Series | Add TCP fraglist GRO support | expand |
On Fri, Apr 26, 2024 at 8:51 AM Felix Fietkau <nbd@nbd.name> wrote: > > When forwarding TCP after GRO, software segmentation is very expensive, > especially when the checksum needs to be recalculated. > One case where that's currently unavoidable is when routing packets over > PPPoE. Performance improves significantly when using fraglist GRO > implemented in the same way as for UDP. > > When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established > socket in the same netns as the receiving device. While this may not > cover all relevant use cases in multi-netns configurations, it should be > good enough for most configurations that need this. > > Here's a measurement of running 2 TCP streams through a MediaTek MT7622 > device (2-core Cortex-A53), which runs NAT with flow offload enabled from > one ethernet port to PPPoE on another ethernet port + cake qdisc set to > 1Gbps. > > rx-gro-list off: 630 Mbit/s, CPU 35% idle > rx-gro-list on: 770 Mbit/s, CPU 40% idle > > Signed-off-by: Felix Fietkau <nbd@nbd.name> > --- > net/ipv4/tcp_offload.c | 30 ++++++++++++++++++++++++++++++ > net/ipv6/tcpv6_offload.c | 33 +++++++++++++++++++++++++++++++++ > 2 files changed, 63 insertions(+) > > diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c > index ee5403760775..2ae83f4394dc 100644 > --- a/net/ipv4/tcp_offload.c > +++ b/net/ipv4/tcp_offload.c > @@ -406,6 +406,34 @@ void tcp_gro_complete(struct sk_buff *skb) > } > EXPORT_SYMBOL(tcp_gro_complete); > > +static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb, > + struct tcphdr *th) > +{ > + const struct iphdr *iph = skb_gro_network_header(skb); > + struct net *net = dev_net(skb->dev); Could you defer the initializations of iph and net after the NETIF_F_GRO_FRAGLIST check ? dev_net() has an implicit READ_ONCE() ... 
> + struct sk_buff *p; > + struct sock *sk; > + int iif, sdif; > + > + if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST)) > + return; > + > + p = tcp_gro_lookup(head, th); > + if (p) { > + NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist; > + return; > + } > + > + inet_get_iif_sdif(skb, &iif, &sdif); > + sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, > + iph->saddr, th->source, > + iph->daddr, ntohs(th->dest), > + iif, sdif); > + NAPI_GRO_CB(skb)->is_flist = !sk; > + if (sk) > + sock_put(sk); > +} > + > INDIRECT_CALLABLE_SCOPE > struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb) > { > @@ -421,6 +449,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb) > if (!th) > goto flush; > > + tcp4_check_fraglist_gro(head, skb, th); > + > return tcp_gro_receive(head, skb, th); > > flush: > diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c > index c01ace2e9ff0..1ab45cca3936 100644 > --- a/net/ipv6/tcpv6_offload.c > +++ b/net/ipv6/tcpv6_offload.c > @@ -7,12 +7,43 @@ > */ > #include <linux/indirect_call_wrapper.h> > #include <linux/skbuff.h> > +#include <net/inet6_hashtables.h> > #include <net/gro.h> > #include <net/protocol.h> > #include <net/tcp.h> > #include <net/ip6_checksum.h> > #include "ip6_offload.h" > > +static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb, > + struct tcphdr *th) > +{ > +#if IS_ENABLED(CONFIG_IPV6) > + const struct ipv6hdr *hdr = skb_gro_network_header(skb); > + struct net *net = dev_net(skb->dev); Same remark here. 
> + struct sk_buff *p; > + struct sock *sk; > + int iif, sdif; > + > + if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST)) > + return; > + > + p = tcp_gro_lookup(head, th); > + if (p) { > + NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist; > + return; > + } > + > + inet6_get_iif_sdif(skb, &iif, &sdif); > + sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, > + &hdr->saddr, th->source, > + &hdr->daddr, ntohs(th->dest), > + iif, sdif); > + NAPI_GRO_CB(skb)->is_flist = !sk; > + if (sk) > + sock_put(sk); > +#endif /* IS_ENABLED(CONFIG_IPV6) */ > +} > + > INDIRECT_CALLABLE_SCOPE > struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb) > { > @@ -28,6 +59,8 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb) > if (!th) > goto flush; > > + tcp6_check_fraglist_gro(head, skb, th); > + > return tcp_gro_receive(head, skb, th); > > flush: > -- > 2.44.0 >
On 26.04.24 09:32, Eric Dumazet wrote: > On Fri, Apr 26, 2024 at 8:51 AM Felix Fietkau <nbd@nbd.name> wrote: >> >> When forwarding TCP after GRO, software segmentation is very expensive, >> especially when the checksum needs to be recalculated. >> One case where that's currently unavoidable is when routing packets over >> PPPoE. Performance improves significantly when using fraglist GRO >> implemented in the same way as for UDP. >> >> When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established >> socket in the same netns as the receiving device. While this may not >> cover all relevant use cases in multi-netns configurations, it should be >> good enough for most configurations that need this. >> >> Here's a measurement of running 2 TCP streams through a MediaTek MT7622 >> device (2-core Cortex-A53), which runs NAT with flow offload enabled from >> one ethernet port to PPPoE on another ethernet port + cake qdisc set to >> 1Gbps. >> >> rx-gro-list off: 630 Mbit/s, CPU 35% idle >> rx-gro-list on: 770 Mbit/s, CPU 40% idle >> >> Signe-off-by: Felix Fietkau <nbd@nbd.name> >> --- >> net/ipv4/tcp_offload.c | 30 ++++++++++++++++++++++++++++++ >> net/ipv6/tcpv6_offload.c | 33 +++++++++++++++++++++++++++++++++ >> 2 files changed, 63 insertions(+) >> >> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c >> index ee5403760775..2ae83f4394dc 100644 >> --- a/net/ipv4/tcp_offload.c >> +++ b/net/ipv4/tcp_offload.c >> @@ -406,6 +406,34 @@ void tcp_gro_complete(struct sk_buff *skb) >> } >> EXPORT_SYMBOL(tcp_gro_complete); >> >> +static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb, >> + struct tcphdr *th) >> +{ >> + const struct iphdr *iph = skb_gro_network_header(skb); >> + struct net *net = dev_net(skb->dev); > > Could you defer the initializations of iph and net after the > NETIF_F_GRO_FRAGLIST check ? > > dev_net() has an implicit READ_ONCE() ... Will do, thanks. - Felix
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index ee5403760775..2ae83f4394dc 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -406,6 +406,34 @@ void tcp_gro_complete(struct sk_buff *skb) } EXPORT_SYMBOL(tcp_gro_complete); +static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb, + struct tcphdr *th) +{ + const struct iphdr *iph = skb_gro_network_header(skb); + struct net *net = dev_net(skb->dev); + struct sk_buff *p; + struct sock *sk; + int iif, sdif; + + if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST)) + return; + + p = tcp_gro_lookup(head, th); + if (p) { + NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist; + return; + } + + inet_get_iif_sdif(skb, &iif, &sdif); + sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, + iph->saddr, th->source, + iph->daddr, ntohs(th->dest), + iif, sdif); + NAPI_GRO_CB(skb)->is_flist = !sk; + if (sk) + sock_put(sk); +} + INDIRECT_CALLABLE_SCOPE struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb) { @@ -421,6 +449,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb) if (!th) goto flush; + tcp4_check_fraglist_gro(head, skb, th); + return tcp_gro_receive(head, skb, th); flush: diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index c01ace2e9ff0..1ab45cca3936 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -7,12 +7,43 @@ */ #include <linux/indirect_call_wrapper.h> #include <linux/skbuff.h> +#include <net/inet6_hashtables.h> #include <net/gro.h> #include <net/protocol.h> #include <net/tcp.h> #include <net/ip6_checksum.h> #include "ip6_offload.h" +static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb, + struct tcphdr *th) +{ +#if IS_ENABLED(CONFIG_IPV6) + const struct ipv6hdr *hdr = skb_gro_network_header(skb); + struct net *net = dev_net(skb->dev); + struct sk_buff *p; + struct sock *sk; + int iif, sdif; + + if (!(skb->dev->features & 
NETIF_F_GRO_FRAGLIST)) + return; + + p = tcp_gro_lookup(head, th); + if (p) { + NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist; + return; + } + + inet6_get_iif_sdif(skb, &iif, &sdif); + sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, + &hdr->saddr, th->source, + &hdr->daddr, ntohs(th->dest), + iif, sdif); + NAPI_GRO_CB(skb)->is_flist = !sk; + if (sk) + sock_put(sk); +#endif /* IS_ENABLED(CONFIG_IPV6) */ +} + INDIRECT_CALLABLE_SCOPE struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb) { @@ -28,6 +59,8 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb) if (!th) goto flush; + tcp6_check_fraglist_gro(head, skb, th); + return tcp_gro_receive(head, skb, th); flush: