Message ID | 20240425150432.44142-7-nbd@nbd.name
---|---
State | Superseded |
Delegated to | Netdev Maintainers
Series | Add TCP fraglist GRO support
On Thu, Apr 25, 2024 at 5:04 PM Felix Fietkau <nbd@nbd.name> wrote:
>
> When forwarding TCP after GRO, software segmentation is very expensive,
> especially when the checksum needs to be recalculated.
> One case where that's currently unavoidable is when routing packets over
> PPPoE. Performance improves significantly when using fraglist GRO
> implemented in the same way as for UDP.
>
> When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
> socket in the same netns as the receiving device. While this may not
> cover all relevant use cases in multi-netns configurations, it should be
> good enough for most configurations that need this.
>
> Here's a measurement of running 2 TCP streams through a MediaTek MT7622
> device (2-core Cortex-A53), which runs NAT with flow offload enabled from
> one ethernet port to PPPoE on another ethernet port + cake qdisc set to
> 1Gbps.
>
> rx-gro-list off: 630 Mbit/s, CPU 35% idle
> rx-gro-list on:  770 Mbit/s, CPU 40% idle
>
> Signed-off-by: Felix Fietkau <nbd@nbd.name>
> ---
>  net/ipv4/tcp_offload.c   | 48 +++++++++++++++++++++++++++++++++++++
>  net/ipv6/tcpv6_offload.c | 51 ++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 99 insertions(+)
>
> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
> index 520fd425ab19..3bb96a110402 100644
> --- a/net/ipv4/tcp_offload.c
> +++ b/net/ipv4/tcp_offload.c
> @@ -405,6 +405,52 @@ void tcp_gro_complete(struct sk_buff *skb)
>  }
>  EXPORT_SYMBOL(tcp_gro_complete);
>
> +static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
> +{
> +	const struct iphdr *iph = skb_gro_network_header(skb);

I do not think loading iph before all skb_gro_header() and
skb_gro_header_slow() calls is wise.

pskb_may_pull() can re-allocate skb->head.

> +	struct net *net = dev_net(skb->dev);
> +	unsigned int off, hlen, thlen;
> +	struct sk_buff *p;
> +	struct tcphdr *th;
> +	struct sock *sk;
> +	int iif, sdif;
> +
> +	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
> +		return;
> +
> +	off = skb_gro_offset(skb);
> +	hlen = off + sizeof(*th);
> +	th = skb_gro_header(skb, hlen, off);
> +	if (unlikely(!th))
> +		return;
> +
> +	thlen = th->doff * 4;
> +	if (thlen < sizeof(*th))
> +		return;
> +
> +	hlen = off + thlen;
> +	if (!skb_gro_may_pull(skb, hlen)) {
> +		th = skb_gro_header_slow(skb, hlen, off);
> +		if (unlikely(!th))
> +			return;
> +	}
> +
> +	p = tcp_gro_lookup(head, th);
> +	if (p) {
> +		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
> +		return;
> +	}
> +
> +	inet_get_iif_sdif(skb, &iif, &sdif);
> +	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
> +				       iph->saddr, th->source,
> +				       iph->daddr, ntohs(th->dest),
> +				       iif, sdif);
> +	NAPI_GRO_CB(skb)->is_flist = !sk;
> +	if (sk)
> +		sock_put(sk);
> +}
> +
>  INDIRECT_CALLABLE_SCOPE
>  struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>  {
> @@ -416,6 +462,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>  		return NULL;
>  	}
>

I would probably pull the whole TCP header here, before calling
tcp4_check_fraglist_gro(head, skb), and no longer do this twice from
tcp4_check_fraglist_gro() and tcp_gro_receive().

Perhaps define a new inline helper, that will be called from
tcp4_gro_receive() and tcp6_gro_receive(), and not anymore from
tcp_gro_receive():

static inline struct tcphdr *tcp_gro_pull_header(...)
{
	....
	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		return NULL;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		return NULL;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen))
		th = skb_gro_header_slow(skb, hlen, off);

	return th;
}

> +	tcp4_check_fraglist_gro(head, skb);
> +
>  	return tcp_gro_receive(head, skb);
>  }
>
[...]
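For reference, a self-contained version of the suggested helper might look
like the following. This is only a sketch: the signature, the variable
declarations, and the comment are assumptions filled in around Eric's
outline, not code from this thread.

/* Pull the complete TCP header, including options. Returns NULL if even
 * the basic header cannot be pulled. Note that skb_gro_header_slow() may
 * also return NULL, so callers must still check the result before use.
 */
static inline struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
{
	unsigned int thlen, hlen, off;
	struct tcphdr *th;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		return NULL;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		return NULL;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen))
		th = skb_gro_header_slow(skb, hlen, off);

	return th;
}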
On 25.04.24 17:26, Eric Dumazet wrote:
> On Thu, Apr 25, 2024 at 5:04 PM Felix Fietkau <nbd@nbd.name> wrote:
[...]
>> +static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
>> +{
>> +	const struct iphdr *iph = skb_gro_network_header(skb);
>
> I do not think loading iph before all skb_gro_header() and
> skb_gro_header_slow() calls is wise.
>
> pskb_may_pull() can re-allocate skb->head.

Will fix.

[...]
> I would probably pull the whole TCP header here, before calling
> tcp4_check_fraglist_gro(head, skb), and no longer do this twice from
> tcp4_check_fraglist_gro() and tcp_gro_receive().
>
> Perhaps define a new inline helper, that will be called from
> tcp4_gro_receive() and tcp6_gro_receive(), and not anymore from
> tcp_gro_receive():
>
> static inline struct tcphdr *tcp_gro_pull_header(...)
> {
> 	....
[...]
> 	return th;
> }

Makes sense.

Thanks,

- Felix
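Putting both comments together, a respin might pull the TCP header once in
tcp4_gro_receive(), pass it down, and only load the network header after
all pulls are done. The following is a sketch of that shape, not the
posted follow-up; the three-argument signatures (including tcp_gro_receive()
also taking the pulled header) are assumptions here.

static void tcp4_check_fraglist_gro(struct list_head *head,
				    struct sk_buff *skb, struct tcphdr *th)
{
	const struct iphdr *iph;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	/* Safe now: no further header pulls follow, so skb->head can no
	 * longer be re-allocated under us.
	 */
	inet_get_iif_sdif(skb, &iif, &sdif);
	iph = skb_gro_network_header(skb);
	net = dev_net(skb->dev);
	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_put(sk);
}

The caller would then do the single pull up front, roughly:

	th = tcp_gro_pull_header(skb);
	if (!th) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	tcp4_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);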
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 520fd425ab19..3bb96a110402 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -405,6 +405,52 @@ void tcp_gro_complete(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_gro_complete);
 
+static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
+{
+	const struct iphdr *iph = skb_gro_network_header(skb);
+	struct net *net = dev_net(skb->dev);
+	unsigned int off, hlen, thlen;
+	struct sk_buff *p;
+	struct tcphdr *th;
+	struct sock *sk;
+	int iif, sdif;
+
+	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+		return;
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*th);
+	th = skb_gro_header(skb, hlen, off);
+	if (unlikely(!th))
+		return;
+
+	thlen = th->doff * 4;
+	if (thlen < sizeof(*th))
+		return;
+
+	hlen = off + thlen;
+	if (!skb_gro_may_pull(skb, hlen)) {
+		th = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!th))
+			return;
+	}
+
+	p = tcp_gro_lookup(head, th);
+	if (p) {
+		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+		return;
+	}
+
+	inet_get_iif_sdif(skb, &iif, &sdif);
+	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+				       iph->saddr, th->source,
+				       iph->daddr, ntohs(th->dest),
+				       iif, sdif);
+	NAPI_GRO_CB(skb)->is_flist = !sk;
+	if (sk)
+		sock_put(sk);
+}
+
 INDIRECT_CALLABLE_SCOPE
 struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
@@ -416,6 +462,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 		return NULL;
 	}
 
+	tcp4_check_fraglist_gro(head, skb);
+
 	return tcp_gro_receive(head, skb);
 }
 
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index c97d55cf036f..7948420dcad0 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,12 +7,61 @@
  */
 #include <linux/indirect_call_wrapper.h>
 #include <linux/skbuff.h>
+#include <net/inet6_hashtables.h>
 #include <net/gro.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 
+static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+	const struct ipv6hdr *hdr = skb_gro_network_header(skb);
+	struct net *net = dev_net(skb->dev);
+	unsigned int off, hlen, thlen;
+	struct sk_buff *p;
+	struct tcphdr *th;
+	struct sock *sk;
+	int iif, sdif;
+
+	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+		return;
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*th);
+	th = skb_gro_header(skb, hlen, off);
+	if (unlikely(!th))
+		return;
+
+	thlen = th->doff * 4;
+	if (thlen < sizeof(*th))
+		return;
+
+	hlen = off + thlen;
+	if (!skb_gro_may_pull(skb, hlen)) {
+		th = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!th))
+			return;
+	}
+
+	p = tcp_gro_lookup(head, th);
+	if (p) {
+		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+		return;
+	}
+
+	inet6_get_iif_sdif(skb, &iif, &sdif);
+	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+					&hdr->saddr, th->source,
+					&hdr->daddr, ntohs(th->dest),
+					iif, sdif);
+	NAPI_GRO_CB(skb)->is_flist = !sk;
+	if (sk)
+		sock_put(sk);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+}
+
 INDIRECT_CALLABLE_SCOPE
 struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
@@ -24,6 +73,8 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
 		return NULL;
 	}
 
+	tcp6_check_fraglist_gro(head, skb);
+
 	return tcp_gro_receive(head, skb);
 }
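As context for the new is_flist flag: once set, it changes how the GRO
engine coalesces the flow. Instead of merging payload into the head skb
(which would force header rewrites and checksum recalculation when
segmenting again for PPPoE output), matching packets are kept intact and
chained onto the head skb's frag_list. A rough sketch of the consumer,
modeled on the existing UDP fraglist path in net/ipv4/udp_offload.c rather
than on this series' exact code:

	/* In the GRO merge step: p is the held packet matched by
	 * tcp_gro_lookup(), skb the newly arrived one.
	 */
	if (NAPI_GRO_CB(skb)->is_flist) {
		/* Keep skb whole; chain it behind p so later
		 * segmentation just unlinks the original frames.
		 */
		if (skb_gro_receive_list(p, skb))
			NAPI_GRO_CB(skb)->flush = 1;
	} else {
		/* Regular GRO: merge skb's payload into p. */
		if (skb_gro_receive(p, skb))
			NAPI_GRO_CB(skb)->flush = 1;
	}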