
[v2,net-next,5/5] net: add heuristic for enabling TCP fraglist GRO

Message ID 20240425150432.44142-7-nbd@nbd.name (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series Add TCP fraglist GRO support

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 932 this patch: 932
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 938 this patch: 938
netdev/verify_signedoff fail author Signed-off-by missing
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 943 this patch: 943
netdev/checkpatch warning WARNING: Non-standard signature: 'Signe-off-by:' - perhaps 'Signed-off-by:'?
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
netdev/contest success net-next-2024-04-26--00-00 (tests: 991)

Commit Message

Felix Fietkau April 25, 2024, 3:04 p.m. UTC
When forwarding TCP after GRO, software segmentation is very expensive,
especially when the checksum needs to be recalculated.
One case where that's currently unavoidable is when routing packets over
PPPoE. Performance improves significantly when using fraglist GRO
implemented in the same way as for UDP.

When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
socket in the same netns as the receiving device. While this may not
cover all relevant use cases in multi-netns configurations, it should be
good enough for most configurations that need this.

Here's a measurement of running 2 TCP streams through a MediaTek MT7622
device (2-core Cortex-A53), which runs NAT with flow offload enabled from
one ethernet port to PPPoE on another ethernet port + cake qdisc set to
1Gbps.

rx-gro-list off: 630 Mbit/s, CPU 35% idle
rx-gro-list on:  770 Mbit/s, CPU 40% idle

Signe-off-by: Felix Fietkau <nbd@nbd.name>
---
 net/ipv4/tcp_offload.c   | 48 +++++++++++++++++++++++++++++++++++++
 net/ipv6/tcpv6_offload.c | 51 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 99 insertions(+)

Comments

Eric Dumazet April 25, 2024, 3:26 p.m. UTC | #1
On Thu, Apr 25, 2024 at 5:04 PM Felix Fietkau <nbd@nbd.name> wrote:
>
> When forwarding TCP after GRO, software segmentation is very expensive,
> especially when the checksum needs to be recalculated.
> One case where that's currently unavoidable is when routing packets over
> PPPoE. Performance improves significantly when using fraglist GRO
> implemented in the same way as for UDP.
>
> When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
> socket in the same netns as the receiving device. While this may not
> cover all relevant use cases in multi-netns configurations, it should be
> good enough for most configurations that need this.
>
> Here's a measurement of running 2 TCP streams through a MediaTek MT7622
> device (2-core Cortex-A53), which runs NAT with flow offload enabled from
> one ethernet port to PPPoE on another ethernet port + cake qdisc set to
> 1Gbps.
>
> rx-gro-list off: 630 Mbit/s, CPU 35% idle
> rx-gro-list on:  770 Mbit/s, CPU 40% idle
>
> Signe-off-by: Felix Fietkau <nbd@nbd.name>
> ---
>  net/ipv4/tcp_offload.c   | 48 +++++++++++++++++++++++++++++++++++++
>  net/ipv6/tcpv6_offload.c | 51 ++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 99 insertions(+)
>
> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
> index 520fd425ab19..3bb96a110402 100644
> --- a/net/ipv4/tcp_offload.c
> +++ b/net/ipv4/tcp_offload.c
> @@ -405,6 +405,52 @@ void tcp_gro_complete(struct sk_buff *skb)
>  }
>  EXPORT_SYMBOL(tcp_gro_complete);
>
> +static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
> +{
> +       const struct iphdr *iph = skb_gro_network_header(skb);

I do not think loading iph before all skb_gro_header() and
skb_gro_header_slow() calls is wise.

pskb_may_pull() can re-allocate skb->head
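
For illustration, one way to address this (a sketch based on the code
below, not the final form) is to declare iph without an initializer at
the top of the function and only dereference the network header after
the last pull:

       /* No skb_gro_header()/skb_gro_header_slow() calls after this
        * point, so skb->head can no longer be reallocated and the
        * network header pointer stays valid.
        */
       iph = skb_gro_network_header(skb);

       inet_get_iif_sdif(skb, &iif, &sdif);
       sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
                                      iph->saddr, th->source,
                                      iph->daddr, ntohs(th->dest),
                                      iif, sdif);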

> +       struct net *net = dev_net(skb->dev);
> +       unsigned int off, hlen, thlen;
> +       struct sk_buff *p;
> +       struct tcphdr *th;
> +       struct sock *sk;
> +       int iif, sdif;
> +
> +       if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
> +               return;
> +
> +       off = skb_gro_offset(skb);
> +       hlen = off + sizeof(*th);
> +       th = skb_gro_header(skb, hlen, off);
> +       if (unlikely(!th))
> +               return;
> +
> +       thlen = th->doff * 4;
> +       if (thlen < sizeof(*th))
> +               return;
> +
> +       hlen = off + thlen;
> +       if (!skb_gro_may_pull(skb, hlen)) {
> +               th = skb_gro_header_slow(skb, hlen, off);
> +               if (unlikely(!th))
> +                       return;
> +       }
> +
> +       p = tcp_gro_lookup(head, th);
> +       if (p) {
> +               NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
> +               return;
> +       }
> +
> +       inet_get_iif_sdif(skb, &iif, &sdif);
> +       sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
> +                                      iph->saddr, th->source,
> +                                      iph->daddr, ntohs(th->dest),
> +                                      iif, sdif);
> +       NAPI_GRO_CB(skb)->is_flist = !sk;
> +       if (sk)
> +               sock_put(sk);
> +}
> +
>  INDIRECT_CALLABLE_SCOPE
>  struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>  {
> @@ -416,6 +462,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>                 return NULL;
>         }
>

I would probably pull the whole TCP header here, before calling
tcp4_check_fraglist_gro(head, skb)
and no longer do this twice from tcp4_check_fraglist_gro() and tcp_gro_receive()

Perhaps define a new inline helper, that will be called from
tcp4_gro_receive() and tcp6_gro_receive(),
and not anymore from  tcp_gro_receive()

static inline struct tcphdr *tcp_gro_pull_header(...)
{
     ....
       off = skb_gro_offset(skb);
       hlen = off + sizeof(*th);
       th = skb_gro_header(skb, hlen, off);
       if (unlikely(!th))
               return NULL;

       thlen = th->doff * 4;
       if (thlen < sizeof(*th))
               return NULL;

       hlen = off + thlen;
       if (!skb_gro_may_pull(skb, hlen))
               th = skb_gro_header_slow(skb, hlen, off);

      return th;
}
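
A possible call site then looks like this, assuming
tcp4_check_fraglist_gro() and tcp_gro_receive() are changed to take the
already-pulled header (the exact signatures are only a sketch):

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        struct tcphdr *th;

        /* ... existing checksum validation ... */

        /* Pull the full TCP header exactly once per packet. */
        th = tcp_gro_pull_header(skb);
        if (!th) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        tcp4_check_fraglist_gro(head, skb, th);

        return tcp_gro_receive(head, skb, th);
}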


> +       tcp4_check_fraglist_gro(head, skb);
> +
>         return tcp_gro_receive(head, skb);
>  }
>
> diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
> index c97d55cf036f..7948420dcad0 100644
> --- a/net/ipv6/tcpv6_offload.c
> +++ b/net/ipv6/tcpv6_offload.c
> @@ -7,12 +7,61 @@
>   */
>  #include <linux/indirect_call_wrapper.h>
>  #include <linux/skbuff.h>
> +#include <net/inet6_hashtables.h>
>  #include <net/gro.h>
>  #include <net/protocol.h>
>  #include <net/tcp.h>
>  #include <net/ip6_checksum.h>
>  #include "ip6_offload.h"
>
> +static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
> +{
> +#if IS_ENABLED(CONFIG_IPV6)
> +       const struct ipv6hdr *hdr = skb_gro_network_header(skb);
> +       struct net *net = dev_net(skb->dev);
> +       unsigned int off, hlen, thlen;
> +       struct sk_buff *p;
> +       struct tcphdr *th;
> +       struct sock *sk;
> +       int iif, sdif;
> +
> +       if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
> +               return;
> +
> +       off = skb_gro_offset(skb);
> +       hlen = off + sizeof(*th);
> +       th = skb_gro_header(skb, hlen, off);
> +       if (unlikely(!th))
> +               return;
> +
> +       thlen = th->doff * 4;
> +       if (thlen < sizeof(*th))
> +               return;
> +
> +       hlen = off + thlen;
> +       if (!skb_gro_may_pull(skb, hlen)) {
> +               th = skb_gro_header_slow(skb, hlen, off);
> +               if (unlikely(!th))
> +                       return;
> +       }
> +
> +       p = tcp_gro_lookup(head, th);
> +       if (p) {
> +               NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
> +               return;
> +       }
> +
> +       inet6_get_iif_sdif(skb, &iif, &sdif);
> +       sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
> +                                       &hdr->saddr, th->source,
> +                                       &hdr->daddr, ntohs(th->dest),
> +                                       iif, sdif);
> +       NAPI_GRO_CB(skb)->is_flist = !sk;
> +       if (sk)
> +               sock_put(sk);
> +#endif /* IS_ENABLED(CONFIG_IPV6) */
> +}
> +
>  INDIRECT_CALLABLE_SCOPE
>  struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
>  {
> @@ -24,6 +73,8 @@ struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
>                 return NULL;
>         }
>
> +       tcp6_check_fraglist_gro(head, skb);
> +
>         return tcp_gro_receive(head, skb);
>  }
>
> --
> 2.44.0
>
Felix Fietkau April 25, 2024, 3:29 p.m. UTC | #2
On 25.04.24 17:26, Eric Dumazet wrote:
> On Thu, Apr 25, 2024 at 5:04 PM Felix Fietkau <nbd@nbd.name> wrote:
>>
>> When forwarding TCP after GRO, software segmentation is very expensive,
>> especially when the checksum needs to be recalculated.
>> One case where that's currently unavoidable is when routing packets over
>> PPPoE. Performance improves significantly when using fraglist GRO
>> implemented in the same way as for UDP.
>>
>> When NETIF_F_GRO_FRAGLIST is enabled, perform a lookup for an established
>> socket in the same netns as the receiving device. While this may not
>> cover all relevant use cases in multi-netns configurations, it should be
>> good enough for most configurations that need this.
>>
>> Here's a measurement of running 2 TCP streams through a MediaTek MT7622
>> device (2-core Cortex-A53), which runs NAT with flow offload enabled from
>> one ethernet port to PPPoE on another ethernet port + cake qdisc set to
>> 1Gbps.
>>
>> rx-gro-list off: 630 Mbit/s, CPU 35% idle
>> rx-gro-list on:  770 Mbit/s, CPU 40% idle
>>
>> Signe-off-by: Felix Fietkau <nbd@nbd.name>
>> ---
>>  net/ipv4/tcp_offload.c   | 48 +++++++++++++++++++++++++++++++++++++
>>  net/ipv6/tcpv6_offload.c | 51 ++++++++++++++++++++++++++++++++++++++++
>>  2 files changed, 99 insertions(+)
>>
>> diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
>> index 520fd425ab19..3bb96a110402 100644
>> --- a/net/ipv4/tcp_offload.c
>> +++ b/net/ipv4/tcp_offload.c
>> @@ -405,6 +405,52 @@ void tcp_gro_complete(struct sk_buff *skb)
>>  }
>>  EXPORT_SYMBOL(tcp_gro_complete);
>>
>> +static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
>> +{
>> +       const struct iphdr *iph = skb_gro_network_header(skb);
> 
> I do not think loading iph before all skb_gro_header() and
> skb_gro_header_slow() calls is wise.
> 
> pskb_may_pull() can re-allocate skb->head

Will fix.

>> +       struct net *net = dev_net(skb->dev);
>> +       unsigned int off, hlen, thlen;
>> +       struct sk_buff *p;
>> +       struct tcphdr *th;
>> +       struct sock *sk;
>> +       int iif, sdif;
>> +
>> +       if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
>> +               return;
>> +
>> +       off = skb_gro_offset(skb);
>> +       hlen = off + sizeof(*th);
>> +       th = skb_gro_header(skb, hlen, off);
>> +       if (unlikely(!th))
>> +               return;
>> +
>> +       thlen = th->doff * 4;
>> +       if (thlen < sizeof(*th))
>> +               return;
>> +
>> +       hlen = off + thlen;
>> +       if (!skb_gro_may_pull(skb, hlen)) {
>> +               th = skb_gro_header_slow(skb, hlen, off);
>> +               if (unlikely(!th))
>> +                       return;
>> +       }
>> +
>> +       p = tcp_gro_lookup(head, th);
>> +       if (p) {
>> +               NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
>> +               return;
>> +       }
>> +
>> +       inet_get_iif_sdif(skb, &iif, &sdif);
>> +       sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
>> +                                      iph->saddr, th->source,
>> +                                      iph->daddr, ntohs(th->dest),
>> +                                      iif, sdif);
>> +       NAPI_GRO_CB(skb)->is_flist = !sk;
>> +       if (sk)
>> +               sock_put(sk);
>> +}
>> +
>>  INDIRECT_CALLABLE_SCOPE
>>  struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>>  {
>> @@ -416,6 +462,8 @@ struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>>                 return NULL;
>>         }
>>
> 
> I would probably pull the whole TCP header here, before calling
> tcp4_check_fraglist_gro(head, skb)
> and no longer do this twice from tcp4_check_fraglist_gro() and tcp_gro_receive()
> 
> Perhaps define a new inline helper, that will be called from
> tcp4_gro_receive() and tcp6_gro_receive(),
> and not anymore from  tcp_gro_receive()
> 
> static inline struct tcphdr *tcp_gro_pull_header(...)
> {
>       ....
>         off = skb_gro_offset(skb);
>         hlen = off + sizeof(*th);
>         th = skb_gro_header(skb, hlen, off);
>         if (unlikely(!th))
>                 return NULL;
> 
>         thlen = th->doff * 4;
>         if (thlen < sizeof(*th))
>                 return NULL;
> 
>         hlen = off + thlen;
>         if (!skb_gro_may_pull(skb, hlen))
>                 th = skb_gro_header_slow(skb, hlen, off);
> 
>        return th;
> }

Makes sense

Thanks,

- Felix

Patch

diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 520fd425ab19..3bb96a110402 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -405,6 +405,52 @@  void tcp_gro_complete(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_gro_complete);
 
+static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
+{
+	const struct iphdr *iph = skb_gro_network_header(skb);
+	struct net *net = dev_net(skb->dev);
+	unsigned int off, hlen, thlen;
+	struct sk_buff *p;
+	struct tcphdr *th;
+	struct sock *sk;
+	int iif, sdif;
+
+	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+		return;
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*th);
+	th = skb_gro_header(skb, hlen, off);
+	if (unlikely(!th))
+		return;
+
+	thlen = th->doff * 4;
+	if (thlen < sizeof(*th))
+		return;
+
+	hlen = off + thlen;
+	if (!skb_gro_may_pull(skb, hlen)) {
+		th = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!th))
+			return;
+	}
+
+	p = tcp_gro_lookup(head, th);
+	if (p) {
+		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+		return;
+	}
+
+	inet_get_iif_sdif(skb, &iif, &sdif);
+	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+				       iph->saddr, th->source,
+				       iph->daddr, ntohs(th->dest),
+				       iif, sdif);
+	NAPI_GRO_CB(skb)->is_flist = !sk;
+	if (sk)
+		sock_put(sk);
+}
+
 INDIRECT_CALLABLE_SCOPE
 struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
@@ -416,6 +462,8 @@  struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 		return NULL;
 	}
 
+	tcp4_check_fraglist_gro(head, skb);
+
 	return tcp_gro_receive(head, skb);
 }
 
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index c97d55cf036f..7948420dcad0 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,12 +7,61 @@ 
  */
 #include <linux/indirect_call_wrapper.h>
 #include <linux/skbuff.h>
+#include <net/inet6_hashtables.h>
 #include <net/gro.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 
+static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+	const struct ipv6hdr *hdr = skb_gro_network_header(skb);
+	struct net *net = dev_net(skb->dev);
+	unsigned int off, hlen, thlen;
+	struct sk_buff *p;
+	struct tcphdr *th;
+	struct sock *sk;
+	int iif, sdif;
+
+	if (!(skb->dev->features & NETIF_F_GRO_FRAGLIST))
+		return;
+
+	off = skb_gro_offset(skb);
+	hlen = off + sizeof(*th);
+	th = skb_gro_header(skb, hlen, off);
+	if (unlikely(!th))
+		return;
+
+	thlen = th->doff * 4;
+	if (thlen < sizeof(*th))
+		return;
+
+	hlen = off + thlen;
+	if (!skb_gro_may_pull(skb, hlen)) {
+		th = skb_gro_header_slow(skb, hlen, off);
+		if (unlikely(!th))
+			return;
+	}
+
+	p = tcp_gro_lookup(head, th);
+	if (p) {
+		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
+		return;
+	}
+
+	inet6_get_iif_sdif(skb, &iif, &sdif);
+	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
+					&hdr->saddr, th->source,
+					&hdr->daddr, ntohs(th->dest),
+					iif, sdif);
+	NAPI_GRO_CB(skb)->is_flist = !sk;
+	if (sk)
+		sock_put(sk);
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+}
+
 INDIRECT_CALLABLE_SCOPE
 struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
@@ -24,6 +73,8 @@  struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
 		return NULL;
 	}
 
+	tcp6_check_fraglist_gro(head, skb);
+
 	return tcp_gro_receive(head, skb);
 }