[net-next,v2,3/4] net: gro: set inner_network_header in receive phase

Message ID 2076d2ff-cd17-4ab0-b1db-4875d96bf9a6@gmail.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series net: gro: encapsulation bug fix and flush checks improvements

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 968 this patch: 968
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 2 maintainers not CCed: liujian56@huawei.com xeb@mail.ru
netdev/build_clang success Errors and warnings before: 957 this patch: 957
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 986 this patch: 986
netdev/checkpatch warning WARNING: line length of 90 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Richard Gobert March 7, 2024, 1:27 p.m. UTC
This patch sets network_header and inner_network_header to their respective
values during the receive phase of GRO, which allows inner_network_header to
be used later on in GRO. network_header is already set in dev_gro_receive;
under encapsulation, this patch sets inner_network_header as well.

Signed-off-by: Richard Gobert <richardbgobert@gmail.com>
---
 include/net/gro.h      | 13 +++++++++++--
 net/8021q/vlan_core.c  |  5 +++++
 net/ethernet/eth.c     |  1 +
 net/ipv4/af_inet.c     |  9 ++-------
 net/ipv4/gre_offload.c |  1 +
 net/ipv6/ip6_offload.c | 12 +++++-------
 6 files changed, 25 insertions(+), 16 deletions(-)
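
The new skb_gro_network_offset() helper added to include/net/gro.h (see the
patch below) picks between the outer and inner network offsets without a
branch: encap_mark is a single-bit field in the GRO CB, so encap_mark - 1 is
either all ones (no encapsulation, keep the outer offset) or zero
(encapsulated, keep the inner offset). A minimal standalone illustration of
that selection (plain userspace C; the function name is made up for the
example and is not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the mask trick used by skb_gro_network_offset(). */
static int select_network_offset(uint32_t encap_mark, int outer_off,
				 int inner_off)
{
	const uint32_t mask = encap_mark - 1;	/* 0 -> 0xffffffff, 1 -> 0 */

	return (outer_off & mask) | (inner_off & ~mask);
}

int main(void)
{
	/* Not encapsulated: the outer offset (14) is returned. */
	printf("%d\n", select_network_offset(0, 14, 34));
	/* Encapsulated: the inner offset (34) is returned. */
	printf("%d\n", select_network_offset(1, 14, 34));
	return 0;
}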

Comments

Eric Dumazet March 7, 2024, 5:59 p.m. UTC | #1
On Thu, Mar 7, 2024 at 2:28 PM Richard Gobert <richardbgobert@gmail.com> wrote:
>
> This patch sets network_header and inner_network_header to their respective
> values during the receive phase of GRO. This allows us to use
> inner_network_header later on in GRO. network_header is already set in
> dev_gro_receive and under encapsulation inner_network_header is set.
>

> +static inline int skb_gro_network_offset(const struct sk_buff *skb)
> +{
> +       const u32 mask = NAPI_GRO_CB(skb)->encap_mark - 1;
> +
> +       return (skb_network_offset(skb) & mask) | (skb_inner_network_offset(skb) & ~mask);

Presumably this is not needed.

> +}
> +
>  static inline void *skb_gro_network_header(const struct sk_buff *skb)
>  {
> +       const int offset = skb_gro_network_offset(skb);
> +
>         if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
> -               return skb_gro_header_fast(skb, skb_network_offset(skb));
> +               return skb_gro_header_fast(skb, offset);
>
> -       return skb_network_header(skb);
> +       return skb->data + offset;
>  }

I would instead add a new offset parameter to this function.

Again, ideally GRO should work without touching any skb->{offset}.

The GRO stack should maintain the offsets it needs in its own storage
(a stack parameter, or other storage if needed).

The upper stack cannot trust any of these skb fields; otherwise we would
have trouble with napi_reuse_skb().
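
For illustration, the parameter-based variant suggested above could look
roughly like the sketch below (hypothetical signature, not part of the posted
patch): the caller passes the offset it tracks itself, and the helper no
longer derives it from the skb header fields.

static inline void *skb_gro_network_header(const struct sk_buff *skb,
					   int offset)
{
	/* The caller supplies the network header offset it maintains in its
	 * own storage (e.g. a stack variable in the gro_receive path).
	 */
	if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
		return skb_gro_header_fast(skb, offset);

	return skb->data + offset;
}
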
Richard Gobert March 9, 2024, 3:13 p.m. UTC | #2
Eric Dumazet wrote:
> On Thu, Mar 7, 2024 at 2:28 PM Richard Gobert <richardbgobert@gmail.com> wrote:
>>
>> This patch sets network_header and inner_network_header to their respective
>> values during the receive phase of GRO. This allows us to use
>> inner_network_header later on in GRO. network_header is already set in
>> dev_gro_receive and under encapsulation inner_network_header is set.
>>
> 
>> +static inline int skb_gro_network_offset(const struct sk_buff *skb)
>> +{
>> +       const u32 mask = NAPI_GRO_CB(skb)->encap_mark - 1;
>> +
>> +       return (skb_network_offset(skb) & mask) | (skb_inner_network_offset(skb) & ~mask);
> 
> Presumably this is not needed.
> 
>> +}
>> +
>>  static inline void *skb_gro_network_header(const struct sk_buff *skb)
>>  {
>> +       const int offset = skb_gro_network_offset(skb);
>> +
>>         if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
>> -               return skb_gro_header_fast(skb, skb_network_offset(skb));
>> +               return skb_gro_header_fast(skb, offset);
>>
>> -       return skb_network_header(skb);
>> +       return skb->data + offset;
>>  }
> 
> I would instead add a new offset parameter to this function.
> 
> Again, ideally GRO should work without touching any skb->{offset}.
> 
> GRO stack should maintain the offsets it needs in its own storage
> (stack parameter, or other storage if needed)
> 
> Upper stack can not trust any of these skb fields, otherwise we would
> have some troubles with napi_reuse_skb()

Inner and outer network headers are needed for patch #4, so I will store them
in the CB.

Moreover, I will work on another patch series that changes the GRO receive
phase to use stack params as you suggested (prev offset, and I will also work
on a version with data_offset).
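
For reference, "store them in the CB" could take roughly the following shape
(the field names and the handler shown are illustrative only, not code from
this series): the offsets would be recorded in NAPI_GRO_CB at receive time
instead of being written into the skb header fields.

/* In include/net/gro.h, inside struct napi_gro_cb (hypothetical fields): */
	u16	network_offset;		/* offset of the outer network header */
	u16	inner_network_offset;	/* offset of the inner network header */

/* In an encapsulation gro_receive handler such as ipip_gro_receive(),
 * instead of calling skb_set_inner_network_header():
 */
	NAPI_GRO_CB(skb)->inner_network_offset = skb_gro_offset(skb);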

Patch

diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 7515e6bcbb7d..db27b6851360 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -441,6 +441,7 @@  struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
 
 	skb_gro_pull(skb, sizeof(*eh));
 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 
 	pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
 					    ipv6_gro_receive, inet_gro_receive,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 9334b563a88b..09ab9ac4420b 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1567,10 +1567,6 @@  struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 
 	NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
 	NAPI_GRO_CB(skb)->flush |= flush;
-	skb_set_network_header(skb, off);
-	/* The above will be needed by the transport layer if there is one
-	 * immediately following this IP hdr.
-	 */
 
 	/* Note : No need to call skb_gro_postpull_rcsum() here,
 	 * as we already checked checksum over ipv4 header was 0
@@ -1596,6 +1592,7 @@  static struct sk_buff *ipip_gro_receive(struct list_head *head,
 	}
 
 	NAPI_GRO_CB(skb)->encap_mark = 1;
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 
 	return inet_gro_receive(head, skb);
 }
@@ -1648,10 +1645,8 @@  int inet_gro_complete(struct sk_buff *skb, int prior_off, int nhoff)
 	int proto = iph->protocol;
 	int err = -ENOSYS;
 
-	if (skb->encapsulation) {
+	if (skb->encapsulation)
 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IP));
-		skb_set_inner_network_header(skb, nhoff);
-	}
 
 	iph_set_totlen(iph, skb->len - nhoff);
 	csum_replace2(&iph->check, totlen, iph->tot_len);
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index d4520c3f7c09..bfc3a0cbb3a3 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -224,6 +224,7 @@  static struct sk_buff *gre_gro_receive(struct list_head *head,
 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 	flush = 0;
 
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e3a05b84c76a..29601be9fa90 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -67,7 +67,7 @@  static int ipv6_gro_pull_exthdrs(struct sk_buff *skb, int off, int proto)
 		off += len;
 	}
 
-	skb_gro_pull(skb, off - skb_network_offset(skb));
+	skb_gro_pull(skb, off - skb_gro_network_offset(skb));
 	return proto;
 }
 
@@ -236,8 +236,6 @@  INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
 	if (unlikely(!iph))
 		goto out;
 
-	skb_set_network_header(skb, off);
-
 	flush += ntohs(iph->payload_len) != skb->len - hlen;
 
 	proto = iph->nexthdr;
@@ -259,7 +257,7 @@  INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
 	NAPI_GRO_CB(skb)->proto = proto;
 
 	flush--;
-	nlen = skb_network_header_len(skb);
+	nlen = skb_gro_offset(skb) - off;
 
 	list_for_each_entry(p, head, list) {
 		const struct ipv6hdr *iph2;
@@ -327,6 +325,7 @@  static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
 	}
 
 	NAPI_GRO_CB(skb)->encap_mark = 1;
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 
 	return ipv6_gro_receive(head, skb);
 }
@@ -342,6 +341,7 @@  static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
 	}
 
 	NAPI_GRO_CB(skb)->encap_mark = 1;
+	skb_set_inner_network_header(skb, skb_gro_offset(skb));
 
 	return inet_gro_receive(head, skb);
 }
@@ -355,10 +355,8 @@  INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb,
 	u32 payload_len;
 	int nhlen;
 
-	if (skb->encapsulation) {
+	if (skb->encapsulation)
 		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
-		skb_set_inner_network_header(skb, nhoff);
-	}
 
 	payload_len = skb->len - nhoff - sizeof(*iph);
 	if (unlikely(payload_len > IPV6_MAXPLEN)) {
diff --git a/include/net/gro.h b/include/net/gro.h
index cb7282bf3d63..923cbcc4c2fa 100644
--- a/include/net/gro.h
+++ b/include/net/gro.h
@@ -171,12 +171,21 @@  static inline void *skb_gro_header(struct sk_buff *skb, unsigned int hlen,
 	return ptr;
 }
 
+static inline int skb_gro_network_offset(const struct sk_buff *skb)
+{
+	const u32 mask = NAPI_GRO_CB(skb)->encap_mark - 1;
+
+	return (skb_network_offset(skb) & mask) | (skb_inner_network_offset(skb) & ~mask);
+}
+
 static inline void *skb_gro_network_header(const struct sk_buff *skb)
 {
+	const int offset = skb_gro_network_offset(skb);
+
 	if (skb_gro_may_pull(skb, skb_gro_offset(skb)))
-		return skb_gro_header_fast(skb, skb_network_offset(skb));
+		return skb_gro_header_fast(skb, offset);
 
-	return skb_network_header(skb);
+	return skb->data + offset;
 }
 
 static inline __wsum inet_gro_compute_pseudo(const struct sk_buff *skb,
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 247704cf70af..a61bfcff6250 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -478,6 +478,11 @@  static struct sk_buff *vlan_gro_receive(struct list_head *head,
 	if (unlikely(!vhdr))
 		goto out;
 
+	if (NAPI_GRO_CB(skb)->encap_mark)
+		skb_set_inner_network_header(skb, hlen);
+	else
+		skb_set_network_header(skb, hlen);
+
 	type = vhdr->h_vlan_encapsulated_proto;
 
 	ptype = gro_find_receive_by_type(type);