@@ -341,7 +341,7 @@ rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
*/
if (q->flags & IFF_VNET_HDR)
netdev_features_set(features, tap->tap_features);
- if (netif_needs_gso(skb, features)) {
+ if (netif_needs_gso(skb, &features)) {
struct sk_buff *segs = __skb_gso_segment(skb, &features, false);
struct sk_buff *next;
@@ -777,7 +777,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
netif_skb_features(skb, &features);
if (unlikely(!netif_carrier_ok(dev) ||
(slots > 1 && !xennet_can_sg(dev)) ||
- netif_needs_gso(skb, features))) {
+ netif_needs_gso(skb, &features))) {
spin_unlock_irqrestore(&queue->tx_lock, flags);
goto drop;
}
@@ -6882,7 +6882,7 @@ void qeth_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t restricted;
netdev_features_zero(restricted);
- if (skb_is_gso(skb) && !netif_needs_gso(skb, *features))
+ if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
netdev_features_set(restricted, NETIF_F_ALL_TSO);
switch (vlan_get_protocol(skb)) {
@@ -6914,7 +6914,7 @@ void qeth_features_check(struct sk_buff *skb, struct net_device *dev,
* additional buffer element. This reduces buffer utilization, and
* hurts throughput. So compress small segments into one element.
*/
- if (netif_needs_gso(skb, *features)) {
+ if (netif_needs_gso(skb, features)) {
/* match skb_segment(): */
unsigned int doffset = skb->data - skb_mac_header(skb);
unsigned int hsize = skb_shinfo(skb)->gso_size;
@@ -4917,7 +4917,7 @@ void passthru_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t *features);
void netif_skb_features(struct sk_buff *skb, netdev_features_t *features);
-static inline bool net_gso_ok(netdev_features_t features, int gso_type)
+static inline bool net_gso_ok(const netdev_features_t *features, int gso_type)
{
#define ASSERT_GSO_TYPE(fl, feat) \
static_assert((fl) == (feat) - NETIF_F_GSO_SHIFT)
@@ -4952,17 +4952,18 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
if (new_gso_type) { /* placeholder for new gso type */
}
- return (features & feature) == feature;
+ return (*features & feature) == feature;
}
-static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
+static inline bool skb_gso_ok(struct sk_buff *skb,
+ const netdev_features_t *features)
{
return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
- (!skb_has_frag_list(skb) || netdev_feature_test(NETIF_F_FRAGLIST_BIT, features));
+ (!skb_has_frag_list(skb) || netdev_feature_test(NETIF_F_FRAGLIST_BIT, *features));
}
static inline bool netif_needs_gso(struct sk_buff *skb,
- netdev_features_t features)
+ const netdev_features_t *features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
@@ -2237,7 +2237,7 @@ bool sk_mc_loop(struct sock *sk);
static inline bool sk_can_gso(const struct sock *sk)
{
- return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+ return net_gso_ok(&sk->sk_route_caps, sk->sk_gso_type);
}
void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
@@ -3404,7 +3404,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
dev->gso_partial_features);
netdev_feature_add(NETIF_F_GSO_ROBUST_BIT, partial_features);
netdev_features_set(partial_features, features);
- if (!skb_gso_ok(skb, partial_features))
+ if (!skb_gso_ok(skb, &partial_features))
netdev_feature_del(NETIF_F_GSO_PARTIAL_BIT, features);
}
@@ -3665,7 +3665,7 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
if (unlikely(!skb))
goto out_null;
- if (netif_needs_gso(skb, features)) {
+ if (netif_needs_gso(skb, &features)) {
struct sk_buff *segs;
segs = skb_gso_segment(skb, &features);
@@ -4068,7 +4068,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
unsigned int frag_len;
if (!list_skb ||
- !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
+ !net_gso_ok(&features, skb_shinfo(head_skb)->gso_type))
goto normal;
/* If we get here then all the required
@@ -85,7 +85,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_copy(features, *feats);
netdev_feature_add(NETIF_F_GSO_ROBUST_BIT, features);
- if (skb_gso_ok(skb, features)) {
+ if (skb_gso_ok(skb, &features)) {
/* Packet is from an untrusted source, reset gso_segs. */
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
@@ -52,7 +52,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
__skb_pull(skb, sizeof(*sh));
netdev_feature_add(NETIF_F_GSO_ROBUST_BIT, tmp);
- if (skb_gso_ok(skb, tmp)) {
+ if (skb_gso_ok(skb, &tmp)) {
/* Packet is from an untrusted source, reset gso_segs. */
struct skb_shared_info *pinfo = skb_shinfo(skb);
struct sk_buff *frag_iter;
The functions netif_needs_gso(), net_gso_ok() and skb_gso_ok() currently
take their netdev_features_t parameter by value. Since netdev_features_t
will be extended to be larger than 8 bytes, change the prototype of these
functions to take the input features as 'const netdev_features_t *'
instead.

Signed-off-by: Jian Shen <shenjian15@huawei.com>
---
 drivers/net/tap.c                 |  2 +-
 drivers/net/xen-netfront.c        |  2 +-
 drivers/s390/net/qeth_core_main.c |  4 ++--
 include/linux/netdevice.h         | 11 ++++++-----
 include/net/sock.h                |  2 +-
 net/core/dev.c                    |  4 ++--
 net/core/skbuff.c                 |  2 +-
 net/ipv4/tcp_offload.c            |  2 +-
 net/sctp/offload.c                |  2 +-
 9 files changed, 16 insertions(+), 15 deletions(-)
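
As an illustration only (not part of the patch), below is a minimal sketch of
how a driver transmit path would call the pointer-based helpers after this
change. The driver name foo_start_xmit() and the omitted queueing logic are
hypothetical; netif_skb_features(), netif_needs_gso() and skb_gso_segment()
are used with the pointer signatures shown in the hunks above.

/*
 * Hypothetical example, not part of this patch: a driver xmit path
 * segmenting a GSO skb with the pointer-based feature helpers.
 */
#include <linux/err.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	/* Build the feature set into a local variable ... */
	netif_skb_features(skb, &features);

	/* ... and hand the helpers a pointer instead of a by-value copy. */
	if (netif_needs_gso(skb, &features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, &features);
		if (IS_ERR_OR_NULL(segs)) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}

		/* The original skb has been segmented; free it and
		 * transmit the segment list instead.
		 */
		consume_skb(skb);
		skb = segs;
	}

	/* ... enqueue skb / segments for transmission (omitted) ... */
	return NETDEV_TX_OK;
}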