@@ -3730,8 +3730,10 @@ static inline bool skb_needs_linearize(struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_nonlinear(skb) &&
- ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
- (skb_shinfo(skb)->nr_frags && !(features & NETIF_F_SG)));
+ ((skb_has_frag_list(skb) &&
+ !netdev_feature_test_bit(NETIF_F_FRAGLIST_BIT, features)) ||
+ (skb_shinfo(skb)->nr_frags &&
+ !netdev_feature_test_bit(NETIF_F_SG_BIT, features)));
}
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
@@ -3992,7 +3992,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
* skbs; we do so by disabling SG.
*/
if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
- features &= ~NETIF_F_SG;
+ netdev_feature_clear_bit(NETIF_F_SG_BIT, &features);
}
__skb_push(head_skb, doffset);
@@ -4000,11 +4000,12 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
- sg = !!(features & NETIF_F_SG);
+ sg = !!netdev_feature_test_bit(NETIF_F_SG_BIT, features);
csum = !!can_checksum_protocol(features, proto);
if (sg && csum && (mss != GSO_BY_FRAGS)) {
- if (!(features & NETIF_F_GSO_PARTIAL)) {
+ if (!netdev_feature_test_bit(NETIF_F_GSO_PARTIAL_BIT,
+ features)) {
struct sk_buff *iter;
unsigned int frag_len;
@@ -4261,7 +4262,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
/* Update type to add partial and then remove dodgy if set */
- type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
+ type |= netdev_feature_test_bit(NETIF_F_GSO_PARTIAL_BIT,
+ features) * SKB_GSO_PARTIAL;
type &= ~SKB_GSO_DODGY;
/* Update GSO info and prepare to start updating headers on
Use the netdev_feature_xxx helpers to replace the open-coded logical
operations on netdev features.

Signed-off-by: Jian Shen <shenjian15@huawei.com>
---
 include/linux/skbuff.h |  6 ++++--
 net/core/skbuff.c      | 10 ++++++----
 2 files changed, 10 insertions(+), 6 deletions(-)