--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2050,8 +2050,8 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
{
- sk->sk_route_nocaps |= flags;
- sk->sk_route_caps &= ~flags;
+ netdev_feature_or(&sk->sk_route_nocaps, sk->sk_route_nocaps, flags);
+ netdev_feature_andnot(&sk->sk_route_caps, sk->sk_route_caps, flags);
}
static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
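The netdev_feature_or()/netdev_feature_andnot() helpers are not defined in this hunk; presumably they come from the earlier patches of the series that turn netdev_features_t into a bitmap type. As a rough illustration of the intended semantics only (the model_* names and the single-u64 storage below are invented for this sketch, not taken from the kernel), the two calls in sk_nocaps_add() behave like "dst = a | b" and "dst = a & ~b":

#include <assert.h>
#include <stdint.h>

/* Toy stand-in for netdev_features_t; the real type is expected to be a
 * multi-word bitmap, but one u64 is enough to show the calling convention.
 */
typedef struct { uint64_t bits; } model_features_t;

/* dst = a | b, mirroring netdev_feature_or(&dst, a, b) */
static void model_feature_or(model_features_t *dst,
			     model_features_t a, model_features_t b)
{
	dst->bits = a.bits | b.bits;
}

/* dst = a & ~b, mirroring netdev_feature_andnot(&dst, a, b) */
static void model_feature_andnot(model_features_t *dst,
				 model_features_t a, model_features_t b)
{
	dst->bits = a.bits & ~b.bits;
}

int main(void)
{
	model_features_t caps = { 0x6 }, nocaps = { 0x0 }, flags = { 0x4 };

	/* Same shape as the new sk_nocaps_add(): record the ban in nocaps,
	 * then strip the capability from caps.
	 */
	model_feature_or(&nocaps, nocaps, flags);
	model_feature_andnot(&caps, caps, flags);

	assert(nocaps.bits == 0x4);
	assert(caps.bits == 0x2);
	return 0;
}

Passing the destination by pointer and both sources by value keeps each call a drop-in replacement for the old one-line bitwise statements.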
@@ -2063,7 +2063,8 @@ static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
if (!csum_and_copy_from_iter_full(to, copy, &csum, from))
return -EFAULT;
skb->csum = csum_block_add(skb->csum, csum, offset);
- } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
+ } else if (netdev_feature_test_bit(NETIF_F_NOCACHE_COPY_BIT,
+ sk->sk_route_caps)) {
if (!copy_from_iter_full_nocache(to, copy, from))
return -EFAULT;
} else if (!copy_from_iter_full(to, copy, from))
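On the read side, the conversion switches from masking with the NETIF_F_NOCACHE_COPY flag to testing the NETIF_F_NOCACHE_COPY_BIT index, so callers get a plain boolean instead of a masked feature word. A minimal sketch of that behaviour, assuming the real helper wraps a test_bit()-style lookup on the feature bitmap (the bit number and the model_* names below are illustrative only):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct { uint64_t bits; } model_features_t;

/* True when feature bit 'nr' is set, mirroring
 * netdev_feature_test_bit(NETIF_F_..._BIT, features).
 */
static bool model_feature_test_bit(unsigned int nr, model_features_t f)
{
	return (f.bits >> nr) & 1;
}

int main(void)
{
	enum { MODEL_F_NOCACHE_COPY_BIT = 5 };	/* made-up bit number */
	model_features_t caps = { 1ull << MODEL_F_NOCACHE_COPY_BIT };

	assert(model_feature_test_bit(MODEL_F_NOCACHE_COPY_BIT, caps));

	caps.bits = 0;
	assert(!model_feature_test_bit(MODEL_F_NOCACHE_COPY_BIT, caps));
	return 0;
}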
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -379,7 +379,8 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
copy = (buf_size > bytes) ? bytes : buf_size;
to = sg_virt(sge) + msg->sg.copybreak;
msg->sg.copybreak += copy;
- if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
+ if (netdev_feature_test_bit(NETIF_F_NOCACHE_COPY_BIT,
+ sk->sk_route_caps))
ret = copy_from_iter_nocache(to, copy, from);
else
ret = copy_from_iter(to, copy, from);
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2146,15 +2146,20 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
u32 max_segs = 1;
sk_dst_set(sk, dst);
- sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
- if (sk->sk_route_caps & NETIF_F_GSO)
- sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
- sk->sk_route_caps &= ~sk->sk_route_nocaps;
+ netdev_feature_or(&sk->sk_route_caps, dst->dev->features,
+ sk->sk_route_forced_caps);
+ if (netdev_feature_test_bit(NETIF_F_GSO_BIT, sk->sk_route_caps))
+ netdev_feature_set_bits(NETIF_F_GSO_SOFTWARE,
+ &sk->sk_route_caps);
+ netdev_feature_andnot(&sk->sk_route_caps, sk->sk_route_caps,
+ sk->sk_route_nocaps);
if (sk_can_gso(sk)) {
if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
- sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ netdev_feature_clear_bits(NETIF_F_GSO_MASK,
+ &sk->sk_route_caps);
} else {
- sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ netdev_feature_set_bits(NETIF_F_SG | NETIF_F_HW_CSUM,
+ &sk->sk_route_caps);
sk->sk_gso_max_size = dst->dev->gso_max_size;
max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
}
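sk_setup_caps() also shows the split between the single-bit and the mask-based helpers: netdev_feature_test_bit()/netdev_feature_set_bit() take a NETIF_F_*_BIT index, while netdev_feature_set_bits()/netdev_feature_clear_bits() apparently still accept the legacy NETIF_F_* mask constants such as NETIF_F_GSO_SOFTWARE, NETIF_F_GSO_MASK or NETIF_F_SG | NETIF_F_HW_CSUM, which may cover several bits at once. A rough model of the mask-based pair under that assumption (the masks below are invented for illustration, not the real NETIF_F_* values):

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t bits; } model_features_t;

/* features |= mask, mirroring netdev_feature_set_bits(mask, &features) */
static void model_feature_set_bits(uint64_t mask, model_features_t *f)
{
	f->bits |= mask;
}

/* features &= ~mask, mirroring netdev_feature_clear_bits(mask, &features) */
static void model_feature_clear_bits(uint64_t mask, model_features_t *f)
{
	f->bits &= ~mask;
}

int main(void)
{
	/* Illustrative masks only. */
	const uint64_t MODEL_F_SG	= 1ull << 0;
	const uint64_t MODEL_F_HW_CSUM	= 1ull << 1;
	const uint64_t MODEL_F_GSO_MASK	= 0xffull << 16;

	model_features_t caps = { MODEL_F_GSO_MASK | MODEL_F_SG };

	/* Same pattern as the else branch and the xfrm branch above. */
	model_feature_set_bits(MODEL_F_SG | MODEL_F_HW_CSUM, &caps);
	model_feature_clear_bits(MODEL_F_GSO_MASK, &caps);

	assert(caps.bits == (MODEL_F_SG | MODEL_F_HW_CSUM));
	return 0;
}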
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1295,7 +1295,7 @@ int inet_sk_rebuild_header(struct sock *sk)
err = PTR_ERR(rt);
/* Routing failed... */
- sk->sk_route_caps = 0;
+ netdev_feature_zero(&sk->sk_route_caps);
/*
* Other protocols have to map its equivalent state to TCP_SYN_SENT.
* DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -457,7 +457,8 @@ void tcp_init_sock(struct sock *sk)
WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
sk_sockets_allocated_inc(sk);
- sk->sk_route_forced_caps = NETIF_F_GSO;
+ netdev_feature_zero(&sk->sk_route_forced_caps);
+ netdev_feature_set_bit(NETIF_F_GSO_BIT, &sk->sk_route_forced_caps);
}
EXPORT_SYMBOL(tcp_init_sock);
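In tcp_init_sock() the old single assignment "sk->sk_route_forced_caps = NETIF_F_GSO" becomes two helper calls: zero the whole feature set, then set the one wanted bit by its NETIF_F_GSO_BIT index. A small sketch of why the pair is equivalent to the assignment, again against the toy single-word model with a made-up bit number:

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t bits; } model_features_t;

/* Clear every feature, mirroring netdev_feature_zero(&features) */
static void model_feature_zero(model_features_t *f)
{
	f->bits = 0;
}

/* Set one feature by bit number, mirroring
 * netdev_feature_set_bit(NETIF_F_GSO_BIT, &features)
 */
static void model_feature_set_bit(unsigned int nr, model_features_t *f)
{
	f->bits |= 1ull << nr;
}

int main(void)
{
	enum { MODEL_F_GSO_BIT = 19 };			/* made-up bit number */
	model_features_t forced = { 0xdeadbeefull };	/* arbitrary old contents */

	/* zero + set_bit == "forced = GSO only", whatever was there before */
	model_feature_zero(&forced);
	model_feature_set_bit(MODEL_F_GSO_BIT, &forced);

	assert(forced.bits == (1ull << MODEL_F_GSO_BIT));
	return 0;
}

The same netdev_feature_zero() call replaces every bare "sk->sk_route_caps = 0" in the error paths further down.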
@@ -1106,7 +1107,7 @@ EXPORT_SYMBOL_GPL(do_tcp_sendpages);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
{
- if (!(sk->sk_route_caps & NETIF_F_SG))
+ if (!netdev_feature_test_bit(NETIF_F_SG_BIT, sk->sk_route_caps))
return sock_no_sendpage_locked(sk, page, offset, size, flags);
tcp_rate_check_app_limited(sk); /* is sending application-limited? */
@@ -1166,7 +1167,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
if (err) {
tcp_set_state(sk, TCP_CLOSE);
inet->inet_dport = 0;
- sk->sk_route_caps = 0;
+ netdev_feature_zero(&sk->sk_route_caps);
}
}
flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
@@ -1205,7 +1206,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
goto out_err;
}
- zc = sk->sk_route_caps & NETIF_F_SG;
+ zc = netdev_feature_test_bit(NETIF_F_SG_BIT,
+ sk->sk_route_caps);
if (!zc)
uarg->zerocopy = 0;
}
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -323,7 +323,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
*/
tcp_set_state(sk, TCP_CLOSE);
ip_rt_put(rt);
- sk->sk_route_caps = 0;
+ netdev_feature_zero(&sk->sk_route_caps);
inet->inet_dport = 0;
return err;
}
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -833,7 +833,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
if (IS_ERR(dst)) {
- sk->sk_route_caps = 0;
+ netdev_feature_zero(&sk->sk_route_caps);
sk->sk_err_soft = -PTR_ERR(dst);
return PTR_ERR(dst);
}
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -121,7 +121,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
dst = inet6_csk_route_socket(sk, &fl6);
if (IS_ERR(dst)) {
sk->sk_err_soft = -PTR_ERR(dst);
- sk->sk_route_caps = 0;
+ netdev_feature_zero(&sk->sk_route_caps);
kfree_skb(skb);
return PTR_ERR(dst);
}
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -341,7 +341,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
tcp_set_state(sk, TCP_CLOSE);
failure:
inet->inet_dport = 0;
- sk->sk_route_caps = 0;
+ netdev_feature_zero(&sk->sk_route_caps);
return err;
}
Use the netdev_feature_xxx helpers to replace the open-coded logical
operations on netdev features.

Signed-off-by: Jian Shen <shenjian15@huawei.com>
---
 include/net/sock.h               |  7 ++++---
 net/core/skmsg.c                 |  3 ++-
 net/core/sock.c                  | 17 +++++++++++------
 net/ipv4/af_inet.c               |  2 +-
 net/ipv4/tcp.c                   | 10 ++++++----
 net/ipv4/tcp_ipv4.c              |  2 +-
 net/ipv6/af_inet6.c              |  2 +-
 net/ipv6/inet6_connection_sock.c |  2 +-
 net/ipv6/tcp_ipv6.c              |  2 +-
 9 files changed, 28 insertions(+), 19 deletions(-)