[net-next,v2,2/2] udp: track the forward memory release threshold in a hot cacheline

Message ID 2dede94e742d8096d6ac5e0f1979054ee158d9a8.1666287924.git.pabeni@redhat.com (mailing list archive)
State Accepted
Commit 8a3854c7b8e4532063b14bed34115079b7d0cb36
Delegated to: Netdev Maintainers
Series udp: avoid false sharing on receive

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 2012 this patch: 2012
netdev/cc_maintainers warning 3 maintainers not CCed: dhowells@redhat.com yoshfuji@linux-ipv6.org willemb@google.com
netdev/build_clang success Errors and warnings before: 526 this patch: 526
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 2140 this patch: 2140
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 82 lines checked
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Paolo Abeni Oct. 20, 2022, 5:48 p.m. UTC
When the receiver process and the BH run on different cores,
udp_rmem_release() experiences a cache miss while accessing sk_rcvbuf,
as the latter shares the same cacheline with sk_forward_alloc, which is
written by the BH.

With this patch, UDP tracks the rcvbuf value and its updates via custom
SOL_SOCKET socket option handling, and copies the forward memory threshold
value used by udp_rmem_release() into a different cacheline, one that the
above function already accesses and that is uncontended.

Since the UDP socket init operation has grown a bit, factor out the common
code between v4 and v6 into a shared helper.

Overall the above gives a 10% peak throughput increase under UDP flood.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
v1 -> v2:
 - factor out common init helper for udp && udpv6 sock (Kuniyuki)
---
 include/linux/udp.h |  3 +++
 include/net/udp.h   |  9 +++++++++
 net/ipv4/udp.c      | 18 +++++++++++++++---
 net/ipv6/udp.c      |  4 ++--
 4 files changed, 29 insertions(+), 5 deletions(-)
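
For context, the cached threshold is refreshed only when user space resizes
the receive buffer through SO_RCVBUF or SO_RCVBUFFORCE, which is exactly the
path intercepted in udp_lib_setsockopt() by this patch. A minimal user-space
sketch of that trigger (plain POSIX socket API, nothing specific to this
series; the 1 MiB request is just an example value):

  #include <stdio.h>
  #include <sys/socket.h>
  #include <netinet/in.h>
  #include <unistd.h>

  int main(void)
  {
  	int val = 1 << 20;		/* request a 1 MiB receive buffer */
  	socklen_t len = sizeof(val);
  	int fd = socket(AF_INET, SOCK_DGRAM, 0);

  	if (fd < 0) {
  		perror("socket");
  		return 1;
  	}

  	/* Updates sk->sk_rcvbuf; with this patch the UDP setsockopt path
  	 * also refreshes up->forward_threshold (rcvbuf >> 2), so the
  	 * fast path in udp_rmem_release() no longer reads sk_rcvbuf.
  	 */
  	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)) < 0)
  		perror("setsockopt(SO_RCVBUF)");

  	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) == 0)
  		printf("effective rcvbuf: %d\n", val); /* kernel doubles the request */

  	close(fd);
  	return 0;
  }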

Comments

Eric Dumazet Oct. 20, 2022, 6:10 p.m. UTC | #1
On Thu, Oct 20, 2022 at 10:49 AM Paolo Abeni <pabeni@redhat.com> wrote:
>
> When the receiver process and the BH run on different cores,
> udp_rmem_release() experiences a cache miss while accessing sk_rcvbuf,
> as the latter shares the same cacheline with sk_forward_alloc, which is
> written by the BH.
>
> With this patch, UDP tracks the rcvbuf value and its updates via custom
> SOL_SOCKET socket option handling, and copies the forward memory threshold
> value used by udp_rmem_release() into a different cacheline, one that the
> above function already accesses and that is uncontended.
>
> Since the UDP socket init operation has grown a bit, factor out the common
> code between v4 and v6 into a shared helper.
>
> Overall the above gives a 10% peak throughput increase under UDP flood.
>
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>


Reviewed-by: Eric Dumazet <edumazet@google.com>
Kuniyuki Iwashima Oct. 20, 2022, 6:20 p.m. UTC | #2
From:   Paolo Abeni <pabeni@redhat.com>
Date:   Thu, 20 Oct 2022 19:48:52 +0200
> When the receiver process and the BH run on different cores,
> udp_rmem_release() experiences a cache miss while accessing sk_rcvbuf,
> as the latter shares the same cacheline with sk_forward_alloc, which is
> written by the BH.
> 
> With this patch, UDP tracks the rcvbuf value and its updates via custom
> SOL_SOCKET socket option handling, and copies the forward memory threshold
> value used by udp_rmem_release() into a different cacheline, one that the
> above function already accesses and that is uncontended.
> 
> Since the UDP socket init operation has grown a bit, factor out the common
> code between v4 and v6 into a shared helper.
> 
> Overall the above gives a 10% peak throughput increase under UDP flood.
> 
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Acked-by: Kuniyuki Iwashima <kuniyu@amazon.com>

Thank you!


> ---
> v1 -> v2:
>  - factor out common init helper for udp && udpv6 sock (Kuniyuki)
> ---
>  include/linux/udp.h |  3 +++
>  include/net/udp.h   |  9 +++++++++
>  net/ipv4/udp.c      | 18 +++++++++++++++---
>  net/ipv6/udp.c      |  4 ++--
>  4 files changed, 29 insertions(+), 5 deletions(-)
> 
> diff --git a/include/linux/udp.h b/include/linux/udp.h
> index e96da4157d04..5cdba00a904a 100644
> --- a/include/linux/udp.h
> +++ b/include/linux/udp.h
> @@ -87,6 +87,9 @@ struct udp_sock {
>  
>  	/* This field is dirtied by udp_recvmsg() */
>  	int		forward_deficit;
> +
> +	/* This fields follows rcvbuf value, and is touched by udp_recvmsg */
> +	int		forward_threshold;
>  };
>  
>  #define UDP_MAX_SEGMENTS	(1 << 6UL)
> diff --git a/include/net/udp.h b/include/net/udp.h
> index fee053bcd17c..de4b528522bb 100644
> --- a/include/net/udp.h
> +++ b/include/net/udp.h
> @@ -174,6 +174,15 @@ INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
>  struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
>  				  netdev_features_t features, bool is_ipv6);
>  
> +static inline void udp_lib_init_sock(struct sock *sk)
> +{
> +	struct udp_sock *up = udp_sk(sk);
> +
> +	skb_queue_head_init(&up->reader_queue);
> +	up->forward_threshold = sk->sk_rcvbuf >> 2;
> +	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
> +}
> +
>  /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
>  static inline int udp_lib_hash(struct sock *sk)
>  {
> diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
> index 8126f67d18b3..e361ad93999e 100644
> --- a/net/ipv4/udp.c
> +++ b/net/ipv4/udp.c
> @@ -1448,7 +1448,7 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
>  	if (likely(partial)) {
>  		up->forward_deficit += size;
>  		size = up->forward_deficit;
> -		if (size < (sk->sk_rcvbuf >> 2) &&
> +		if (size < READ_ONCE(up->forward_threshold) &&
>  		    !skb_queue_empty(&up->reader_queue))
>  			return;
>  	} else {
> @@ -1622,7 +1622,7 @@ static void udp_destruct_sock(struct sock *sk)
>  
>  int udp_init_sock(struct sock *sk)
>  {
> -	skb_queue_head_init(&udp_sk(sk)->reader_queue);
> +	udp_lib_init_sock(sk);
>  	sk->sk_destruct = udp_destruct_sock;
>  	return 0;
>  }
> @@ -2671,6 +2671,18 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
>  	int err = 0;
>  	int is_udplite = IS_UDPLITE(sk);
>  
> +	if (level == SOL_SOCKET) {
> +		err = sk_setsockopt(sk, level, optname, optval, optlen);
> +
> +		if (optname == SO_RCVBUF || optname == SO_RCVBUFFORCE) {
> +			sockopt_lock_sock(sk);
> +			/* paired with READ_ONCE in udp_rmem_release() */
> +			WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2);
> +			sockopt_release_sock(sk);
> +		}
> +		return err;
> +	}
> +
>  	if (optlen < sizeof(int))
>  		return -EINVAL;
>  
> @@ -2784,7 +2796,7 @@ EXPORT_SYMBOL(udp_lib_setsockopt);
>  int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
>  		   unsigned int optlen)
>  {
> -	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
> +	if (level == SOL_UDP  ||  level == SOL_UDPLITE || level == SOL_SOCKET)
>  		return udp_lib_setsockopt(sk, level, optname,
>  					  optval, optlen,
>  					  udp_push_pending_frames);
> diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
> index 8d09f0ea5b8c..b0bc4e27ec2f 100644
> --- a/net/ipv6/udp.c
> +++ b/net/ipv6/udp.c
> @@ -64,7 +64,7 @@ static void udpv6_destruct_sock(struct sock *sk)
>  
>  int udpv6_init_sock(struct sock *sk)
>  {
> -	skb_queue_head_init(&udp_sk(sk)->reader_queue);
> +	udp_lib_init_sock(sk);
>  	sk->sk_destruct = udpv6_destruct_sock;
>  	return 0;
>  }
> @@ -1671,7 +1671,7 @@ void udpv6_destroy_sock(struct sock *sk)
>  int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
>  		     unsigned int optlen)
>  {
> -	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
> +	if (level == SOL_UDP  ||  level == SOL_UDPLITE || level == SOL_SOCKET)
>  		return udp_lib_setsockopt(sk, level, optname,
>  					  optval, optlen,
>  					  udp_v6_push_pending_frames);
> -- 
> 2.37.3

Patch

diff --git a/include/linux/udp.h b/include/linux/udp.h
index e96da4157d04..5cdba00a904a 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -87,6 +87,9 @@  struct udp_sock {
 
 	/* This field is dirtied by udp_recvmsg() */
 	int		forward_deficit;
+
+	/* This fields follows rcvbuf value, and is touched by udp_recvmsg */
+	int		forward_threshold;
 };
 
 #define UDP_MAX_SEGMENTS	(1 << 6UL)
diff --git a/include/net/udp.h b/include/net/udp.h
index fee053bcd17c..de4b528522bb 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -174,6 +174,15 @@  INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 				  netdev_features_t features, bool is_ipv6);
 
+static inline void udp_lib_init_sock(struct sock *sk)
+{
+	struct udp_sock *up = udp_sk(sk);
+
+	skb_queue_head_init(&up->reader_queue);
+	up->forward_threshold = sk->sk_rcvbuf >> 2;
+	set_bit(SOCK_CUSTOM_SOCKOPT, &sk->sk_socket->flags);
+}
+
 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
 static inline int udp_lib_hash(struct sock *sk)
 {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8126f67d18b3..e361ad93999e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1448,7 +1448,7 @@  static void udp_rmem_release(struct sock *sk, int size, int partial,
 	if (likely(partial)) {
 		up->forward_deficit += size;
 		size = up->forward_deficit;
-		if (size < (sk->sk_rcvbuf >> 2) &&
+		if (size < READ_ONCE(up->forward_threshold) &&
 		    !skb_queue_empty(&up->reader_queue))
 			return;
 	} else {
@@ -1622,7 +1622,7 @@  static void udp_destruct_sock(struct sock *sk)
 
 int udp_init_sock(struct sock *sk)
 {
-	skb_queue_head_init(&udp_sk(sk)->reader_queue);
+	udp_lib_init_sock(sk);
 	sk->sk_destruct = udp_destruct_sock;
 	return 0;
 }
@@ -2671,6 +2671,18 @@  int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 	int err = 0;
 	int is_udplite = IS_UDPLITE(sk);
 
+	if (level == SOL_SOCKET) {
+		err = sk_setsockopt(sk, level, optname, optval, optlen);
+
+		if (optname == SO_RCVBUF || optname == SO_RCVBUFFORCE) {
+			sockopt_lock_sock(sk);
+			/* paired with READ_ONCE in udp_rmem_release() */
+			WRITE_ONCE(up->forward_threshold, sk->sk_rcvbuf >> 2);
+			sockopt_release_sock(sk);
+		}
+		return err;
+	}
+
 	if (optlen < sizeof(int))
 		return -EINVAL;
 
@@ -2784,7 +2796,7 @@  EXPORT_SYMBOL(udp_lib_setsockopt);
 int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
 		   unsigned int optlen)
 {
-	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE || level == SOL_SOCKET)
 		return udp_lib_setsockopt(sk, level, optname,
 					  optval, optlen,
 					  udp_push_pending_frames);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8d09f0ea5b8c..b0bc4e27ec2f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -64,7 +64,7 @@  static void udpv6_destruct_sock(struct sock *sk)
 
 int udpv6_init_sock(struct sock *sk)
 {
-	skb_queue_head_init(&udp_sk(sk)->reader_queue);
+	udp_lib_init_sock(sk);
 	sk->sk_destruct = udpv6_destruct_sock;
 	return 0;
 }
@@ -1671,7 +1671,7 @@  void udpv6_destroy_sock(struct sock *sk)
 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
 		     unsigned int optlen)
 {
-	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
+	if (level == SOL_UDP  ||  level == SOL_UDPLITE || level == SOL_SOCKET)
 		return udp_lib_setsockopt(sk, level, optname,
 					  optval, optlen,
 					  udp_v6_push_pending_frames);
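
Appendix (editor's illustration, not part of the patch): the layout idea behind
the series can be sketched in plain C. The struct and field names below are
made up for the example and do not reflect the real struct sock / struct
udp_sock layout; 64 bytes is an assumed cacheline size.

  /* Placing a field written by the BH and a field read by the receiver
   * process in the same 64-byte line causes false sharing: every BH write
   * invalidates the line on the reader's CPU.  Keeping a reader-private
   * copy of the threshold in a reader-hot line (as up->forward_threshold
   * does for sk_rcvbuf >> 2) avoids that contention on the fast path.
   */
  #define CACHELINE_SIZE 64	/* assumed for the example */

  struct rx_accounting_example {
  	/* writer-hot: dirtied by the BH while queueing packets */
  	int forward_alloc __attribute__((aligned(CACHELINE_SIZE)));

  	/* reader-hot: copy of rcvbuf >> 2, updated only on SO_RCVBUF*,
  	 * read by the memory-release path without touching the line above
  	 */
  	int forward_threshold __attribute__((aligned(CACHELINE_SIZE)));
  };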