[v4,bpf-next,06/11] tcp: Migrate TCP_NEW_SYN_RECV requests at retransmitting SYN+ACKs.

Message ID 20210427034623.46528-7-kuniyu@amazon.co.jp (mailing list archive)
State Superseded
Delegated to: BPF
Series Socket migration for SO_REUSEPORT.

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers warning 6 maintainers not CCed: dsahern@kernel.org yhs@fb.com kpsingh@kernel.org yoshfuji@linux-ipv6.org john.fastabend@gmail.com songliubraving@fb.com
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 4 this patch: 4
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns
netdev/build_allmodconfig_warn success Errors and warnings before: 4 this patch: 4
netdev/header_inline success

Commit Message

Iwashima, Kuniyuki April 27, 2021, 3:46 a.m. UTC
As with the preceding patch, this patch changes reqsk_timer_handler() to
call reuseport_migrate_sock() and reqsk_clone() to migrate in-flight
requests when retransmitting SYN+ACKs. If we can select a new listener and
clone the request, we rearm the SYN+ACK timer for the new req. If the
timer is armed successfully, we call inet_ehash_insert() to unhash the old
req and put the new req into ehash.

The noteworthy point here is that once the old req is unhashed, another
CPU processing it may lose the "own_req" race in tcp_v[46]_syn_recv_sock()
and drop the final ACK packet. However, the new timer recovers from this
situation by retransmitting the SYN+ACK.

Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
---
 net/core/request_sock.c         |  1 +
 net/ipv4/inet_connection_sock.c | 76 +++++++++++++++++++++++++++++----
 2 files changed, 68 insertions(+), 9 deletions(-)
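
For orientation before the diff, the migration path added to
reqsk_timer_handler() can be condensed as follows (a simplified sketch of
the patch below, with error handling and the non-migration path elided):

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
		/* the old listener is closing: pick a new one from the
		 * same reuseport group
		 */
		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
		nreq = reqsk_clone(req, nsk);	/* copy the in-flight req */

		refcount_set(&nreq->rsk_refcnt, 2 + 1);	/* timer + ehash + local */
		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);

		req = nreq;			/* carry on with the clone */
		sk_listener = nsk;
	}

	/* ... later, once the SYN+ACK timer is rearmed successfully ... */
	mod_timer(&req->rsk_timer, jiffies + timeo);
	if (nreq) {
		/* unhash the old req and make the clone visible in ehash */
		inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL);
		reqsk_migrate_reset(oreq);	/* clear pointers now owned by nreq */
		reqsk_put(oreq);
		reqsk_put(nreq);		/* drop the local ref */
	}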

Comments

Martin KaFai Lau May 5, 2021, 4:56 a.m. UTC | #1
On Tue, Apr 27, 2021 at 12:46:18PM +0900, Kuniyuki Iwashima wrote:
[ ... ]

> diff --git a/net/core/request_sock.c b/net/core/request_sock.c
> index 82cf9fbe2668..08c37ecd923b 100644
> --- a/net/core/request_sock.c
> +++ b/net/core/request_sock.c
> @@ -151,6 +151,7 @@ struct request_sock *reqsk_clone(struct request_sock *req, struct sock *sk)
>  	memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
>  	       req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
>  
> +	sk_node_init(&nreq_sk->sk_node);
This belongs to patch 5.
"rsk_refcnt" also needs to be 0 instead of staying uninitialized
after reqsk_clone() returns.
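
A minimal sketch of what the suggested change could look like in
reqsk_clone() (hypothetical; the actual v5 diff is not shown here):

	sk_node_init(&nreq_sk->sk_node);	/* to be moved to patch 5 */
	/* mirror reqsk_alloc(): don't leave the refcount uninitialized;
	 * the caller takes its references explicitly later (hypothetical)
	 */
	refcount_set(&nreq->rsk_refcnt, 0);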

>  	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
>  #ifdef CONFIG_XPS
>  	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
> diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
> index 851992405826..dc984d1f352e 100644
> --- a/net/ipv4/inet_connection_sock.c
> +++ b/net/ipv4/inet_connection_sock.c
> @@ -695,10 +695,20 @@ int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
>  }
>  EXPORT_SYMBOL(inet_rtx_syn_ack);
>  
> +static void reqsk_queue_migrated(struct request_sock_queue *queue,
> +				 const struct request_sock *req)
> +{
> +	if (req->num_timeout == 0)
> +		atomic_inc(&queue->young);
> +	atomic_inc(&queue->qlen);
> +}
> +
>  static void reqsk_migrate_reset(struct request_sock *req)
>  {
> +	req->saved_syn = NULL;
> +	inet_rsk(req)->ireq_opt = NULL;
>  #if IS_ENABLED(CONFIG_IPV6)
> -	inet_rsk(req)->ipv6_opt = NULL;
> +	inet_rsk(req)->pktopts = NULL;
>  #endif
>  }
>  
> @@ -741,16 +751,37 @@ EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
>  
>  static void reqsk_timer_handler(struct timer_list *t)
>  {
> -	struct request_sock *req = from_timer(req, t, rsk_timer);
> -	struct sock *sk_listener = req->rsk_listener;
> -	struct net *net = sock_net(sk_listener);
> -	struct inet_connection_sock *icsk = inet_csk(sk_listener);
> -	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
> +	struct request_sock *req = from_timer(req, t, rsk_timer), *nreq = NULL, *oreq = req;
nit. This line is too long.
Let's move the new "*nreq" and "*oreq" to a new line and keep the current
"*req" line as is:
	struct request_sock *req = from_timer(req, t, rsk_timer);
	struct request_sock *oreq = req, *nreq = NULL;

> +	struct sock *sk_listener = req->rsk_listener, *nsk = NULL;
"*nsk" can be moved into the following "!= TCP_LISTEN" case below.
Keep the current "*sk_listener" line as is.

> +	struct inet_connection_sock *icsk;
> +	struct request_sock_queue *queue;
> +	struct net *net;
>  	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
>  
> -	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
> -		goto drop;
> +	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {

		struct sock *nsk;

> +		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
> +		if (!nsk)
> +			goto drop;
> +
> +		nreq = reqsk_clone(req, nsk);
> +		if (!nreq)
> +			goto drop;
> +
> +		/* The new timer for the cloned req can decrease the 2
> +		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
> +		 * hold another count to prevent use-after-free and
> +		 * call reqsk_put() just before return.
> +		 */
> +		refcount_set(&nreq->rsk_refcnt, 2 + 1);
> +		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
> +		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
> +
> +		req = nreq;
> +		sk_listener = nsk;
> +	}
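
The "2 + 1" in the hunk above is worth unpacking. As read from the patch,
the three references on the cloned req break down as follows (a condensed
annotation, not the literal code):

	refcount_set(&nreq->rsk_refcnt, 2 + 1);
	/* 1: for nreq->rsk_timer, armed by mod_timer() further down
	 * 1: for the ehash entry installed by inet_ehash_insert()
	 * 1: held locally so the handler can keep touching nreq even if
	 *    the new timer fires early on another CPU and calls
	 *    inet_csk_reqsk_queue_drop_and_put(); dropped by the final
	 *    reqsk_put(nreq) before returning
	 */
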
Iwashima, Kuniyuki May 5, 2021, 11:16 p.m. UTC | #2
From:   Martin KaFai Lau <kafai@fb.com>
Date:   Tue, 4 May 2021 21:56:18 -0700
> On Tue, Apr 27, 2021 at 12:46:18PM +0900, Kuniyuki Iwashima wrote:
> [ ... ]
> 
> > diff --git a/net/core/request_sock.c b/net/core/request_sock.c
> > index 82cf9fbe2668..08c37ecd923b 100644
> > --- a/net/core/request_sock.c
> > +++ b/net/core/request_sock.c
> > @@ -151,6 +151,7 @@ struct request_sock *reqsk_clone(struct request_sock *req, struct sock *sk)
> >  	memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
> >  	       req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
> >  
> > +	sk_node_init(&nreq_sk->sk_node);
> This belongs to patch 5.
> "rsk_refcnt" also needs to be 0 instead of staying uninitialized
> after reqsk_clone() returns.

I'll move this part to patch 5 and initialize rsk_refcnt to 0 in
reqsk_clone(), like reqsk_alloc() does.


> 
> >  	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
> >  #ifdef CONFIG_XPS
> >  	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
> > diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
> > index 851992405826..dc984d1f352e 100644
> > --- a/net/ipv4/inet_connection_sock.c
> > +++ b/net/ipv4/inet_connection_sock.c
> > @@ -695,10 +695,20 @@ int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
> >  }
> >  EXPORT_SYMBOL(inet_rtx_syn_ack);
> >  
> > +static void reqsk_queue_migrated(struct request_sock_queue *queue,
> > +				 const struct request_sock *req)
> > +{
> > +	if (req->num_timeout == 0)
> > +		atomic_inc(&queue->young);
> > +	atomic_inc(&queue->qlen);
> > +}
> > +
> >  static void reqsk_migrate_reset(struct request_sock *req)
> >  {
> > +	req->saved_syn = NULL;
> > +	inet_rsk(req)->ireq_opt = NULL;
> >  #if IS_ENABLED(CONFIG_IPV6)
> > -	inet_rsk(req)->ipv6_opt = NULL;
> > +	inet_rsk(req)->pktopts = NULL;
> >  #endif
> >  }
> >  
> > @@ -741,16 +751,37 @@ EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
> >  
> >  static void reqsk_timer_handler(struct timer_list *t)
> >  {
> > -	struct request_sock *req = from_timer(req, t, rsk_timer);
> > -	struct sock *sk_listener = req->rsk_listener;
> > -	struct net *net = sock_net(sk_listener);
> > -	struct inet_connection_sock *icsk = inet_csk(sk_listener);
> > -	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
> > +	struct request_sock *req = from_timer(req, t, rsk_timer), *nreq = NULL, *oreq = req;
> nit. This line is too long.
> Let's move the new "*nreq" and "*oreq" to a new line and keep the current
> "*req" line as is:
> 	struct request_sock *req = from_timer(req, t, rsk_timer);
> 	struct request_sock *oreq = req, *nreq = NULL;

I'll fix that.


> 
> > +	struct sock *sk_listener = req->rsk_listener, *nsk = NULL;
> "*nsk" can be moved into the following "!= TCP_LISTEN" case below.
> Keep the current "*sk_listener" line as is.

I'll move nsk's definition.

Thank you.


> 
> > +	struct inet_connection_sock *icsk;
> > +	struct request_sock_queue *queue;
> > +	struct net *net;
> >  	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
> >  
> > -	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
> > -		goto drop;
> > +	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
> 
> 		struct sock *nsk;
> 
> > +		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
> > +		if (!nsk)
> > +			goto drop;
> > +
> > +		nreq = reqsk_clone(req, nsk);
> > +		if (!nreq)
> > +			goto drop;
> > +
> > +		/* The new timer for the cloned req can decrease the 2
> > +		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
> > +		 * hold another count to prevent use-after-free and
> > +		 * call reqsk_put() just before return.
> > +		 */
> > +		refcount_set(&nreq->rsk_refcnt, 2 + 1);
> > +		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
> > +		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
> > +
> > +		req = nreq;
> > +		sk_listener = nsk;
> > +	}
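
Putting the two style nits together, the declaration block in v5 would
presumably start like this (assembled from the review comments above, not
taken from an actual v5 diff):

	static void reqsk_timer_handler(struct timer_list *t)
	{
		struct request_sock *req = from_timer(req, t, rsk_timer);
		struct request_sock *oreq = req, *nreq = NULL;
		struct sock *sk_listener = req->rsk_listener;
		struct inet_connection_sock *icsk;
		struct request_sock_queue *queue;
		struct net *net;
		int max_syn_ack_retries, qlen, expire = 0, resend = 0;

		if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
			struct sock *nsk;

			nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
			...
		}
		...
	}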

Patch

diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 82cf9fbe2668..08c37ecd923b 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -151,6 +151,7 @@  struct request_sock *reqsk_clone(struct request_sock *req, struct sock *sk)
 	memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
 	       req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));
 
+	sk_node_init(&nreq_sk->sk_node);
 	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
 #ifdef CONFIG_XPS
 	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 851992405826..dc984d1f352e 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -695,10 +695,20 @@  int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
 }
 EXPORT_SYMBOL(inet_rtx_syn_ack);
 
+static void reqsk_queue_migrated(struct request_sock_queue *queue,
+				 const struct request_sock *req)
+{
+	if (req->num_timeout == 0)
+		atomic_inc(&queue->young);
+	atomic_inc(&queue->qlen);
+}
+
 static void reqsk_migrate_reset(struct request_sock *req)
 {
+	req->saved_syn = NULL;
+	inet_rsk(req)->ireq_opt = NULL;
 #if IS_ENABLED(CONFIG_IPV6)
-	inet_rsk(req)->ipv6_opt = NULL;
+	inet_rsk(req)->pktopts = NULL;
 #endif
 }
 
@@ -741,16 +751,37 @@  EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
 
 static void reqsk_timer_handler(struct timer_list *t)
 {
-	struct request_sock *req = from_timer(req, t, rsk_timer);
-	struct sock *sk_listener = req->rsk_listener;
-	struct net *net = sock_net(sk_listener);
-	struct inet_connection_sock *icsk = inet_csk(sk_listener);
-	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
+	struct request_sock *req = from_timer(req, t, rsk_timer), *nreq = NULL, *oreq = req;
+	struct sock *sk_listener = req->rsk_listener, *nsk = NULL;
+	struct inet_connection_sock *icsk;
+	struct request_sock_queue *queue;
+	struct net *net;
 	int max_syn_ack_retries, qlen, expire = 0, resend = 0;
 
-	if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
-		goto drop;
+	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
+		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
+		if (!nsk)
+			goto drop;
+
+		nreq = reqsk_clone(req, nsk);
+		if (!nreq)
+			goto drop;
+
+		/* The new timer for the cloned req can decrease the 2
+		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
+		 * hold another count to prevent use-after-free and
+		 * call reqsk_put() just before return.
+		 */
+		refcount_set(&nreq->rsk_refcnt, 2 + 1);
+		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
+		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);
+
+		req = nreq;
+		sk_listener = nsk;
+	}
 
+	icsk = inet_csk(sk_listener);
+	net = sock_net(sk_listener);
 	max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
 	/* Normally all the openreqs are young and become mature
 	 * (i.e. converted to established socket) for first timeout.
@@ -769,6 +800,7 @@  static void reqsk_timer_handler(struct timer_list *t)
 	 * embrions; and abort old ones without pity, if old
 	 * ones are about to clog our table.
 	 */
+	queue = &icsk->icsk_accept_queue;
 	qlen = reqsk_queue_len(queue);
 	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
 		int young = reqsk_queue_len_young(queue) << 1;
@@ -793,10 +825,36 @@  static void reqsk_timer_handler(struct timer_list *t)
 			atomic_dec(&queue->young);
 		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 		mod_timer(&req->rsk_timer, jiffies + timeo);
+
+		if (!nreq)
+			return;
+
+		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
+			/* delete timer */
+			inet_csk_reqsk_queue_drop(sk_listener, nreq);
+			goto drop;
+		}
+
+		reqsk_migrate_reset(oreq);
+		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
+		reqsk_put(oreq);
+
+		reqsk_put(nreq);
 		return;
 	}
+
 drop:
-	inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
+	/* Even if we can clone the req, we may need not retransmit any more
+	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
+	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
+	 */
+	if (nreq) {
+		reqsk_migrate_reset(nreq);
+		reqsk_queue_removed(queue, nreq);
+		__reqsk_free(nreq);
+	}
+
+	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
 }
 
 static void reqsk_queue_hash_req(struct request_sock *req,
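
Finally, some context for the "the new timer recovers from this situation"
claim in the commit message: if the final ACK is dropped because the old
req lost the "own_req" race, the rearmed timer retransmits the SYN+ACK
with exponential backoff, timeo = min(TCP_TIMEOUT_INIT << req->num_timeout,
TCP_RTO_MAX), prompting the client to resend the ACK. A standalone
illustration of that schedule, assuming the usual kernel constants
TCP_TIMEOUT_INIT = 1s and TCP_RTO_MAX = 120s (the number of retransmissions
is bounded by max_syn_ack_retries, typically 5 via
net.ipv4.tcp_synack_retries):

	#include <stdio.h>

	/* Illustrates the SYN+ACK retransmission backoff computed in
	 * reqsk_timer_handler(); assumes TCP_TIMEOUT_INIT = 1s and
	 * TCP_RTO_MAX = 120s, their usual values in include/net/tcp.h.
	 */
	#define TCP_TIMEOUT_INIT	1U	/* seconds */
	#define TCP_RTO_MAX		120U	/* seconds */

	int main(void)
	{
		unsigned int num_timeout;

		for (num_timeout = 0; num_timeout <= 7; num_timeout++) {
			unsigned int timeo = TCP_TIMEOUT_INIT << num_timeout;

			if (timeo > TCP_RTO_MAX)
				timeo = TCP_RTO_MAX;
			printf("num_timeout=%u: next SYN+ACK in %3us\n",
			       num_timeout, timeo);
		}
		return 0;
	}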