diff mbox series

tcp: relookup sock for RST+ACK packets handled by obsolete req sock

Message ID 20210311230756.971993-1-ovov@yandex-team.ru (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series tcp: relookup sock for RST+ACK packets handled by obsolete req sock | expand

Checks

Context Check Description
netdev/cover_letter success Link
netdev/fixes_present success Link
netdev/patch_count success Link
netdev/tree_selection success Guessed tree name to be net-next
netdev/subject_prefix warning Target tree name not specified in the subject
netdev/cc_maintainers fail 5 maintainers not CCed: dsahern@kernel.org yoshfuji@linux-ipv6.org kuba@kernel.org davem@davemloft.net edumazet@google.com
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Link
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 3447 this patch: 3447
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success Link
netdev/checkpatch warning WARNING: 'immediatly' may be misspelled - perhaps 'immediately'? WARNING: Missing a blank line after declarations
netdev/build_allmodconfig_warn success Errors and warnings before: 3684 this patch: 3684
netdev/header_inline success Link

Commit Message

Alexander Ovechkin March 11, 2021, 11:07 p.m. UTC
Currently tcp_check_req can be called with an obsolete req socket for which the
big socket has already been created (because of a CPU race or early demux
assigning the req socket to multiple packets in a GRO batch).

Commit e0f9759f530bf789e984 ("tcp: try to keep packet if SYN_RCV race
is lost") added a retry for the case when tcp_check_req is called for a PSH|ACK packet.
But if the client sends RST+ACK immediately after the connection is
established (it is performing a healthcheck, for example) the retry does not
occur. In that case tcp_check_req tries to close the req socket,
leaving the big socket active.

Signed-off-by: Alexander Ovechkin <ovov@yandex-team.ru>
Reported-by: Oleg Senin <olegsenin@yandex-team.ru>
---
 include/net/inet_connection_sock.h | 2 +-
 net/ipv4/inet_connection_sock.c    | 6 ++++--
 net/ipv4/tcp_minisocks.c           | 6 ++++--
 3 files changed, 9 insertions(+), 5 deletions(-)

Comments

Eric Dumazet March 15, 2021, 10:33 a.m. UTC | #1
On 3/12/21 12:07 AM, Alexander Ovechkin wrote:
> Currently tcp_check_req can be called with an obsolete req socket for which the
> big socket has already been created (because of a CPU race or early demux
> assigning the req socket to multiple packets in a GRO batch).
> 
> Commit e0f9759f530bf789e984 ("tcp: try to keep packet if SYN_RCV race
> is lost") added a retry for the case when tcp_check_req is called for a PSH|ACK packet.
> But if the client sends RST+ACK immediately after the connection is
> established (it is performing healthcheck, for example) retry does not
> occur. In that case tcp_check_req tries to close req socket,
> leaving big socket active.
> 

Please insert the following tag, right before your SOB
Fixes: e0f9759f530 ("tcp: try to keep packet if SYN_RCV race is lost")
> Signed-off-by: Alexander Ovechkin <ovov@yandex-team.ru>
> Reported-by: Oleg Senin <olegsenin@yandex-team.ru>

Please CC TCP maintainer for your TCP patches, I almost missed it.


> ---
>  include/net/inet_connection_sock.h | 2 +-
>  net/ipv4/inet_connection_sock.c    | 6 ++++--
>  net/ipv4/tcp_minisocks.c           | 6 ++++--
>  3 files changed, 9 insertions(+), 5 deletions(-)
> 
> diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
> index 10a625760de9..3c8c59471bc1 100644
> --- a/include/net/inet_connection_sock.h
> +++ b/include/net/inet_connection_sock.h
> @@ -282,7 +282,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
>  	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
>  }
>  
> -void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
> +bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
>  void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
>  
>  static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
> diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
> index 6bd7ca09af03..08ca9de2a708 100644
> --- a/net/ipv4/inet_connection_sock.c
> +++ b/net/ipv4/inet_connection_sock.c
> @@ -705,12 +705,14 @@ static bool reqsk_queue_unlink(struct request_sock *req)
>  	return found;
>  }
>  
> -void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
> +bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
>  {
> -	if (reqsk_queue_unlink(req)) {
> +	bool unlinked = reqsk_queue_unlink(req);

Add an empty line.

> +	if (unlinked) {
>  		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
>  		reqsk_put(req);
>  	}
> +	return unlinked;
>  }
>  EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
>  
> diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
> index 0055ae0a3bf8..31ed3423503d 100644
> --- a/net/ipv4/tcp_minisocks.c
> +++ b/net/ipv4/tcp_minisocks.c
> @@ -804,8 +804,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
>  		tcp_reset(sk, skb);
>  	}
>  	if (!fastopen) {
> -		inet_csk_reqsk_queue_drop(sk, req);
> -		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
> +		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

Same here.

> +		if (unlinked)
> +			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
> +		*req_stolen = !unlinked;
>  	}
>  	return NULL;
>  }
> 

Other than that, your patch looks fine to me, thanks.
diff mbox series

Patch

diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 10a625760de9..3c8c59471bc1 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -282,7 +282,7 @@  static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
 }
 
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
 
 static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6bd7ca09af03..08ca9de2a708 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -705,12 +705,14 @@  static bool reqsk_queue_unlink(struct request_sock *req)
 	return found;
 }
 
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
 {
-	if (reqsk_queue_unlink(req)) {
+	bool unlinked = reqsk_queue_unlink(req);
+	if (unlinked) {
 		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 		reqsk_put(req);
 	}
+	return unlinked;
 }
 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0055ae0a3bf8..31ed3423503d 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -804,8 +804,10 @@  struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 		tcp_reset(sk, skb);
 	}
 	if (!fastopen) {
-		inet_csk_reqsk_queue_drop(sk, req);
-		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
+		if (unlinked)
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+		*req_stolen = !unlinked;
 	}
 	return NULL;
 }