
[v2,mptcp-net] mptcp: avoid race on msk state changes

Message ID a96e100aa429a9bb947a2b22cc16682649c93550.1623949487.git.pabeni@redhat.com (mailing list archive)
State Accepted, archived
Commit fa35d78a941945996278e038446ad051aabdd32e
Delegated to: Matthieu Baerts
Series [v2,mptcp-net] mptcp: avoid race on msk state changes

Commit Message

Paolo Abeni June 17, 2021, 5:05 p.m. UTC
The msk socket state is currently updated in a few spots without
owning the msk socket lock itself.

Some of these operations are safe, as they happen before the msk
socket is exposed to user-space and can't race with other changes.

A couple of them, at connect time, can actually race with close()
or shutdown(), breaking the socket state machine.

This change addresses the issue by moving such updates under the msk
socket lock with the usual:

<acquire spinlock>
<check sk lock owner>
<eventually defer to release_cb>

scheme.

Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/56
Fixes: 8fd738049ac3 ("mptcp: fallback in case of simultaneous connect")
Fixes: c3c123d16c0e ("net: mptcp: don't hang in mptcp_sendmsg() after TCP fallbac")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
--
v1 -> v2:
 - move the hooks inside release_cb() before other state-related actions
---
 net/mptcp/protocol.c |  5 +++++
 net/mptcp/protocol.h |  2 ++
 net/mptcp/subflow.c  | 30 ++++++++++++++++++++++--------
 3 files changed, 29 insertions(+), 8 deletions(-)
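
As a quick reference, the scheme above maps onto the new helpers in the diff
below. The condensed sketch here only illustrates the flow (the comments are
annotations, not part of the patch) and is not meant to be applied on its own:

void __mptcp_set_connected(struct sock *sk)
{
	/* only promote a socket that is still completing the handshake;
	 * a concurrent close()/shutdown() has already moved sk_state on
	 */
	if (sk->sk_state == TCP_SYN_SENT) {
		inet_sk_state_store(sk, TCP_ESTABLISHED);
		sk->sk_state_change(sk);
	}
}

static void mptcp_set_connected(struct sock *sk)
{
	mptcp_data_lock(sk);			/* <acquire spinlock> */
	if (!sock_owned_by_user(sk))		/* <check sk lock owner> */
		__mptcp_set_connected(sk);	/* update the state directly */
	else					/* <eventually defer to release_cb> */
		set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags);
	mptcp_data_unlock(sk);
}

/* in mptcp_release_cb(): the lock owner replays the deferred transition
 * when it releases the msk socket lock
 */
if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
	__mptcp_set_connected(sk);

Deferring through the MPTCP_CONNECTED bit in msk->flags keeps the state
transition ordered before the other release_cb actions, which is the v1 -> v2
change noted above.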

Comments

Mat Martineau June 17, 2021, 6:50 p.m. UTC | #1
On Thu, 17 Jun 2021, Paolo Abeni wrote:

> The msk socket state is currently updated in a few spots without
> owning the msk socket lock itself.
>
> Some of these operations are safe, as they happen before the msk
> socket is exposed to user-space and can't race with other changes.
>
> A couple of them, at connect time, can actually race with close()
> or shutdown(), breaking the socket state machine.
>
> This change addresses the issue by moving such updates under the msk
> socket lock with the usual:
>
> <acquire spinlock>
> <check sk lock owner>
> <eventually defer to release_cb>
>
> scheme.
>
> Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/56
> Fixes: 8fd738049ac3 ("mptcp: fallback in case of simultaneous connect")
> Fixes: c3c123d16c0e ("net: mptcp: don't hang in mptcp_sendmsg() after TCP fallbac")
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> --
> v1 -> v2:
> - move the hooks inside release_cb() before other state-related actions

Thanks for the v2, looks ready to apply.

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>


-Mat



--
Mat Martineau
Intel
Matthieu Baerts June 19, 2021, 7:03 a.m. UTC | #2
Hi Paolo, Mat,

On 17/06/2021 19:05, Paolo Abeni wrote:
> The msk socket state is currently updated in a few spots without
> owning the msk socket lock itself.
> 
> Some of these operations are safe, as they happen before the msk
> socket is exposed to user-space and can't race with other changes.
> 
> A couple of them, at connect time, can actually race with close()
> or shutdown(), breaking the socket state machine.
> 
> This change addresses the issue by moving such updates under the msk
> socket lock with the usual:
> 
> <acquire spinlock>
> <check sk lock owner>
> <eventually defer to release_cb>
> 
> scheme.
> 
> Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/56
> Fixes: 8fd738049ac3 ("mptcp: fallback in case of simultaneous connect")
> Fixes: c3c123d16c0e ("net: mptcp: don't hang in mptcp_sendmsg() after TCP fallbac")

(I added a "k" at the end)

> Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Thank you for the patch and the review!

Now in our tree -- "fixes for net" -- with Mat's RvB tag:

- fa35d78a9419: mptcp: avoid race on msk state changes
- Results: 1aa8d1f89a86..43a335a6d44d

Builds and tests are now in progress:

https://cirrus-ci.com/github/multipath-tcp/mptcp_net-next/export/20210619T070308
https://github.com/multipath-tcp/mptcp_net-next/actions/workflows/build-validation.yml?query=branch:export/20210619T070308

Cheers,
Matt

Patch

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 05c8382aafef..ddce5b7bbefd 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2902,6 +2902,11 @@  static void mptcp_release_cb(struct sock *sk)
 		spin_lock_bh(&sk->sk_lock.slock);
 	}
 
+	/* be sure to set the current sk state before taking actions
+	 * depending on sk_state
+	 */
+	if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
+		__mptcp_set_connected(sk);
 	if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
 		__mptcp_clean_una_wakeup(sk);
 	if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 2480db50cbd2..515bb1e6acec 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -112,6 +112,7 @@ 
 #define MPTCP_ERROR_REPORT	8
 #define MPTCP_RETRANSMIT	9
 #define MPTCP_WORK_SYNC_SETSOCKOPT 10
+#define MPTCP_CONNECTED		11
 
 static inline bool before64(__u64 seq1, __u64 seq2)
 {
@@ -600,6 +601,7 @@  void mptcp_get_options(const struct sock *sk,
 		       struct mptcp_options_received *mp_opt);
 
 void mptcp_finish_connect(struct sock *sk);
+void __mptcp_set_connected(struct sock *sk);
 static inline bool mptcp_is_fully_established(struct sock *sk)
 {
 	return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6772802737e4..0b5d4a3eadcd 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -373,6 +373,24 @@  static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct soc
 	return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
 }
 
+void __mptcp_set_connected(struct sock *sk)
+{
+	if (sk->sk_state == TCP_SYN_SENT) {
+		inet_sk_state_store(sk, TCP_ESTABLISHED);
+		sk->sk_state_change(sk);
+	}
+}
+
+static void mptcp_set_connected(struct sock *sk)
+{
+	mptcp_data_lock(sk);
+	if (!sock_owned_by_user(sk))
+		__mptcp_set_connected(sk);
+	else
+		set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags);
+	mptcp_data_unlock(sk);
+}
+
 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -381,10 +399,6 @@  static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 
 	subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
 
-	if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
-		inet_sk_state_store(parent, TCP_ESTABLISHED);
-		parent->sk_state_change(parent);
-	}
 
 	/* be sure no special action on any packet other than syn-ack */
 	if (subflow->conn_finished)
@@ -417,6 +431,7 @@  static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 			 subflow->remote_key);
 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
 		mptcp_finish_connect(sk);
+		mptcp_set_connected(parent);
 	} else if (subflow->request_join) {
 		u8 hmac[SHA256_DIGEST_SIZE];
 
@@ -457,6 +472,7 @@  static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 	} else if (mptcp_check_fallback(sk)) {
 fallback:
 		mptcp_rcv_space_init(mptcp_sk(parent), sk);
+		mptcp_set_connected(parent);
 	}
 	return;
 
@@ -564,6 +580,7 @@  static void mptcp_sock_destruct(struct sock *sk)
 
 static void mptcp_force_close(struct sock *sk)
 {
+	/* the msk is not yet exposed to user-space */
 	inet_sk_state_store(sk, TCP_CLOSE);
 	sk_common_release(sk);
 }
@@ -1577,10 +1594,7 @@  static void subflow_state_change(struct sock *sk)
 		mptcp_rcv_space_init(mptcp_sk(parent), sk);
 		pr_fallback(mptcp_sk(parent));
 		subflow->conn_finished = 1;
-		if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
-			inet_sk_state_store(parent, TCP_ESTABLISHED);
-			parent->sk_state_change(parent);
-		}
+		mptcp_set_connected(parent);
 	}
 
 	/* as recvmsg() does not acquire the subflow socket for ssk selection