@@ -466,6 +466,18 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
 	struct tcp_sock *tp;
 	u32 old_copied_seq;
 	bool done = false;
+	int sk_rbuf;
+
+	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
+
+	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+		int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
+
+		if (unlikely(ssk_rbuf > sk_rbuf)) {
+			WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf);
+			sk_rbuf = ssk_rbuf;
+		}
+	}

 	pr_debug("msk=%p ssk=%p", msk, ssk);
 	tp = tcp_sk(ssk);
@@ -528,7 +540,7 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
 		WRITE_ONCE(tp->copied_seq, seq);
 		more_data_avail = mptcp_subflow_data_available(ssk);

-		if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
+		if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) {
 			done = true;
 			break;
 		}
@@ -622,6 +634,7 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	struct mptcp_sock *msk = mptcp_sk(sk);
+	int sk_rbuf, ssk_rbuf;
 	bool wake;

 	/* move_skbs_to_msk below can legitly clear the data_avail flag,
@@ -632,12 +645,16 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 	if (wake)
 		set_bit(MPTCP_DATA_READY, &msk->flags);

-	if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
-	    move_skbs_to_msk(msk, ssk))
+	ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
+	sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
+	if (unlikely(ssk_rbuf > sk_rbuf))
+		sk_rbuf = ssk_rbuf;
+
+	/* over limit? can't append more skbs to msk */
+	if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
 		goto wake;

-	/* don't schedule if mptcp sk is (still) over limit */
-	if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
+	if (move_skbs_to_msk(msk, ssk))
 		goto wake;

 	/* mptcp socket is owned, release_cb should retry */