@@ -232,7 +232,8 @@ struct tcp_sock {
 		repair      : 1,
 		tcp_usec_ts : 1, /* TSval values in usec */
 		is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
-		is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
+		is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
+		mss_cache_set_by_ca:1;/* mss_cache set by CA */
 	__cacheline_group_end(tcp_sock_read_txrx);
 
 	/* RX read-mostly hotpath cache lines */
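For context on how the new bit is meant to be used: ca_ops->init() runs at connection establishment, after tcp_init_sock() has already cleared the flag (see the next hunk), so a congestion control module can set it there. Below is a minimal sketch of such a module; only mss_cache_set_by_ca comes from this patch, while the module name, the fixed MSS value, and the reuse of the exported Reno helpers are illustrative assumptions.

/* Hypothetical congestion control module pinning mss_cache.
 * Only mss_cache_set_by_ca is from the patch; everything else
 * is illustrative.
 */
#include <linux/module.h>
#include <net/tcp.h>

static void pinned_mss_init(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->mss_cache = 1200;		/* arbitrary example value */
	tp->mss_cache_set_by_ca = 1;	/* keep PMTU updates from overriding it */
}

static struct tcp_congestion_ops pinned_mss __read_mostly = {
	.init		= pinned_mss_init,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.name		= "pinned_mss",
	.owner		= THIS_MODULE,
};

static int __init pinned_mss_register(void)
{
	return tcp_register_congestion_control(&pinned_mss);
}

static void __exit pinned_mss_unregister(void)
{
	tcp_unregister_congestion_control(&pinned_mss);
}

module_init(pinned_mss_register);
module_exit(pinned_mss_unregister);
MODULE_LICENSE("GPL");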
@@ -456,6 +456,7 @@ void tcp_init_sock(struct sock *sk)
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	tp->snd_cwnd_clamp = ~0;
 	tp->mss_cache = TCP_MSS_DEFAULT;
+	tp->mss_cache_set_by_ca = false;
 
 	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
 	tcp_assign_congestion_control(sk);
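Because tcp_init_sock() clears the bit, every existing congestion control keeps today's behavior; nothing changes unless a module opts in. Per-socket selection of such a module from userspace goes through the standard TCP_CONGESTION socket option, as in this sketch (the "pinned_mss" name refers to the hypothetical module above):

/* Userspace sketch: opting a socket into the hypothetical CA above.
 * May require the module to be loaded and listed in
 * net.ipv4.tcp_allowed_congestion_control.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <string.h>

static int use_pinned_mss(int fd)
{
	static const char name[] = "pinned_mss";	/* hypothetical module name */

	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, strlen(name));
}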
@@ -2073,8 +2073,8 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	int mss_now;
 
-	if (icsk->icsk_mtup.search_high > pmtu)
+	if (icsk->icsk_mtup.search_high > pmtu && !tp->mss_cache_set_by_ca)
 		icsk->icsk_mtup.search_high = pmtu;
 
 	mss_now = tcp_mtu_to_mss(sk, pmtu);
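With the flag set, a shrinking path MTU no longer lowers the MTU-probing upper bound here. Note that this hunk and the tcp_current_mss() hunk below open-code the same !tp->mss_cache_set_by_ca test; if more call sites appear, a named helper along these lines (hypothetical, not part of the patch) would keep the intent readable:

/* Hypothetical helper, not in the patch: gives the repeated
 * open-coded test a name. */
#include <net/tcp.h>

static inline bool tcp_mss_pinned_by_ca(const struct tcp_sock *tp)
{
	return tp->mss_cache_set_by_ca;
}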
@@ -2104,7 +2104,7 @@ unsigned int tcp_current_mss(struct sock *sk)
 	mss_now = tp->mss_cache;
 
-	if (dst) {
+	if (dst && !tp->mss_cache_set_by_ca) {
 		u32 mtu = dst_mtu(dst);
 
 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
 			mss_now = tcp_sync_mss(sk, mtu);
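The effect is visible end to end: with the flag set, tcp_current_mss() stops resyncing mss_cache against dst_mtu(), so the value stays pinned even after a route MTU change. TCP_MAXSEG on a connected socket reads back tp->mss_cache, which this sketch uses to observe it:

/* Userspace sketch: TCP_MAXSEG on a connected socket reflects
 * tp->mss_cache, so it should report the pinned value even after
 * a path-MTU change. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <stdio.h>

static void print_effective_mss(int fd)
{
	int mss = 0;
	socklen_t len = sizeof(mss);

	if (getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, &len) == 0)
		printf("effective mss: %d\n", mss);
}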