--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1422,11 +1422,10 @@ static inline unsigned long tcp_pacing_delay(const struct sock *sk)
static inline void tcp_reset_xmit_timer(struct sock *sk,
const int what,
- unsigned long when,
- const unsigned long max_when)
+ unsigned long when)
{
inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
- max_when);
+ TCP_RTO_MAX);
}
/* Something is really bad, we could not queue an additional packet,
@@ -1455,7 +1454,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
{
if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- tcp_probe0_base(sk), TCP_RTO_MAX);
+ tcp_probe0_base(sk));
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
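For readability, here is roughly how the include/net/tcp.h helper and one of its callers read once the hunks above are applied. This is a sketch reconstructed from the diff, not verbatim kernel source; the point is that the upper bound is now supplied in exactly one place.

/* Sketch of the helper after this patch: callers only pass the timer kind
 * and the delay; the TCP_RTO_MAX upper bound is applied here, once.
 */
static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when)
{
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  TCP_RTO_MAX);
}

/* Call sites simply drop their explicit TCP_RTO_MAX argument, e.g.: */
static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk));
}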
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3282,8 +3282,7 @@ void tcp_rearm_rto(struct sock *sk)
*/
rto = usecs_to_jiffies(max_t(int, delta_us, 1));
}
- tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
- TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto);
}
}
@@ -3563,7 +3562,7 @@ static void tcp_ack_probe(struct sock *sk)
unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
when = tcp_clamp_probe0_to_user_timeout(sk, when);
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when);
}
}
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2911,7 +2911,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
if (rto_delta_us > 0)
timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
- tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout);
return true;
}
@@ -3545,8 +3545,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
}
if (rearm_timer)
tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ inet_csk(sk)->icsk_rto);
}
/* We allow to exceed memory limits for FIN packets to expedite
@@ -4402,7 +4401,7 @@ void tcp_send_probe0(struct sock *sk)
}
timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
- tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
+ tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout);
}
int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
All callers use TCP_RTO_MAX, so we can factorize this constant; it will become a variable soon.

Signed-off-by: Eric Dumazet <edumazet@google.com>
---
 include/net/tcp.h     | 7 +++----
 net/ipv4/tcp_input.c  | 5 ++---
 net/ipv4/tcp_output.c | 7 +++----
 3 files changed, 8 insertions(+), 11 deletions(-)
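As a hedged illustration of why centralizing the bound helps: once only tcp_reset_xmit_timer() references it, replacing the constant with a tunable value touches a single helper rather than every call site. The tcp_rto_max(sk) accessor below is purely hypothetical, a placeholder for whatever per-socket or per-netns limit a follow-up patch might introduce.

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when)
{
	/* Hypothetical follow-up sketch: tcp_rto_max(sk) stands in for the
	 * future variable bound; this patch still uses the TCP_RTO_MAX
	 * constant.
	 */
	inet_csk_reset_xmit_timer(sk, what, when + tcp_pacing_delay(sk),
				  tcp_rto_max(sk));
}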