@@ -968,6 +968,8 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)

static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+ ktime_t expire, now;
u64 len_ns;
u32 rate;

@@ -979,12 +981,28 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)

len_ns = (u64)skb->len * NSEC_PER_SEC;
do_div(len_ns, rate);
- hrtimer_start(&tcp_sk(sk)->pacing_timer,
- ktime_add_ns(ktime_get(), len_ns),
+ now = ktime_get();
+ /* If hrtimer is already armed, then our caller has not
+ * used tcp_pacing_check().
+ */
+ if (unlikely(hrtimer_is_queued(&tp->pacing_timer))) {
+ expire = hrtimer_get_softexpires(&tp->pacing_timer);
+ if (ktime_after(expire, now))
+ now = expire;
+ if (hrtimer_try_to_cancel(&tp->pacing_timer) == 1)
+ __sock_put(sk);
+ }
+ hrtimer_start(&tp->pacing_timer, ktime_add_ns(now, len_ns),
HRTIMER_MODE_ABS_PINNED_SOFT);
sock_hold(sk);
}

+static bool tcp_pacing_check(const struct sock *sk)
+{
+ return tcp_needs_internal_pacing(sk) &&
+ hrtimer_is_queued(&tcp_sk(sk)->pacing_timer);
+}
+
static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
{
skb->skb_mstamp = tp->tcp_mstamp;
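
The arithmetic added in this hunk is worth unpacking. Each skb's pacing delay is its length divided by the pacing rate (len_ns = skb->len * NSEC_PER_SEC / rate), and when the hrtimer is already queued the new deadline is based on the pending soft expiry rather than on the current time, so consecutive packets accumulate their delays instead of collapsing onto a single deadline. The stand-alone C model below sketches just that accumulation; struct pacing_state and arm_pacing_timer() are illustrative stand-ins, not kernel API.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Illustrative stand-in for the armed hrtimer: a pending flag plus
     * the absolute soft-expiry time in nanoseconds. */
    struct pacing_state {
            bool     queued;
            uint64_t expire_ns;
    };

    /* Models the patched tcp_internal_pacing(): compute this packet's
     * serialization delay and push the deadline out from whichever is
     * later, "now" or the previously armed expiry. */
    static void arm_pacing_timer(struct pacing_state *ps, uint64_t now_ns,
                                 uint32_t skb_len, uint32_t rate_Bps)
    {
            uint64_t len_ns = (uint64_t)skb_len * NSEC_PER_SEC / rate_Bps;
            uint64_t base = now_ns;

            /* mirrors the ktime_after(expire, now) branch above */
            if (ps->queued && ps->expire_ns > base)
                    base = ps->expire_ns;

            ps->expire_ns = base + len_ns;  /* ktime_add_ns(now, len_ns) */
            ps->queued = true;
    }

    int main(void)
    {
            struct pacing_state ps = { .queued = false };

            /* Two 1500-byte packets at 1 MB/s take 1.5 ms each: the
             * deadlines land at t=1.5ms and t=3ms, not both at 1.5ms. */
            arm_pacing_timer(&ps, 0, 1500, 1000000);
            printf("first expiry:  %llu ns\n", (unsigned long long)ps.expire_ns);
            arm_pacing_timer(&ps, 0, 1500, 1000000);
            printf("second expiry: %llu ns\n", (unsigned long long)ps.expire_ns);
            return 0;
    }

The refcounting in the real patch follows from the same re-arm: sock_hold() takes a reference each time the timer is armed, so when hrtimer_try_to_cancel() returns 1 (a queued timer was cancelled before it could fire) the now-orphaned reference is dropped with __sock_put() before hrtimer_start() arms the timer again.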
@@ -2121,6 +2139,9 @@ static int tcp_mtu_probe(struct sock *sk)
if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
return -1;

+ if (tcp_pacing_check(sk))
+ return -1;
+
/* We're allowed to probe. Build it now. */
nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
if (!nskb)
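
The tcp_mtu_probe() hunk applies the same rule to MTU probing: a probe coalesces several queued segments into one oversized skb, and building and sending it while the pacing timer is still queued would burst past the pacing schedule, so the probe is deferred with the same -1 ("not now") return the function's other gates use. Note that the check sits with the cheap gates, before the sk_stream_alloc_skb() allocation. A toy model of that ordering, with hypothetical fields standing in for the real socket state:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical flattening of tcp_mtu_probe()'s gating order after
     * this patch: all cheap "can we probe at all?" tests, including the
     * new pacing-timer check, run before the expensive probe-skb
     * allocation. */
    struct sock_model {
            bool can_coalesce_head;   /* tcp_can_coalesce_send_queue_head() */
            bool pacing_timer_queued; /* tcp_pacing_check() */
    };

    static int mtu_probe_model(const struct sock_model *sk)
    {
            if (!sk->can_coalesce_head)
                    return -1;        /* pre-existing gate */
            if (sk->pacing_timer_queued)
                    return -1;        /* new gate: defer the probe while paced */
            /* only here would the kernel allocate and build the probe skb */
            return 0;
    }

    int main(void)
    {
            struct sock_model paced = { .can_coalesce_head = true,
                                        .pacing_timer_queued = true };
            printf("probe while paced: %d\n", mtu_probe_model(&paced)); /* -1 */
            return 0;
    }

This also explains the pure code motion below: tcp_pacing_check() is moved up the file so tcp_mtu_probe() can see it, while its existing callers in the transmit loops already break out while the timer is queued.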
@@ -2194,12 +2215,6 @@ static int tcp_mtu_probe(struct sock *sk)
return -1;
}

-static bool tcp_pacing_check(const struct sock *sk)
-{
- return tcp_needs_internal_pacing(sk) &&
- hrtimer_is_queued(&tcp_sk(sk)->pacing_timer);
-}
-
/* TCP Small Queues :
* Control number of packets in qdisc/devices to two packets / or ~1 ms.
* (These limits are doubled for retransmits)
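
On the TSQ comment that closes this context: the "two packets / or ~1 ms" budget is derived from the pacing rate, roughly max(2 * skb->truesize, pacing_rate >> 10), where shifting a bytes-per-second rate right by 10 approximates one millisecond's worth of bytes (the exact shift became tunable in later kernels). The sketch below models that arithmetic only; the real tcp_small_queue_check() additionally clamps the result against the tcp_limit_output_bytes sysctl.

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the "two packets / or ~1 ms" TSQ budget: the larger of
     * two packet truesizes and one millisecond's worth of bytes at the
     * pacing rate (rate >> 10 ~= bytes per ms). */
    static uint32_t tsq_limit_model(uint32_t skb_truesize,
                                    uint32_t pacing_rate_Bps,
                                    int is_retransmit)
    {
            uint32_t limit = 2 * skb_truesize;
            uint32_t one_ms_bytes = pacing_rate_Bps >> 10;

            if (one_ms_bytes > limit)
                    limit = one_ms_bytes;
            if (is_retransmit)
                    limit <<= 1;      /* "doubled for retransmits" */
            return limit;
    }

    int main(void)
    {
            /* ~1500B packets (truesize ~2300B) at 12.5 MB/s (100 Mbit/s):
             * 2 * 2300 = 4600B vs 12500000 >> 10 = 12207B, so the
             * rate-derived term sets the budget. */
            printf("limit: %u bytes\n", tsq_limit_model(2300, 12500000, 0));
            return 0;
    }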