Now that packets are removed from the Tx queue in the rotation function
rather than being cleaned up later, call->acks_hard_ack advances in step
with call->tx_bottom, so remove it.

In some of the places where call->acks_hard_ack was used in the rxrpc
tracepoints, it is replaced by call->acks_first_seq instead, as that is
the peer's reported idea of the hard-ACK point.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: linux-afs@lists.infradead.org
cc: netdev@vger.kernel.org
---
 include/trace/events/rxrpc.h | 20 ++++++++++----------
 net/rxrpc/ar-internal.h      |  1 -
 net/rxrpc/call_event.c       |  4 ++--
 net/rxrpc/input.c            | 17 ++++++++---------
 net/rxrpc/proc.c             |  6 +++---
 net/rxrpc/sendmsg.c          |  6 +++---
 6 files changed, 26 insertions(+), 28 deletions(-)

diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -892,8 +892,8 @@ TRACE_EVENT(rxrpc_txqueue,
TP_STRUCT__entry(
__field(unsigned int, call)
__field(enum rxrpc_txqueue_trace, why)
- __field(rxrpc_seq_t, acks_hard_ack)
__field(rxrpc_seq_t, tx_bottom)
+ __field(rxrpc_seq_t, acks_first_seq)
__field(rxrpc_seq_t, tx_top)
__field(rxrpc_seq_t, send_top)
__field(int, tx_winsize)
@@ -902,8 +902,8 @@ TRACE_EVENT(rxrpc_txqueue,
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
- __entry->acks_hard_ack = call->acks_hard_ack;
__entry->tx_bottom = call->tx_bottom;
+ __entry->acks_first_seq = call->acks_first_seq;
__entry->tx_top = call->tx_top;
__entry->send_top = call->send_top;
__entry->tx_winsize = call->tx_winsize;
@@ -913,9 +913,9 @@ TRACE_EVENT(rxrpc_txqueue,
__entry->call,
__print_symbolic(__entry->why, rxrpc_txqueue_traces),
__entry->tx_bottom,
- __entry->acks_hard_ack,
- __entry->tx_top - __entry->tx_bottom,
- __entry->tx_top - __entry->acks_hard_ack,
+ __entry->acks_first_seq,
+ __entry->acks_first_seq - __entry->tx_bottom,
+ __entry->tx_top - __entry->acks_first_seq,
__entry->send_top - __entry->tx_top,
__entry->tx_winsize)
);
@@ -945,7 +945,7 @@ TRACE_EVENT(rxrpc_transmit,
__entry->cong_cwnd = call->cong_cwnd;
__entry->cong_extra = call->cong_extra;
__entry->prepared = send_top - call->tx_bottom;
- __entry->in_flight = call->tx_top - call->acks_hard_ack;
+ __entry->in_flight = call->tx_top - call->tx_bottom;
__entry->pmtud_jumbo = call->peer->pmtud_jumbo;
	),

@@ -1707,7 +1707,7 @@ TRACE_EVENT(rxrpc_congest,
TP_fast_assign(
__entry->call = call->debug_id;
__entry->change = change;
- __entry->hard_ack = call->acks_hard_ack;
+ __entry->hard_ack = call->acks_first_seq;
__entry->top = call->tx_top;
__entry->lowest_nak = call->acks_lowest_nak;
__entry->ack_serial = ack_serial;
@@ -1754,7 +1754,7 @@ TRACE_EVENT(rxrpc_reset_cwnd,
__entry->mode = call->cong_mode;
__entry->cwnd = call->cong_cwnd;
__entry->extra = call->cong_extra;
- __entry->hard_ack = call->acks_hard_ack;
+ __entry->hard_ack = call->acks_first_seq;
__entry->prepared = call->send_top - call->tx_bottom;
__entry->since_last_tx = ktime_sub(now, call->tx_last_sent);
__entry->has_data = call->tx_bottom != call->tx_top;
@@ -1855,7 +1855,7 @@ TRACE_EVENT(rxrpc_resend,
TP_fast_assign(
struct rxrpc_skb_priv *sp = ack ? rxrpc_skb(ack) : NULL;
__entry->call = call->debug_id;
- __entry->seq = call->acks_hard_ack;
+ __entry->seq = call->acks_first_seq;
__entry->transmitted = call->tx_transmitted;
__entry->ack_serial = sp ? sp->hdr.serial : 0;
),
@@ -1944,7 +1944,7 @@ TRACE_EVENT(rxrpc_call_reset,
__entry->call_id = call->call_id;
__entry->call_serial = call->rx_serial;
__entry->conn_serial = call->conn->hi_serial;
- __entry->tx_seq = call->acks_hard_ack;
+ __entry->tx_seq = call->acks_first_seq;
__entry->rx_seq = call->rx_highest_seq;
	),

diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -759,7 +759,6 @@ struct rxrpc_call {
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
rxrpc_seq_t acks_first_seq; /* first sequence number received */
rxrpc_seq_t acks_prev_seq; /* Highest previousPacket received */
- rxrpc_seq_t acks_hard_ack; /* Latest hard-ack point */
rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
rxrpc_serial_t acks_highest_serial; /* Highest serial number ACK'd */
};
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -109,7 +109,7 @@ void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb)
bool unacked = false, did_send = false;
	unsigned int qix;

-	_enter("{%d,%d}", call->acks_hard_ack, call->tx_top);
+	_enter("{%d,%d}", call->tx_bottom, call->tx_top);

	if (call->tx_bottom == call->tx_top)
goto no_resend;
@@ -267,7 +267,7 @@ static void rxrpc_close_tx_phase(struct rxrpc_call *call)
static unsigned int rxrpc_tx_window_space(struct rxrpc_call *call)
{
int winsize = umin(call->tx_winsize, call->cong_cwnd + call->cong_extra);
-	int in_flight = call->tx_top - call->acks_hard_ack;
+	int in_flight = call->tx_top - call->tx_bottom;

	return max(winsize - in_flight, 0);
}
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -40,7 +40,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
	bool resend = false;

	summary->flight_size =
-		(call->tx_top - call->acks_hard_ack) - summary->nr_acks;
+		(call->tx_top - call->tx_bottom) - summary->nr_acks;

	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
summary->retrans_timeo = true;
@@ -175,7 +175,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
* state.
*/
if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ||
- summary->nr_acks != call->tx_top - call->acks_hard_ack) {
+ summary->nr_acks != call->tx_top - call->tx_bottom) {
call->cong_extra++;
wake_up(&call->waitq);
}
@@ -218,7 +218,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
rxrpc_seq_t seq = call->tx_bottom + 1;
	bool rot_last = false;

-	_enter("%x,%x,%x", call->tx_bottom, call->acks_hard_ack, to);
+	_enter("%x,%x", call->tx_bottom, to);

	trace_rxrpc_tx_rotate(call, seq, to);
trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate);
@@ -246,7 +246,6 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
		tq->bufs[ix] = NULL;

		smp_store_release(&call->tx_bottom, seq);
- smp_store_release(&call->acks_hard_ack, seq);
trace_rxrpc_txqueue(call, (rot_last ?
rxrpc_txqueue_rotate_last :
rxrpc_txqueue_rotate));
@@ -278,9 +277,9 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
}
	}

-	_debug("%x,%x,%x,%d", to, call->acks_hard_ack, call->tx_top, rot_last);
+	_debug("%x,%x,%x,%d", to, call->tx_bottom, call->tx_top, rot_last);

-	if (call->acks_lowest_nak == call->acks_hard_ack) {
+	if (call->acks_lowest_nak == call->tx_bottom) {
call->acks_lowest_nak = to;
} else if (after(to, call->acks_lowest_nak)) {
summary->new_low_nack = true;
@@ -968,7 +967,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
if (unlikely(summary.ack_reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
first_soft_ack == 1 &&
prev_pkt == 0 &&
- call->acks_hard_ack == 0 &&
+ call->tx_bottom == 0 &&
rxrpc_is_client_call(call)) {
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
0, -ENETRESET);
@@ -1033,13 +1032,13 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
goto send_response;
	}

-	if (before(hard_ack, call->acks_hard_ack) ||
+ if (before(hard_ack, call->tx_bottom) ||
after(hard_ack, call->tx_top))
return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window);
if (nr_acks > call->tx_top - hard_ack)
		return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);

-	if (after(hard_ack, call->acks_hard_ack)) {
+ if (after(hard_ack, call->tx_bottom)) {
if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
goto send_response;
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -52,7 +52,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
struct rxrpc_call *call;
struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
enum rxrpc_call_state state;
- rxrpc_seq_t acks_hard_ack;
+ rxrpc_seq_t tx_bottom;
char lbuff[50], rbuff[50];
	long timeout = 0;

@@ -79,7 +79,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
if (state != RXRPC_CALL_SERVER_PREALLOC)
		timeout = ktime_ms_delta(READ_ONCE(call->expect_rx_by), ktime_get_real());

-	acks_hard_ack = READ_ONCE(call->acks_hard_ack);
+ tx_bottom = READ_ONCE(call->tx_bottom);
seq_printf(seq,
"UDP %-47.47s %-47.47s %4x %08x %08x %s %3u"
" %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n",
@@ -93,7 +93,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
rxrpc_call_states[state],
call->abort_code,
call->debug_id,
- acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
+ tx_bottom, READ_ONCE(call->tx_top) - tx_bottom,
call->ackr_window, call->ackr_wtop - call->ackr_window,
call->rx_serial,
call->cong_cwnd,
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -138,7 +138,7 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
rtt = 2;
	timeout = rtt;

-	tx_start = smp_load_acquire(&call->acks_hard_ack);
+	tx_start = smp_load_acquire(&call->tx_bottom);

	for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -195,8 +195,8 @@ static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
DECLARE_WAITQUEUE(myself, current);
	int ret;

-	_enter(",{%u,%u,%u,%u}",
- call->tx_bottom, call->acks_hard_ack, call->tx_top, call->tx_winsize);
+ _enter(",{%u,%u,%u}",
+	       call->tx_bottom, call->tx_top, call->tx_winsize);

	add_wait_queue(&call->waitq, &myself);
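As a purely illustrative aside, not part of the patch itself: below is a
standalone userspace sketch of the accounting invariant described in the
commit message.  The toy_call struct, toy_tx_window_space() and umin_()
are hypothetical stand-ins for the corresponding rxrpc structures and
helpers, with sequence wrap-around handling simplified.

/* Sketch: once rotation frees packets as it advances, tx_bottom is the
 * hard-ACK point, so in-flight and window space derive from tx_bottom
 * alone and no separate acks_hard_ack needs to be kept in step.
 */
#include <stdio.h>

typedef unsigned int rxrpc_seq_t;	/* Wrapping sequence number */

struct toy_call {
	rxrpc_seq_t	tx_bottom;	/* First queued packet == hard-ACK point */
	rxrpc_seq_t	tx_top;		/* Highest packet transmitted */
	unsigned int	tx_winsize;	/* Maximum size of Tx window */
	unsigned int	cong_cwnd;	/* Congestion window size */
	unsigned int	cong_extra;	/* Extra to send for congestion management */
};

static unsigned int umin_(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Mirrors the shape of rxrpc_tx_window_space() after this change. */
static unsigned int toy_tx_window_space(const struct toy_call *call)
{
	int winsize = umin_(call->tx_winsize, call->cong_cwnd + call->cong_extra);
	int in_flight = call->tx_top - call->tx_bottom;

	return winsize > in_flight ? winsize - in_flight : 0;
}

int main(void)
{
	struct toy_call call = {
		.tx_bottom = 5, .tx_top = 9,
		.tx_winsize = 16, .cong_cwnd = 4, .cong_extra = 0,
	};

	/* Window full: 4 packets in flight against a cwnd of 4. */
	printf("in flight %u, space %u\n",
	       call.tx_top - call.tx_bottom, toy_tx_window_space(&call));

	/* A hard ACK up to seq 7 only has to advance tx_bottom. */
	call.tx_bottom = 7;
	printf("in flight %u, space %u\n",
	       call.tx_top - call.tx_bottom, toy_tx_window_space(&call));
	return 0;
}

The upshot is that a single store to tx_bottom now replaces the paired
smp_store_release() updates the old rotation code needed to keep
acks_hard_ack advancing in step.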
Now that packets are removed from the Tx queue in the rotation function rather than being cleaned up later, call->acks_hard_ack now advances in step with call->tx_bottom, so remove it. Some of the places call->acks_hard_ack is used in the rxrpc tracepoints are replaced by call->acks_first_seq instead as that's the peer's reported idea of the hard-ACK point. Signed-off-by: David Howells <dhowells@redhat.com> cc: Marc Dionne <marc.dionne@auristor.com> cc: "David S. Miller" <davem@davemloft.net> cc: Eric Dumazet <edumazet@google.com> cc: Jakub Kicinski <kuba@kernel.org> cc: Paolo Abeni <pabeni@redhat.com> cc: linux-afs@lists.infradead.org cc: netdev@vger.kernel.org --- include/trace/events/rxrpc.h | 20 ++++++++++---------- net/rxrpc/ar-internal.h | 1 - net/rxrpc/call_event.c | 4 ++-- net/rxrpc/input.c | 17 ++++++++--------- net/rxrpc/proc.c | 6 +++--- net/rxrpc/sendmsg.c | 6 +++--- 6 files changed, 26 insertions(+), 28 deletions(-)