diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -1440,6 +1440,44 @@ TRACE_EVENT(rxrpc_congest,
__entry->sum.retrans_timeo ? " rTxTo" : "")
);
+TRACE_EVENT(rxrpc_reset_cwnd,
+ TP_PROTO(struct rxrpc_call *call, ktime_t now),
+
+ TP_ARGS(call, now),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(enum rxrpc_congest_mode, mode )
+ __field(unsigned short, cwnd )
+ __field(unsigned short, extra )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, prepared )
+ __field(ktime_t, since_last_tx )
+ __field(bool, has_data )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->mode = call->cong_mode;
+ __entry->cwnd = call->cong_cwnd;
+ __entry->extra = call->cong_extra;
+ __entry->hard_ack = call->acks_hard_ack;
+ __entry->prepared = call->tx_prepared - call->tx_bottom;
+ __entry->since_last_tx = ktime_sub(now, call->tx_last_sent);
+ __entry->has_data = !list_empty(&call->tx_sendmsg);
+ ),
+
+ TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu d=%u",
+ __entry->call,
+ __entry->hard_ack,
+ __print_symbolic(__entry->mode, rxrpc_congest_modes),
+ __entry->cwnd,
+ __entry->extra,
+ __entry->prepared,
+ ktime_to_ns(__entry->since_last_tx),
+ __entry->has_data)
+ );
+
TRACE_EVENT(rxrpc_disconnect_call,
TP_PROTO(struct rxrpc_call *call),
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -101,6 +101,9 @@ struct rxrpc_net {
atomic_t stat_tx_data_retrans;
atomic_t stat_tx_data_send;
atomic_t stat_tx_data_send_frag;
+ atomic_t stat_tx_data_send_fail;
+ atomic_t stat_tx_data_underflow;
+ atomic_t stat_tx_data_cwnd_reset;
atomic_t stat_rx_data;
atomic_t stat_rx_data_reqack;
atomic_t stat_rx_data_jumbo;
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -310,8 +310,10 @@ static void rxrpc_transmit_some_data(struct rxrpc_call *call)
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
if (rxrpc_tx_window_space(call) == 0)
return;
- if (list_empty(&call->tx_sendmsg))
+ if (list_empty(&call->tx_sendmsg)) {
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_underflow);
return;
+ }
rxrpc_decant_prepared_tx(call);
break;
default:
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -27,6 +27,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
enum rxrpc_congest_change change = rxrpc_cong_no_change;
unsigned int cumulative_acks = call->cong_cumul_acks;
unsigned int cwnd = call->cong_cwnd;
+ ktime_t now;
bool resend = false;
summary->flight_size =
@@ -59,13 +60,15 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
/* If we haven't transmitted anything for >1RTT, we should reset the
* congestion management state.
*/
+ now = ktime_get_real();
if ((call->cong_mode == RXRPC_CALL_SLOW_START ||
call->cong_mode == RXRPC_CALL_CONGEST_AVOIDANCE) &&
ktime_before(ktime_add_us(call->tx_last_sent,
- call->peer->srtt_us >> 3),
- ktime_get_real())
+ call->peer->srtt_us >> 3), now)
) {
+ trace_rxrpc_reset_cwnd(call, now);
change = rxrpc_cong_idle_reset;
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_cwnd_reset);
summary->mode = RXRPC_CALL_SLOW_START;
if (RXRPC_TX_SMSS > 2190)
summary->cwnd = 2;
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -485,6 +485,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
up_read(&conn->local->defrag_sem);
if (ret < 0) {
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_nofrag);
@@ -567,6 +568,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
}
if (ret < 0) {
+ rxrpc_inc_stat(call->rxnet, stat_tx_data_send_fail);
rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_frag);
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -408,13 +408,16 @@ int rxrpc_stats_show(struct seq_file *seq, void *v)
struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(seq));
seq_printf(seq,
- "Data : send=%u sendf=%u\n",
+ "Data : send=%u sendf=%u fail=%u\n",
atomic_read(&rxnet->stat_tx_data_send),
- atomic_read(&rxnet->stat_tx_data_send_frag));
+ atomic_read(&rxnet->stat_tx_data_send_frag),
+ atomic_read(&rxnet->stat_tx_data_send_fail));
seq_printf(seq,
- "Data-Tx : nr=%u retrans=%u\n",
+ "Data-Tx : nr=%u retrans=%u uf=%u cwr=%u\n",
atomic_read(&rxnet->stat_tx_data),
- atomic_read(&rxnet->stat_tx_data_retrans));
+ atomic_read(&rxnet->stat_tx_data_retrans),
+ atomic_read(&rxnet->stat_tx_data_underflow),
+ atomic_read(&rxnet->stat_tx_data_cwnd_reset));
seq_printf(seq,
"Data-Rx : nr=%u reqack=%u jumbo=%u\n",
atomic_read(&rxnet->stat_rx_data),
@@ -482,8 +485,11 @@ int rxrpc_stats_clear(struct file *file, char *buf, size_t size)
atomic_set(&rxnet->stat_tx_data, 0);
atomic_set(&rxnet->stat_tx_data_retrans, 0);
+ atomic_set(&rxnet->stat_tx_data_underflow, 0);
+ atomic_set(&rxnet->stat_tx_data_cwnd_reset, 0);
atomic_set(&rxnet->stat_tx_data_send, 0);
atomic_set(&rxnet->stat_tx_data_send_frag, 0);
+ atomic_set(&rxnet->stat_tx_data_send_fail, 0);
atomic_set(&rxnet->stat_rx_data, 0);
atomic_set(&rxnet->stat_rx_data_reqack, 0);
atomic_set(&rxnet->stat_rx_data_jumbo, 0);
Add a tracepoint to log when a cwnd reset occurs due to lack of
transmission on a call.

Add stat counters to count transmission underflows (ie. when we have tx
window space, but sendmsg doesn't manage to keep up), cwnd resets and
transmission failures.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
---
 include/trace/events/rxrpc.h | 38 ++++++++++++++++++++++++++++++++++++++
 net/rxrpc/ar-internal.h      |  3 +++
 net/rxrpc/call_event.c       |  4 +++-
 net/rxrpc/input.c            |  7 +++++--
 net/rxrpc/output.c           |  2 ++
 net/rxrpc/proc.c             | 14 ++++++++++----
 6 files changed, 61 insertions(+), 7 deletions(-)
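For anyone trying this out: the new counters show up in the "Data" and
"Data-Tx" lines of /proc/net/rxrpc/stats.  The sample below is purely
illustrative (the counter values are invented); the field names just follow
the seq_printf() format strings in the proc.c hunk:

    Data : send=4810 sendf=16 fail=2
    Data-Tx : nr=4794 retrans=5 uf=3 cwr=1

The rxrpc_inc_stat() calls used above are the existing thin wrapper in
ar-internal.h, roughly:

    /* For context: existing helper in net/rxrpc/ar-internal.h. */
    #define rxrpc_inc_stat(rxnet, s) atomic_inc(&(rxnet)->s)

As for the new rxrpc_reset_cwnd tracepoint, per its TP_printk() format, q=
is the current hard-ACK point, cw= the congestion window plus cong_extra,
pr= the number of txbufs between tx_bottom and tx_prepared, tm= the
nanoseconds since the last transmission and d= whether sendmsg still has
data queued on the call.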