
[net-next,v2,31/39] rxrpc: Send jumbo DATA packets

Message ID 20241204074710.990092-32-dhowells@redhat.com (mailing list archive)
State Accepted
Commit fe24a5494390d22ff645fd201d2bf1669fa3aab1
Delegated to: Netdev Maintainers
Series rxrpc: Implement jumbo DATA transmission and RACK-TLP

Checks

Context Check Description
netdev/series_format fail Series longer than 15 patches
netdev/tree_selection success Clearly marked for net-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 3 this patch: 3
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers warning 1 maintainers not CCed: horms@kernel.org
netdev/build_clang success Errors and warnings before: 5 this patch: 3
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 307 this patch: 307
netdev/checkpatch warning WARNING: line length of 86 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

David Howells Dec. 4, 2024, 7:46 a.m. UTC
Send jumbo DATA packets if path-MTU probing using padded PING ACK packets
shows there is sufficient capacity to do so.  This allows larger chunks of
data to be sent without reducing retryability, as the subpackets in a jumbo
packet can still be retransmitted individually.
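
As a rough, back-of-envelope illustration of where the capacity number comes
from, the snippet below estimates how many jumbo subpackets fit into a given
amount of UDP payload.  This is not code from the patch: the 1412-byte
subpacket payload and 4-byte jumbo header sizes are assumed from the rxrpc
protocol definitions, and subpackets_for_mtu() is a made-up name for the
example.

#include <stdio.h>

/* Assumed rxrpc wire sizes (cf. net/rxrpc/protocol.h); treat these as
 * illustrative constants rather than authoritative values.
 */
#define JUMBO_DATALEN	1412	/* payload bytes per subpacket */
#define JUMBO_HDRLEN	4	/* per-subpacket jumbo header */

/* Hypothetical helper: how many subpackets fit into max_data bytes of UDP
 * payload?  The first subpacket is covered by the main rxrpc header rather
 * than a jumbo header, hence the JUMBO_HDRLEN added back before dividing.
 */
static unsigned int subpackets_for_mtu(unsigned int max_data)
{
	unsigned int n = (max_data + JUMBO_HDRLEN) /
			 (JUMBO_HDRLEN + JUMBO_DATALEN);

	return n ? n : 1;	/* always room for at least one DATA packet */
}

int main(void)
{
	/* ~1444 and ~8944 assume IP, UDP and rxrpc headers have been
	 * subtracted from 1500- and 9000-byte MTUs respectively.
	 */
	printf("~1500 MTU path: %u subpacket(s)\n", subpackets_for_mtu(1444));
	printf("~9000 MTU path: %u subpacket(s)\n", subpackets_for_mtu(8944));
	return 0;
}

So a plain 1500-byte Ethernet path still yields single-subpacket DATA
packets, while a jumbo-frame path allows several subpackets per datagram.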

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: linux-afs@lists.infradead.org
cc: netdev@vger.kernel.org
---
 net/rxrpc/ar-internal.h | 1 +
 net/rxrpc/call_event.c  | 2 +-
 net/rxrpc/call_object.c | 1 +
 net/rxrpc/input.c       | 3 +++
 4 files changed, 6 insertions(+), 1 deletion(-)

Patch

diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index d0d0ab453909..1307749a1a74 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -710,6 +710,7 @@  struct rxrpc_call {
 	u16			tx_backoff;	/* Delay to insert due to Tx failure (ms) */
 	u8			tx_winsize;	/* Maximum size of Tx window */
 #define RXRPC_TX_MAX_WINDOW	128
+	u8			tx_jumbo_max;	/* Maximum subpkts peer will accept */
 	ktime_t			tx_last_sent;	/* Last time a transmission occurred */
 
 	/* Received data tracking */
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 4390c97e3ba6..39772459426b 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -288,7 +288,7 @@  static void rxrpc_transmit_fresh_data(struct rxrpc_call *call)
 		struct rxrpc_txqueue *tq;
 		struct rxrpc_txbuf *txb;
 		rxrpc_seq_t send_top, seq;
-		int limit = min(space, 1);
+		int limit = min(space, max(call->peer->pmtud_jumbo, 1));
 
 		/* Order send_top before the contents of the new txbufs and
 		 * txqueue pointers
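
To restate the hunk above in isolation: the number of subpackets queued per
iteration is bounded by the remaining transmission window space and by the
jumbo capacity learned for the peer, with a floor of one so that a lone DATA
packet can always be sent.  A minimal sketch, with hypothetical names rather
than anything taken from the rxrpc tree:

/* 'space' is how many packets the Tx window still allows; 'jumbo' is the
 * peer's probed jumbo capacity (0 if nothing has been learned yet).
 */
int tx_batch_limit(int space, int jumbo)
{
	int cap = jumbo > 1 ? jumbo : 1;	/* never below one packet */

	return space < cap ? space : cap;	/* also bounded by window space */
}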
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index bba058055c97..e0644e9a8d21 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -155,6 +155,7 @@  struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
 	refcount_set(&call->ref, 1);
 	call->debug_id		= debug_id;
 	call->tx_total_len	= -1;
+	call->tx_jumbo_max	= 1;
 	call->next_rx_timo	= 20 * HZ;
 	call->next_req_timo	= 1 * HZ;
 	call->ackr_window	= 1;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 1eb9c22aba51..a7a249872a54 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -796,8 +796,11 @@  static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb
 		peer->ackr_adv_pmtud = true;
 	} else {
 		peer->ackr_adv_pmtud = false;
+		capacity = clamp(capacity, 1, jumbo_max);
 	}
 
+	call->tx_jumbo_max = capacity;
+
 	if (wake)
 		wake_up(&call->waitq);
 }
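
For clarity, the trailer handling added above boils down to two cases,
restated here as a self-contained sketch (names are hypothetical and the
surrounding MTU bookkeeping is omitted): if the peer advertises
jumbo_max == 0 it is doing path-MTU discovery and the probed capacity is
used as-is; otherwise the capacity is clamped to the peer's advertised
jumbo limit, never below one.

/* 'capacity' is the subpacket count computed from the advertised/probed
 * MTU earlier in the function; 'jumbo_max' comes from the ACK trailer.
 */
unsigned int ack_trailer_jumbo_limit(unsigned int capacity,
				     unsigned int jumbo_max)
{
	if (jumbo_max == 0)
		return capacity;	/* peer does pmtud; trust the probe */

	/* Old-style peer: honour its advertised jumbo limit, floor of 1. */
	if (capacity < 1)
		capacity = 1;
	if (capacity > jumbo_max)
		capacity = jumbo_max;
	return capacity;
}

Either way the result is recorded in call->tx_jumbo_max.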