@@ -1828,12 +1828,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto out;
 		}
-		if (unlikely(tls_record_is_start_marker(record))) {
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-			atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
-			goto out;
-		}
-
 		tls_end_offset = record->end_seq - tcp_seq;
 		pr_debug("seq %#x, start %#x end %#x prev %#x, datalen %d offset %d\n",
@@ -1877,6 +1871,36 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			skb_get(skb);
 		}
+		if (unlikely(tls_record_is_start_marker(record))) {
+			atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
+			/* If tls_end_offset < data_len, means there is some
+			 * data after start marker, which needs encryption, send
+			 * plaintext first and take skb refcount. else send out
+			 * complete pkt as plaintext.
+			 */
+			if (tls_end_offset < data_len)
+				skb_get(skb);
+			else
+				tls_end_offset = data_len;
+
+			ret = chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+						    (!th->fin && th->psh), q,
+						    tls_end_offset, skb_offset);
+			if (ret) {
+				/* free the refcount taken earlier */
+				if (tls_end_offset < data_len)
+					dev_kfree_skb_any(skb);
+				spin_unlock_irqrestore(&tx_ctx->base.lock,
+						       flags);
+				goto out;
+			}
+
+			data_len -= tls_end_offset;
+			tcp_seq = record->end_seq;
+			skb_offset += tls_end_offset;
+			continue;
+		}
+
 		/* if a tls record is finishing in this SKB */
 		if (tls_end_offset <= data_len) {
 			ret = chcr_end_part_handler(tx_info, skb, record,
There could be a case where the ACK for TLS exchanges prior to the start
marker is missed, and the packet reaches the driver only once TLS has
already been offloaded. Such a packet should not be discarded; it has to
be handled carefully. It could be plaintext alone, or plaintext plus the
finish of a record as well.

Fixes: 5a4b9fe7fece ("cxgb4/chcr: complete record tx handling")
Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
---
 .../chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 36 +++++++++++++++----
 1 file changed, 30 insertions(+), 6 deletions(-)
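
Note for readers unfamiliar with the driver: below is a minimal userspace
sketch of the decision the new start-marker branch makes, assuming only the
same arithmetic on tls_end_offset, data_len, tcp_seq and skb_offset shown in
the hunk above. send_plaintext() and struct ktls_seg are hypothetical
stand-ins for chcr_ktls_tx_plaintxt() and the driver's real per-skb state;
they are not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical per-skb bookkeeping, standing in for the local variables
 * used inside chcr_ktls_xmit()'s record loop.
 */
struct ktls_seg {
	unsigned int tcp_seq;		/* sequence number of the next unsent byte */
	unsigned int data_len;		/* payload bytes still left in this skb */
	unsigned int skb_offset;	/* offset of the unsent payload in the skb */
};

/* Hypothetical stand-in for chcr_ktls_tx_plaintxt(): transmit @len plaintext
 * bytes starting at @offset. Returns 0 on success.
 */
static int send_plaintext(unsigned int seq, unsigned int offset,
			  unsigned int len)
{
	printf("plaintext: seq %#x offset %u len %u\n", seq, offset, len);
	return 0;
}

/* Handle the portion of an skb that falls inside the start-marker record.
 * @record_end_seq is the end sequence of the start-marker record.
 * Returns true if data needing encryption remains after the plaintext part.
 */
static bool handle_start_marker(struct ktls_seg *seg,
				unsigned int record_end_seq)
{
	unsigned int tls_end_offset = record_end_seq - seg->tcp_seq;
	bool more_after_marker = tls_end_offset < seg->data_len;

	/* If the start-marker record ends inside this skb, only the bytes up
	 * to record_end_seq are plaintext; otherwise the whole remaining
	 * payload is plaintext.
	 */
	if (!more_after_marker)
		tls_end_offset = seg->data_len;

	if (send_plaintext(seg->tcp_seq, seg->skb_offset, tls_end_offset))
		return false;	/* the real driver drops its refs and bails out */

	/* Same bookkeeping as the new branch does before its continue. */
	seg->data_len -= tls_end_offset;
	seg->tcp_seq = record_end_seq;
	seg->skb_offset += tls_end_offset;
	return more_after_marker;
}

int main(void)
{
	/* Worked example: 100 payload bytes, start-marker record ends 40 bytes in. */
	struct ktls_seg seg = {
		.tcp_seq = 0x1000,
		.data_len = 100,
		.skb_offset = 0,
	};

	if (handle_start_marker(&seg, 0x1000 + 40))
		printf("still to encrypt: %u bytes from seq %#x\n",
		       seg.data_len, seg.tcp_seq);
	return 0;
}

In this worked example the first 40 bytes go out as plaintext and the
remaining 60 bytes are left for the normal encrypted-record path, which is
what the continue at the end of the new branch achieves in the driver.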