@@ -943,8 +943,8 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
* First subdescriptor for every send descriptor.
*/
static inline void
-nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, int subdesc_cnt,
- struct sk_buff *skb, int len, bool hw_tso)
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
+ int subdesc_cnt, struct sk_buff *skb, int len)
{
int proto;
struct sq_hdr_subdesc *hdr;
@@ -980,10 +980,15 @@ nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, int subdesc_cnt,
}
}
 
- if (hw_tso && skb_shinfo(skb)->gso_size) {
+ if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
hdr->tso = 1;
hdr->tso_start = skb_transport_offset(skb) + tcp_hdrlen(skb);
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
+ /* For non-tunneled pkts, point this to L2 ethertype */
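+ /* (skb_network_offset() gives the L3 start, so "- 2" is the 2-byte ethertype) */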
+ hdr->inner_l3_offset = skb_network_offset(skb) - 2;
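+ /* Account for a HW offloaded TSO pkt in driver stats */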
+ nic->drv_stats.tx_tso++;
}
}
 
@@ -1054,8 +1059,8 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
data_left -= size;
tso_build_data(skb, &tso, size);
}
- nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
- seg_subdescs - 1, skb, seg_len, false);
+ nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
+ seg_subdescs - 1, skb, seg_len);
sq->skbuff[hdr_qentry] = (u64)NULL;
qentry = nicvf_get_nxt_sqentry(sq, qentry);
 
@@ -1111,8 +1116,8 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
 
/* Add SQ header subdesc */
- nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb,
- skb->len, nic->hw_tso);
+ nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+ skb, skb->len);
 
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
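
Aside on the inner_l3_offset math in the second hunk: skb_network_offset() returns the offset of the L3 header from skb->data, so subtracting 2 points at the 2-byte ethertype that immediately precedes the L3 header, which is what the "point this to L2 ethertype" comment means. Below is a minimal standalone sketch of that arithmetic; EXAMPLE_ETH_HDR_LEN is a made-up stand-in for what skb_network_offset() would yield on an untagged Ethernet frame, not part of the driver:

#include <assert.h>

/* Untagged Ethernet II frame layout, counted from skb->data:
 *   bytes  0..5    destination MAC
 *   bytes  6..11   EtherType follows the source MAC
 *   bytes 12..13   EtherType
 *   byte  14       first byte of the L3 (IP) header
 */
#define EXAMPLE_ETH_HDR_LEN 14	/* stand-in for skb_network_offset() */

int main(void)
{
	int network_offset = EXAMPLE_ETH_HDR_LEN;
	int inner_l3_offset = network_offset - 2;	/* same "- 2" as the patch */

	assert(inner_l3_offset == 12);	/* start of the EtherType field */
	return 0;
}

For a VLAN-tagged frame the network offset grows by 4 and the same subtraction still lands on the ethertype immediately preceding the L3 header.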