diff mbox series

[net-next,15/15] mlx5: support BIG TCP packets

Message ID 20220203015140.3022854-16-eric.dumazet@gmail.com (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series tcp: BIG TCP implementation

Checks

Context Check Description
netdev/tree_selection success Clearly marked for net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success
netdev/cover_letter success Series has a cover letter
netdev/patch_count success
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit fail Errors and warnings before: 148 this patch: 181
netdev/cc_maintainers warning 1 maintainers not CCed: linux-rdma@vger.kernel.org
netdev/build_clang fail Errors and warnings before: 4 this patch: 13
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 7 this patch: 11
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 90 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Eric Dumazet Feb. 3, 2022, 1:51 a.m. UTC
From: Coco Li <lixiaoyan@google.com>

mlx5 supports LSOv2.

IPv6 GRO/TCP stacks insert a temporary Hop-by-Hop header
with a JUMBO TLV for big packets.

We need to ignore/skip this HBH header when populating the TX descriptor.
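
For reference, the Hop-by-Hop jumbo option added earlier in this series looks
roughly like this (a sketch of the struct definition, included here only to show
which bytes get stripped before the headers are inlined):

	struct hop_jumbo_hdr {
		u8	nexthdr;
		u8	hdrlen;
		u8	tlv_type;	/* IPV6_TLV_JUMBO, 0xC2 */
		u8	tlv_len;	/* 4 */
		__be32	jumbo_payload_len;
	};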

Signed-off-by: Coco Li <lixiaoyan@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Saeed Mahameed <saeedm@nvidia.com>
Cc: Leon Romanovsky <leon@kernel.org>
---
 .../net/ethernet/mellanox/mlx5/core/en_main.c |  1 +
 .../net/ethernet/mellanox/mlx5/core/en_tx.c   | 81 +++++++++++++++----
 2 files changed, 65 insertions(+), 17 deletions(-)

Comments

Tariq Toukan Feb. 3, 2022, 7:27 a.m. UTC | #1
Hi,

Thanks for your patch!

On 2/3/2022 3:51 AM, Eric Dumazet wrote:
> From: Coco Li <lixiaoyan@google.com>
> 
> mlx5 supports LSOv2.
> 
> IPv6 GRO/TCP stacks insert a temporary Hop-by-Hop header
> with a JUMBO TLV for big packets.
> 
> We need to ignore/skip this HBH header when populating the TX descriptor.
> 
> Signed-off-by: Coco Li <lixiaoyan@google.com>
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> Cc: Saeed Mahameed <saeedm@nvidia.com>
> Cc: Leon Romanovsky <leon@kernel.org>
> ---
>   .../net/ethernet/mellanox/mlx5/core/en_main.c |  1 +
>   .../net/ethernet/mellanox/mlx5/core/en_tx.c   | 81 +++++++++++++++----
>   2 files changed, 65 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
> index bf80fb6124499fc4e6a0310ab92c91159b4ccbbb..1c4ce90e5d0f5186c402137b744258ff4ce6a348 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
> @@ -4888,6 +4888,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
>   
>   	netdev->priv_flags       |= IFF_UNICAST_FLT;
>   
> +	netif_set_tso_ipv6_max_size(netdev, 512 * 1024);
>   	mlx5e_set_netdev_dev_addr(netdev);
>   	mlx5e_ipsec_build_netdev(priv);
>   	mlx5e_tls_build_netdev(priv);
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
> index 7fd33b356cc8d191413e8259acd0b26b3ebd6ba9..fc945bd8219dcb69950b1840bb492649c8749976 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
> @@ -40,6 +40,7 @@
>   #include "en_accel/en_accel.h"
>   #include "en_accel/ipsec_rxtx.h"
>   #include "en/ptp.h"
> +#include <net/ipv6.h>
>   
>   static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
>   {
> @@ -241,8 +242,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
>   		sq->stats->csum_none++;
>   }
>   
> +/* Returns the number of header bytes that we plan
> + * to inline later in the transmit descriptor
> + */
>   static inline u16
> -mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
> +mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
>   {
>   	struct mlx5e_sq_stats *stats = sq->stats;
>   	u16 ihs;
> @@ -252,15 +256,18 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
>   		stats->tso_inner_packets++;
>   		stats->tso_inner_bytes += skb->len - ihs;
>   	} else {
> -		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
> +		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
>   			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
> -		else
> +		} else {
> +			if (ipv6_has_hopopt_jumbo(skb))
> +				*hopbyhop = sizeof(struct hop_jumbo_hdr);
>   			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
> +		}
>   		stats->tso_packets++;
> -		stats->tso_bytes += skb->len - ihs;
> +		stats->tso_bytes += skb->len - ihs - *hopbyhop;

AFAIU, *hopbyhop is already accounted for inside ihs, so why decrement it once more?

Probably it'd be cleaner to assign/fix both ihs and hopbyhop under the
ipv6_has_hopopt_jumbo() branch:

		ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (ipv6_has_hopopt_jumbo(skb)) {
			*hopbyhop = sizeof(struct hop_jumbo_hdr);
			ihs -= sizeof(struct hop_jumbo_hdr);
		}
...
		stats->tso_bytes += skb->len - ihs - *hopbyhop;
...
		return ihs;

>   	}
>   
> -	return ihs;
> +	return ihs - *hopbyhop;
>   }
>   
>   static inline int
> @@ -319,6 +326,7 @@ struct mlx5e_tx_attr {
>   	__be16 mss;
>   	u16 insz;
>   	u8 opcode;
> +	u8 hopbyhop;
>   };
>   
>   struct mlx5e_tx_wqe_attr {
> @@ -355,14 +363,16 @@ static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
>   	struct mlx5e_sq_stats *stats = sq->stats;
>   
>   	if (skb_is_gso(skb)) {
> -		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
> +		int hopbyhop;

Missing init to zero: mlx5e_tx_get_gso_ihs() doesn't always write to it.
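
One minimal fix (just a sketch; alternatively mlx5e_tx_get_gso_ihs() could set
*hopbyhop = 0 on entry) would be to initialize it at its declaration:

		int hopbyhop = 0;
		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);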

> +		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);
>   
>   		*attr = (struct mlx5e_tx_attr) {
>   			.opcode    = MLX5_OPCODE_LSO,
>   			.mss       = cpu_to_be16(skb_shinfo(skb)->gso_size),
>   			.ihs       = ihs,
>   			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
> -			.headlen   = skb_headlen(skb) - ihs,
> +			.headlen   = skb_headlen(skb) - ihs - hopbyhop,
> +			.hopbyhop  = hopbyhop,
>   		};
>   
>   		stats->packets += skb_shinfo(skb)->gso_segs;
> @@ -476,7 +486,8 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
>   	struct mlx5_wqe_eth_seg  *eseg;
>   	struct mlx5_wqe_data_seg *dseg;
>   	struct mlx5e_tx_wqe_info *wi;
> -
> +	u16 ihs = attr->ihs;
> +	struct ipv6hdr *h6;
>   	struct mlx5e_sq_stats *stats = sq->stats;
>   	int num_dma;
>   
> @@ -490,15 +501,36 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
>   
>   	eseg->mss = attr->mss;
>   
> -	if (attr->ihs) {
> -		if (skb_vlan_tag_present(skb)) {
> -			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
> -			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
> +	if (ihs) {
> +		u8 *start = eseg->inline_hdr.start;
> +
> +		if (unlikely(attr->hopbyhop)) {
> +			/* remove the HBH header.
> +			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
> +			 */
> +			if (skb_vlan_tag_present(skb)) {
> +				mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
> +				ihs += VLAN_HLEN;
> +				h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
> +			} else {
> +				memcpy(start, skb->data, ETH_HLEN + sizeof(*h6));
> +				h6 = (struct ipv6hdr *)(start + ETH_HLEN);
> +			}
> +			h6->nexthdr = IPPROTO_TCP;
> +			/* Copy the TCP header after the IPv6 one */
> +			memcpy(h6 + 1,
> +			       skb->data + ETH_HLEN + sizeof(*h6) +
> +					sizeof(struct hop_jumbo_hdr),
> +			       tcp_hdrlen(skb));
> +			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */

You are not using ihs when preparing the inline part of the descriptor, 
so this might yield a mismatch between ihs and the sum of the sizes 
you're copying above. Is there a guarantee that this won't happen?
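
For reference, a quick size check for the non-VLAN branch (a sketch; the VLAN
branch adds VLAN_HLEN to both sides), assuming the linear data is laid out as
[Ethernet][IPv6][HBH][TCP]:

	/* ihs returned by mlx5e_tx_get_gso_ihs():
	 *   skb_transport_offset(skb) + tcp_hdrlen(skb) - sizeof(struct hop_jumbo_hdr)
	 * = (ETH_HLEN + sizeof(struct ipv6hdr) + sizeof(struct hop_jumbo_hdr))
	 *   + tcp_hdrlen(skb) - sizeof(struct hop_jumbo_hdr)
	 * = ETH_HLEN + sizeof(struct ipv6hdr) + tcp_hdrlen(skb)
	 *
	 * Bytes copied into eseg->inline_hdr.start above:
	 *   ETH_HLEN + sizeof(struct ipv6hdr)	(first memcpy)
	 * + tcp_hdrlen(skb)			(second memcpy)
	 *
	 * The two totals match whenever that layout assumption holds.
	 */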

> +		} else if (skb_vlan_tag_present(skb)) {
> +			mlx5e_insert_vlan(start, skb, ihs);
> +			ihs += VLAN_HLEN;
>   			stats->added_vlan_packets++;
>   		} else {
> -			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
> -			memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
> +			memcpy(start, skb->data, ihs);
>   		}
> +		eseg->inline_hdr.sz |= cpu_to_be16(ihs);
>   		dseg += wqe_attr->ds_cnt_inl;
>   	} else if (skb_vlan_tag_present(skb)) {
>   		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
> @@ -509,7 +541,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
>   	}
>   
>   	dseg += wqe_attr->ds_cnt_ids;
> -	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
> +	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
>   					  attr->headlen, dseg);
>   	if (unlikely(num_dma < 0))
>   		goto err_drop;
> @@ -1016,12 +1048,27 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
>   	eseg->mss = attr.mss;
>   
>   	if (attr.ihs) {
> -		memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
> +		if (unlikely(attr.hopbyhop)) {
> +			/* remove the HBH header.
> +			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
> +			 */
> +			memcpy(eseg->inline_hdr.start, skb->data, ETH_HLEN + sizeof(*h6));
> +			h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
> +			h6->nexthdr = IPPROTO_TCP;
> +			/* Copy the TCP header after the IPv6 one */
> +			memcpy(h6 + 1,
> +			       skb->data + ETH_HLEN + sizeof(*h6) +
> +					sizeof(struct hop_jumbo_hdr),
> +			       tcp_hdrlen(skb));
> +			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
> +		} else {
> +			memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
> +		}
>   		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
>   		dseg += wqe_attr.ds_cnt_inl;
>   	}
>   
> -	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
> +	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
>   					  attr.headlen, dseg);
>   	if (unlikely(num_dma < 0))
>   		goto err_drop;
kernel test robot Feb. 4, 2022, 4:03 a.m. UTC | #2
Hi Eric,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Eric-Dumazet/tcp-BIG-TCP-implementation/20220203-095336
base:   https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git 52dae93f3bad842c6d585700460a0dea4d70e096
config: arc-allyesconfig (https://download.01.org/0day-ci/archive/20220204/202202041153.aALvQUP0-lkp@intel.com/config)
compiler: arceb-elf-gcc (GCC) 11.2.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/7561f5d66d00583e6d88fa6b2fffd868dcc82b2e
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Eric-Dumazet/tcp-BIG-TCP-implementation/20220203-095336
        git checkout 7561f5d66d00583e6d88fa6b2fffd868dcc82b2e
        # save the config file to linux build tree
        mkdir build_dir
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross O=build_dir ARCH=arc SHELL=/bin/bash

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

   In file included from include/linux/container_of.h:5,
                    from include/linux/kernel.h:21,
                    from include/linux/skbuff.h:13,
                    from include/linux/tcp.h:17,
                    from drivers/net/ethernet/mellanox/mlx5/core/en_tx.c:33:
   include/linux/build_bug.h:78:41: error: static assertion failed: "BITS_PER_LONG >= NR_MSG_FRAG_IDS"
      78 | #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
         |                                         ^~~~~~~~~~~~~~
   include/linux/build_bug.h:77:34: note: in expansion of macro '__static_assert'
      77 | #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
         |                                  ^~~~~~~~~~~~~~~
   include/linux/skmsg.h:41:1: note: in expansion of macro 'static_assert'
      41 | static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);
         | ^~~~~~~~~~~~~
   drivers/net/ethernet/mellanox/mlx5/core/en_tx.c: In function 'mlx5i_sq_xmit':
>> drivers/net/ethernet/mellanox/mlx5/core/en_tx.c:1055:86: error: 'h6' undeclared (first use in this function)
    1055 |                         memcpy(eseg->inline_hdr.start, skb->data, ETH_HLEN + sizeof(*h6));
         |                                                                                      ^~
   drivers/net/ethernet/mellanox/mlx5/core/en_tx.c:1055:86: note: each undeclared identifier is reported only once for each function it appears in


vim +/h6 +1055 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

  1011	
  1012	void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
  1013			   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
  1014	{
  1015		struct mlx5e_tx_wqe_attr wqe_attr;
  1016		struct mlx5e_tx_attr attr;
  1017		struct mlx5i_tx_wqe *wqe;
  1018	
  1019		struct mlx5_wqe_datagram_seg *datagram;
  1020		struct mlx5_wqe_ctrl_seg *cseg;
  1021		struct mlx5_wqe_eth_seg  *eseg;
  1022		struct mlx5_wqe_data_seg *dseg;
  1023		struct mlx5e_tx_wqe_info *wi;
  1024	
  1025		struct mlx5e_sq_stats *stats = sq->stats;
  1026		int num_dma;
  1027		u16 pi;
  1028	
  1029		mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
  1030		mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
  1031	
  1032		pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
  1033		wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
  1034	
  1035		stats->xmit_more += xmit_more;
  1036	
  1037		/* fill wqe */
  1038		wi       = &sq->db.wqe_info[pi];
  1039		cseg     = &wqe->ctrl;
  1040		datagram = &wqe->datagram;
  1041		eseg     = &wqe->eth;
  1042		dseg     =  wqe->data;
  1043	
  1044		mlx5i_txwqe_build_datagram(av, dqpn, dqkey, datagram);
  1045	
  1046		mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, eseg);
  1047	
  1048		eseg->mss = attr.mss;
  1049	
  1050		if (attr.ihs) {
  1051			if (unlikely(attr.hopbyhop)) {
  1052				/* remove the HBH header.
  1053				 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
  1054				 */
> 1055				memcpy(eseg->inline_hdr.start, skb->data, ETH_HLEN + sizeof(*h6));

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
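
The undeclared 'h6' above is a missing local in mlx5i_sq_xmit(); a likely fix
(a sketch, mirroring the declaration added to mlx5e_sq_xmit_wqe()) would be:

	/* in mlx5i_sq_xmit(), next to the other locals */
	struct ipv6hdr *h6;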

Patch

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bf80fb6124499fc4e6a0310ab92c91159b4ccbbb..1c4ce90e5d0f5186c402137b744258ff4ce6a348 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4888,6 +4888,7 @@  static void mlx5e_build_nic_netdev(struct net_device *netdev)
 
 	netdev->priv_flags       |= IFF_UNICAST_FLT;
 
+	netif_set_tso_ipv6_max_size(netdev, 512 * 1024);
 	mlx5e_set_netdev_dev_addr(netdev);
 	mlx5e_ipsec_build_netdev(priv);
 	mlx5e_tls_build_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 7fd33b356cc8d191413e8259acd0b26b3ebd6ba9..fc945bd8219dcb69950b1840bb492649c8749976 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -40,6 +40,7 @@ 
 #include "en_accel/en_accel.h"
 #include "en_accel/ipsec_rxtx.h"
 #include "en/ptp.h"
+#include <net/ipv6.h>
 
 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
 {
@@ -241,8 +242,11 @@  mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		sq->stats->csum_none++;
 }
 
+/* Returns the number of header bytes that we plan
+ * to inline later in the transmit descriptor
+ */
 static inline u16
-mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
 {
 	struct mlx5e_sq_stats *stats = sq->stats;
 	u16 ihs;
@@ -252,15 +256,18 @@  mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
 		stats->tso_inner_packets++;
 		stats->tso_inner_bytes += skb->len - ihs;
 	} else {
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
 			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
-		else
+		} else {
+			if (ipv6_has_hopopt_jumbo(skb))
+				*hopbyhop = sizeof(struct hop_jumbo_hdr);
 			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		}
 		stats->tso_packets++;
-		stats->tso_bytes += skb->len - ihs;
+		stats->tso_bytes += skb->len - ihs - *hopbyhop;
 	}
 
-	return ihs;
+	return ihs - *hopbyhop;
 }
 
 static inline int
@@ -319,6 +326,7 @@  struct mlx5e_tx_attr {
 	__be16 mss;
 	u16 insz;
 	u8 opcode;
+	u8 hopbyhop;
 };
 
 struct mlx5e_tx_wqe_attr {
@@ -355,14 +363,16 @@  static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	struct mlx5e_sq_stats *stats = sq->stats;
 
 	if (skb_is_gso(skb)) {
-		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
+		int hopbyhop;
+		u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb, &hopbyhop);
 
 		*attr = (struct mlx5e_tx_attr) {
 			.opcode    = MLX5_OPCODE_LSO,
 			.mss       = cpu_to_be16(skb_shinfo(skb)->gso_size),
 			.ihs       = ihs,
 			.num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
-			.headlen   = skb_headlen(skb) - ihs,
+			.headlen   = skb_headlen(skb) - ihs - hopbyhop,
+			.hopbyhop  = hopbyhop,
 		};
 
 		stats->packets += skb_shinfo(skb)->gso_segs;
@@ -476,7 +486,8 @@  mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	struct mlx5_wqe_eth_seg  *eseg;
 	struct mlx5_wqe_data_seg *dseg;
 	struct mlx5e_tx_wqe_info *wi;
-
+	u16 ihs = attr->ihs;
+	struct ipv6hdr *h6;
 	struct mlx5e_sq_stats *stats = sq->stats;
 	int num_dma;
 
@@ -490,15 +501,36 @@  mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 
 	eseg->mss = attr->mss;
 
-	if (attr->ihs) {
-		if (skb_vlan_tag_present(skb)) {
-			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
-			mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
+	if (ihs) {
+		u8 *start = eseg->inline_hdr.start;
+
+		if (unlikely(attr->hopbyhop)) {
+			/* remove the HBH header.
+			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+			 */
+			if (skb_vlan_tag_present(skb)) {
+				mlx5e_insert_vlan(start, skb, ETH_HLEN + sizeof(*h6));
+				ihs += VLAN_HLEN;
+				h6 = (struct ipv6hdr *)(start + sizeof(struct vlan_ethhdr));
+			} else {
+				memcpy(start, skb->data, ETH_HLEN + sizeof(*h6));
+				h6 = (struct ipv6hdr *)(start + ETH_HLEN);
+			}
+			h6->nexthdr = IPPROTO_TCP;
+			/* Copy the TCP header after the IPv6 one */
+			memcpy(h6 + 1,
+			       skb->data + ETH_HLEN + sizeof(*h6) +
+					sizeof(struct hop_jumbo_hdr),
+			       tcp_hdrlen(skb));
+			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+		} else if (skb_vlan_tag_present(skb)) {
+			mlx5e_insert_vlan(start, skb, ihs);
+			ihs += VLAN_HLEN;
 			stats->added_vlan_packets++;
 		} else {
-			eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
-			memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
+			memcpy(start, skb->data, ihs);
 		}
+		eseg->inline_hdr.sz |= cpu_to_be16(ihs);
 		dseg += wqe_attr->ds_cnt_inl;
 	} else if (skb_vlan_tag_present(skb)) {
 		eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
@@ -509,7 +541,7 @@  mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	}
 
 	dseg += wqe_attr->ds_cnt_ids;
-	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
+	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs + attr->hopbyhop,
 					  attr->headlen, dseg);
 	if (unlikely(num_dma < 0))
 		goto err_drop;
@@ -1016,12 +1048,27 @@  void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 	eseg->mss = attr.mss;
 
 	if (attr.ihs) {
-		memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
+		if (unlikely(attr.hopbyhop)) {
+			/* remove the HBH header.
+			 * Layout: [Ethernet header][IPv6 header][HBH][TCP header]
+			 */
+			memcpy(eseg->inline_hdr.start, skb->data, ETH_HLEN + sizeof(*h6));
+			h6 = (struct ipv6hdr *)((char *)eseg->inline_hdr.start + ETH_HLEN);
+			h6->nexthdr = IPPROTO_TCP;
+			/* Copy the TCP header after the IPv6 one */
+			memcpy(h6 + 1,
+			       skb->data + ETH_HLEN + sizeof(*h6) +
+					sizeof(struct hop_jumbo_hdr),
+			       tcp_hdrlen(skb));
+			/* Leave ipv6 payload_len set to 0, as LSO v2 specs request. */
+		} else {
+			memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
+		}
 		eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
 		dseg += wqe_attr.ds_cnt_inl;
 	}
 
-	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
+	num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs + attr.hopbyhop,
 					  attr.headlen, dseg);
 	if (unlikely(num_dma < 0))
 		goto err_drop;