
[net-next] qed*: Utilize Firmware 8.15.3.0

Message ID 1489091851-1282-1-git-send-email-Yuval.Mintz@cavium.com (mailing list archive)
State Not Applicable, archived

Commit Message

Mintz, Yuval March 9, 2017, 8:37 p.m. UTC
This patch moves the qed* drivers to the newer firmware.
It solves several firmware bugs, mostly related to [but not limited to]
init/deinit issues in the various offloaded protocols.

It also introduces a major 4-Cached SGE change in firmware, which can be
seen in the storage drivers' changes.

In addition, this firmware is required for supporting the new QL41xxx
series of adapters; while this patch doesn't add the actual support,
the firmware contains the necessary initialization & firmware logic to
operate such adapters [actual support will be added later on].

Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Chad Dupuis <Chad.Dupuis@cavium.com>
Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
---
Hi Dave,

Sorry for the *very* long patch; due to lack of modularity
in the firmware, we don't have a very good way of splitting
it between the various drivers.
So I'm posting all of it to net-next [similarly to the way we
used to do bnx2x/cnic/bnx2i/bnx2fc] in a single patch, and CCing
all the other relevant mailing lists.
If someone can think of another, better alternative - I'm all ears.

Yuval
---
 drivers/infiniband/hw/qedr/main.c                  |    5 +-
 drivers/infiniband/hw/qedr/qedr.h                  |    3 +-
 drivers/infiniband/hw/qedr/qedr_cm.c               |    3 -
 drivers/infiniband/hw/qedr/qedr_hsi.h              |   56 -
 drivers/infiniband/hw/qedr/verbs.c                 |    3 +-
 drivers/net/ethernet/qlogic/qed/qed.h              |    2 +-
 drivers/net/ethernet/qlogic/qed/qed_cxt.c          |    5 +-
 drivers/net/ethernet/qlogic/qed/qed_debug.c        | 1560 ++++++++++++--------
 drivers/net/ethernet/qlogic/qed/qed_hsi.h          |  763 +++++++++-
 .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c    |   55 +-
 drivers/net/ethernet/qlogic/qed/qed_init_ops.c     |    2 +-
 drivers/net/ethernet/qlogic/qed/qed_ll2.c          |   48 +-
 drivers/net/ethernet/qlogic/qed/qed_reg_addr.h     |   30 +
 drivers/net/ethernet/qlogic/qed/qed_roce.c         |  220 ++-
 drivers/net/ethernet/qlogic/qed/qed_roce.h         |   11 +-
 drivers/net/ethernet/qlogic/qed/qed_spq.c          |   13 +-
 drivers/net/ethernet/qlogic/qede/qede.h            |    2 +-
 drivers/scsi/qedf/Makefile                         |    2 +-
 drivers/scsi/qedf/drv_fcoe_fw_funcs.c              |  191 +++
 drivers/scsi/qedf/drv_fcoe_fw_funcs.h              |   94 ++
 drivers/scsi/qedf/drv_scsi_fw_funcs.c              |   45 +
 drivers/scsi/qedf/drv_scsi_fw_funcs.h              |   86 ++
 drivers/scsi/qedf/qedf.h                           |   23 +-
 drivers/scsi/qedf/qedf_els.c                       |   25 +-
 drivers/scsi/qedf/qedf_io.c                        |  667 +++------
 drivers/scsi/qedi/Makefile                         |    2 +-
 drivers/scsi/qedi/qedi_fw.c                        | 1068 ++++++--------
 drivers/scsi/qedi/qedi_fw_api.c                    |  782 ++++++++++
 drivers/scsi/qedi/qedi_fw_iscsi.h                  |  117 ++
 drivers/scsi/qedi/qedi_fw_scsi.h                   |   55 +
 drivers/scsi/qedi/qedi_iscsi.c                     |   12 +-
 drivers/scsi/qedi/qedi_iscsi.h                     |    2 +-
 drivers/scsi/qedi/qedi_version.h                   |    4 +-
 include/linux/qed/common_hsi.h                     |   33 +-
 include/linux/qed/eth_common.h                     |    3 +
 include/linux/qed/fcoe_common.h                    |  180 +--
 include/linux/qed/iscsi_common.h                   |  241 ++-
 include/linux/qed/roce_common.h                    |   17 +
 include/linux/qed/storage_common.h                 |   30 +-
 include/linux/qed/tcp_common.h                     |    1 +
 40 files changed, 4237 insertions(+), 2224 deletions(-)
 delete mode 100644 drivers/infiniband/hw/qedr/qedr_hsi.h
 create mode 100644 drivers/scsi/qedf/drv_fcoe_fw_funcs.c
 create mode 100644 drivers/scsi/qedf/drv_fcoe_fw_funcs.h
 create mode 100644 drivers/scsi/qedf/drv_scsi_fw_funcs.c
 create mode 100644 drivers/scsi/qedf/drv_scsi_fw_funcs.h
 create mode 100644 drivers/scsi/qedi/qedi_fw_api.c
 create mode 100644 drivers/scsi/qedi/qedi_fw_iscsi.h
 create mode 100644 drivers/scsi/qedi/qedi_fw_scsi.h

Comments

Christoph Hellwig March 9, 2017, 9:45 p.m. UTC | #1
We can't just require a new firmware version in the driver, as
users most likely won't have it by the time they install the new
kernel.  So you'll have to support the old firmware version as well.
Mintz, Yuval March 10, 2017, 5:44 a.m. UTC | #2
> We can't just require a new firmware version in the driver, as users most
> likely won't have it by the time they install the new kernel.  So you'll have to
> support the old firmware version as well.

Why not? That has been the paradigm forever.

The new firmware version is already available in linux-firmware.
In any reasonable distro that would update the driver, you'd expect
they'd also update the firmware version on their filesystem.
Mintz, Yuval March 10, 2017, 5:49 a.m. UTC | #3
> > We can't just require a new firmware version in the driver, as users
> > most likely won't have it by the time they install the new kernel.  So
> > you'll have to support the old firmware version as well.
> 
> Why not? That has been the paradigm forever.
> 
> The new firmware version is already available in linux-firmware.
> In any reasonable distro that would update the driver, you'd expect they'd
> also update the firmware version on their filesystem.

Just in case it wasn't clear from the original message - 
The firmware discussed here is the binary firmware, not the
management firmware.
For the management firmware [on persistent storage] there is
a backward/forward compatibility scheme in place.
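
[A minimal sketch of the kind of fallback Christoph describes, built on
the kernel's request_firmware() flow. The file names below are
illustrative assumptions, not the driver's actual QED_FW_FILE_NAME
handling:

        #include <linux/device.h>
        #include <linux/firmware.h>

        /* Illustrative names; qed builds its real file name from the
         * firmware version macros in its HSI headers.
         */
        #define FW_FILE_NEW "qed/qed_init_values_zipped-8.15.3.0.bin"
        #define FW_FILE_OLD "qed/qed_init_values_zipped-8.10.10.0.bin"

        static int qed_example_request_fw(struct device *dev,
                                          const struct firmware **fw)
        {
                int rc;

                /* Prefer the firmware matching this driver release. */
                rc = request_firmware(fw, FW_FILE_NEW, dev);
                if (!rc)
                        return 0;

                /* Fall back to the previous release if the new file
                 * isn't on the filesystem yet.
                 */
                dev_warn(dev, "fw %s unavailable (%d), trying %s\n",
                         FW_FILE_NEW, rc, FW_FILE_OLD);
                return request_firmware(fw, FW_FILE_OLD, dev);
        }

Whether maintaining two firmware ABIs in one driver is worth it is the
actual point of contention above; the sketch only shows the load path.]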
kernel test robot March 10, 2017, 8:44 a.m. UTC | #4
Hi Yuval,

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Yuval-Mintz/qed-Utilize-Firmware-8-15-3-0/20170310-154337
config: xtensa-allmodconfig (attached as .config)
compiler: xtensa-linux-gcc (GCC) 4.9.0
reproduce:
        wget https://raw.githubusercontent.com/01org/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=xtensa 

All warnings (new ones prefixed by >>):

   In file included from include/linux/byteorder/big_endian.h:4:0,
                    from arch/xtensa/include/uapi/asm/byteorder.h:7,
                    from include/linux/qed/common_hsi.h:36,
                    from drivers/scsi/qedf/drv_scsi_fw_funcs.h:11,
                    from drivers/scsi/qedf/drv_fcoe_fw_funcs.h:11,
                    from drivers/scsi/qedf/drv_fcoe_fw_funcs.c:9:
   drivers/scsi/qedf/drv_fcoe_fw_funcs.c: In function 'init_initiator_rw_fcoe_task':
   include/uapi/linux/byteorder/big_endian.h:32:26: warning: large integer implicitly truncated to unsigned type [-Woverflow]
    #define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
                             ^
   include/linux/byteorder/generic.h:87:21: note: in expansion of macro '__cpu_to_le32'
    #define cpu_to_le32 __cpu_to_le32
                        ^
>> drivers/scsi/qedf/drv_fcoe_fw_funcs.c:63:31: note: in expansion of macro 'cpu_to_le32'
     t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
                                  ^
   drivers/scsi/qedf/drv_fcoe_fw_funcs.c: In function 'init_initiator_midpath_unsolicited_fcoe_task':
   include/uapi/linux/byteorder/big_endian.h:32:26: warning: large integer implicitly truncated to unsigned type [-Woverflow]
    #define __cpu_to_le32(x) ((__force __le32)__swab32((x)))
                             ^
   include/linux/byteorder/generic.h:87:21: note: in expansion of macro '__cpu_to_le32'
    #define cpu_to_le32 __cpu_to_le32
                        ^
   drivers/scsi/qedf/drv_fcoe_fw_funcs.c:155:31: note: in expansion of macro 'cpu_to_le32'
     t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
                                  ^

vim +/cpu_to_le32 +63 drivers/scsi/qedf/drv_fcoe_fw_funcs.c

     3	 *  Copyright (c) 2016 Cavium Inc.
     4	 *
     5	 *  This software is available under the terms of the GNU General Public License
     6	 *  (GPL) Version 2, available from the file COPYING in the main directory of
     7	 *  this source tree.
     8	 */
   > 9	#include "drv_fcoe_fw_funcs.h"
    10	#include "drv_scsi_fw_funcs.h"
    11	
    12	#define FCOE_RX_ID 0xFFFF
    13	
    14	static inline void init_common_sqe(struct fcoe_task_params *task_params,
    15					   enum fcoe_sqe_request_type request_type)
    16	{
    17		memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
    18		SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
    19			  request_type);
    20		task_params->sqe->task_id = task_params->itid;
    21	}
    22	
    23	int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
    24					struct scsi_sgl_task_params *sgl_task_params,
    25					struct regpair sense_data_buffer_phys_addr,
    26					u32 task_retry_id,
    27					u8 fcp_cmd_payload[32])
    28	{
    29		struct fcoe_task_context *ctx = task_params->context;
    30		struct ystorm_fcoe_task_st_ctx *y_st_ctx;
    31		struct tstorm_fcoe_task_st_ctx *t_st_ctx;
    32		struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
    33		struct mstorm_fcoe_task_st_ctx *m_st_ctx;
    34		u32 io_size, val;
    35		bool slow_sgl;
    36	
    37		memset(ctx, 0, sizeof(*(ctx)));
    38		slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
    39					    sgl_task_params->small_mid_sge);
    40		io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
    41			   task_params->tx_io_size : task_params->rx_io_size);
    42	
    43		/* Ystorm ctx */
    44		y_st_ctx = &ctx->ystorm_st_context;
    45		y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
    46		y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
    47		y_st_ctx->task_type = task_params->task_type;
    48		memcpy((void *)&y_st_ctx->tx_info_union.fcp_cmd_payload,
    49			       fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
    50	
    51		/* Tstorm ctx */
    52		t_st_ctx = &ctx->tstorm_st_context;
    53		t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
    54						FCOE_TASK_DEV_TYPE_TAPE :
    55						FCOE_TASK_DEV_TYPE_DISK);
    56		t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
    57		val = cpu_to_le32(task_params->cq_rss_number);
    58		t_st_ctx->read_only.glbl_q_num = val;
    59		t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
    60		t_st_ctx->read_only.task_type = task_params->task_type;
    61		SET_FIELD(t_st_ctx->read_write.flags,
    62			  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
  > 63		t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
    64	
    65		/* Ustorm ctx */
    66		u_ag_ctx = &ctx->ustorm_ag_context;

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
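
[The warning itself is straightforward: FCOE_RX_ID is 0xFFFF, and on a
big-endian build __swab32(0xFFFF) yields 0xFFFF0000, which is then
truncated on assignment. Assuming rx_id is a 16-bit (__le16) field in
the HSI headers, the likely fix is the 16-bit helper:

        -	t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
        +	t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);

This applies to both call sites flagged above, lines 63 and 155 of
drv_fcoe_fw_funcs.c.]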

Patch

diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index b9b47e5..ced0461 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -587,9 +587,8 @@  void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
 #define EVENT_TYPE_CQ		1
 #define EVENT_TYPE_QP		2
 	struct qedr_dev *dev = (struct qedr_dev *)context;
-	union event_ring_data *data = fw_handle;
-	u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
-			    data->roce_handle.lo;
+	struct regpair *async_handle = (struct regpair *)fw_handle;
+	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
 	u8 event_type = EVENT_TYPE_NOT_DEFINED;
 	struct ib_event event;
 	struct ib_cq *ibcq;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index bb32e47..5cb9195 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -38,7 +38,8 @@ 
 #include <linux/qed/qed_chain.h>
 #include <linux/qed/qed_roce_if.h>
 #include <linux/qed/qede_roce.h>
-#include "qedr_hsi.h"
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
 
 #define QEDR_MODULE_VERSION	"8.10.10.0"
 #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 6996328..a6280ce 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -43,14 +43,11 @@ 
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
 
-#include "qedr_hsi.h"
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_roce_if.h>
 #include "qedr.h"
-#include "qedr_hsi.h"
 #include "verbs.h"
 #include <rdma/qedr-abi.h>
-#include "qedr_hsi.h"
 #include "qedr_cm.h"
 
 void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
deleted file mode 100644
index 66d2752..0000000
--- a/drivers/infiniband/hw/qedr/qedr_hsi.h
+++ /dev/null
@@ -1,56 +0,0 @@ 
-/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __QED_HSI_ROCE__
-#define __QED_HSI_ROCE__
-
-#include <linux/qed/common_hsi.h>
-#include <linux/qed/roce_common.h>
-#include "qedr_hsi_rdma.h"
-
-/* Affiliated asynchronous events / errors enumeration */
-enum roce_async_events_type {
-	ROCE_ASYNC_EVENT_NONE = 0,
-	ROCE_ASYNC_EVENT_COMM_EST = 1,
-	ROCE_ASYNC_EVENT_SQ_DRAINED,
-	ROCE_ASYNC_EVENT_SRQ_LIMIT,
-	ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
-	ROCE_ASYNC_EVENT_CQ_ERR,
-	ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
-	ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
-	ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
-	ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
-	ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
-	ROCE_ASYNC_EVENT_SRQ_EMPTY,
-	MAX_ROCE_ASYNC_EVENTS_TYPE
-};
-
-#endif /* __QED_HSI_ROCE__ */
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6b3bb32..2091902 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -43,7 +43,8 @@ 
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
 
-#include "qedr_hsi.h"
+#include <linux/qed/common_hsi.h>
+#include "qedr_hsi_rdma.h"
 #include <linux/qed/qed_if.h>
 #include "qedr.h"
 #include "verbs.h"
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00c17fa..be99092 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -51,7 +51,7 @@ 
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.10.20"
+#define DRV_MODULE_VERSION "8.10.10.21"
 
 #define MAX_HWFNS_PER_DEVICE    (4)
 #define NAME_SIZE 16
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index d42d03d..89d210a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -71,8 +71,7 @@ 
 #define TM_ALIGN        BIT(TM_SHIFT)
 #define TM_ELEM_SIZE    4
 
-/* For RoCE we configure to 64K to cover for RoCE max tasks 256K purpose. */
-#define ILT_DEFAULT_HW_P_SIZE	(IS_ENABLED(CONFIG_QED_RDMA) ? 4 : 3)
+#define ILT_DEFAULT_HW_P_SIZE	4
 
 #define ILT_PAGE_IN_BYTES(hw_p_size)	(1U << ((hw_p_size) + 12))
 #define ILT_CFG_REG(cli, reg)	PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
@@ -1126,7 +1125,7 @@  int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
 	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
 	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
 	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
-	/* default ILT page size for all clients is 32K */
+	/* default ILT page size for all clients is 64K */
 	for (i = 0; i < ILT_CLI_MAX; i++)
 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
 
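[The comment fix in the hunk above follows directly from the
ILT_PAGE_IN_BYTES() macro quoted in this same file; a quick check of
the arithmetic, using nothing beyond what the patch shows:

        /* ILT_PAGE_IN_BYTES(hw_p_size) = 1U << ((hw_p_size) + 12)
         *   old non-RDMA default: 3 -> 1 << 15 = 32K
         *   new fixed default:    4 -> 1 << 16 = 64K
         * hence the default-page-size comment becomes 64K.
         */

]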
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 68f19ca..a597af7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -17,7 +17,6 @@ 
 
 /* Chip IDs enum */
 enum chip_ids {
-	CHIP_RESERVED,
 	CHIP_BB_B0,
 	CHIP_K2,
 	MAX_CHIP_IDS
@@ -40,6 +39,7 @@  enum mem_groups {
 	MEM_GROUP_BTB_RAM,
 	MEM_GROUP_RDIF_CTX,
 	MEM_GROUP_TDIF_CTX,
+	MEM_GROUP_CFC_MEM,
 	MEM_GROUP_CONN_CFC_MEM,
 	MEM_GROUP_TASK_CFC_MEM,
 	MEM_GROUP_CAU_PI,
@@ -72,6 +72,7 @@  enum mem_groups {
 	"BTB_RAM",
 	"RDIF_CTX",
 	"TDIF_CTX",
+	"CFC_MEM",
 	"CONN_CFC_MEM",
 	"TASK_CFC_MEM",
 	"CAU_PI",
@@ -185,13 +186,16 @@  struct dbg_array {
 	u32 size_in_dwords;
 };
 
+struct chip_platform_defs {
+	u8 num_ports;
+	u8 num_pfs;
+	u8 num_vfs;
+};
+
 /* Chip constant definitions */
 struct chip_defs {
 	const char *name;
-	struct {
-		u8 num_ports;
-		u8 num_pfs;
-	} per_platform[MAX_PLATFORM_IDS];
+	struct chip_platform_defs per_platform[MAX_PLATFORM_IDS];
 };
 
 /* Platform constant definitions */
@@ -405,22 +409,23 @@  struct phy_defs {
 /***************************** Constant Arrays *******************************/
 
 /* Debug arrays */
-static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {NULL} };
+static struct dbg_array s_dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE] = { {0} };
 
 /* Chip constant definitions array */
 static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
-	{ "reserved", { {0, 0}, {0, 0}, {0, 0}, {0, 0} } },
 	{ "bb_b0",
-	  { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB}, {0, 0}, {0, 0}, {0, 0} } },
-	{ "k2", { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2}, {0, 0}, {0, 0}, {0, 0} } }
+	  { {MAX_NUM_PORTS_BB, MAX_NUM_PFS_BB, MAX_NUM_VFS_BB}, {0, 0, 0},
+	    {0, 0, 0}, {0, 0, 0} } },
+	{ "k2",
+	  { {MAX_NUM_PORTS_K2, MAX_NUM_PFS_K2, MAX_NUM_VFS_K2}, {0, 0, 0},
+	    {0, 0, 0}, {0, 0, 0} } }
 };
 
 /* Storm constant definitions array */
 static struct storm_defs s_storm_defs[] = {
 	/* Tstorm */
 	{'T', BLOCK_TSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
-	  DBG_BUS_CLIENT_RBCT}, true,
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, true,
 	 TSEM_REG_FAST_MEMORY,
 	 TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
 	 TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
@@ -432,8 +437,7 @@  struct phy_defs {
 	 4, TCM_REG_SM_TASK_CTX},
 	/* Mstorm */
 	{'M', BLOCK_MSEM,
-	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT,
-	  DBG_BUS_CLIENT_RBCM}, false,
+	 {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, false,
 	 MSEM_REG_FAST_MEMORY,
 	 MSEM_REG_DBG_FRAME_MODE, MSEM_REG_SLOW_DBG_ACTIVE,
 	 MSEM_REG_SLOW_DBG_MODE, MSEM_REG_DBG_MODE1_CFG,
@@ -445,8 +449,7 @@  struct phy_defs {
 	 7, MCM_REG_SM_TASK_CTX},
 	/* Ustorm */
 	{'U', BLOCK_USEM,
-	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU,
-	  DBG_BUS_CLIENT_RBCU}, false,
+	 {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, false,
 	 USEM_REG_FAST_MEMORY,
 	 USEM_REG_DBG_FRAME_MODE, USEM_REG_SLOW_DBG_ACTIVE,
 	 USEM_REG_SLOW_DBG_MODE, USEM_REG_DBG_MODE1_CFG,
@@ -458,8 +461,7 @@  struct phy_defs {
 	 3, UCM_REG_SM_TASK_CTX},
 	/* Xstorm */
 	{'X', BLOCK_XSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
-	  DBG_BUS_CLIENT_RBCX}, false,
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, false,
 	 XSEM_REG_FAST_MEMORY,
 	 XSEM_REG_DBG_FRAME_MODE, XSEM_REG_SLOW_DBG_ACTIVE,
 	 XSEM_REG_SLOW_DBG_MODE, XSEM_REG_DBG_MODE1_CFG,
@@ -471,8 +473,7 @@  struct phy_defs {
 	 0, 0},
 	/* Ystorm */
 	{'Y', BLOCK_YSEM,
-	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX,
-	  DBG_BUS_CLIENT_RBCY}, false,
+	 {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, false,
 	 YSEM_REG_FAST_MEMORY,
 	 YSEM_REG_DBG_FRAME_MODE, YSEM_REG_SLOW_DBG_ACTIVE,
 	 YSEM_REG_SLOW_DBG_MODE, YSEM_REG_DBG_MODE1_CFG,
@@ -484,8 +485,7 @@  struct phy_defs {
 	 12, YCM_REG_SM_TASK_CTX},
 	/* Pstorm */
 	{'P', BLOCK_PSEM,
-	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS,
-	  DBG_BUS_CLIENT_RBCS}, true,
+	 {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, true,
 	 PSEM_REG_FAST_MEMORY,
 	 PSEM_REG_DBG_FRAME_MODE, PSEM_REG_SLOW_DBG_ACTIVE,
 	 PSEM_REG_SLOW_DBG_MODE, PSEM_REG_DBG_MODE1_CFG,
@@ -499,8 +499,9 @@  struct phy_defs {
 
 /* Block definitions array */
 static struct block_defs block_grc_defs = {
-	"grc", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+	"grc",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
 	GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
 	GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
 	GRC_REG_DBG_FORCE_FRAME,
@@ -508,29 +509,30 @@  struct phy_defs {
 };
 
 static struct block_defs block_miscs_defs = {
-	"miscs", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"miscs", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_defs = {
-	"misc", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"misc", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbu_defs = {
-	"dbu", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"dbu", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_pglue_b_defs = {
-	"pglue_b", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
+	"pglue_b",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCH, DBG_BUS_CLIENT_RBCH},
 	PGLUE_B_REG_DBG_SELECT, PGLUE_B_REG_DBG_DWORD_ENABLE,
 	PGLUE_B_REG_DBG_SHIFT, PGLUE_B_REG_DBG_FORCE_VALID,
 	PGLUE_B_REG_DBG_FORCE_FRAME,
@@ -538,8 +540,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_cnig_defs = {
-	"cnig", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+	"cnig",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
 	CNIG_REG_DBG_SELECT_K2, CNIG_REG_DBG_DWORD_ENABLE_K2,
 	CNIG_REG_DBG_SHIFT_K2, CNIG_REG_DBG_FORCE_VALID_K2,
 	CNIG_REG_DBG_FORCE_FRAME_K2,
@@ -547,15 +550,16 @@  struct phy_defs {
 };
 
 static struct block_defs block_cpmu_defs = {
-	"cpmu", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"cpmu", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 8
 };
 
 static struct block_defs block_ncsi_defs = {
-	"ncsi", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	"ncsi",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 	NCSI_REG_DBG_SELECT, NCSI_REG_DBG_DWORD_ENABLE,
 	NCSI_REG_DBG_SHIFT, NCSI_REG_DBG_FORCE_VALID,
 	NCSI_REG_DBG_FORCE_FRAME,
@@ -563,15 +567,16 @@  struct phy_defs {
 };
 
 static struct block_defs block_opte_defs = {
-	"opte", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"opte", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 4
 };
 
 static struct block_defs block_bmb_defs = {
-	"bmb", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
+	"bmb",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCB},
 	BMB_REG_DBG_SELECT, BMB_REG_DBG_DWORD_ENABLE,
 	BMB_REG_DBG_SHIFT, BMB_REG_DBG_FORCE_VALID,
 	BMB_REG_DBG_FORCE_FRAME,
@@ -579,8 +584,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pcie_defs = {
-	"pcie", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+	"pcie",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
 	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
 	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
 	PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -588,15 +594,16 @@  struct phy_defs {
 };
 
 static struct block_defs block_mcp_defs = {
-	"mcp", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"mcp", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_mcp2_defs = {
-	"mcp2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
+	"mcp2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCZ, DBG_BUS_CLIENT_RBCZ},
 	MCP2_REG_DBG_SELECT, MCP2_REG_DBG_DWORD_ENABLE,
 	MCP2_REG_DBG_SHIFT, MCP2_REG_DBG_FORCE_VALID,
 	MCP2_REG_DBG_FORCE_FRAME,
@@ -604,8 +611,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pswhst_defs = {
-	"pswhst", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswhst",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWHST_REG_DBG_SELECT, PSWHST_REG_DBG_DWORD_ENABLE,
 	PSWHST_REG_DBG_SHIFT, PSWHST_REG_DBG_FORCE_VALID,
 	PSWHST_REG_DBG_FORCE_FRAME,
@@ -613,8 +621,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pswhst2_defs = {
-	"pswhst2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswhst2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWHST2_REG_DBG_SELECT, PSWHST2_REG_DBG_DWORD_ENABLE,
 	PSWHST2_REG_DBG_SHIFT, PSWHST2_REG_DBG_FORCE_VALID,
 	PSWHST2_REG_DBG_FORCE_FRAME,
@@ -622,8 +631,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pswrd_defs = {
-	"pswrd", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswrd",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRD_REG_DBG_SELECT, PSWRD_REG_DBG_DWORD_ENABLE,
 	PSWRD_REG_DBG_SHIFT, PSWRD_REG_DBG_FORCE_VALID,
 	PSWRD_REG_DBG_FORCE_FRAME,
@@ -631,8 +641,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pswrd2_defs = {
-	"pswrd2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswrd2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRD2_REG_DBG_SELECT, PSWRD2_REG_DBG_DWORD_ENABLE,
 	PSWRD2_REG_DBG_SHIFT, PSWRD2_REG_DBG_FORCE_VALID,
 	PSWRD2_REG_DBG_FORCE_FRAME,
@@ -640,8 +651,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pswwr_defs = {
-	"pswwr", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswwr",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWWR_REG_DBG_SELECT, PSWWR_REG_DBG_DWORD_ENABLE,
 	PSWWR_REG_DBG_SHIFT, PSWWR_REG_DBG_FORCE_VALID,
 	PSWWR_REG_DBG_FORCE_FRAME,
@@ -649,15 +661,16 @@  struct phy_defs {
 };
 
 static struct block_defs block_pswwr2_defs = {
-	"pswwr2", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"pswwr2", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISC_PL_HV, 3
 };
 
 static struct block_defs block_pswrq_defs = {
-	"pswrq", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswrq",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRQ_REG_DBG_SELECT, PSWRQ_REG_DBG_DWORD_ENABLE,
 	PSWRQ_REG_DBG_SHIFT, PSWRQ_REG_DBG_FORCE_VALID,
 	PSWRQ_REG_DBG_FORCE_FRAME,
@@ -665,8 +678,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pswrq2_defs = {
-	"pswrq2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"pswrq2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PSWRQ2_REG_DBG_SELECT, PSWRQ2_REG_DBG_DWORD_ENABLE,
 	PSWRQ2_REG_DBG_SHIFT, PSWRQ2_REG_DBG_FORCE_VALID,
 	PSWRQ2_REG_DBG_FORCE_FRAME,
@@ -674,8 +688,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pglcs_defs = {
-	"pglcs", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+	"pglcs",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
 	PGLCS_REG_DBG_SELECT, PGLCS_REG_DBG_DWORD_ENABLE,
 	PGLCS_REG_DBG_SHIFT, PGLCS_REG_DBG_FORCE_VALID,
 	PGLCS_REG_DBG_FORCE_FRAME,
@@ -683,8 +698,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_ptu_defs = {
-	"ptu", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"ptu",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
 	PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
 	PTU_REG_DBG_FORCE_FRAME,
@@ -692,8 +708,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_dmae_defs = {
-	"dmae", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"dmae",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	DMAE_REG_DBG_SELECT, DMAE_REG_DBG_DWORD_ENABLE,
 	DMAE_REG_DBG_SHIFT, DMAE_REG_DBG_FORCE_VALID,
 	DMAE_REG_DBG_FORCE_FRAME,
@@ -701,8 +718,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_tcm_defs = {
-	"tcm", {true, true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	"tcm",
+	{true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TCM_REG_DBG_SELECT, TCM_REG_DBG_DWORD_ENABLE,
 	TCM_REG_DBG_SHIFT, TCM_REG_DBG_FORCE_VALID,
 	TCM_REG_DBG_FORCE_FRAME,
@@ -710,8 +728,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_mcm_defs = {
-	"mcm", {true, true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"mcm",
+	{true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	MCM_REG_DBG_SELECT, MCM_REG_DBG_DWORD_ENABLE,
 	MCM_REG_DBG_SHIFT, MCM_REG_DBG_FORCE_VALID,
 	MCM_REG_DBG_FORCE_FRAME,
@@ -719,8 +738,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_ucm_defs = {
-	"ucm", {true, true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"ucm",
+	{true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	UCM_REG_DBG_SELECT, UCM_REG_DBG_DWORD_ENABLE,
 	UCM_REG_DBG_SHIFT, UCM_REG_DBG_FORCE_VALID,
 	UCM_REG_DBG_FORCE_FRAME,
@@ -728,8 +748,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_xcm_defs = {
-	"xcm", {true, true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	"xcm",
+	{true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XCM_REG_DBG_SELECT, XCM_REG_DBG_DWORD_ENABLE,
 	XCM_REG_DBG_SHIFT, XCM_REG_DBG_FORCE_VALID,
 	XCM_REG_DBG_FORCE_FRAME,
@@ -737,8 +758,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_ycm_defs = {
-	"ycm", {true, true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	"ycm",
+	{true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
 	YCM_REG_DBG_SELECT, YCM_REG_DBG_DWORD_ENABLE,
 	YCM_REG_DBG_SHIFT, YCM_REG_DBG_FORCE_VALID,
 	YCM_REG_DBG_FORCE_FRAME,
@@ -746,8 +768,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pcm_defs = {
-	"pcm", {true, true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"pcm",
+	{true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PCM_REG_DBG_SELECT, PCM_REG_DBG_DWORD_ENABLE,
 	PCM_REG_DBG_SHIFT, PCM_REG_DBG_FORCE_VALID,
 	PCM_REG_DBG_FORCE_FRAME,
@@ -755,8 +778,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_qm_defs = {
-	"qm", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
+	"qm",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCQ},
 	QM_REG_DBG_SELECT, QM_REG_DBG_DWORD_ENABLE,
 	QM_REG_DBG_SHIFT, QM_REG_DBG_FORCE_VALID,
 	QM_REG_DBG_FORCE_FRAME,
@@ -764,8 +788,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_tm_defs = {
-	"tm", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"tm",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	TM_REG_DBG_SELECT, TM_REG_DBG_DWORD_ENABLE,
 	TM_REG_DBG_SHIFT, TM_REG_DBG_FORCE_VALID,
 	TM_REG_DBG_FORCE_FRAME,
@@ -773,8 +798,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_dorq_defs = {
-	"dorq", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	"dorq",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
 	DORQ_REG_DBG_SELECT, DORQ_REG_DBG_DWORD_ENABLE,
 	DORQ_REG_DBG_SHIFT, DORQ_REG_DBG_FORCE_VALID,
 	DORQ_REG_DBG_FORCE_FRAME,
@@ -782,8 +808,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_brb_defs = {
-	"brb", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+	"brb",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 	BRB_REG_DBG_SELECT, BRB_REG_DBG_DWORD_ENABLE,
 	BRB_REG_DBG_SHIFT, BRB_REG_DBG_FORCE_VALID,
 	BRB_REG_DBG_FORCE_FRAME,
@@ -791,8 +818,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_src_defs = {
-	"src", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	"src",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	SRC_REG_DBG_SELECT, SRC_REG_DBG_DWORD_ENABLE,
 	SRC_REG_DBG_SHIFT, SRC_REG_DBG_FORCE_VALID,
 	SRC_REG_DBG_FORCE_FRAME,
@@ -800,8 +828,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_prs_defs = {
-	"prs", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
+	"prs",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR},
 	PRS_REG_DBG_SELECT, PRS_REG_DBG_DWORD_ENABLE,
 	PRS_REG_DBG_SHIFT, PRS_REG_DBG_FORCE_VALID,
 	PRS_REG_DBG_FORCE_FRAME,
@@ -809,8 +838,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_tsdm_defs = {
-	"tsdm", {true, true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	"tsdm",
+	{true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TSDM_REG_DBG_SELECT, TSDM_REG_DBG_DWORD_ENABLE,
 	TSDM_REG_DBG_SHIFT, TSDM_REG_DBG_FORCE_VALID,
 	TSDM_REG_DBG_FORCE_FRAME,
@@ -818,8 +848,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_msdm_defs = {
-	"msdm", {true, true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"msdm",
+	{true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	MSDM_REG_DBG_SELECT, MSDM_REG_DBG_DWORD_ENABLE,
 	MSDM_REG_DBG_SHIFT, MSDM_REG_DBG_FORCE_VALID,
 	MSDM_REG_DBG_FORCE_FRAME,
@@ -827,8 +858,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_usdm_defs = {
-	"usdm", {true, true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"usdm",
+	{true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	USDM_REG_DBG_SELECT, USDM_REG_DBG_DWORD_ENABLE,
 	USDM_REG_DBG_SHIFT, USDM_REG_DBG_FORCE_VALID,
 	USDM_REG_DBG_FORCE_FRAME,
@@ -836,8 +868,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_xsdm_defs = {
-	"xsdm", {true, true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	"xsdm",
+	{true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XSDM_REG_DBG_SELECT, XSDM_REG_DBG_DWORD_ENABLE,
 	XSDM_REG_DBG_SHIFT, XSDM_REG_DBG_FORCE_VALID,
 	XSDM_REG_DBG_FORCE_FRAME,
@@ -845,8 +878,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_ysdm_defs = {
-	"ysdm", {true, true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	"ysdm",
+	{true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
 	YSDM_REG_DBG_SELECT, YSDM_REG_DBG_DWORD_ENABLE,
 	YSDM_REG_DBG_SHIFT, YSDM_REG_DBG_FORCE_VALID,
 	YSDM_REG_DBG_FORCE_FRAME,
@@ -854,8 +888,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_psdm_defs = {
-	"psdm", {true, true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"psdm",
+	{true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PSDM_REG_DBG_SELECT, PSDM_REG_DBG_DWORD_ENABLE,
 	PSDM_REG_DBG_SHIFT, PSDM_REG_DBG_FORCE_VALID,
 	PSDM_REG_DBG_FORCE_FRAME,
@@ -863,8 +898,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_tsem_defs = {
-	"tsem", {true, true, true}, true, DBG_TSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	"tsem",
+	{true, true}, true, DBG_TSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	TSEM_REG_DBG_SELECT, TSEM_REG_DBG_DWORD_ENABLE,
 	TSEM_REG_DBG_SHIFT, TSEM_REG_DBG_FORCE_VALID,
 	TSEM_REG_DBG_FORCE_FRAME,
@@ -872,8 +908,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_msem_defs = {
-	"msem", {true, true, true}, true, DBG_MSTORM_ID,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"msem",
+	{true, true}, true, DBG_MSTORM_ID,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	MSEM_REG_DBG_SELECT, MSEM_REG_DBG_DWORD_ENABLE,
 	MSEM_REG_DBG_SHIFT, MSEM_REG_DBG_FORCE_VALID,
 	MSEM_REG_DBG_FORCE_FRAME,
@@ -881,8 +918,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_usem_defs = {
-	"usem", {true, true, true}, true, DBG_USTORM_ID,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"usem",
+	{true, true}, true, DBG_USTORM_ID,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	USEM_REG_DBG_SELECT, USEM_REG_DBG_DWORD_ENABLE,
 	USEM_REG_DBG_SHIFT, USEM_REG_DBG_FORCE_VALID,
 	USEM_REG_DBG_FORCE_FRAME,
@@ -890,8 +928,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_xsem_defs = {
-	"xsem", {true, true, true}, true, DBG_XSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	"xsem",
+	{true, true}, true, DBG_XSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XSEM_REG_DBG_SELECT, XSEM_REG_DBG_DWORD_ENABLE,
 	XSEM_REG_DBG_SHIFT, XSEM_REG_DBG_FORCE_VALID,
 	XSEM_REG_DBG_FORCE_FRAME,
@@ -899,8 +938,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_ysem_defs = {
-	"ysem", {true, true, true}, true, DBG_YSTORM_ID,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+	"ysem",
+	{true, true}, true, DBG_YSTORM_ID,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
 	YSEM_REG_DBG_SELECT, YSEM_REG_DBG_DWORD_ENABLE,
 	YSEM_REG_DBG_SHIFT, YSEM_REG_DBG_FORCE_VALID,
 	YSEM_REG_DBG_FORCE_FRAME,
@@ -908,8 +948,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_psem_defs = {
-	"psem", {true, true, true}, true, DBG_PSTORM_ID,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"psem",
+	{true, true}, true, DBG_PSTORM_ID,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	PSEM_REG_DBG_SELECT, PSEM_REG_DBG_DWORD_ENABLE,
 	PSEM_REG_DBG_SHIFT, PSEM_REG_DBG_FORCE_VALID,
 	PSEM_REG_DBG_FORCE_FRAME,
@@ -917,8 +958,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_rss_defs = {
-	"rss", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+	"rss",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
 	RSS_REG_DBG_SELECT, RSS_REG_DBG_DWORD_ENABLE,
 	RSS_REG_DBG_SHIFT, RSS_REG_DBG_FORCE_VALID,
 	RSS_REG_DBG_FORCE_FRAME,
@@ -926,8 +968,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_tmld_defs = {
-	"tmld", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"tmld",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	TMLD_REG_DBG_SELECT, TMLD_REG_DBG_DWORD_ENABLE,
 	TMLD_REG_DBG_SHIFT, TMLD_REG_DBG_FORCE_VALID,
 	TMLD_REG_DBG_FORCE_FRAME,
@@ -935,8 +978,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_muld_defs = {
-	"muld", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"muld",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	MULD_REG_DBG_SELECT, MULD_REG_DBG_DWORD_ENABLE,
 	MULD_REG_DBG_SHIFT, MULD_REG_DBG_FORCE_VALID,
 	MULD_REG_DBG_FORCE_FRAME,
@@ -944,8 +988,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_yuld_defs = {
-	"yuld", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+	"yuld",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
 	YULD_REG_DBG_SELECT, YULD_REG_DBG_DWORD_ENABLE,
 	YULD_REG_DBG_SHIFT, YULD_REG_DBG_FORCE_VALID,
 	YULD_REG_DBG_FORCE_FRAME,
@@ -953,8 +998,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_xyld_defs = {
-	"xyld", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+	"xyld",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
 	XYLD_REG_DBG_SELECT, XYLD_REG_DBG_DWORD_ENABLE,
 	XYLD_REG_DBG_SHIFT, XYLD_REG_DBG_FORCE_VALID,
 	XYLD_REG_DBG_FORCE_FRAME,
@@ -962,8 +1008,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_prm_defs = {
-	"prm", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"prm",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	PRM_REG_DBG_SELECT, PRM_REG_DBG_DWORD_ENABLE,
 	PRM_REG_DBG_SHIFT, PRM_REG_DBG_FORCE_VALID,
 	PRM_REG_DBG_FORCE_FRAME,
@@ -971,8 +1018,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pbf_pb1_defs = {
-	"pbf_pb1", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	"pbf_pb1",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
 	PBF_PB1_REG_DBG_SELECT, PBF_PB1_REG_DBG_DWORD_ENABLE,
 	PBF_PB1_REG_DBG_SHIFT, PBF_PB1_REG_DBG_FORCE_VALID,
 	PBF_PB1_REG_DBG_FORCE_FRAME,
@@ -981,8 +1029,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pbf_pb2_defs = {
-	"pbf_pb2", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	"pbf_pb2",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
 	PBF_PB2_REG_DBG_SELECT, PBF_PB2_REG_DBG_DWORD_ENABLE,
 	PBF_PB2_REG_DBG_SHIFT, PBF_PB2_REG_DBG_FORCE_VALID,
 	PBF_PB2_REG_DBG_FORCE_FRAME,
@@ -991,8 +1040,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_rpb_defs = {
-	"rpb", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"rpb",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	RPB_REG_DBG_SELECT, RPB_REG_DBG_DWORD_ENABLE,
 	RPB_REG_DBG_SHIFT, RPB_REG_DBG_FORCE_VALID,
 	RPB_REG_DBG_FORCE_FRAME,
@@ -1000,8 +1050,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_btb_defs = {
-	"btb", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
+	"btb",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCR, DBG_BUS_CLIENT_RBCV},
 	BTB_REG_DBG_SELECT, BTB_REG_DBG_DWORD_ENABLE,
 	BTB_REG_DBG_SHIFT, BTB_REG_DBG_FORCE_VALID,
 	BTB_REG_DBG_FORCE_FRAME,
@@ -1009,8 +1060,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_pbf_defs = {
-	"pbf", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
+	"pbf",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCV},
 	PBF_REG_DBG_SELECT, PBF_REG_DBG_DWORD_ENABLE,
 	PBF_REG_DBG_SHIFT, PBF_REG_DBG_FORCE_VALID,
 	PBF_REG_DBG_FORCE_FRAME,
@@ -1018,8 +1070,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_rdif_defs = {
-	"rdif", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+	"rdif",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
 	RDIF_REG_DBG_SELECT, RDIF_REG_DBG_DWORD_ENABLE,
 	RDIF_REG_DBG_SHIFT, RDIF_REG_DBG_FORCE_VALID,
 	RDIF_REG_DBG_FORCE_FRAME,
@@ -1027,8 +1080,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_tdif_defs = {
-	"tdif", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+	"tdif",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
 	TDIF_REG_DBG_SELECT, TDIF_REG_DBG_DWORD_ENABLE,
 	TDIF_REG_DBG_SHIFT, TDIF_REG_DBG_FORCE_VALID,
 	TDIF_REG_DBG_FORCE_FRAME,
@@ -1036,8 +1090,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_cdu_defs = {
-	"cdu", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	"cdu",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	CDU_REG_DBG_SELECT, CDU_REG_DBG_DWORD_ENABLE,
 	CDU_REG_DBG_SHIFT, CDU_REG_DBG_FORCE_VALID,
 	CDU_REG_DBG_FORCE_FRAME,
@@ -1045,8 +1100,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_ccfc_defs = {
-	"ccfc", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	"ccfc",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	CCFC_REG_DBG_SELECT, CCFC_REG_DBG_DWORD_ENABLE,
 	CCFC_REG_DBG_SHIFT, CCFC_REG_DBG_FORCE_VALID,
 	CCFC_REG_DBG_FORCE_FRAME,
@@ -1054,8 +1110,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_tcfc_defs = {
-	"tcfc", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
+	"tcfc",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCF, DBG_BUS_CLIENT_RBCF},
 	TCFC_REG_DBG_SELECT, TCFC_REG_DBG_DWORD_ENABLE,
 	TCFC_REG_DBG_SHIFT, TCFC_REG_DBG_FORCE_VALID,
 	TCFC_REG_DBG_FORCE_FRAME,
@@ -1063,8 +1120,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_igu_defs = {
-	"igu", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"igu",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	IGU_REG_DBG_SELECT, IGU_REG_DBG_DWORD_ENABLE,
 	IGU_REG_DBG_SHIFT, IGU_REG_DBG_FORCE_VALID,
 	IGU_REG_DBG_FORCE_FRAME,
@@ -1072,8 +1130,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_cau_defs = {
-	"cau", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
+	"cau",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCP, DBG_BUS_CLIENT_RBCP},
 	CAU_REG_DBG_SELECT, CAU_REG_DBG_DWORD_ENABLE,
 	CAU_REG_DBG_SHIFT, CAU_REG_DBG_FORCE_VALID,
 	CAU_REG_DBG_FORCE_FRAME,
@@ -1081,8 +1140,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_umac_defs = {
-	"umac", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+	"umac",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
 	UMAC_REG_DBG_SELECT, UMAC_REG_DBG_DWORD_ENABLE,
 	UMAC_REG_DBG_SHIFT, UMAC_REG_DBG_FORCE_VALID,
 	UMAC_REG_DBG_FORCE_FRAME,
@@ -1090,22 +1150,23 @@  struct phy_defs {
 };
 
 static struct block_defs block_xmac_defs = {
-	"xmac", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"xmac", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_dbg_defs = {
-	"dbg", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"dbg", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, true, DBG_RESET_REG_MISC_PL_PDA_VAUX, 3
 };
 
 static struct block_defs block_nig_defs = {
-	"nig", {true, true, true}, false, 0,
-	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
+	"nig",
+	{true, true}, false, 0,
+	{DBG_BUS_CLIENT_RBCN, DBG_BUS_CLIENT_RBCN},
 	NIG_REG_DBG_SELECT, NIG_REG_DBG_DWORD_ENABLE,
 	NIG_REG_DBG_SHIFT, NIG_REG_DBG_FORCE_VALID,
 	NIG_REG_DBG_FORCE_FRAME,
@@ -1113,8 +1174,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_wol_defs = {
-	"wol", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+	"wol",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
 	WOL_REG_DBG_SELECT, WOL_REG_DBG_DWORD_ENABLE,
 	WOL_REG_DBG_SHIFT, WOL_REG_DBG_FORCE_VALID,
 	WOL_REG_DBG_FORCE_FRAME,
@@ -1122,8 +1184,9 @@  struct phy_defs {
 };
 
 static struct block_defs block_bmbn_defs = {
-	"bmbn", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
+	"bmbn",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCB},
 	BMBN_REG_DBG_SELECT, BMBN_REG_DBG_DWORD_ENABLE,
 	BMBN_REG_DBG_SHIFT, BMBN_REG_DBG_FORCE_VALID,
 	BMBN_REG_DBG_FORCE_FRAME,
@@ -1131,15 +1194,16 @@  struct phy_defs {
 };
 
 static struct block_defs block_ipc_defs = {
-	"ipc", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"ipc", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	true, false, DBG_RESET_REG_MISCS_PL_UA, 8
 };
 
 static struct block_defs block_nwm_defs = {
-	"nwm", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+	"nwm",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
 	NWM_REG_DBG_SELECT, NWM_REG_DBG_DWORD_ENABLE,
 	NWM_REG_DBG_SHIFT, NWM_REG_DBG_FORCE_VALID,
 	NWM_REG_DBG_FORCE_FRAME,
@@ -1147,22 +1211,29 @@  struct phy_defs {
 };
 
 static struct block_defs block_nws_defs = {
-	"nws", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
+	"nws",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCW},
+	NWS_REG_DBG_SELECT, NWS_REG_DBG_DWORD_ENABLE,
+	NWS_REG_DBG_SHIFT, NWS_REG_DBG_FORCE_VALID,
+	NWS_REG_DBG_FORCE_FRAME,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 12
 };
 
 static struct block_defs block_ms_defs = {
-	"ms", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
-	0, 0, 0, 0, 0,
+	"ms",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCZ},
+	MS_REG_DBG_SELECT, MS_REG_DBG_DWORD_ENABLE,
+	MS_REG_DBG_SHIFT, MS_REG_DBG_FORCE_VALID,
+	MS_REG_DBG_FORCE_FRAME,
 	true, false, DBG_RESET_REG_MISCS_PL_HV, 13
 };
 
 static struct block_defs block_phy_pcie_defs = {
-	"phy_pcie", {false, false, true}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
+	"phy_pcie",
+	{false, true}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, DBG_BUS_CLIENT_RBCH},
 	PCIE_REG_DBG_COMMON_SELECT, PCIE_REG_DBG_COMMON_DWORD_ENABLE,
 	PCIE_REG_DBG_COMMON_SHIFT, PCIE_REG_DBG_COMMON_FORCE_VALID,
 	PCIE_REG_DBG_COMMON_FORCE_FRAME,
@@ -1170,22 +1241,57 @@  struct phy_defs {
 };
 
 static struct block_defs block_led_defs = {
-	"led", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"led", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	true, false, DBG_RESET_REG_MISCS_PL_HV, 14
+};
+
+static struct block_defs block_avs_wrap_defs = {
+	"avs_wrap", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
+};
+
+static struct block_defs block_rgfs_defs = {
+	"rgfs", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
-	true, true, DBG_RESET_REG_MISCS_PL_HV, 14
+	false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_tgfs_defs = {
+	"tgfs", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ptld_defs = {
+	"ptld", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	false, false, MAX_DBG_RESET_REGS, 0
+};
+
+static struct block_defs block_ypld_defs = {
+	"ypld", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	0, 0, 0, 0, 0,
+	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_misc_aeu_defs = {
-	"misc_aeu", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"misc_aeu", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
 
 static struct block_defs block_bar0_map_defs = {
-	"bar0_map", {false, false, false}, false, 0,
-	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
+	"bar0_map", {false, false}, false, 0,
+	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
 	0, 0, 0, 0, 0,
 	false, false, MAX_DBG_RESET_REGS, 0
 };
@@ -1269,6 +1375,11 @@  struct phy_defs {
 	&block_ms_defs,
 	&block_phy_pcie_defs,
 	&block_led_defs,
+	&block_avs_wrap_defs,
+	&block_rgfs_defs,
+	&block_tgfs_defs,
+	&block_ptld_defs,
+	&block_ypld_defs,
 	&block_misc_aeu_defs,
 	&block_bar0_map_defs,
 };
@@ -1281,65 +1392,67 @@  struct phy_defs {
 };
 
 static struct grc_param_defs s_grc_param_defs[] = {
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_TSTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_MSTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_USTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_XSTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_YSTORM */
-	{{1, 1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_PSTORM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_REGS */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RAM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PBUF */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IOR */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_VFC */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM_CTX */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_ILT */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RSS */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CAU */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_QM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MCP */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_RESERVED */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CFC */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IGU */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BRB */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BTB */
-	{{0, 0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BMB */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_NIG */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MULD */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PRS */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DMAE */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_TM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_SDM */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DIF */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_STATIC */
-	{{0, 0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_UNSTALL */
-	{{MAX_LCIDS, MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_TSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_MSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_USTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_XSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_YSTORM */
+	{{1, 1}, 0, 1, false, 1, 1},	/* DBG_GRC_PARAM_DUMP_PSTORM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_REGS */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RAM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PBUF */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IOR */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_VFC */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM_CTX */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_ILT */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_RSS */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CAU */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_QM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MCP */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_RESERVED */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CFC */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_IGU */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BRB */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BTB */
+	{{0, 0}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_BMB */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_NIG */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_MULD */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PRS */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DMAE */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_TM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_SDM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_DIF */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_STATIC */
+	{{0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_UNSTALL */
+	{{MAX_LCIDS, MAX_LCIDS}, 1, MAX_LCIDS, false, MAX_LCIDS,
 	 MAX_LCIDS},			/* DBG_GRC_PARAM_NUM_LCIDS */
-	{{MAX_LTIDS, MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
+	{{MAX_LTIDS, MAX_LTIDS}, 1, MAX_LTIDS, false, MAX_LTIDS,
 	 MAX_LTIDS},			/* DBG_GRC_PARAM_NUM_LTIDS */
-	{{0, 0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_EXCLUDE_ALL */
-	{{0, 0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_CRASH */
-	{{0, 0, 0}, 0, 1, false, 1, 0},	/* DBG_GRC_PARAM_PARITY_SAFE */
-	{{1, 1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM */
-	{{1, 1, 1}, 0, 1, false, 0, 1}	/* DBG_GRC_PARAM_DUMP_PHY */
+	{{0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_EXCLUDE_ALL */
+	{{0, 0}, 0, 1, true, 0, 0},	/* DBG_GRC_PARAM_CRASH */
+	{{0, 0}, 0, 1, false, 1, 0},	/* DBG_GRC_PARAM_PARITY_SAFE */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_CM */
+	{{1, 1}, 0, 1, false, 0, 1},	/* DBG_GRC_PARAM_DUMP_PHY */
+	{{0, 0}, 0, 1, false, 0, 0},	/* DBG_GRC_PARAM_NO_MCP */
+	{{0, 0}, 0, 1, false, 0, 0}	/* DBG_GRC_PARAM_NO_FW_VER */
 };
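
For readers mapping the initializer columns above: each row follows the grc_param_defs layout. The field names below are taken from this file's structure definitions and may drift between firmware drops, so treat this as a sketch:

	struct grc_param_defs {
		u32 default_val[MAX_CHIP_IDS];	/* per-chip default (now 2 entries) */
		u32 min;			/* minimum legal value */
		u32 max;			/* maximum legal value */
		bool is_preset;			/* preset pseudo-param? */
		u32 exclude_all_preset_val;	/* value under the EXCLUDE_ALL preset */
		u32 crash_preset_val;		/* value under the CRASH preset */
	};

The per-chip arrays contract from three entries to two throughout this hunk; the same change shows up in s_rss_mem_defs, s_big_ram_defs and s_reset_regs_defs below, matching a smaller MAX_CHIP_IDS in the new HSI.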
 
 static struct rss_mem_defs s_rss_mem_defs[] = {
 	{ "rss_mem_cid", "rss_cid", 0,
-	  {256, 256, 320},
-	  {32, 32, 32} },
+	  {256, 320},
+	  {32, 32} },
 	{ "rss_mem_key_msb", "rss_key", 1024,
-	  {128, 128, 208},
-	  {256, 256, 256} },
+	  {128, 208},
+	  {256, 256} },
 	{ "rss_mem_key_lsb", "rss_key", 2048,
-	  {128, 128, 208},
-	  {64, 64, 64} },
+	  {128, 208},
+	  {64, 64} },
 	{ "rss_mem_info", "rss_info", 3072,
-	  {128, 128, 208},
-	  {16, 16, 16} },
+	  {128, 208},
+	  {16, 16} },
 	{ "rss_mem_ind", "rss_ind", 4096,
-	  {(128 * 128), (128 * 128), (128 * 208)},
-	  {16, 16, 16} }
+	  {(128 * 128), (128 * 208)},
+	  {16, 16} }
 };
 
 static struct vfc_ram_defs s_vfc_ram_defs[] = {
@@ -1352,32 +1465,32 @@  struct phy_defs {
 static struct big_ram_defs s_big_ram_defs[] = {
 	{ "BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
 	  BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
-	  {4800, 4800, 5632} },
+	  {4800, 5632} },
 	{ "BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
 	  BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
-	  {2880, 2880, 3680} },
+	  {2880, 3680} },
 	{ "BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
 	  BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
-	  {1152, 1152, 1152} }
+	  {1152, 1152} }
 };
 
 static struct reset_reg_defs s_reset_regs_defs[] = {
 	{ MISCS_REG_RESET_PL_UA, 0x0,
-	  {true, true, true} },		/* DBG_RESET_REG_MISCS_PL_UA */
+	  {true, true} },		/* DBG_RESET_REG_MISCS_PL_UA */
 	{ MISCS_REG_RESET_PL_HV, 0x0,
-	  {true, true, true} },		/* DBG_RESET_REG_MISCS_PL_HV */
+	  {true, true} },		/* DBG_RESET_REG_MISCS_PL_HV */
 	{ MISCS_REG_RESET_PL_HV_2, 0x0,
-	  {false, false, true} },	/* DBG_RESET_REG_MISCS_PL_HV_2 */
+	  {false, true} },	/* DBG_RESET_REG_MISCS_PL_HV_2 */
 	{ MISC_REG_RESET_PL_UA, 0x0,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_UA */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_UA */
 	{ MISC_REG_RESET_PL_HV, 0x0,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_HV */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_HV */
 	{ MISC_REG_RESET_PL_PDA_VMAIN_1, 0x4404040,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_1 */
 	{ MISC_REG_RESET_PL_PDA_VMAIN_2, 0x7c00007,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VMAIN_2 */
 	{ MISC_REG_RESET_PL_PDA_VAUX, 0x2,
-	  {true, true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
+	  {true, true} },		/* DBG_RESET_REG_MISC_PL_PDA_VAUX */
 };
 
 static struct phy_defs s_phy_defs[] = {
@@ -1410,6 +1523,26 @@  static u32 qed_read_unaligned_dword(u8 *buf)
 	return dword;
 }
 
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+			     enum dbg_grc_params grc_param)
+{
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+	return dev_data->grc.param_val[grc_param];
+}
+
+/* Initializes the GRC parameters */
+static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
+{
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+	if (!dev_data->grc.params_initialized) {
+		qed_dbg_grc_set_params_default(p_hwfn);
+		dev_data->grc.params_initialized = 1;
+	}
+}
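
The two helpers above change the GRC-param lifecycle: defaults are applied once, lazily, when debug data is first initialized, instead of at the start of every dump; as the hunks near the end of this file show, each dump API then reverts to defaults on exit. A sketch of the resulting flow:

	/* New lifecycle (sketch, not a literal call chain):
	 *
	 *   qed_dbg_dev_init()
	 *     -> qed_dbg_grc_init_params()          first call only
	 *          -> qed_dbg_grc_set_params_default()
	 *   ...user overrides individual params...
	 *   qed_dbg_grc_dump()
	 *     -> qed_grc_dump(...)                   uses current params
	 *     -> qed_dbg_grc_set_params_default()    revert on exit
	 */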
+
 /* Initializes debug data for the specified device */
 static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
 					struct qed_ptt *p_ptt)
@@ -1431,6 +1564,10 @@  static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn,
 
 	dev_data->platform_id = PLATFORM_ASIC;
 	dev_data->mode_enable[MODE_ASIC] = 1;
+
+	/* Initialize the GRC parameters */
+	qed_dbg_grc_init_params(p_hwfn);
+
 	dev_data->initialized = true;
 	return DBG_STATUS_OK;
 }
@@ -1561,7 +1698,7 @@  static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
 	int printed_chars;
 	u32 offset = 0;
 
-	if (dump) {
+	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
 		/* Read FW image/version from PRAM in a non-reset SEMI */
 		bool found = false;
 		u8 storm_id;
@@ -1622,7 +1759,7 @@  static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
 {
 	char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
 
-	if (dump) {
+	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
 		u32 global_section_offsize, global_section_addr, mfw_ver;
 		u32 public_data_addr, global_section_offsize_addr;
 		int printed_chars;
@@ -1683,15 +1820,13 @@  static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
 					 bool dump,
 					 u8 num_specific_global_params)
 {
+	u8 num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params;
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 	u32 offset = 0;
 
 	/* Find platform string and dump global params section header */
 	offset += qed_dump_section_hdr(dump_buf + offset,
-				       dump,
-				       "global_params",
-				       NUM_COMMON_GLOBAL_PARAMS +
-				       num_specific_global_params);
+				       dump, "global_params", num_params);
 
 	/* Store params */
 	offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
@@ -1815,37 +1950,6 @@  static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
 	}
 }
 
-/* Returns the value of the specified GRC param */
-static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
-			     enum dbg_grc_params grc_param)
-{
-	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-
-	return dev_data->grc.param_val[grc_param];
-}
-
-/* Clear all GRC params */
-static void qed_dbg_grc_clear_params(struct qed_hwfn *p_hwfn)
-{
-	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-	u32 i;
-
-	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
-		dev_data->grc.param_set_by_user[i] = 0;
-}
-
-/* Assign default GRC param values */
-static void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
-{
-	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-	u32 i;
-
-	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
-		if (!dev_data->grc.param_set_by_user[i])
-			dev_data->grc.param_val[i] =
-			    s_grc_param_defs[i].default_val[dev_data->chip_id];
-}
-
 /* Returns true if the specified entity (indicated by GRC param) should be
  * included in the dump, false otherwise.
  */
@@ -1971,7 +2075,7 @@  static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
 	}
 }
 
-/* Returns the attention name offsets of the specified block */
+/* Returns the attention block data of the specified block */
 static const struct dbg_attn_block_type_data *
 qed_get_block_attn_data(enum block_id block_id, enum dbg_attn_type attn_type)
 {
@@ -2040,7 +2144,7 @@  static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
  * The following parameters are dumped:
  * - 'count' = num_dumped_entries
  * - 'split' = split_type
- * - 'id'i = split_id (dumped only if split_id >= 0)
+ * - 'id' = split_id (dumped only if split_id >= 0)
  * - 'param_name' = param_val (user param, dumped only if param_name != NULL and
  *	param_val != NULL)
  */
@@ -2069,21 +2173,81 @@  static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
 	return offset;
 }
 
-/* Dumps GRC register/memory. Returns the dumped size in dwords. */
+/* Dumps the GRC registers in the specified address range.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt, u32 *dump_buf,
+				   bool dump, u32 addr, u32 len)
+{
+	u32 byte_addr = DWORDS_TO_BYTES(addr), offset = 0, i;
+
+	if (dump)
+		for (i = 0; i < len; i++, byte_addr += BYTES_IN_DWORD, offset++)
+			*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
+	else
+		offset += len;
+	return offset;
+}
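
qed_grc_dump_addr_range() is the workhorse most read loops in this file collapse into: with dump=true it reads len dwords starting at a dword-based GRC address, with dump=false it only accounts for the space. Every dump feature here runs the same two-pass pattern; a hedged sketch:

	u32 words;

	/* pass 1: dump == false only returns the length, no GRC reads */
	words = qed_grc_dump_addr_range(p_hwfn, p_ptt, buf, false, addr, len);

	/* pass 2: dump == true performs the qed_rd() loop into buf */
	words = qed_grc_dump_addr_range(p_hwfn, p_ptt, buf, true, addr, len);

Note that addr is in dwords, hence the BYTES_TO_DWORDS() conversions now sprinkled through the callers.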
+
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, bool dump, u32 addr,
+				      u32 len)
+{
+	if (dump)
+		*dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+	return 1;
+}
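
The sequence header packs the dword address and length into one dword via REG_DUMP_LEN_SHIFT (defined elsewhere in this file, so the exact bit split is an assumption here). A parser-side sketch of the inverse:

	/* decode sketch - assumes addr fits below REG_DUMP_LEN_SHIFT */
	u32 hdr  = *dump_buf;
	u32 addr = hdr & (BIT(REG_DUMP_LEN_SHIFT) - 1);	/* in dwords */
	u32 len  = hdr >> REG_DUMP_LEN_SHIFT;		/* in dwords */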
+
+/* Dumps GRC registers sequence. Returns the dumped size in dwords. */
 static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
 				  struct qed_ptt *p_ptt, u32 *dump_buf,
 				  bool dump, u32 addr, u32 len)
 {
-	u32 offset = 0, i;
+	u32 offset = 0;
+
+	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
+	offset += qed_grc_dump_addr_range(p_hwfn,
+					  p_ptt,
+					  dump_buf + offset, dump, addr, len);
+	return offset;
+}
+
+/* Dumps GRC registers sequence with skip cycle.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt, u32 *dump_buf,
+				       bool dump, u32 addr, u32 total_len,
+				       u32 read_len, u32 skip_len)
+{
+	u32 offset = 0, reg_offset = 0;
 
+	offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
 	if (dump) {
-		*(dump_buf + offset++) = addr | (len << REG_DUMP_LEN_SHIFT);
-		for (i = 0; i < len; i++, addr++, offset++)
-			*(dump_buf + offset) = qed_rd(p_hwfn,
-						      p_ptt,
-						      DWORDS_TO_BYTES(addr));
+		while (reg_offset < total_len) {
+			u32 curr_len = min_t(u32,
+					     read_len,
+					     total_len - reg_offset);
+			offset += qed_grc_dump_addr_range(p_hwfn,
+							  p_ptt,
+							  dump_buf + offset,
+							  dump, addr, curr_len);
+			reg_offset += curr_len;
+			addr += curr_len;
+			if (reg_offset < total_len) {
+				curr_len = min_t(u32,
+						 skip_len,
+						 total_len - reg_offset);
+				memset(dump_buf + offset, 0,
+				       DWORDS_TO_BYTES(curr_len));
+				offset += curr_len;
+				reg_offset += curr_len;
+				addr += curr_len;
+			}
+		}
 	} else {
-		offset += len + 1;
+		offset += total_len;
 	}
 
 	return offset;
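
The skip variant serves register files where every N-th dword must not be read (see qed_grc_dump_special_regs() further down); skipped dwords are zero-filled so the consumer's offsets still line up with the hardware layout. A worked example with the RDIF/TDIF parameters used later (read_len = 7, skip_len = 1):

	/* total_len = 16, read_len = 7, skip_len = 1:
	 *   iteration 1: read dwords 0..6,  zero-fill dword 7
	 *   iteration 2: read dwords 8..14, zero-fill dword 15
	 * => 1 header dword + 16 data dwords in the buffer, with every
	 *    8th data dword zeroed instead of read.
	 */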
@@ -2124,14 +2288,17 @@  static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
 				const struct dbg_dump_reg *reg =
 				    (const struct dbg_dump_reg *)
 				    &input_regs_arr.ptr[input_offset];
+				u32 addr, len;
 
+				addr = GET_FIELD(reg->data,
+						 DBG_DUMP_REG_ADDRESS);
+				len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
 				offset +=
-					qed_grc_dump_reg_entry(p_hwfn, p_ptt,
-						    dump_buf + offset, dump,
-						    GET_FIELD(reg->data,
-							DBG_DUMP_REG_ADDRESS),
-						    GET_FIELD(reg->data,
-							DBG_DUMP_REG_LENGTH));
+				    qed_grc_dump_reg_entry(p_hwfn, p_ptt,
+							   dump_buf + offset,
+							   dump,
+							   addr,
+							   len);
 				(*num_dumped_reg_entries)++;
 			}
 		} else {
@@ -2194,8 +2361,14 @@  static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 				  const char *param_name, const char *param_val)
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+	struct chip_platform_defs *p_platform_defs;
 	u32 offset = 0, input_offset = 0;
-	u8 port_id, pf_id;
+	struct chip_defs *p_chip_defs;
+	u8 port_id, pf_id, vf_id;
+	u16 fid;
+
+	p_chip_defs = &s_chip_defs[dev_data->chip_id];
+	p_platform_defs = &p_chip_defs->per_platform[dev_data->platform_id];
 
 	if (dump)
 		DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, "Dumping registers...\n");
@@ -2214,7 +2387,6 @@  static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 
 		switch (split_type_id) {
 		case SPLIT_TYPE_NONE:
-		case SPLIT_TYPE_VF:
 			offset += qed_grc_dump_split_data(p_hwfn,
 							  p_ptt,
 							  curr_input_regs_arr,
@@ -2227,10 +2399,7 @@  static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 							  param_val);
 			break;
 		case SPLIT_TYPE_PORT:
-			for (port_id = 0;
-			     port_id <
-			     s_chip_defs[dev_data->chip_id].
-			     per_platform[dev_data->platform_id].num_ports;
+			for (port_id = 0; port_id < p_platform_defs->num_ports;
 			     port_id++) {
 				if (dump)
 					qed_port_pretend(p_hwfn, p_ptt,
@@ -2247,20 +2416,48 @@  static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 			break;
 		case SPLIT_TYPE_PF:
 		case SPLIT_TYPE_PORT_PF:
-			for (pf_id = 0;
-			     pf_id <
-			     s_chip_defs[dev_data->chip_id].
-			     per_platform[dev_data->platform_id].num_pfs;
+			for (pf_id = 0; pf_id < p_platform_defs->num_pfs;
 			     pf_id++) {
-				if (dump)
-					qed_fid_pretend(p_hwfn, p_ptt, pf_id);
-				offset += qed_grc_dump_split_data(p_hwfn,
-							p_ptt,
-							curr_input_regs_arr,
-							dump_buf + offset,
-							dump, block_enable,
-							"pf", pf_id, param_name,
-							param_val);
+				u8 pfid_shift =
+					PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+
+				if (dump) {
+					fid = pf_id << pfid_shift;
+					qed_fid_pretend(p_hwfn, p_ptt, fid);
+				}
+
+				offset +=
+				    qed_grc_dump_split_data(p_hwfn, p_ptt,
+							    curr_input_regs_arr,
+							    dump_buf + offset,
+							    dump, block_enable,
+							    "pf", pf_id,
+							    param_name,
+							    param_val);
+			}
+			break;
+		case SPLIT_TYPE_VF:
+			for (vf_id = 0; vf_id < p_platform_defs->num_vfs;
+			     vf_id++) {
+				u8 vfvalid_shift =
+					PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT;
+				u8 vfid_shift =
+					PXP_PRETEND_CONCRETE_FID_VFID_SHIFT;
+
+				if (dump) {
+					fid = BIT(vfvalid_shift) |
+					      (vf_id << vfid_shift);
+					qed_fid_pretend(p_hwfn, p_ptt, fid);
+				}
+
+				offset +=
+				    qed_grc_dump_split_data(p_hwfn, p_ptt,
+							    curr_input_regs_arr,
+							    dump_buf + offset,
+							    dump, block_enable,
+							    "vf", vf_id,
+							    param_name,
+							    param_val);
 			}
 			break;
 		default:
@@ -2271,8 +2468,11 @@  static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Pretend to original PF */
-	if (dump)
-		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+	if (dump) {
+		fid = p_hwfn->rel_pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;
+		qed_fid_pretend(p_hwfn, p_ptt, fid);
+	}
+
 	return offset;
 }
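
The split handling above relies on PXP "pretend": writing a concrete FID makes subsequent GRC accesses execute on behalf of that function. The patch now composes the concrete FID explicitly rather than passing a bare pf_id; the shape of the composition, using only the shift macros the hunk itself references (bit positions live in the HSI headers):

	/* PF: pfid in its field, vfvalid clear */
	fid = pf_id << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT;

	/* VF: vfvalid bit set, vfid in its field */
	fid = BIT(PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT) |
	      (vf_id << PXP_PRETEND_CONCRETE_FID_VFID_SHIFT);

	qed_fid_pretend(p_hwfn, p_ptt, fid);

This also explains the final hunk: reverting to the original PF must build the same concrete-FID value instead of the raw rel_pf_id.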
 
@@ -2291,13 +2491,14 @@  static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
 	/* Write reset registers */
 	for (i = 0; i < MAX_DBG_RESET_REGS; i++) {
 		if (s_reset_regs_defs[i].exists[dev_data->chip_id]) {
+			u32 addr = BYTES_TO_DWORDS(s_reset_regs_defs[i].addr);
+
 			offset += qed_grc_dump_reg_entry(p_hwfn,
 							 p_ptt,
 							 dump_buf + offset,
 							 dump,
-							 BYTES_TO_DWORDS
-							 (s_reset_regs_defs
-							  [i].addr), 1);
+							 addr,
+							 1);
 			num_regs++;
 		}
 	}
@@ -2339,6 +2540,7 @@  static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
 				&attn_reg_arr[reg_idx];
 			u16 modes_buf_offset;
 			bool eval_mode;
+			u32 addr;
 
 			/* Check mode */
 			eval_mode = GET_FIELD(reg_data->mode.data,
@@ -2349,19 +2551,23 @@  static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
 			if (!eval_mode ||
 			    qed_is_mode_match(p_hwfn, &modes_buf_offset)) {
 				/* Mode match - read and dump registers */
-				offset += qed_grc_dump_reg_entry(p_hwfn,
-							p_ptt,
-							dump_buf + offset,
-							dump,
-							reg_data->mask_address,
-							1);
-				offset += qed_grc_dump_reg_entry(p_hwfn,
-						p_ptt,
-						dump_buf + offset,
-						dump,
-						GET_FIELD(reg_data->data,
-						    DBG_ATTN_REG_STS_ADDRESS),
-						1);
+				addr = reg_data->mask_address;
+				offset +=
+				    qed_grc_dump_reg_entry(p_hwfn,
+							   p_ptt,
+							   dump_buf + offset,
+							   dump,
+							   addr,
+							   1);
+				addr = GET_FIELD(reg_data->data,
+						 DBG_ATTN_REG_STS_ADDRESS);
+				offset +=
+				    qed_grc_dump_reg_entry(p_hwfn,
+							   p_ptt,
+							   dump_buf + offset,
+							   dump,
+							   addr,
+							   1);
 				num_reg_entries += 2;
 			}
 		}
@@ -2369,18 +2575,21 @@  static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
 
 	/* Write storm stall status registers */
 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+		u32 addr;
+
 		if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id] &&
 		    dump)
 			continue;
 
+		addr =
+		    BYTES_TO_DWORDS(s_storm_defs[storm_id].sem_fast_mem_addr +
+				    SEM_FAST_REG_STALLED);
 		offset += qed_grc_dump_reg_entry(p_hwfn,
-					p_ptt,
-					dump_buf + offset,
-					dump,
-					BYTES_TO_DWORDS(s_storm_defs[storm_id].
-							sem_fast_mem_addr +
-							SEM_FAST_REG_STALLED),
-					1);
+						 p_ptt,
+						 dump_buf + offset,
+						 dump,
+						 addr,
+						 1);
 		num_reg_entries++;
 	}
 
@@ -2392,11 +2601,47 @@  static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
 	return offset;
 }
 
+/* Dumps registers that can't be represented in the debug arrays */
+static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     u32 *dump_buf, bool dump)
+{
+	u32 offset = 0, addr;
+
+	offset += qed_grc_dump_regs_hdr(dump_buf,
+					dump, 2, "eng", -1, NULL, NULL);
+
+	/* Dump RDIF/TDIF_REG_DEBUG_ERROR_INFO (every 8th register is
+	 * skipped).
+	 */
+	addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
+	offset += qed_grc_dump_reg_entry_skip(p_hwfn,
+					      p_ptt,
+					      dump_buf + offset,
+					      dump,
+					      addr,
+					      RDIF_REG_DEBUG_ERROR_INFO_SIZE,
+					      7,
+					      1);
+	addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
+	offset +=
+	    qed_grc_dump_reg_entry_skip(p_hwfn,
+					p_ptt,
+					dump_buf + offset,
+					dump,
+					addr,
+					TDIF_REG_DEBUG_ERROR_INFO_SIZE,
+					7,
+					1);
+
+	return offset;
+}
+
 /* Dumps a GRC memory header (section and params).
  * The following parameters are dumped:
  * name - name is dumped only if it's not NULL.
- * addr - byte_addr is dumped only if name is NULL.
- * len - dword_len is always dumped.
+ * addr - addr is dumped only if name is NULL.
+ * len - len is always dumped.
  * width - bit_width is dumped if it's not zero.
  * packed - packed=1 is dumped if it's not false.
  * mem_group - mem_group is always dumped.
@@ -2408,8 +2653,8 @@  static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
 				u32 *dump_buf,
 				bool dump,
 				const char *name,
-				u32 byte_addr,
-				u32 dword_len,
+				u32 addr,
+				u32 len,
 				u32 bit_width,
 				bool packed,
 				const char *mem_group,
@@ -2419,7 +2664,7 @@  static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
 	u32 offset = 0;
 	char buf[64];
 
-	if (!dword_len)
+	if (!len)
 		DP_NOTICE(p_hwfn,
 			  "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
 	if (bit_width)
@@ -2446,20 +2691,21 @@  static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
 			DP_VERBOSE(p_hwfn,
 				   QED_MSG_DEBUG,
 				   "Dumping %d registers from %s...\n",
-				   dword_len, buf);
+				   len, buf);
 	} else {
 		/* Dump address */
 		offset += qed_dump_num_param(dump_buf + offset,
-					     dump, "addr", byte_addr);
-		if (dump && dword_len > 64)
+					     dump, "addr",
+					     DWORDS_TO_BYTES(addr));
+		if (dump && len > 64)
 			DP_VERBOSE(p_hwfn,
 				   QED_MSG_DEBUG,
 				   "Dumping %d registers from address 0x%x...\n",
-				   dword_len, byte_addr);
+				   len, (u32)DWORDS_TO_BYTES(addr));
 	}
 
 	/* Dump len */
-	offset += qed_dump_num_param(dump_buf + offset, dump, "len", dword_len);
+	offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
 
 	/* Dump bit width */
 	if (bit_width)
@@ -2492,8 +2738,8 @@  static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
 			    u32 *dump_buf,
 			    bool dump,
 			    const char *name,
-			    u32 byte_addr,
-			    u32 dword_len,
+			    u32 addr,
+			    u32 len,
 			    u32 bit_width,
 			    bool packed,
 			    const char *mem_group,
@@ -2505,21 +2751,14 @@  static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
 				       dump_buf + offset,
 				       dump,
 				       name,
-				       byte_addr,
-				       dword_len,
+				       addr,
+				       len,
 				       bit_width,
 				       packed,
 				       mem_group, is_storm, storm_letter);
-	if (dump) {
-		u32 i;
-
-		for (i = 0; i < dword_len;
-		     i++, byte_addr += BYTES_IN_DWORD, offset++)
-			*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, byte_addr);
-	} else {
-		offset += dword_len;
-	}
-
+	offset += qed_grc_dump_addr_range(p_hwfn,
+					  p_ptt,
+					  dump_buf + offset, dump, addr, len);
 	return offset;
 }
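
Note the unit change threaded through this hunk: qed_grc_dump_mem() and its header helper now take GRC addresses in dwords, matching qed_grc_dump_addr_range(), while the dumped "addr" param stays in bytes via DWORDS_TO_BYTES(). The conversions rely on helpers defined earlier in this file; assuming the usual definitions:

	#define BYTES_IN_DWORD		sizeof(u32)
	#define DWORDS_TO_BYTES(dwords)	((dwords) * BYTES_IN_DWORD)
	#define BYTES_TO_DWORDS(bytes)	((bytes) / BYTES_IN_DWORD)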
 
@@ -2575,25 +2814,41 @@  static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
 			if (qed_grc_is_mem_included(p_hwfn,
 					(enum block_id)cond_hdr->block_id,
 					mem_group_id)) {
-				u32 mem_byte_addr =
-					DWORDS_TO_BYTES(GET_FIELD(mem->dword0,
-							DBG_DUMP_MEM_ADDRESS));
+				u32 mem_addr = GET_FIELD(mem->dword0,
+							 DBG_DUMP_MEM_ADDRESS);
 				u32 mem_len = GET_FIELD(mem->dword1,
 							DBG_DUMP_MEM_LENGTH);
+				enum dbg_grc_params grc_param;
 				char storm_letter = 'a';
 				bool is_storm = false;
 
 				/* Update memory length for CCFC/TCFC memories
 				 * according to number of LCIDs/LTIDs.
 				 */
-				if (mem_group_id == MEM_GROUP_CONN_CFC_MEM)
+				if (mem_group_id == MEM_GROUP_CONN_CFC_MEM) {
+					if (mem_len % MAX_LCIDS != 0) {
+						DP_NOTICE(p_hwfn,
+							  "Invalid CCFC connection memory size\n");
+						return 0;
+					}
+
+					grc_param = DBG_GRC_PARAM_NUM_LCIDS;
 					mem_len = qed_grc_get_param(p_hwfn,
-							DBG_GRC_PARAM_NUM_LCIDS)
-							* (mem_len / MAX_LCIDS);
-				else if (mem_group_id == MEM_GROUP_TASK_CFC_MEM)
+								    grc_param) *
+						  (mem_len / MAX_LCIDS);
+				} else if (mem_group_id ==
+					   MEM_GROUP_TASK_CFC_MEM) {
+					if (mem_len % MAX_LTIDS != 0) {
+						DP_NOTICE(p_hwfn,
+							  "Invalid TCFC task memory size\n");
+						return 0;
+					}
+
+					grc_param = DBG_GRC_PARAM_NUM_LTIDS;
 					mem_len = qed_grc_get_param(p_hwfn,
-							DBG_GRC_PARAM_NUM_LTIDS)
-							* (mem_len / MAX_LTIDS);
+								    grc_param) *
+						  (mem_len / MAX_LTIDS);
+				}
 
 				/* If memory is associated with Storm, update
 				 * Storm details.
@@ -2610,7 +2865,7 @@  static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
 				/* Dump memory */
 				offset += qed_grc_dump_mem(p_hwfn, p_ptt,
 						dump_buf + offset, dump, NULL,
-						mem_byte_addr, mem_len, 0,
+						mem_addr, mem_len, 0,
 						false,
 						s_mem_group_names[mem_group_id],
 						is_storm, storm_letter);
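
The CCFC/TCFC clamping above scales the dumped memory by the requested number of LCIDs/LTIDs, and the new divisibility checks catch HSI mismatches instead of silently truncating. A worked example (numbers are illustrative only):

	/* MAX_LCIDS = 320, mem_len = 2560 dwords
	 *   => 2560 / 320 = 8 dwords per connection
	 * DBG_GRC_PARAM_NUM_LCIDS = 64
	 *   => mem_len = 64 * 8 = 512 dwords actually dumped
	 * If mem_len were not a multiple of MAX_LCIDS, the per-connection
	 * stride would be ambiguous - hence the DP_NOTICE and abort.
	 */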
@@ -2799,29 +3054,31 @@  static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn,
 	u32 offset = 0;
 
 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
-		if (qed_grc_is_storm_included(p_hwfn,
-					      (enum dbg_storms)storm_id)) {
-			for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
-				u32 addr =
-				    s_storm_defs[storm_id].sem_fast_mem_addr +
-				    SEM_FAST_REG_STORM_REG_FILE +
-				    DWORDS_TO_BYTES(IOR_SET_OFFSET(set_id));
+		struct storm_defs *storm = &s_storm_defs[storm_id];
 
-				buf[strlen(buf) - 1] = '0' + set_id;
-				offset += qed_grc_dump_mem(p_hwfn,
-							   p_ptt,
-							   dump_buf + offset,
-							   dump,
-							   buf,
-							   addr,
-							   IORS_PER_SET,
-							   32,
-							   false,
-							   "ior",
-							   true,
-							   s_storm_defs
-							   [storm_id].letter);
-			}
+		if (!qed_grc_is_storm_included(p_hwfn,
+					       (enum dbg_storms)storm_id))
+			continue;
+
+		for (set_id = 0; set_id < NUM_IOR_SETS; set_id++) {
+			u32 dwords, addr;
+
+			dwords = storm->sem_fast_mem_addr +
+				 SEM_FAST_REG_STORM_REG_FILE;
+			addr = BYTES_TO_DWORDS(dwords) + IOR_SET_OFFSET(set_id);
+			buf[strlen(buf) - 1] = '0' + set_id;
+			offset += qed_grc_dump_mem(p_hwfn,
+						   p_ptt,
+						   dump_buf + offset,
+						   dump,
+						   buf,
+						   addr,
+						   IORS_PER_SET,
+						   32,
+						   false,
+						   "ior",
+						   true,
+						   storm->letter);
 		}
 	}
 
@@ -2990,34 +3247,39 @@  static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
 		struct rss_mem_defs *rss_defs = &s_rss_mem_defs[rss_mem_id];
 		u32 num_entries = rss_defs->num_entries[dev_data->chip_id];
 		u32 entry_width = rss_defs->entry_width[dev_data->chip_id];
-		u32 total_size = (num_entries * entry_width) / 32;
+		u32 total_dwords = (num_entries * entry_width) / 32;
+		u32 size = RSS_REG_RSS_RAM_DATA_SIZE;
 		bool packed = (entry_width == 16);
-		u32 addr = rss_defs->addr;
-		u32 i, j;
+		u32 rss_addr = rss_defs->addr;
+		u32 i, addr;
 
 		offset += qed_grc_dump_mem_hdr(p_hwfn,
 					       dump_buf + offset,
 					       dump,
 					       rss_defs->mem_name,
-					       addr,
-					       total_size,
+					       0,
+					       total_dwords,
 					       entry_width,
 					       packed,
 					       rss_defs->type_name, false, 0);
 
 		if (!dump) {
-			offset += total_size;
+			offset += total_dwords;
 			continue;
 		}
 
 		/* Dump RSS data */
-		for (i = 0; i < BYTES_TO_DWORDS(total_size); i++, addr++) {
-			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, addr);
-			for (j = 0; j < BYTES_IN_DWORD; j++, offset++)
-				*(dump_buf + offset) =
-					qed_rd(p_hwfn, p_ptt,
-					       RSS_REG_RSS_RAM_DATA +
-					       DWORDS_TO_BYTES(j));
+		for (i = 0; i < total_dwords;
+		     i += RSS_REG_RSS_RAM_DATA_SIZE, rss_addr++) {
+			addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+			qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+			offset += qed_grc_dump_addr_range(p_hwfn,
+							  p_ptt,
+							  dump_buf + offset,
+							  dump,
+							  addr,
+							  size);
 		}
 	}
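
RSS memories are read through an indirect window, which is why the rewritten loop advances i by the data-window size while rss_addr advances by one line:

	/* Indirect RAM read pattern (the Big-RAM dump below uses the same):
	 *
	 *   qed_wr(ADDR_REG, line_index);                 select RAM line
	 *   qed_grc_dump_addr_range(..., DATA_REG, N);    read N-dword window
	 *
	 * Buffer offset advances by the window size per line; the line
	 * index advances by one.
	 */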
 
@@ -3030,19 +3292,19 @@  static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 				u32 *dump_buf, bool dump, u8 big_ram_id)
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+	u32 total_blocks, ram_size, offset = 0, i;
 	char mem_name[12] = "???_BIG_RAM";
 	char type_name[8] = "???_RAM";
-	u32 ram_size, total_blocks;
-	u32 offset = 0, i, j;
+	struct big_ram_defs *big_ram;
 
-	total_blocks =
-		s_big_ram_defs[big_ram_id].num_of_blocks[dev_data->chip_id];
+	big_ram = &s_big_ram_defs[big_ram_id];
+	total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
 	ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;
 
-	strncpy(type_name, s_big_ram_defs[big_ram_id].instance_name,
-		strlen(s_big_ram_defs[big_ram_id].instance_name));
-	strncpy(mem_name, s_big_ram_defs[big_ram_id].instance_name,
-		strlen(s_big_ram_defs[big_ram_id].instance_name));
+	strncpy(type_name, big_ram->instance_name,
+		strlen(big_ram->instance_name));
+	strncpy(mem_name, big_ram->instance_name,
+		strlen(big_ram->instance_name));
 
 	/* Dump memory header */
 	offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -3059,13 +3321,17 @@  static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
 
 	/* Read and dump Big RAM data */
 	for (i = 0; i < total_blocks / 2; i++) {
-		qed_wr(p_hwfn, p_ptt, s_big_ram_defs[big_ram_id].addr_reg_addr,
-		       i);
-		for (j = 0; j < 2 * BIG_RAM_BLOCK_SIZE_DWORDS; j++, offset++)
-			*(dump_buf + offset) = qed_rd(p_hwfn, p_ptt,
-						s_big_ram_defs[big_ram_id].
-							data_reg_addr +
-						DWORDS_TO_BYTES(j));
+		u32 addr, len;
+
+		qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
+		addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
+		len = 2 * BIG_RAM_BLOCK_SIZE_DWORDS;
+		offset += qed_grc_dump_addr_range(p_hwfn,
+						  p_ptt,
+						  dump_buf + offset,
+						  dump,
+						  addr,
+						  len);
 	}
 
 	return offset;
@@ -3075,11 +3341,11 @@  static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 			    struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
 {
 	bool block_enable[MAX_BLOCK_ID] = { 0 };
+	u32 offset = 0, addr;
 	bool halted = false;
-	u32 offset = 0;
 
 	/* Halt MCP */
-	if (dump) {
+	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
 		if (!halted)
 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -3091,7 +3357,7 @@  static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 				   dump_buf + offset,
 				   dump,
 				   NULL,
-				   MCP_REG_SCRATCH,
+				   BYTES_TO_DWORDS(MCP_REG_SCRATCH),
 				   MCP_REG_SCRATCH_SIZE,
 				   0, false, "MCP", false, 0);
 
@@ -3101,7 +3367,7 @@  static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 				   dump_buf + offset,
 				   dump,
 				   NULL,
-				   MCP_REG_CPU_REG_FILE,
+				   BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
 				   MCP_REG_CPU_REG_FILE_SIZE,
 				   0, false, "MCP", false, 0);
 
@@ -3115,12 +3381,13 @@  static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
 	/* Dump required non-MCP registers */
 	offset += qed_grc_dump_regs_hdr(dump_buf + offset,
 					dump, 1, "eng", -1, "block", "MCP");
+	addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
 	offset += qed_grc_dump_reg_entry(p_hwfn,
 					 p_ptt,
 					 dump_buf + offset,
 					 dump,
-					 BYTES_TO_DWORDS
-					 (MISC_REG_SHARED_MEM_ADDR), 1);
+					 addr,
+					 1);
 
 	/* Release MCP */
 	if (halted && qed_mcp_resume(p_hwfn, p_ptt))
@@ -3212,7 +3479,7 @@  static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 {
 	u32 block_dwords = NUM_DBG_BUS_LINES * STATIC_DEBUG_LINE_DWORDS;
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
-	u32 offset = 0, block_id, line_id, addr, i;
+	u32 offset = 0, block_id, line_id;
 	struct block_defs *p_block_defs;
 
 	if (dump) {
@@ -3255,6 +3522,8 @@  static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 		if (dump && !dev_data->block_in_reset[block_id]) {
 			u8 dbg_client_id =
 				p_block_defs->dbg_client_id[dev_data->chip_id];
+			u32 addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+			u32 len = STATIC_DEBUG_LINE_DWORDS;
 
 			/* Enable block's client */
 			qed_bus_enable_clients(p_hwfn, p_ptt,
@@ -3270,11 +3539,13 @@  static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
 						    0xf, 0, 0, 0);
 
 				/* Read debug line info */
-				for (i = 0, addr = DBG_REG_CALENDAR_OUT_DATA;
-				     i < STATIC_DEBUG_LINE_DWORDS;
-				     i++, offset++, addr += BYTES_IN_DWORD)
-					dump_buf[offset] = qed_rd(p_hwfn, p_ptt,
-								  addr);
+				offset +=
+				    qed_grc_dump_addr_range(p_hwfn,
+							    p_ptt,
+							    dump_buf + offset,
+							    dump,
+							    addr,
+							    len);
 			}
 
 			/* Disable block's client and debug output */
@@ -3311,14 +3582,8 @@  static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
 	u8 i, port_mode = 0;
 	u32 offset = 0;
 
-	/* Check if emulation platform */
 	*num_dumped_dwords = 0;
 
-	/* Fill GRC parameters that were not set by the user with their default
-	 * value.
-	 */
-	qed_dbg_grc_set_params_default(p_hwfn);
-
 	/* Find port mode */
 	if (dump) {
 		switch (qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE)) {
@@ -3370,15 +3635,14 @@  static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Disable all parities using MFW command */
-	if (dump) {
+	if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
 		parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
 		if (!parities_masked) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to mask parities using MFW\n");
 			if (qed_grc_get_param
 			    (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
 				return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
-			else
-				DP_NOTICE(p_hwfn,
-					  "Failed to mask parities using MFW\n");
 		}
 	}
 
@@ -3409,6 +3673,11 @@  static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
 						 offset,
 						 dump,
 						 block_enable, NULL, NULL);
+
+		/* Dump special registers */
+		offset += qed_grc_dump_special_regs(p_hwfn,
+						    p_ptt,
+						    dump_buf + offset, dump);
 	}
 
 	/* Dump memories */
@@ -3583,9 +3852,9 @@  static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
 			}
 
 			if (mode_match) {
-				u32 grc_addr =
-					DWORDS_TO_BYTES(GET_FIELD(reg->data,
-						DBG_IDLE_CHK_INFO_REG_ADDRESS));
+				u32 addr =
+				    GET_FIELD(reg->data,
+					      DBG_IDLE_CHK_INFO_REG_ADDRESS);
 
 				/* Write register header */
 				struct dbg_idle_chk_result_reg_hdr *reg_hdr =
@@ -3597,16 +3866,19 @@  static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
 				memset(reg_hdr, 0, sizeof(*reg_hdr));
 				reg_hdr->size = reg->size;
 				SET_FIELD(reg_hdr->data,
-					DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
-					rule->num_cond_regs + reg_id);
+					  DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+					  rule->num_cond_regs + reg_id);
 
 				/* Write register values */
-				for (i = 0; i < reg->size;
-				     i++, offset++, grc_addr += 4)
-					dump_buf[offset] =
-						qed_rd(p_hwfn, p_ptt, grc_addr);
-				}
+				offset +=
+				    qed_grc_dump_addr_range(p_hwfn,
+							    p_ptt,
+							    dump_buf + offset,
+							    dump,
+							    addr,
+							    reg->size);
 			}
+		}
 	}
 
 	return offset;
@@ -3621,7 +3893,7 @@  static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
 	u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
-	u32 i, j, offset = 0;
+	u32 i, offset = 0;
 	u16 entry_id;
 	u8 reg_id;
 
@@ -3664,73 +3936,83 @@  static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
 		if (!check_rule && dump)
 			continue;
 
+		if (!dump) {
+			u32 entry_dump_size =
+				qed_idle_chk_dump_failure(p_hwfn,
+							  p_ptt,
+							  dump_buf + offset,
+							  false,
+							  rule->rule_id,
+							  rule,
+							  0,
+							  NULL);
+
+			offset += num_reg_entries * entry_dump_size;
+			(*num_failing_rules) += num_reg_entries;
+			continue;
+		}
+
 		/* Go over all register entries (number of entries is the same
 		 * for all condition registers).
 		 */
 		for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
 			/* Read current entry of all condition registers */
-			if (dump) {
-				u32 next_reg_offset = 0;
-
-				for (reg_id = 0;
-				     reg_id < rule->num_cond_regs;
-				     reg_id++) {
-					const struct dbg_idle_chk_cond_reg
-						*reg = &cond_regs[reg_id];
-
-					/* Find GRC address (if it's a memory,
-					 * the address of the specific entry is
-					 * calculated).
-					 */
-					u32 grc_addr =
-					   DWORDS_TO_BYTES(
-						GET_FIELD(reg->data,
-						    DBG_IDLE_CHK_COND_REG_ADDRESS));
-
-					if (reg->num_entries > 1 ||
-					    reg->start_entry > 0) {
-						u32 padded_entry_size =
-							reg->entry_size > 1 ?
-							roundup_pow_of_two
-							(reg->entry_size) : 1;
-
-						grc_addr +=
-							DWORDS_TO_BYTES(
-								(reg->start_entry +
-								entry_id)
-								* padded_entry_size);
-					}
+			u32 next_reg_offset = 0;
 
-					/* Read registers */
-					if (next_reg_offset + reg->entry_size >=
-					    IDLE_CHK_MAX_ENTRIES_SIZE) {
-						DP_NOTICE(p_hwfn,
-							  "idle check registers entry is too large\n");
-						return 0;
-					}
+			for (reg_id = 0; reg_id < rule->num_cond_regs;
+			     reg_id++) {
+				const struct dbg_idle_chk_cond_reg *reg =
+					&cond_regs[reg_id];
 
-					for (j = 0; j < reg->entry_size;
-					     j++, next_reg_offset++,
-					     grc_addr += 4)
-					     cond_reg_values[next_reg_offset] =
-						qed_rd(p_hwfn, p_ptt, grc_addr);
+				/* Find GRC address (if it's a memory, the
+				 * address of the specific entry is calculated).
+				 */
+				u32 addr =
+				    GET_FIELD(reg->data,
+					      DBG_IDLE_CHK_COND_REG_ADDRESS);
+
+				if (reg->num_entries > 1 ||
+				    reg->start_entry > 0) {
+					u32 padded_entry_size =
+					   reg->entry_size > 1 ?
+					   roundup_pow_of_two(reg->entry_size) :
+					   1;
+
+					addr += (reg->start_entry + entry_id) *
+						padded_entry_size;
 				}
+
+				/* Read registers */
+				if (next_reg_offset + reg->entry_size >=
+				    IDLE_CHK_MAX_ENTRIES_SIZE) {
+					DP_NOTICE(p_hwfn,
+						  "idle check registers entry is too large\n");
+					return 0;
+				}
+
+				next_reg_offset +=
+				    qed_grc_dump_addr_range(p_hwfn,
+							    p_ptt,
+							    cond_reg_values +
+							    next_reg_offset,
+							    dump, addr,
+							    reg->entry_size);
 			}
 
 			/* Call rule's condition function - a return value of
 			 * true indicates failure.
 			 */
 			if ((*cond_arr[rule->cond_id])(cond_reg_values,
-						       imm_values) || !dump) {
+						       imm_values)) {
 				offset +=
-					qed_idle_chk_dump_failure(p_hwfn,
-							p_ptt,
-							dump_buf + offset,
-							dump,
-							rule->rule_id,
-							rule,
-							entry_id,
-							cond_reg_values);
+				    qed_idle_chk_dump_failure(p_hwfn,
+							      p_ptt,
+							      dump_buf + offset,
+							      dump,
+							      rule->rule_id,
+							      rule,
+							      entry_id,
+							      cond_reg_values);
 				(*num_failing_rules)++;
 				break;
 			}
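
The restructured loop separates sizing from dumping: with dump == false the conditions are no longer evaluated at all and the worst case is assumed, i.e. every entry of every rule fails. A sketch of the accounting:

	/* sizing pass (dump == false), per rule:
	 *
	 *   entry_dump_size = qed_idle_chk_dump_failure(..., false, ...);
	 *   offset += num_reg_entries * entry_dump_size;
	 *
	 * The reported buffer size is therefore an upper bound; the real
	 * dump pass writes only entries whose condition function fires.
	 */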
@@ -3818,13 +4100,18 @@  static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
 	struct mcp_file_att file_att;
 
 	/* Call NVRAM get file command */
-	if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_GET_FILE_ATT,
-			       image_type, &ret_mcp_resp, &ret_mcp_param,
-			       &ret_txn_size, (u32 *)&file_att) != 0)
-		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+	int nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+					    p_ptt,
+					    DRV_MSG_CODE_NVM_GET_FILE_ATT,
+					    image_type,
+					    &ret_mcp_resp,
+					    &ret_mcp_param,
+					    &ret_txn_size,
+					    (u32 *)&file_att);
 
 	/* Check response */
-	if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+	if (nvm_result ||
+	    (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
 		return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
 
 	/* Update return values */
@@ -3944,7 +4231,6 @@  static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
 	u32 running_mfw_addr =
 		MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
 		QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
-	enum dbg_status status;
 	u32 nvram_image_type;
 
 	*running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
@@ -3955,30 +4241,12 @@  static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
 	nvram_image_type =
 	    (*running_bundle_id ==
 	     DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
-	status = qed_find_nvram_image(p_hwfn,
-				      p_ptt,
-				      nvram_image_type,
-				      trace_meta_offset_bytes,
-				      trace_meta_size_bytes);
-
-	return status;
-}
-
-/* Reads the MCP Trace data from the specified GRC address into the specified
- * buffer.
- */
-static void qed_mcp_trace_read_data(struct qed_hwfn *p_hwfn,
-				    struct qed_ptt *p_ptt,
-				    u32 grc_addr, u32 size_in_dwords, u32 *buf)
-{
-	u32 i;
 
-	DP_VERBOSE(p_hwfn,
-		   QED_MSG_DEBUG,
-		   "mcp_trace_read_data: reading trace data of size %d dwords from GRC address 0x%x\n",
-		   size_in_dwords, grc_addr);
-	for (i = 0; i < size_in_dwords; i++, grc_addr += BYTES_IN_DWORD)
-		buf[i] = qed_rd(p_hwfn, p_ptt, grc_addr);
+	return qed_find_nvram_image(p_hwfn,
+				    p_ptt,
+				    nvram_image_type,
+				    trace_meta_offset_bytes,
+				    trace_meta_size_bytes);
 }
 
 /* Reads the MCP Trace meta data (from NVRAM or buffer) into the specified
@@ -4037,8 +4305,11 @@  static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 	u32 trace_meta_size_dwords, running_bundle_id, offset = 0;
 	u32 trace_meta_offset_bytes, trace_meta_size_bytes;
 	enum dbg_status status;
+	bool mcp_access;
 	int halted = 0;
 
+	mcp_access = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
 	*num_dumped_dwords = 0;
 
 	/* Get trace data info */
@@ -4060,7 +4331,7 @@  static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 	 * consistent if halt fails, MCP trace is taken anyway, with a small
 	 * risk that it may be corrupt.
 	 */
-	if (dump) {
+	if (dump && mcp_access) {
 		halted = !qed_mcp_halt(p_hwfn, p_ptt);
 		if (!halted)
 			DP_NOTICE(p_hwfn, "MCP halt failed!\n");
@@ -4078,13 +4349,12 @@  static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 				     dump, "size", trace_data_size_dwords);
 
 	/* Read trace data from scratchpad into dump buffer */
-	if (dump)
-		qed_mcp_trace_read_data(p_hwfn,
-					p_ptt,
-					trace_data_grc_addr,
-					trace_data_size_dwords,
-					dump_buf + offset);
-	offset += trace_data_size_dwords;
+	offset += qed_grc_dump_addr_range(p_hwfn,
+					  p_ptt,
+					  dump_buf + offset,
+					  dump,
+					  BYTES_TO_DWORDS(trace_data_grc_addr),
+					  trace_data_size_dwords);
 
 	/* Resume MCP (only if halt succeeded) */
 	if (halted && qed_mcp_resume(p_hwfn, p_ptt) != 0)
@@ -4095,38 +4365,38 @@  static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 				       dump, "mcp_trace_meta", 1);
 
 	/* Read trace meta info */
-	status = qed_mcp_trace_get_meta_info(p_hwfn,
-					     p_ptt,
-					     trace_data_size_bytes,
-					     &running_bundle_id,
-					     &trace_meta_offset_bytes,
-					     &trace_meta_size_bytes);
-	if (status != DBG_STATUS_OK)
-		return status;
+	if (mcp_access) {
+		status = qed_mcp_trace_get_meta_info(p_hwfn,
+						     p_ptt,
+						     trace_data_size_bytes,
+						     &running_bundle_id,
+						     &trace_meta_offset_bytes,
+						     &trace_meta_size_bytes);
+		if (status == DBG_STATUS_OK)
+			trace_meta_size_dwords =
+				BYTES_TO_DWORDS(trace_meta_size_bytes);
+	}
 
-	/* Dump trace meta size param (trace_meta_size_bytes is always
-	 * dword-aligned).
-	 */
-	trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
-	offset += qed_dump_num_param(dump_buf + offset,	dump, "size",
-				     trace_meta_size_dwords);
+	/* Dump trace meta size param */
+	offset += qed_dump_num_param(dump_buf + offset,
+				     dump, "size", trace_meta_size_dwords);
 
 	/* Read trace meta image into dump buffer */
-	if (dump) {
+	if (dump && trace_meta_size_dwords)
 		status = qed_mcp_trace_read_meta(p_hwfn,
-						p_ptt,
-						trace_meta_offset_bytes,
-						trace_meta_size_bytes,
-						dump_buf + offset);
-		if (status != DBG_STATUS_OK)
-			return status;
-	}
-
-	offset += trace_meta_size_dwords;
+						 p_ptt,
+						 trace_meta_offset_bytes,
+						 trace_meta_size_bytes,
+						 dump_buf + offset);
+	if (status == DBG_STATUS_OK)
+		offset += trace_meta_size_dwords;
 
 	*num_dumped_dwords = offset;
 
-	return DBG_STATUS_OK;
+	/* If there is no MCP access, indicate that the dump doesn't contain
+	 * the meta data from NVRAM.
+	 */
+	return mcp_access ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
 }
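
With DBG_GRC_PARAM_NO_MCP set, the trace data is still dumped from the scratchpad, but the MCP is not halted and the NVRAM meta image is skipped; DBG_STATUS_NVRAM_GET_IMAGE_FAILED then acts as a soft error. The callers changed at the bottom of this file tolerate it accordingly; a sketch mirroring the qed_dbg_dump() hunk:

	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
						       &buf_size_dwords);
	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
		return rc;	/* hard failure */
	/* soft failure: trace data is usable, meta section has size 0 */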
 
 /* Dump GRC FIFO */
@@ -4311,9 +4581,10 @@  static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 			       struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
 {
 	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+	struct fw_asserts_ram_section *asserts;
 	char storm_letter_str[2] = "?";
 	struct fw_info fw_info;
-	u32 offset = 0, i;
+	u32 offset = 0;
 	u8 storm_id;
 
 	/* Dump global params */
@@ -4323,8 +4594,8 @@  static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 	offset += qed_dump_str_param(dump_buf + offset,
 				     dump, "dump-type", "fw-asserts");
 	for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
-		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx,
-			last_list_idx, element_addr;
+		u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+		u32 last_list_idx, addr;
 
 		if (dev_data->block_in_reset[s_storm_defs[storm_id].block_id])
 			continue;
@@ -4332,6 +4603,8 @@  static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 		/* Read FW info for the current Storm */
 		qed_read_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
 
+		asserts = &fw_info.fw_asserts_section;
+
 		/* Dump FW Asserts section header and params */
 		storm_letter_str[0] = s_storm_defs[storm_id].letter;
 		offset += qed_dump_section_hdr(dump_buf + offset, dump,
@@ -4339,12 +4612,10 @@  static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 		offset += qed_dump_str_param(dump_buf + offset, dump, "storm",
 					     storm_letter_str);
 		offset += qed_dump_num_param(dump_buf + offset, dump, "size",
-					     fw_info.fw_asserts_section.
-					     list_element_dword_size);
+					     asserts->list_element_dword_size);
 
 		if (!dump) {
-			offset += fw_info.fw_asserts_section.
-				  list_element_dword_size;
+			offset += asserts->list_element_dword_size;
 			continue;
 		}
 
@@ -4352,28 +4623,22 @@  static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 		fw_asserts_section_addr =
 			s_storm_defs[storm_id].sem_fast_mem_addr +
 			SEM_FAST_REG_INT_RAM +
-			RAM_LINES_TO_BYTES(fw_info.fw_asserts_section.
-					   section_ram_line_offset);
+			RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
 		next_list_idx_addr =
 			fw_asserts_section_addr +
-			DWORDS_TO_BYTES(fw_info.fw_asserts_section.
-					list_next_index_dword_offset);
+			DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
 		next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
 		last_list_idx = (next_list_idx > 0
 				 ? next_list_idx
-				 : fw_info.fw_asserts_section.list_num_elements)
-				- 1;
-		element_addr =
-			fw_asserts_section_addr +
-			DWORDS_TO_BYTES(fw_info.fw_asserts_section.
-					list_dword_offset) +
-			last_list_idx *
-			DWORDS_TO_BYTES(fw_info.fw_asserts_section.
-					list_element_dword_size);
-		for (i = 0;
-		     i < fw_info.fw_asserts_section.list_element_dword_size;
-		     i++, offset++, element_addr += BYTES_IN_DWORD)
-			dump_buf[offset] = qed_rd(p_hwfn, p_ptt, element_addr);
+				 : asserts->list_num_elements) - 1;
+		addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
+		       asserts->list_dword_offset +
+		       last_list_idx * asserts->list_element_dword_size;
+		offset +=
+		    qed_grc_dump_addr_range(p_hwfn, p_ptt,
+					    dump_buf + offset,
+					    dump, addr,
+					    asserts->list_element_dword_size);
 	}
 
 	/* Dump last section */
@@ -4386,13 +4651,10 @@  static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
 {
 	/* Convert binary data to debug arrays */
-	u32 num_of_buffers = *(u32 *)bin_ptr;
-	struct bin_buffer_hdr *buf_array;
+	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
 	u8 buf_id;
 
-	buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
-	for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
 		s_dbg_arrays[buf_id].ptr =
 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
 		s_dbg_arrays[buf_id].size_in_dwords =
@@ -4402,6 +4664,17 @@  enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr)
 	return DBG_STATUS_OK;
 }
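
The binary-blob parsing change is subtle: the firmware file no longer leads with a buffer count; a fixed-size array of bin_buffer_hdr entries, one per MAX_BIN_DBG_BUFFER_TYPE, now sits at offset 0, each pointing into the blob. Assuming the usual HSI definition of the header:

	struct bin_buffer_hdr {
		u32 offset;	/* byte offset of the buffer in the blob */
		u32 length;	/* byte length of the buffer */
	};

	/* old layout: [num_of_buffers][hdr 0..n-1][data...]
	 * new layout: [hdr 0..MAX_BIN_DBG_BUFFER_TYPE-1][data...]
	 */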
 
+/* Assign default GRC param values */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+	struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+	u32 i;
+
+	for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+		dev_data->grc.param_val[i] =
+		    s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
 enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
 					      struct qed_ptt *p_ptt,
 					      u32 *buf_size)
@@ -4441,8 +4714,9 @@  enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
 	/* GRC Dump */
 	status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
 
-	/* Clear all GRC params */
-	qed_dbg_grc_clear_params(p_hwfn);
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
 	return status;
 }
 
@@ -4495,6 +4769,10 @@  enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
 
 	/* Idle Check Dump */
 	*num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
 	return DBG_STATUS_OK;
 }
 
@@ -4519,11 +4797,15 @@  enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 	u32 needed_buf_size_in_dwords;
 	enum dbg_status status;
 
-	status = qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
+	/* validate buffer size */
+	status =
+	    qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, p_ptt,
 						&needed_buf_size_in_dwords);
 
-	if (status != DBG_STATUS_OK)
+	if (status != DBG_STATUS_OK &&
+	    status != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
 		return status;
+
 	if (buf_size_in_dwords < needed_buf_size_in_dwords)
 		return DBG_STATUS_DUMP_BUF_TOO_SMALL;
 
@@ -4531,8 +4813,13 @@  enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
 
 	/* Perform dump */
-	return qed_mcp_trace_dump(p_hwfn,
-				  p_ptt, dump_buf, true, num_dumped_dwords);
+	status = qed_mcp_trace_dump(p_hwfn,
+				    p_ptt, dump_buf, true, num_dumped_dwords);
+
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
+	return status;
 }
 
 enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4567,8 +4854,14 @@  enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 
 	/* Update reset state */
 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
-	return qed_reg_fifo_dump(p_hwfn,
-				 p_ptt, dump_buf, true, num_dumped_dwords);
+
+	status = qed_reg_fifo_dump(p_hwfn,
+				   p_ptt, dump_buf, true, num_dumped_dwords);
+
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
+	return status;
 }
 
 enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -4603,8 +4896,13 @@  enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
 
 	/* Update reset state */
 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
-	return qed_igu_fifo_dump(p_hwfn,
-				 p_ptt, dump_buf, true, num_dumped_dwords);
+
+	status = qed_igu_fifo_dump(p_hwfn,
+				   p_ptt, dump_buf, true, num_dumped_dwords);
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
+	return status;
 }
 
 enum dbg_status
@@ -4641,9 +4939,16 @@  enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
 
 	/* Update reset state */
 	qed_update_blocks_reset_state(p_hwfn, p_ptt);
-	return qed_protection_override_dump(p_hwfn,
-					    p_ptt,
-					    dump_buf, true, num_dumped_dwords);
+
+	status = qed_protection_override_dump(p_hwfn,
+					      p_ptt,
+					      dump_buf,
+					      true, num_dumped_dwords);
+
+	/* Revert GRC params to their default */
+	qed_dbg_grc_set_params_default(p_hwfn);
+
+	return status;
 }
 
 enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
@@ -5045,13 +5350,10 @@  struct igu_fifo_addr_data {
 enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr)
 {
 	/* Convert binary data to debug arrays */
-	u32 num_of_buffers = *(u32 *)bin_ptr;
-	struct bin_buffer_hdr *buf_array;
+	struct bin_buffer_hdr *buf_array = (struct bin_buffer_hdr *)bin_ptr;
 	u8 buf_id;
 
-	buf_array = (struct bin_buffer_hdr *)((u32 *)bin_ptr + 1);
-
-	for (buf_id = 0; buf_id < num_of_buffers; buf_id++) {
+	for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) {
 		s_dbg_arrays[buf_id].ptr =
 		    (u32 *)(bin_ptr + buf_array[buf_id].offset);
 		s_dbg_arrays[buf_id].size_in_dwords =
@@ -5874,16 +6176,16 @@  static enum dbg_status qed_parse_reg_fifo_dump(struct qed_hwfn *p_hwfn,
 		results_offset +=
 		    sprintf(qed_get_buf_ptr(results_buf,
 					    results_offset),
-			    "raw: 0x%016llx, address: 0x%07llx, access: %-5s, pf: %2lld, vf: %s, port: %lld, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
+			    "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, errors: ",
 			    elements[i].data,
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
 				      REG_FIFO_ELEMENT_ADDRESS) *
 				      REG_FIFO_ELEMENT_ADDR_FACTOR,
 				      s_access_strs[GET_FIELD(elements[i].data,
 						    REG_FIFO_ELEMENT_ACCESS)],
-			    GET_FIELD(elements[i].data,
-				      REG_FIFO_ELEMENT_PF), vf_str,
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
+					   REG_FIFO_ELEMENT_PF), vf_str,
+			    (u32)GET_FIELD(elements[i].data,
 				      REG_FIFO_ELEMENT_PORT),
 				      s_privilege_strs[GET_FIELD(elements[i].
 				      data,
@@ -6189,13 +6491,13 @@  enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
 		results_offset +=
 		    sprintf(qed_get_buf_ptr(results_buf,
 					    results_offset),
-			    "window %2d, address: 0x%07x, size: %7lld regs, read: %lld, write: %lld, read protection: %-12s, write protection: %-12s\n",
+			    "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
 			    i, address,
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
 				      PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
 				      PROTECTION_OVERRIDE_ELEMENT_READ),
-			    GET_FIELD(elements[i].data,
+			    (u32)GET_FIELD(elements[i].data,
 				      PROTECTION_OVERRIDE_ELEMENT_WRITE),
 			    s_protection_strs[GET_FIELD(elements[i].data,
 				PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
@@ -6508,7 +6810,7 @@  static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
 	 */
 	rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
 						       &buf_size_dwords);
-	if (rc != DBG_STATUS_OK)
+	if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
 		return rc;
 	feature->buf_size = buf_size_dwords * sizeof(u32);
 	feature->dump_buf = vmalloc(feature->buf_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 37c2bfb..91e6b94 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -574,6 +574,7 @@  enum core_event_opcode {
 	CORE_EVENT_TX_QUEUE_STOP,
 	CORE_EVENT_RX_QUEUE_START,
 	CORE_EVENT_RX_QUEUE_STOP,
+	CORE_EVENT_RX_QUEUE_FLUSH,
 	MAX_CORE_EVENT_OPCODE
 };
 
@@ -625,6 +626,7 @@  enum core_ramrod_cmd_id {
 	CORE_RAMROD_TX_QUEUE_START,
 	CORE_RAMROD_RX_QUEUE_STOP,
 	CORE_RAMROD_TX_QUEUE_STOP,
+	CORE_RAMROD_RX_QUEUE_FLUSH,
 	MAX_CORE_RAMROD_CMD_ID
 };
 
@@ -698,7 +700,8 @@  struct core_rx_slow_path_cqe {
 	u8 type;
 	u8 ramrod_cmd_id;
 	__le16 echo;
-	__le32 reserved1[7];
+	struct core_rx_cqe_opaque_data opaque_data;
+	__le32 reserved1[5];
 };
 
 union core_rx_cqe_union {
@@ -735,45 +738,46 @@  struct core_rx_stop_ramrod_data {
 	__le16 reserved2[2];
 };
 
-struct core_tx_bd_flags {
-	u8 as_bitfield;
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK	0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT	0
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK	0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT	1
-#define CORE_TX_BD_FLAGS_START_BD_MASK	0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT	2
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK	0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT	3
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK	0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT	4
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK	0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT	5
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK	0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT	6
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK	0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+struct core_tx_bd_data {
+	__le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK	0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT     0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK	0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT      1
+#define CORE_TX_BD_DATA_START_BD_MASK	0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT            2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK	0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT             3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK	0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT             4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK	0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT            5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK	0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT         6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK	0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK	0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT                8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK	0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT           12
+#define CORE_TX_BD_DATA_IP_LEN_MASK	0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT              13
+#define CORE_TX_BD_DATA_RESERVED0_MASK            0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT           14
 };
 
 struct core_tx_bd {
 	struct regpair addr;
 	__le16 nbytes;
 	__le16 nw_vlan_or_lb_echo;
-	u8 bitfield0;
-#define CORE_TX_BD_NBDS_MASK	0xF
-#define CORE_TX_BD_NBDS_SHIFT	0
-#define CORE_TX_BD_ROCE_FLAV_MASK	0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT	4
-#define CORE_TX_BD_RESERVED0_MASK	0x7
-#define CORE_TX_BD_RESERVED0_SHIFT	5
-	struct core_tx_bd_flags bd_flags;
+	struct core_tx_bd_data bd_data;
 	__le16 bitfield1;
 #define CORE_TX_BD_L4_HDR_OFFSET_W_MASK	0x3FFF
 #define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
 #define CORE_TX_BD_TX_DST_MASK	0x1
 #define CORE_TX_BD_TX_DST_SHIFT	14
-#define CORE_TX_BD_RESERVED1_MASK	0x1
-#define CORE_TX_BD_RESERVED1_SHIFT	15
+#define CORE_TX_BD_RESERVED_MASK         0x1
+#define CORE_TX_BD_RESERVED_SHIFT        15
 };
 
 enum core_tx_dest {
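
One structural point worth calling out in the core_tx_bd rework above:
the one-byte bd_flags becomes the 16-bit bd_data, which also absorbs
NBDS and ROCE_FLAV from the old bitfield0 byte, so the BD is reshuffled
without changing its size on the wire. A userspace sketch of that
invariant, with the layout simplified from the hunk (not the driver's
actual definitions):

#include <stdint.h>

struct regpair { uint32_t lo; uint32_t hi; };

struct core_tx_bd_sketch {
	struct regpair addr;		/* 8 bytes */
	uint16_t nbytes;
	uint16_t nw_vlan_or_lb_echo;
	uint16_t bd_data;		/* was: u8 bitfield0 + u8 bd_flags */
	uint16_t bitfield1;
};

/* Firmware ABI structures must keep their size across reshuffles. */
_Static_assert(sizeof(struct core_tx_bd_sketch) == 16,
	       "core_tx_bd must stay 16 bytes");

int main(void) { return 0; }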
@@ -800,6 +804,14 @@  struct core_tx_stop_ramrod_data {
 	__le32 reserved0[2];
 };
 
+enum dcb_dhcp_update_flag {
+	DONT_UPDATE_DCB_DHCP,
+	UPDATE_DCB,
+	UPDATE_DSCP,
+	UPDATE_DCB_DSCP,
+	MAX_DCB_DHCP_UPDATE_FLAG
+};
+
 struct eth_mstorm_per_pf_stat {
 	struct regpair gre_discard_pkts;
 	struct regpair vxlan_discard_pkts;
@@ -893,6 +905,12 @@  struct event_ring_next_addr {
 	struct event_ring_next_addr next_addr;
 };
 
+enum fw_flow_ctrl_mode {
+	flow_ctrl_pause,
+	flow_ctrl_pfc,
+	MAX_FW_FLOW_CTRL_MODE
+};
+
 /* Major and Minor hsi Versions */
 struct hsi_fp_ver_struct {
 	u8 minor_ver_arr[2];
@@ -921,6 +939,7 @@  enum malicious_vf_error_id {
 	ETH_EDPM_OUT_OF_SYNC,
 	ETH_TUNN_IPV6_EXT_NBD_ERR,
 	ETH_CONTROL_PACKET_VIOLATION,
+	ETH_ANTI_SPOOFING_ERR,
 	MAX_MALICIOUS_VF_ERROR_ID
 };
 
@@ -1106,8 +1125,9 @@  struct tstorm_per_port_stat {
 	struct regpair ll2_mac_filter_discard;
 	struct regpair ll2_conn_disabled_discard;
 	struct regpair iscsi_irregular_pkt;
-	struct regpair reserved;
+	struct regpair fcoe_irregular_pkt;
 	struct regpair roce_irregular_pkt;
+	struct regpair reserved;
 	struct regpair eth_irregular_pkt;
 	struct regpair reserved1;
 	struct regpair preroce_irregular_pkt;
@@ -1648,6 +1668,11 @@  enum block_addr {
 	GRCBASE_MS = 0x6a0000,
 	GRCBASE_PHY_PCIE = 0x620000,
 	GRCBASE_LED = 0x6b8000,
+	GRCBASE_AVS_WRAP = 0x6b0000,
+	GRCBASE_RGFS = 0x19d0000,
+	GRCBASE_TGFS = 0x19e0000,
+	GRCBASE_PTLD = 0x19f0000,
+	GRCBASE_YPLD = 0x1a10000,
 	GRCBASE_MISC_AEU = 0x8000,
 	GRCBASE_BAR0_MAP = 0x1c00000,
 	MAX_BLOCK_ADDR
@@ -1732,6 +1757,11 @@  enum block_id {
 	BLOCK_MS,
 	BLOCK_PHY_PCIE,
 	BLOCK_LED,
+	BLOCK_AVS_WRAP,
+	BLOCK_RGFS,
+	BLOCK_TGFS,
+	BLOCK_PTLD,
+	BLOCK_YPLD,
 	BLOCK_MISC_AEU,
 	BLOCK_BAR0_MAP,
 	MAX_BLOCK_ID
@@ -1783,9 +1813,9 @@  struct dbg_attn_reg_result {
 	__le32 data;
 #define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK	0xFFFFFF
 #define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT	0
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK	0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT	24
-	__le16 attn_idx_offset;
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK	0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT	24
+	__le16 block_attn_offset;
 	__le16 reserved;
 	__le32 sts_val;
 	__le32 mask_val;
@@ -1815,12 +1845,12 @@  struct dbg_mode_hdr {
 /* Attention register */
 struct dbg_attn_reg {
 	struct dbg_mode_hdr mode;
-	__le16 attn_idx_offset;
+	__le16 block_attn_offset;
 	__le32 data;
 #define DBG_ATTN_REG_STS_ADDRESS_MASK	0xFFFFFF
 #define DBG_ATTN_REG_STS_ADDRESS_SHIFT	0
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK	0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT	24
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK	0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
 	__le32 sts_clr_address;
 	__le32 mask_address;
 };
@@ -2001,6 +2031,20 @@  enum dbg_bus_clients {
 	MAX_DBG_BUS_CLIENTS
 };
 
+enum dbg_bus_constraint_ops {
+	DBG_BUS_CONSTRAINT_OP_EQ,
+	DBG_BUS_CONSTRAINT_OP_NE,
+	DBG_BUS_CONSTRAINT_OP_LT,
+	DBG_BUS_CONSTRAINT_OP_LTC,
+	DBG_BUS_CONSTRAINT_OP_LE,
+	DBG_BUS_CONSTRAINT_OP_LEC,
+	DBG_BUS_CONSTRAINT_OP_GT,
+	DBG_BUS_CONSTRAINT_OP_GTC,
+	DBG_BUS_CONSTRAINT_OP_GE,
+	DBG_BUS_CONSTRAINT_OP_GEC,
+	MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
 /* Debug Bus memory address */
 struct dbg_bus_mem_addr {
 	__le32 lo;
@@ -2092,10 +2136,18 @@  struct dbg_bus_data {
 					      * DBG_BUS_TARGET_ID_PCI.
 					      */
 	__le16 reserved;
-	struct dbg_bus_block_data blocks[80];/* Debug Bus data for each block */
+	struct dbg_bus_block_data blocks[88];/* Debug Bus data for each block */
 	struct dbg_bus_storm_data storms[6]; /* Debug Bus data for each block */
 };
 
+enum dbg_bus_filter_types {
+	DBG_BUS_FILTER_TYPE_OFF,
+	DBG_BUS_FILTER_TYPE_PRE,
+	DBG_BUS_FILTER_TYPE_POST,
+	DBG_BUS_FILTER_TYPE_ON,
+	MAX_DBG_BUS_FILTER_TYPES
+};
+
 /* Debug bus frame modes */
 enum dbg_bus_frame_modes {
 	DBG_BUS_FRAME_MODE_0HW_4ST = 0, /* 0 HW dwords, 4 Storm dwords */
@@ -2104,6 +2156,40 @@  enum dbg_bus_frame_modes {
 	MAX_DBG_BUS_FRAME_MODES
 };
 
+enum dbg_bus_input_types {
+	DBG_BUS_INPUT_TYPE_STORM,
+	DBG_BUS_INPUT_TYPE_BLOCK,
+	MAX_DBG_BUS_INPUT_TYPES
+};
+
+enum dbg_bus_other_engine_modes {
+	DBG_BUS_OTHER_ENGINE_MODE_NONE,
+	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+	DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+	DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+	MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+enum dbg_bus_post_trigger_types {
+	DBG_BUS_POST_TRIGGER_RECORD,
+	DBG_BUS_POST_TRIGGER_DROP,
+	MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+enum dbg_bus_pre_trigger_types {
+	DBG_BUS_PRE_TRIGGER_START_FROM_ZERO,
+	DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+	DBG_BUS_PRE_TRIGGER_DROP,
+	MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+enum dbg_bus_semi_frame_modes {
+	DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+	DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+	MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
 /* Debug bus states */
 enum dbg_bus_states {
 	DBG_BUS_STATE_IDLE, /* debug bus idle state (not recording) */
@@ -2115,6 +2201,19 @@  enum dbg_bus_states {
 	MAX_DBG_BUS_STATES
 };
 
+enum dbg_bus_storm_modes {
+	DBG_BUS_STORM_MODE_PRINTF,
+	DBG_BUS_STORM_MODE_PRAM_ADDR,
+	DBG_BUS_STORM_MODE_DRA_RW,
+	DBG_BUS_STORM_MODE_DRA_W,
+	DBG_BUS_STORM_MODE_LD_ST_ADDR,
+	DBG_BUS_STORM_MODE_DRA_FSM,
+	DBG_BUS_STORM_MODE_RH,
+	DBG_BUS_STORM_MODE_FOC,
+	DBG_BUS_STORM_MODE_EXT_STORE,
+	MAX_DBG_BUS_STORM_MODES
+};
+
 /* Debug bus target IDs */
 enum dbg_bus_targets {
 	/* records debug bus to DBG block internal buffer */
@@ -2128,13 +2227,10 @@  enum dbg_bus_targets {
 
 /* GRC Dump data */
 struct dbg_grc_data {
-	__le32 param_val[40]; /* Value of each GRC parameter. Array size must
-			       * match the enum dbg_grc_params.
-			       */
-	u8 param_set_by_user[40]; /* Indicates for each GRC parameter if it was
-				   * set by the user (0/1). Array size must
-				   * match the enum dbg_grc_params.
-				   */
+	u8 params_initialized;
+	u8 reserved1;
+	__le16 reserved2;
+	__le32 param_val[48];
 };
 
 /* Debug GRC params */
@@ -2181,6 +2277,8 @@  enum dbg_grc_params {
 	DBG_GRC_PARAM_PARITY_SAFE,
 	DBG_GRC_PARAM_DUMP_CM, /* dump CM memories (0/1) */
 	DBG_GRC_PARAM_DUMP_PHY, /* dump PHY memories (0/1) */
+	DBG_GRC_PARAM_NO_MCP,
+	DBG_GRC_PARAM_NO_FW_VER,
 	MAX_DBG_GRC_PARAMS
 };
 
@@ -2280,7 +2378,7 @@  struct dbg_tools_data {
 	struct dbg_bus_data bus; /* Debug Bus data */
 	struct idle_chk_data idle_chk; /* Idle Check data */
 	u8 mode_enable[40]; /* Indicates if a mode is enabled (0/1) */
-	u8 block_in_reset[80]; /* Indicates if a block is in reset state (0/1).
+	u8 block_in_reset[88]; /* Indicates if a block is in reset state (0/1).
 				*/
 	u8 chip_id; /* Chip ID (from enum chip_ids) */
 	u8 platform_id; /* Platform ID (from enum platform_ids) */
@@ -2418,7 +2516,6 @@  enum init_modes {
 	MODE_PORTS_PER_ENG_2,
 	MODE_PORTS_PER_ENG_4,
 	MODE_100G,
-	MODE_40G,
 	MODE_RESERVED6,
 	MAX_INIT_MODES
 };
@@ -2686,6 +2783,13 @@  struct iro {
  */
 enum dbg_status qed_dbg_set_bin_ptr(const u8 * const bin_ptr);
 /**
+ * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
+ *	default values.
+ *
+ * @param p_hwfn		- HW device data
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
  * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
  *	GRC Dump.
  *
@@ -3418,7 +3522,7 @@  void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 #define	MSTORM_TPA_TIMEOUT_US_SIZE			(IRO[21].size)
 #define	MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
 	(IRO[22].base + ((pf_id) * IRO[22].m1))
-#define	MSTORM_ETH_PF_STAT_SIZE				(IRO[21].size)
+#define	MSTORM_ETH_PF_STAT_SIZE				(IRO[22].size)
 #define	USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
 	(IRO[23].base + ((stat_counter_id) * IRO[23].m1))
 #define	USTORM_QUEUE_STAT_SIZE				(IRO[23].size)
@@ -3482,7 +3586,7 @@  void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 
 static const struct iro iro_arr[47] = {
 	{0x0, 0x0, 0x0, 0x0, 0x8},
-	{0x4cb0, 0x78, 0x0, 0x0, 0x78},
+	{0x4cb0, 0x80, 0x0, 0x0, 0x80},
 	{0x6318, 0x20, 0x0, 0x0, 0x20},
 	{0xb00, 0x8, 0x0, 0x0, 0x4},
 	{0xa80, 0x8, 0x0, 0x0, 0x4},
@@ -3521,13 +3625,13 @@  void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 	{0xd888, 0x38, 0x0, 0x0, 0x24},
 	{0x12c38, 0x10, 0x0, 0x0, 0x8},
 	{0x11aa0, 0x38, 0x0, 0x0, 0x18},
-	{0xa8c0, 0x30, 0x0, 0x0, 0x10},
-	{0x86f8, 0x28, 0x0, 0x0, 0x18},
+	{0xa8c0, 0x38, 0x0, 0x0, 0x10},
+	{0x86f8, 0x30, 0x0, 0x0, 0x18},
 	{0x101f8, 0x10, 0x0, 0x0, 0x10},
 	{0xdd08, 0x48, 0x0, 0x0, 0x38},
 	{0x10660, 0x20, 0x0, 0x0, 0x20},
 	{0x2b80, 0x80, 0x0, 0x0, 0x10},
-	{0x5000, 0x10, 0x0, 0x0, 0x10},
+	{0x5020, 0x10, 0x0, 0x0, 0x10},
 };
 
 /* Runtime array offsets */
@@ -4595,6 +4699,12 @@  enum eth_ipv4_frag_type {
 	MAX_ETH_IPV4_FRAG_TYPE
 };
 
+enum eth_ip_type {
+	ETH_IPV4,
+	ETH_IPV6,
+	MAX_ETH_IP_TYPE
+};
+
 enum eth_ramrod_cmd_id {
 	ETH_RAMROD_UNUSED,
 	ETH_RAMROD_VPORT_START,
@@ -4944,7 +5054,10 @@  struct vport_update_ramrod_data_cmn {
 	u8 update_mtu_flg;
 
 	__le16 mtu;
-	u8 reserved[2];
+	u8 update_ctl_frame_checks_en_flg;
+	u8 ctl_frame_mac_check_en;
+	u8 ctl_frame_ethtype_check_en;
+	u8 reserved[15];
 };
 
 struct vport_update_ramrod_mcast {
@@ -4962,6 +5075,491 @@  struct vport_update_ramrod_data {
 	struct eth_vport_rss_config rss_config;
 };
 
+struct mstorm_eth_conn_ag_ctx {
+	u8 byte0;
+	u8 byte1;
+	u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT         1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK	0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK	0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT          4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK	0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT          6
+	u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT        0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT        1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT        2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT      3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT      4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT      5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT      6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK	0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT      7
+	__le16 word0;
+	__le16 word1;
+	__le32 reg0;
+	__le32 reg1;
+};
+
+struct xstorm_eth_conn_agctxdq_ext_ldpart {
+	u8 reserved0;
+	u8 eth_state;
+	u8 flags0;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT           0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT              5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT              6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT              7
+	u8 flags1;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT              0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT              1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT              2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT           7
+	u8 flags2;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT                    6
+	u8 flags3;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT                    4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT                    6
+	u8 flags4;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT                    0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT                    2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT                   6
+	u8 flags5;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT                   0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT                   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT                   4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT                   6
+	u8 flags6;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT           6
+	u8 flags7;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT             2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT              4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT                  7
+	u8 flags8;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT                  0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT                  1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT                  2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT                  3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT                  4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT                  5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT                  6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT                  7
+	u8 flags9;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT                 0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT                 1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT                 2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT                 3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT                 4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT                 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT    7
+	u8 flags10;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT        1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT            2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT             3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT             6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT             7
+	u8 flags11;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT             0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT             1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT                3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT                4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT                5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT                7
+	u8 flags12;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT               4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT               5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT               6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT               7
+	u8 flags13;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT               0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT               1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT           2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT           3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT           4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT           5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT           6
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT           7
+	u8 flags14;
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK	0x1
+#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK	0x3
+#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT             6
+	u8 edpm_event_id;
+	__le16 physical_q0;
+	__le16 quota;
+	__le16 edpm_num_bds;
+	__le16 tx_bd_cons;
+	__le16 tx_bd_prod;
+	__le16 tx_class;
+	__le16 conn_dpi;
+	u8 byte3;
+	u8 byte4;
+	u8 byte5;
+	u8 byte6;
+	__le32 reg0;
+	__le32 reg1;
+	__le32 reg2;
+	__le32 reg3;
+	__le32 reg4;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+	u8 reserved0;
+	u8 eth_state;
+	u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT           0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT              5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT              6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT              7
+	u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT              0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT              1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT              2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT         6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT           7
+	u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT                    6
+	u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT                    4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT                    6
+	u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT                    0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT                    2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT                   6
+	u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT                   0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT                   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT                   4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT                   6
+	u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT       2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT           6
+	u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT             2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT              4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT                  7
+	u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT                  0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT                  1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT                  2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT                  3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT                  4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT                  5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT                  6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT                  7
+	u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT                 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT                 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT                 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT                 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT                 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT                 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT    6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT    7
+	u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT        1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT            2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT             3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT             6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT             7
+	u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT             0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT             1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT         2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT                3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT                4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT                5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT                7
+	u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT               4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT               5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT               6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT               7
+	u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT               0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT               1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT           2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT           3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT           4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT           5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT           6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT           7
+	u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT       0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT     1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT   2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT   3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT         4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK	0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT       5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK	0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT             6
+	u8 edpm_event_id;
+	__le16 physical_q0;
+	__le16 quota;
+	__le16 edpm_num_bds;
+	__le16 tx_bd_cons;
+	__le16 tx_bd_prod;
+	__le16 tx_class;
+	__le16 conn_dpi;
+};
 struct mstorm_rdma_task_st_ctx {
 	struct regpair temp[4];
 };
@@ -6165,7 +6763,7 @@  struct ystorm_roce_conn_st_ctx {
 };
 
 struct xstorm_roce_conn_st_ctx {
-	struct regpair temp[22];
+	struct regpair temp[24];
 };
 
 struct tstorm_roce_conn_st_ctx {
@@ -6220,7 +6818,7 @@  struct roce_create_qp_req_ramrod_data {
 	__le16 mtu;
 	__le16 pd;
 	__le16 sq_num_pages;
-	__le16 reseved2;
+	__le16 low_latency_phy_queue;
 	struct regpair sq_pbl_addr;
 	struct regpair orq_pbl_addr;
 	__le16 local_mac_addr[3];
@@ -6234,7 +6832,7 @@  struct roce_create_qp_req_ramrod_data {
 	u8 stats_counter_id;
 	u8 reserved3[7];
 	__le32 cq_cid;
-	__le16 physical_queue0;
+	__le16 regular_latency_phy_queue;
 	__le16 dpi;
 };
 
@@ -6282,15 +6880,16 @@  struct roce_create_qp_resp_ramrod_data {
 	__le32 dst_gid[4];
 	struct regpair qp_handle_for_cqe;
 	struct regpair qp_handle_for_async;
-	__le32 reserved2[2];
+	__le16 low_latency_phy_queue;
+	u8 reserved2[6];
 	__le32 cq_cid;
-	__le16 physical_queue0;
+	__le16 regular_latency_phy_queue;
 	__le16 dpi;
 };
 
 struct roce_destroy_qp_req_output_params {
 	__le32 num_bound_mw;
-	__le32 reserved;
+	__le32 cq_prod;
 };
 
 struct roce_destroy_qp_req_ramrod_data {
@@ -6299,7 +6898,7 @@  struct roce_destroy_qp_req_ramrod_data {
 
 struct roce_destroy_qp_resp_output_params {
 	__le32 num_invalidated_mw;
-	__le32 reserved;
+	__le32 cq_prod;
 };
 
 struct roce_destroy_qp_resp_ramrod_data {
@@ -7426,6 +8025,7 @@  struct ystorm_fcoe_conn_st_ctx {
 	u8 fcp_rsp_size;
 	__le16 mss;
 	struct regpair reserved;
+	__le16 min_frame_size;
 	u8 protection_info_flags;
 #define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK  0x1
 #define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
@@ -7444,7 +8044,6 @@  struct ystorm_fcoe_conn_st_ctx {
 #define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK                0x3F
 #define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT               2
 	u8 fcp_xfer_size;
-	u8 reserved3[2];
 };
 
 struct fcoe_vlan_fields {
@@ -8273,10 +8872,10 @@  struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK                    0x3
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT                   6
 	u8 flags7;
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT                   0
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK                    0x3
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT                   2
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK	0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT        0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK	0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT        2
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK                   0x3
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT                  4
 #define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK                       0x1
@@ -8322,10 +8921,10 @@  struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT                     0
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK                 0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT                1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT                2
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK                 0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT                3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK	0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT     2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK	0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT     3
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK                0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT               4
 #define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK        0x1
@@ -8335,8 +8934,8 @@  struct xstorm_iscsi_conn_ag_ctx {
 #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK    0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT   7
 	u8 flags11;
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK                     0x1
-#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT                    0
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK	0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT              0
 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK                     0x1
 #define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT                    1
 #define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK                   0x1
@@ -8440,7 +9039,7 @@  struct xstorm_iscsi_conn_ag_ctx {
 	__le32 reg10;
 	__le32 reg11;
 	__le32 exp_stat_sn;
-	__le32 reg13;
+	__le32 ongoing_fast_rxmit_seq;
 	__le32 reg14;
 	__le32 reg15;
 	__le32 reg16;
@@ -8466,10 +9065,10 @@  struct tstorm_iscsi_conn_ag_ctx {
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK                0x3
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT               6
 	u8 flags1;
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT               0
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK                0x3
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT               2
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK	0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT      0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK	0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT      2
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK     0x3
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT    4
 #define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK                0x3
@@ -8490,10 +9089,10 @@  struct tstorm_iscsi_conn_ag_ctx {
 #define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT              2
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK              0x1
 #define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT             4
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT             5
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK              0x1
-#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT             6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK	0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT   5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK	0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT   6
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK  0x1
 #define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
 	u8 flags4;
@@ -8539,7 +9138,7 @@  struct tstorm_iscsi_conn_ag_ctx {
 	__le32 reg6;
 	__le32 reg7;
 	__le32 reg8;
-	u8 byte2;
+	u8 cid_offload_cnt;
 	u8 byte3;
 	__le16 word0;
 };
@@ -9067,6 +9666,10 @@  struct dcb_dscp_map {
 struct public_global {
 	u32 max_path;
 	u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
 	u32 debug_mb_offset;
 	u32 phymod_dbg_mb_offset;
 	struct couple_mode_teaming cmt;
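
Buried among the qed_hsi.h updates is a genuine bug fix:
MSTORM_ETH_PF_STAT_SIZE was reading IRO[21] while its OFFSET
counterpart indexes IRO[22]. The indirection, as a hypothetical
miniature (entry values invented for illustration; the driver's struct
iro also carries more fields than this):

#include <stdint.h>

typedef uint32_t u32;

struct iro {
	u32 base;
	u32 m1;
	u32 size;
};

/* Invented values -- the real table is iro_arr[] above. */
static const struct iro iro[] = {
	[21] = { 0x4c50, 0x00, 0x08 },
	[22] = { 0x4c58, 0x40, 0x20 },
};

/* OFFSET and SIZE for one firmware symbol must use the same entry. */
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
	(iro[22].base + ((pf_id) * iro[22].m1))
#define MSTORM_ETH_PF_STAT_SIZE		(iro[22].size)

int main(void)
{
	/* pf 2 lands at 0x4c58 + 2 * 0x40 = 0x4cd8 in this toy table */
	return MSTORM_ETH_PF_STAT_OFFSET(2) == 0x4cd8 ? 0 : 1;
}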
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index d891a68..9509192 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -215,13 +215,6 @@  static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
 {
 	u32 qm_line_crd;
 
-	/* In A0 - Limit the size of pbf queue so that only 511 commands with
-	 * the minimum size of 4 (FCoE minimum size)
-	 */
-	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
-
-	if (is_bb_a0)
-		cmdq_lines = min_t(u32, cmdq_lines, 1022);
 	qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
 	OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
 			 (u32)cmdq_lines);
@@ -343,13 +336,11 @@  static void qed_tx_pq_map_rt_init(
 	u16 first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
 	u16 last_pq_group = (p_params->start_pq + num_pqs - 1) /
 			    QM_PF_QUEUE_GROUP_SIZE;
-	bool is_bb_a0 = QED_IS_BB_A0(p_hwfn->cdev);
 	u16 i, pq_id, pq_group;
 
 	/* a bit per Tx PQ indicating if the PQ is associated with a VF */
 	u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
-	u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
-	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
+	u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
 	u32 pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
 	u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
 	u32 mem_addr_4kb = base_mem_addr_4kb;
@@ -371,6 +362,10 @@  static void qed_tx_pq_map_rt_init(
 		bool is_vf_pq = (i >= p_params->num_pf_pqs);
 		struct qm_rf_pq_map tx_pq_map;
 
+		bool rl_valid = p_params->pq_params[i].rl_valid &&
+				(p_params->pq_params[i].vport_id <
+				 MAX_QM_GLOBAL_RLS);
+
 		/* update first Tx PQ of VPORT/TC */
 		u8 vport_id_in_pf = p_params->pq_params[i].vport_id -
 				    p_params->start_vport;
@@ -389,14 +384,18 @@  static void qed_tx_pq_map_rt_init(
 				     (p_params->pf_id <<
 				      QM_WFQ_VP_PQ_PF_SHIFT));
 		}
+
+		if (p_params->pq_params[i].rl_valid && !rl_valid)
+			DP_NOTICE(p_hwfn,
+				  "Invalid VPORT ID for rate limiter configuration");
 		/* fill PQ map entry */
 		memset(&tx_pq_map, 0, sizeof(tx_pq_map));
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
-		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
-			  p_params->pq_params[i].rl_valid ? 1 : 0);
+		SET_FIELD(tx_pq_map.reg,
+			  QM_RF_PQ_MAP_RL_VALID, rl_valid ? 1 : 0);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
-			  p_params->pq_params[i].rl_valid ?
+			  rl_valid ?
 			  p_params->pq_params[i].vport_id : 0);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
 		SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
@@ -413,8 +412,9 @@  static void qed_tx_pq_map_rt_init(
 			/* if PQ is associated with a VF, add indication
 			 * to PQ VF mask
 			 */
-			tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
-				(1 << (pq_id % tx_pq_vf_mask_width));
+			tx_pq_vf_mask[pq_id /
+				      QM_PF_QUEUE_GROUP_SIZE] |=
+			    BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
 			mem_addr_4kb += vport_pq_mem_4kb;
 		} else {
 			mem_addr_4kb += pq_mem_4kb;
@@ -480,8 +480,8 @@  static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 	if (p_params->pf_id < MAX_NUM_PFS_BB)
 		crd_reg_offset = QM_REG_WFQPFCRD_RT_OFFSET;
 	else
-		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET +
-				 (p_params->pf_id % MAX_NUM_PFS_BB);
+		crd_reg_offset = QM_REG_WFQPFCRD_MSB_RT_OFFSET;
+	crd_reg_offset += p_params->pf_id % MAX_NUM_PFS_BB;
 
 	inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
 	if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
@@ -498,11 +498,11 @@  static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
 				 QM_WFQ_CRD_REG_SIGN_BIT);
 	}
 
-	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
-		     inc_val);
 	STORE_RT_REG(p_hwfn,
 		     QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
 		     QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+	STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+		     inc_val);
 	return 0;
 }
 
@@ -576,6 +576,12 @@  static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
 {
 	u8 i, vport_id;
 
+	if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+		DP_NOTICE(p_hwfn,
+			  "Invalid VPORT ID for rate limiter configuration");
+		return -1;
+	}
+
 	/* go over all PF VPORTs */
 	for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
 		u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -784,6 +790,11 @@  int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
 		      struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
 {
 	u32 inc_val = QM_RL_INC_VAL(vport_rl);
+	if (vport_id >= MAX_QM_GLOBAL_RLS) {
+		DP_NOTICE(p_hwfn,
+			  "Invalid VPORT ID for rate limiter configuration");
+		return -1;
+	}
 
 	if (inc_val > QM_RL_MAX_INC_VAL) {
 		DP_NOTICE(p_hwfn, "Invalid VPORT rate-limit configuration");
@@ -940,12 +951,6 @@  void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
 	       eth_geneve_enable ? 1 : 0);
 	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
 
-	/* comp ver */
-	reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
-	qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
-	qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
-
 	/* EDPM with geneve tunnel not supported in BB_B0 */
 	if (QED_IS_BB_B0(p_hwfn->cdev))
 		return;
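
A recurring theme in the qed_init_fw_funcs.c hunks: global rate
limiters are indexed directly by VPORT ID, so every configuration path
now rejects IDs at or beyond MAX_QM_GLOBAL_RLS instead of silently
programming past the RL array. The shape of the guard, as a standalone
sketch (the constant's value is an assumption here; the real one comes
from the firmware HSI):

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define MAX_QM_GLOBAL_RLS 256	/* assumed; defined by the firmware HSI */

/* Returns 1 if a VPORT may own a global rate limiter, else 0. */
static int qm_rl_id_valid(u32 vport_id)
{
	return vport_id < MAX_QM_GLOBAL_RLS;
}

int main(void)
{
	/* Mirrors the range check added to qed_vport_rl_rt_init(). */
	u32 start_vport = 250, num_vports = 10;

	if (!qm_rl_id_valid(start_vport + num_vports - 1) ||
	    start_vport + num_vports >= MAX_QM_GLOBAL_RLS)
		fprintf(stderr,
			"Invalid VPORT ID for rate limiter configuration\n");
	return 0;
}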
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 243b64e..4a2e7be 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -554,7 +554,7 @@  int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
 	}
 
 	/* First Dword contains metadata and should be skipped */
-	buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+	buf_hdr = (struct bin_buffer_hdr *)data;
 
 	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
 	fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
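
The qed_init_ops.c change reads the bin_buffer_hdr table from the very
start of the firmware blob rather than after a metadata dword; the
per-buffer offsets were already applied relative to 'data', so only the
table base moves (presumably the 8.15.3.0 image layout changed -- that
reading is inferred from the hunk, and the "should be skipped" comment
above the changed line now looks stale). A simplified sketch of the
resulting indexing:

#include <stdint.h>

typedef uint8_t u8;
typedef uint32_t u32;

/* Simplified from the driver's struct bin_buffer_hdr. */
struct bin_buffer_hdr {
	u32 offset;
	u32 length;
};

/* Header table at data[0]; each offset is relative to 'data'. */
static const void *get_bin_buf(const u8 *data, unsigned int buf_id)
{
	const struct bin_buffer_hdr *hdr =
		(const struct bin_buffer_hdr *)data;

	return data + hdr[buf_id].offset;
}

int main(void)
{
	/* Little-endian host assumed for this self-check. */
	static const u8 blob[16] = { 8 };	/* hdr[0].offset = 8 */

	return get_bin_buf(blob, 0) == blob + 8 ? 0 : 1;
}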
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 9a0b9af..161d903 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -594,7 +594,7 @@  static u8 qed_ll2_convert_rx_parse_to_tx_flags(u16 parse_flags)
 	u8 bd_flags = 0;
 
 	if (GET_FIELD(parse_flags, PARSING_AND_ERR_FLAGS_TAG8021QEXIST))
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_VLAN_INSERTION, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_VLAN_INSERTION, 1);
 
 	return bd_flags;
 }
@@ -755,8 +755,8 @@  static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
 			     p_buffer->placement_offset;
 		parse_flags = p_buffer->parse_flags;
 		bd_flags = qed_ll2_convert_rx_parse_to_tx_flags(parse_flags);
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_FORCE_VLAN_MODE, 1);
-		SET_FIELD(bd_flags, CORE_TX_BD_FLAGS_L4_PROTOCOL, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
 
 		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
 					       p_buffer->vlan, bd_flags,
@@ -1588,33 +1588,34 @@  static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
 	p_tx->cur_send_frag_num++;
 }
 
-static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
-					     struct qed_ll2_info *p_ll2,
-					     struct qed_ll2_tx_packet *p_curp,
-					     u8 num_of_bds,
-					     enum core_tx_dest tx_dest,
-					     u16 vlan,
-					     u8 bd_flags,
-					     u16 l4_hdr_offset_w,
-					     enum core_roce_flavor_type type,
-					     dma_addr_t first_frag,
-					     u16 first_frag_len)
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+				 struct qed_ll2_info *p_ll2,
+				 struct qed_ll2_tx_packet *p_curp,
+				 u8 num_of_bds,
+				 enum core_tx_dest tx_dest,
+				 u16 vlan,
+				 u8 bd_flags,
+				 u16 l4_hdr_offset_w,
+				 enum core_roce_flavor_type roce_flavor,
+				 dma_addr_t first_frag,
+				 u16 first_frag_len)
 {
 	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
 	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
 	struct core_tx_bd *start_bd = NULL;
-	u16 frag_idx;
+	u16 bd_data = 0, frag_idx;
 
 	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
 	start_bd->nw_vlan_or_lb_echo = cpu_to_le16(vlan);
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
 		  cpu_to_le16(l4_hdr_offset_w));
 	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
-	start_bd->bd_flags.as_bitfield = bd_flags;
-	start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK <<
-	    CORE_TX_BD_FLAGS_START_BD_SHIFT;
-	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds);
-	SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type);
+	bd_data |= bd_flags;
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, num_of_bds);
+	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
 	DMA_REGPAIR_LE(start_bd->addr, first_frag);
 	start_bd->nbytes = cpu_to_le16(first_frag_len);
 
@@ -1639,9 +1640,8 @@  static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
 		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
 
 		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
-		(*p_bd)->bd_flags.as_bitfield = 0;
+		(*p_bd)->bd_data.as_bitfield = 0;
 		(*p_bd)->bitfield1 = 0;
-		(*p_bd)->bitfield0 = 0;
 		p_curp->bds_set[frag_idx].tx_frag = 0;
 		p_curp->bds_set[frag_idx].frag_len = 0;
 	}
@@ -2238,11 +2238,11 @@  static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb)
 	/* Request HW to calculate IP csum */
 	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
 	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
-		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
 
 	if (skb_vlan_tag_present(skb)) {
 		vlan = skb_vlan_tag_get(skb);
-		flags |= BIT(CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
 	}
 
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev),
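
The qed_ll2.c conversion packs all TX BD control bits into one
host-order u16 and converts it to little-endian once, instead of poking
the old one-byte bd_flags in place. A small standalone demo of the
packing, with SET_FIELD() written out as a simplified stand-in for the
driver macro (mask/shift values copied from the core_tx_bd_data hunk
earlier in this patch):

#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;

/* Simplified stand-in for the driver's SET_FIELD(): clear, then set. */
#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~((name##_MASK) << (name##_SHIFT)); \
		(value) |= (u16)((flag) << (name##_SHIFT)); \
	} while (0)

/* From the core_tx_bd_data definitions above. */
#define CORE_TX_BD_DATA_START_BD_MASK	0x1
#define CORE_TX_BD_DATA_START_BD_SHIFT	2
#define CORE_TX_BD_DATA_NBDS_MASK	0xF
#define CORE_TX_BD_DATA_NBDS_SHIFT	8

int main(void)
{
	u16 bd_data = 0;

	/* NBDS used to sit in bitfield0[3:0]; it now lives in
	 * bd_data[11:8], packed alongside the old bd_flags bits, and
	 * the whole word gets one cpu_to_le16() in the driver.
	 */
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, 3);
	printf("bd_data = 0x%04x\n", bd_data);	/* prints 0x0304 */
	return 0;
}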
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index d59d9df..36ae361 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -356,6 +356,10 @@ 
 	0x238804UL
 #define  RDIF_REG_STOP_ON_ERROR \
 	0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+	0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+	64
 #define  SRC_REG_SOFT_RST \
 	0x23874cUL
 #define  TCFC_REG_ACTIVITY_COUNTER \
@@ -370,6 +374,10 @@ 
 	0x1700004UL
 #define  TDIF_REG_STOP_ON_ERROR \
 	0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+	0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+	64
 #define  UCM_REG_INIT \
 	0x1280000UL
 #define  UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
@@ -1236,6 +1244,26 @@ 
 	0x1901534UL
 #define USEM_REG_DBG_FORCE_FRAME \
 	0x1901538UL
+#define NWS_REG_DBG_SELECT \
+	0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE \
+	0x70012cUL
+#define NWS_REG_DBG_SHIFT \
+	0x700130UL
+#define NWS_REG_DBG_FORCE_VALID	\
+	0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME	\
+	0x700138UL
+#define MS_REG_DBG_SELECT \
+	0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE \
+	0x6a022cUL
+#define MS_REG_DBG_SHIFT \
+	0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID \
+	0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME \
+	0x6a0238UL
 #define PCIE_REG_DBG_COMMON_SELECT \
 	0x054398UL
 #define PCIE_REG_DBG_COMMON_DWORD_ENABLE \
@@ -1448,6 +1476,8 @@ 
 	0x000b48UL
 #define RSS_REG_RSS_RAM_DATA \
 	0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+	4
 #define MISC_REG_BLOCK_256B_EN \
 	0x008c14UL
 #define NWS_REG_NWS_CMU	\
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index d9ff6b2..4bef5c59 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -66,13 +66,27 @@ 
 #include "qed_roce.h"
 #include "qed_ll2.h"
 
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
-			  struct event_ring_entry *p_eqe)
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
+
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+			  u8 fw_event_code, union rdma_eqe_data *rdma_data)
 {
-	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+		u16 icid =
+		    (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+
+		/* icid release in this async event can occur only if the icid
+		 * was offloaded to the FW. In case it wasn't offloaded this is
+		 * handled in qed_roce_sp_destroy_qp.
+		 */
+		qed_roce_free_real_icid(p_hwfn, icid);
+	} else {
+		struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
 
-	p_rdma_info->events.affiliated_event(p_rdma_info->events.context,
-					     p_eqe->opcode, &p_eqe->data);
+		events->affiliated_event(p_hwfn->p_rdma_info->events.context,
+					 fw_event_code,
+					 &rdma_data->async_handle);
+	}
 }
 
 static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
@@ -113,6 +127,15 @@  static int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
+static void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+			    struct qed_bmap *bmap, u32 id_num)
+{
+	if (id_num >= bmap->max_count)
+		return;
+
+	__set_bit(id_num, bmap->bitmap);
+}
+
 static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
 				struct qed_bmap *bmap, u32 id_num)
 {
@@ -129,6 +152,15 @@  static void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
 	}
 }
 
+static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+			    struct qed_bmap *bmap, u32 id_num)
+{
+	if (id_num >= bmap->max_count)
+		return -1;
+
+	return test_bit(id_num, bmap->bitmap);
+}
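
The qed_bmap_set_id()/qed_bmap_test_id() helpers added here, together with the existing qed_bmap_release_id(), form a small guarded-bitmap API: every operation bounds-checks the id before touching the bit. A standalone sketch of the same pattern (no locking, single-word bitmap, names invented for illustration; assumes the ids fit in one unsigned long):

#include <stdio.h>

#define MAX_IDS 64

struct bmap {
	unsigned long bits;	/* one word is enough for this sketch */
	unsigned int max_count;
};

static void bmap_set(struct bmap *b, unsigned int id)
{
	if (id < b->max_count)
		b->bits |= 1UL << id;
}

static int bmap_test(struct bmap *b, unsigned int id)
{
	if (id >= b->max_count)
		return -1;	/* out of range, like the driver helper */
	return !!(b->bits & (1UL << id));
}

static void bmap_release(struct bmap *b, unsigned int id)
{
	if (id < b->max_count)
		b->bits &= ~(1UL << id);
}

int main(void)
{
	struct bmap b = { .bits = 0, .max_count = MAX_IDS };

	bmap_set(&b, 5);
	printf("%d\n", bmap_test(&b, 5));	/* 1 */
	bmap_release(&b, 5);
	printf("%d\n", bmap_test(&b, 5));	/* 0 */
	return 0;
}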
+
 static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
 {
 	/* First sb id for RoCE is after all the l2 sb */
@@ -170,7 +202,8 @@  static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 	/* Queue zone lines are shared between RoCE and L2 in such a way that
 	 * they can be used by each without obstructing the other.
 	 */
-	p_rdma_info->queue_zone_base = (u16)FEAT_NUM(p_hwfn, QED_L2_QUEUE);
+	p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+	p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
 
 	/* Allocate a struct with device params and fill it */
 	p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
@@ -248,9 +281,18 @@  static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 		goto free_tid_map;
 	}
 
+	/* Allocate bitmap for cids used for responders/requesters. */
+	rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons);
+	if (rc) {
+		DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+			   "Failed to allocate real cid bitmap, rc = %d\n", rc);
+		goto free_cid_map;
+	}
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
 	return 0;
 
+free_cid_map:
+	kfree(p_rdma_info->cid_map.bitmap);
 free_tid_map:
 	kfree(p_rdma_info->tid_map.bitmap);
 free_toggle_map:
@@ -273,7 +315,22 @@  static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
 
 static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 {
+	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+	int wait_count = 0;
+
+	/* When destroying a RoCE QP, control is returned to the user after
+	 * the synchronous part completes. The asynchronous part may take a
+	 * little longer, so delay for a short while as long as an async
+	 * destroy QP is still expected. Beyond that delay, clear the bitmap
+	 * anyway.
+	 */
+	while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+		msleep(100);
+		if (wait_count++ > 20) {
+			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+			break;
+		}
+	}
 
 	kfree(p_rdma_info->cid_map.bitmap);
 	kfree(p_rdma_info->tid_map.bitmap);
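
The loop added to qed_rdma_resc_free() above is a bounded wait: poll for outstanding async destroys, sleep 100ms between polls, and give up with a notice after roughly two seconds rather than blocking teardown forever. The same shape in a standalone sketch (pending() stands in for the bitmap_weight() check on the real cid map):

#include <stdio.h>
#include <unistd.h>

static int outstanding = 3;

static int pending(void)
{
	/* Stand-in for bitmap_weight() on the real cid map. */
	return outstanding-- > 0;
}

int main(void)
{
	int wait_count = 0;

	while (pending()) {
		usleep(100 * 1000);	/* msleep(100) equivalent */
		if (wait_count++ > 20) {
			fprintf(stderr, "cid bitmap wait timed out\n");
			break;
		}
	}
	return 0;
}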
@@ -724,6 +781,14 @@  static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
 	u32 addr;
 
 	p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+	if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+		DP_NOTICE(p_hwfn,
+			  "queue zone offset %d is too large (max is %d)\n",
+			  qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+		return;
+	}
+
 	qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
 	       USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
@@ -1080,6 +1145,14 @@  static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 	return flavor;
 }
 
+void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
 {
 	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
@@ -1139,6 +1212,13 @@  static int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
 	return rc;
 }
 
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
 static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 					struct qed_rdma_qp *qp)
 {
@@ -1147,7 +1227,8 @@  static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 	union qed_qm_pq_params qm_params;
 	enum roce_flavor roce_flavor;
 	struct qed_spq_entry *p_ent;
-	u16 physical_queue0 = 0;
+	u16 regular_latency_queue;
+	enum protocol_type proto;
 	int rc;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1229,15 +1310,19 @@  static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
 	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
-	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
 	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
 				       qp->rq_cq_id);
 
 	memset(&qm_params, 0, sizeof(qm_params));
 	qm_params.roce.qpid = qp->icid >> 1;
-	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+	regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
+					      &qm_params);
+
+	p_ramrod->regular_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
+	p_ramrod->low_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
 
-	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
 	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
 	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1253,13 +1338,19 @@  static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
 
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
-	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d physical_queue0 = 0x%x\n",
-		   rc, physical_queue0);
+	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+		   "rc = %d regular physical queue = 0x%x\n", rc,
+		   regular_latency_queue);
 
 	if (rc)
 		goto err;
 
 	qp->resp_offloaded = true;
+	qp->cq_prod = 0;
+
+	proto = p_hwfn->p_rdma_info->proto;
+	qed_roce_set_real_cid(p_hwfn, qp->icid -
+			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
 
 	return rc;
 
@@ -1280,7 +1371,8 @@  static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 	union qed_qm_pq_params qm_params;
 	enum roce_flavor roce_flavor;
 	struct qed_spq_entry *p_ent;
-	u16 physical_queue0 = 0;
+	u16 regular_latency_queue;
+	enum protocol_type proto;
 	int rc;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
@@ -1351,15 +1443,19 @@  static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
 	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
 	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
-	p_ramrod->stats_counter_id = p_hwfn->rel_pf_id;
-	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
-				       qp->sq_cq_id);
+	p_ramrod->cq_cid =
+	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 
 	memset(&qm_params, 0, sizeof(qm_params));
 	qm_params.roce.qpid = qp->icid >> 1;
-	physical_queue0 = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE, &qm_params);
+	regular_latency_queue = qed_get_qm_pq(p_hwfn, PROTOCOLID_ROCE,
+					      &qm_params);
+
+	p_ramrod->regular_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
+	p_ramrod->low_latency_phy_queue =
+	    cpu_to_le16(regular_latency_queue);
 
-	p_ramrod->physical_queue0 = cpu_to_le16(physical_queue0);
 	p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
 	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
@@ -1378,6 +1474,10 @@  static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
 		goto err;
 
 	qp->req_offloaded = true;
+	proto = p_hwfn->p_rdma_info->proto;
+	qed_roce_set_real_cid(p_hwfn,
+			      qp->icid + 1 -
+			      qed_cxt_get_proto_cid_start(p_hwfn, proto));
 
 	return rc;
 
@@ -1577,7 +1677,8 @@  static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
 
 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 					    struct qed_rdma_qp *qp,
-					    u32 *num_invalidated_mw)
+					    u32 *num_invalidated_mw,
+					    u32 *cq_prod)
 {
 	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
 	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
@@ -1588,8 +1689,22 @@  static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 
 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 
-	if (!qp->resp_offloaded)
+	*num_invalidated_mw = 0;
+	*cq_prod = qp->cq_prod;
+
+	if (!qp->resp_offloaded) {
+		/* If the responder was never offloaded, we need to free the
+		 * cids allocated in create_qp, as a FW async event will never
+		 * arrive.
+		 */
+		u32 cid;
+
+		cid = qp->icid -
+		      qed_cxt_get_proto_cid_start(p_hwfn,
+						  p_hwfn->p_rdma_info->proto);
+		qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
 		return 0;
+	}
 
 	/* Get SPQ entry */
 	memset(&init_data, 0, sizeof(init_data));
@@ -1624,6 +1739,8 @@  static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 		goto err;
 
 	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
+	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+	qp->cq_prod = *cq_prod;
 
 	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -1827,10 +1944,8 @@  static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 
 	out_params->draining = false;
 
-	if (rq_err_state)
+	if (rq_err_state || sq_err_state)
 		qp->cur_state = QED_ROCE_QP_STATE_ERR;
-	else if (sq_err_state)
-		qp->cur_state = QED_ROCE_QP_STATE_SQE;
 	else if (sq_draining)
 		out_params->draining = true;
 	out_params->state = qp->cur_state;
@@ -1849,10 +1964,9 @@  static int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 
 static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 {
-	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
 	u32 num_invalidated_mw = 0;
 	u32 num_bound_mw = 0;
-	u32 start_cid;
+	u32 cq_prod;
 	int rc;
 
 	/* Destroys the specified QP */
@@ -1866,7 +1980,8 @@  static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 
 	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
 		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-						      &num_invalidated_mw);
+						      &num_invalidated_mw,
+						      &cq_prod);
 		if (rc)
 			return rc;
 
@@ -1881,21 +1996,6 @@  static int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 				  "number of invalidate memory windows is different from bounded ones\n");
 			return -EINVAL;
 		}
-
-		spin_lock_bh(&p_rdma_info->lock);
-
-		start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
-							p_rdma_info->proto);
-
-		/* Release responder's icid */
-		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
-				    qp->icid - start_cid);
-
-		/* Release requester's icid */
-		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map,
-				    qp->icid + 1 - start_cid);
-
-		spin_unlock_bh(&p_rdma_info->lock);
 	}
 
 	return 0;
@@ -2110,12 +2210,19 @@  static int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 		return rc;
 	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
 		/* Any state -> RESET */
+		u32 cq_prod;
+
+		/* Send destroy responder ramrod */
+		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+						      qp,
+						      &num_invalidated_mw,
+						      &cq_prod);
 
-		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-						      &num_invalidated_mw);
 		if (rc)
 			return rc;
 
+		qp->cq_prod = cq_prod;
+
 		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
 						      &num_bound_mw);
 
@@ -2454,6 +2561,31 @@  static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
 	return rc;
 }
 
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+	u32 start_cid, cid, xcid;
+
+	/* An even icid belongs to a responder while an odd icid belongs to a
+	 * requester. The 'cid' received as an input can be either. We
+	 * calculate the "partner" icid and call it xcid. Only when both are
+	 * free can the "cid" map be cleared.
+	 */
+	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+	cid = icid - start_cid;
+	xcid = cid ^ 1;
+
+	spin_lock_bh(&p_rdma_info->lock);
+
+	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+	}
+
+	spin_unlock_bh(&p_rdma_info->lock);
+}
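
Because a QP occupies an even/odd icid pair (responder on the even relative cid, requester on cid + 1), the partner of any relative cid is simply cid ^ 1, as the new qed_roce_free_real_icid() exploits. A quick illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cid;

	for (cid = 0; cid < 4; cid++)
		printf("cid %u <-> partner %u\n", cid, cid ^ 1);
	/* 0<->1, 1<->0, 2<->3, 3<->2 */
	return 0;
}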
+
 static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
 {
 	return QED_LEADING_HWFN(cdev);
@@ -2773,7 +2905,7 @@  static int qed_roce_ll2_tx(struct qed_dev *cdev,
 						      : QED_LL2_RROCE;
 
 	if (pkt->roce_mode == ROCE_V2_IPV4)
-		flags |= BIT(CORE_TX_BD_FLAGS_IP_CSUM_SHIFT);
+		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
 
 	/* Tx header */
 	rc = qed_ll2_prepare_tx_packet(QED_LEADING_HWFN(cdev), roce_ll2->handle,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
index 36cf4b2..3ccc08a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -82,6 +82,7 @@  struct qed_rdma_info {
 	struct qed_bmap qp_map;
 	struct qed_bmap srq_map;
 	struct qed_bmap cid_map;
+	struct qed_bmap real_cid_map;
 	struct qed_bmap dpi_map;
 	struct qed_bmap toggle_bits;
 	struct qed_rdma_events events;
@@ -92,6 +93,7 @@  struct qed_rdma_info {
 	u32 num_qps;
 	u32 num_mrs;
 	u16 queue_zone_base;
+	u16 max_queue_zones;
 	enum protocol_type proto;
 };
 
@@ -153,6 +155,7 @@  struct qed_rdma_qp {
 	dma_addr_t irq_phys_addr;
 	u8 irq_num_pages;
 	bool resp_offloaded;
+	u32 cq_prod;
 
 	u8 remote_mac_addr[6];
 	u8 local_mac_addr[6];
@@ -163,8 +166,8 @@  struct qed_rdma_qp {
 
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_async_roce_event(struct qed_hwfn *p_hwfn,
-			  struct event_ring_entry *p_eqe);
+void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+			  u8 fw_event_code, union rdma_eqe_data *rdma_data);
 void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
 				     u8 connection_handle,
 				     void *cookie,
@@ -187,7 +190,9 @@  void qed_ll2b_complete_rx_gsi_packet(struct qed_hwfn *p_hwfn,
 				     u16 src_mac_addr_lo, bool b_last_packet);
 #else
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_async_roce_event(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) {}
+static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
+					u8 fw_event_code,
+					union rdma_eqe_data *rdma_data) {}
 static inline void qed_ll2b_complete_tx_gsi_packet(struct qed_hwfn *p_hwfn,
 						   u8 connection_handle,
 						   void *cookie,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 645328a..54fbe37 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -296,9 +296,12 @@  static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 			   struct event_ring_entry *p_eqe)
 {
 	switch (p_eqe->protocol_id) {
+#if IS_ENABLED(CONFIG_QED_RDMA)
 	case PROTOCOLID_ROCE:
-		qed_async_roce_event(p_hwfn, p_eqe);
+		qed_roce_async_event(p_hwfn, p_eqe->opcode,
+				     &p_eqe->data.rdma_data);
 		return 0;
+#endif
 	case PROTOCOLID_COMMON:
 		return qed_sriov_eqe_event(p_hwfn,
 					   p_eqe->opcode,
@@ -306,14 +309,6 @@  static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 	case PROTOCOLID_ISCSI:
 		if (!IS_ENABLED(CONFIG_QED_ISCSI))
 			return -EINVAL;
-		if (p_eqe->opcode == ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES) {
-			u32 cid = le32_to_cpu(p_eqe->data.iscsi_info.cid);
-
-			qed_ooo_release_connection_isles(p_hwfn,
-							 p_hwfn->p_ooo_info,
-							 cid);
-			return 0;
-		}
 
 		if (p_hwfn->p_iscsi_info->event_cb) {
 			struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
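
Two conditional-compilation styles meet in this hunk: the RoCE case must be compiled out entirely under #if IS_ENABLED(), since qed_roce_async_event() and the rdma EQE data only exist when CONFIG_QED_RDMA is set, while the iSCSI case can use a plain runtime-style IS_ENABLED() check because everything it references is always visible. A simplified userspace model of the two styles (the kernel's IS_ENABLED() macro is more elaborate; plain 0/1 macros stand in for Kconfig here):

#include <stdio.h>

/* Stand-ins for Kconfig symbols; the kernel derives these from autoconf.h. */
#define FEATURE_A_ENABLED 1
#define FEATURE_B_ENABLED 0

int main(void)
{
#if FEATURE_A_ENABLED
	/* Compiled out entirely when disabled - may reference symbols that
	 * only exist in that configuration (the RoCE case).
	 */
	printf("feature A event handled\n");
#endif

	if (!FEATURE_B_ENABLED) {
		/* Always compiled and type-checked; the optimizer drops the
		 * dead branch (the iSCSI case).
		 */
		printf("feature B not built in\n");
	}
	return 0;
}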
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index f2aaef2..8d02fb6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -50,7 +50,7 @@ 
 #define QEDE_MAJOR_VERSION		8
 #define QEDE_MINOR_VERSION		10
 #define QEDE_REVISION_VERSION		10
-#define QEDE_ENGINEERING_VERSION	20
+#define QEDE_ENGINEERING_VERSION	21
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."	\
 		__stringify(QEDE_MINOR_VERSION) "."		\
 		__stringify(QEDE_REVISION_VERSION) "."		\
diff --git a/drivers/scsi/qedf/Makefile b/drivers/scsi/qedf/Makefile
index 64e9f50..414f2a7 100644
--- a/drivers/scsi/qedf/Makefile
+++ b/drivers/scsi/qedf/Makefile
@@ -1,5 +1,5 @@ 
 obj-$(CONFIG_QEDF) := qedf.o
 qedf-y = qedf_dbg.o qedf_main.o qedf_io.o qedf_fip.o \
-	 qedf_attr.o qedf_els.o
+	 qedf_attr.o qedf_els.o drv_scsi_fw_funcs.o drv_fcoe_fw_funcs.o
 
 qedf-$(CONFIG_DEBUG_FS) += qedf_debugfs.o
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
new file mode 100644
index 0000000..c71a166
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -0,0 +1,191 @@ 
+/*
+ *  QLogic FCoE Offload Driver
+ *  Copyright (c) 2016 Cavium Inc.
+ *
+ *  This software is available under the terms of the GNU General Public License
+ *  (GPL) Version 2, available from the file COPYING in the main directory of
+ *  this source tree.
+ */
+#include "drv_fcoe_fw_funcs.h"
+#include "drv_scsi_fw_funcs.h"
+
+#define FCOE_RX_ID 0xFFFF
+
+static inline void init_common_sqe(struct fcoe_task_params *task_params,
+				   enum fcoe_sqe_request_type request_type)
+{
+	memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
+	SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
+		  request_type);
+	task_params->sqe->task_id = task_params->itid;
+}
+
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+				struct scsi_sgl_task_params *sgl_task_params,
+				struct regpair sense_data_buffer_phys_addr,
+				u32 task_retry_id,
+				u8 fcp_cmd_payload[32])
+{
+	struct fcoe_task_context *ctx = task_params->context;
+	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+	u32 io_size, val;
+	bool slow_sgl;
+
+	memset(ctx, 0, sizeof(*(ctx)));
+	slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
+				    sgl_task_params->small_mid_sge);
+	io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
+		   task_params->tx_io_size : task_params->rx_io_size);
+
+	/* Ystorm ctx */
+	y_st_ctx = &ctx->ystorm_st_context;
+	y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+	y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
+	y_st_ctx->task_type = task_params->task_type;
+	memcpy((void *)&y_st_ctx->tx_info_union.fcp_cmd_payload,
+		       fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
+
+	/* Tstorm ctx */
+	t_st_ctx = &ctx->tstorm_st_context;
+	t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
+					FCOE_TASK_DEV_TYPE_TAPE :
+					FCOE_TASK_DEV_TYPE_DISK);
+	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+	val = cpu_to_le32(task_params->cq_rss_number);
+	t_st_ctx->read_only.glbl_q_num = val;
+	t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
+	t_st_ctx->read_only.task_type = task_params->task_type;
+	SET_FIELD(t_st_ctx->read_write.flags,
+		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+	t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
+
+	/* Ustorm ctx */
+	u_ag_ctx = &ctx->ustorm_ag_context;
+	u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+	/* Mstorm buffer for sense/rsp data placement */
+	m_st_ctx = &ctx->mstorm_st_context;
+	val = cpu_to_le32(sense_data_buffer_phys_addr.hi);
+	m_st_ctx->rsp_buf_addr.hi = val;
+	val = cpu_to_le32(sense_data_buffer_phys_addr.lo);
+	m_st_ctx->rsp_buf_addr.lo = val;
+
+	if (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
+		/* Ystorm ctx */
+		y_st_ctx->expect_first_xfer = 1;
+
+		/* Set the amount of super SGEs. Can be up to 4. */
+		SET_FIELD(y_st_ctx->sgl_mode,
+			  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+		init_scsi_sgl_context(&y_st_ctx->sgl_params,
+				      &y_st_ctx->data_desc,
+				      sgl_task_params);
+
+		/* Mstorm ctx */
+		SET_FIELD(m_st_ctx->flags,
+			  MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
+			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+	} else {
+		/* Tstorm ctx */
+		SET_FIELD(t_st_ctx->read_write.flags,
+			  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
+			  (slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
+
+		/* Mstorm ctx */
+		m_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
+		init_scsi_sgl_context(&m_st_ctx->sgl_params,
+				      &m_st_ctx->data_desc,
+				      sgl_task_params);
+	}
+
+	init_common_sqe(task_params, SEND_FCOE_CMD);
+	return 0;
+}
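
All of the new *_fw_funcs init routines share one contract: the caller fills a plain parameter struct, and the routine zeroes and populates the firmware context (and SQE) from it, keeping HSI layout knowledge out of the callers. A toy model of that contract (struct and field names invented for illustration, not the driver's real types):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the driver's parameter/context structs. */
struct task_params {
	uint32_t tx_io_size;
	uint32_t rx_io_size;
	uint16_t itid;
	uint8_t cq_rss_number;
};

struct task_context {
	uint32_t data_to_transfer;
	uint8_t cq;
};

static int init_rw_task(struct task_params *p, struct task_context *ctx)
{
	memset(ctx, 0, sizeof(*ctx));	/* the init routine owns the memset */
	ctx->data_to_transfer = p->tx_io_size ? p->tx_io_size : p->rx_io_size;
	ctx->cq = p->cq_rss_number;
	return 0;
}

int main(void)
{
	struct task_params p = {
		.tx_io_size = 4096,
		.itid = 0x10,
		.cq_rss_number = 2,
	};
	struct task_context ctx;

	init_rw_task(&p, &ctx);
	printf("xfer=%u cq=%u\n", ctx.data_to_transfer, ctx.cq);
	return 0;
}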
+
+int init_initiator_midpath_unsolicited_fcoe_task(
+	struct fcoe_task_params *task_params,
+	struct fcoe_tx_mid_path_params *mid_path_fc_header,
+	struct scsi_sgl_task_params *tx_sgl_task_params,
+	struct scsi_sgl_task_params *rx_sgl_task_params,
+	u8 fw_to_place_fc_header)
+{
+	struct fcoe_task_context *ctx = task_params->context;
+	struct ystorm_fcoe_task_st_ctx *y_st_ctx;
+	struct tstorm_fcoe_task_st_ctx *t_st_ctx;
+	struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+	struct mstorm_fcoe_task_st_ctx *m_st_ctx;
+	u32 val;
+
+	memset(ctx, 0, sizeof(*(ctx)));
+
+	/* Init Ystorm */
+	y_st_ctx = &ctx->ystorm_st_context;
+	init_scsi_sgl_context(&y_st_ctx->sgl_params,
+			      &y_st_ctx->data_desc,
+			      tx_sgl_task_params);
+	SET_FIELD(y_st_ctx->sgl_mode,
+		  YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
+	y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
+	y_st_ctx->task_type = task_params->task_type;
+	memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
+	       mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
+
+	/* Init Mstorm */
+	m_st_ctx = &ctx->mstorm_st_context;
+	init_scsi_sgl_context(&m_st_ctx->sgl_params,
+			      &m_st_ctx->data_desc,
+			      rx_sgl_task_params);
+	SET_FIELD(m_st_ctx->flags,
+		  MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER,
+		  fw_to_place_fc_header);
+	m_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->rx_io_size);
+
+	/* Init Tstorm */
+	t_st_ctx = &ctx->tstorm_st_context;
+	t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
+	val = cpu_to_le32(task_params->cq_rss_number);
+	t_st_ctx->read_only.glbl_q_num = val;
+	t_st_ctx->read_only.task_type = task_params->task_type;
+	SET_FIELD(t_st_ctx->read_write.flags,
+		  FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
+	t_st_ctx->read_write.rx_id = cpu_to_le32(FCOE_RX_ID);
+
+	/* Init Ustorm */
+	u_ag_ctx = &ctx->ustorm_ag_context;
+	u_ag_ctx->global_cq_num = cpu_to_le32(task_params->cq_rss_number);
+
+	/* Init SQE */
+	init_common_sqe(task_params, SEND_FCOE_MIDPATH);
+	task_params->sqe->additional_info_union.burst_length =
+				    tx_sgl_task_params->total_buffer_size;
+	SET_FIELD(task_params->sqe->flags,
+		  FCOE_WQE_NUM_SGES, tx_sgl_task_params->num_sges);
+	SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
+		  SCSI_FAST_SGL);
+
+	return 0;
+}
+
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params)
+{
+	init_common_sqe(task_params, SEND_FCOE_ABTS_REQUEST);
+	return 0;
+}
+
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
+{
+	init_common_sqe(task_params, FCOE_EXCHANGE_CLEANUP);
+	return 0;
+}
+
+int init_initiator_sequence_recovery_fcoe_task(
+	struct fcoe_task_params *task_params, u32 off)
+{
+	init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
+	task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
+	return 0;
+}
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
new file mode 100644
index 0000000..5fd10d9
--- /dev/null
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -0,0 +1,94 @@ 
+/*
+ *  QLogic FCoE Offload Driver
+ *  Copyright (c) 2016 Cavium Inc.
+ *
+ *  This software is available under the terms of the GNU General Public License
+ *  (GPL) Version 2, available from the file COPYING in the main directory of
+ *  this source tree.
+ */
+#ifndef _FCOE_FW_FUNCS_H
+#define _FCOE_FW_FUNCS_H
+#include "drv_scsi_fw_funcs.h"
+#include "qedf_hsi.h"
+#include <linux/qed/qed_if.h>
+
+struct fcoe_task_params {
+	/* Output parameter [set/filled by the HSI function] */
+	struct fcoe_task_context *context;
+
+	/* Output parameter [set/filled by the HSI function] */
+	struct fcoe_wqe *sqe;
+	enum fcoe_task_type task_type;
+	u32 tx_io_size; /* in bytes */
+	u32 rx_io_size; /* in bytes */
+	u32 conn_cid;
+	u16 itid;
+	u8 cq_rss_number;
+
+	 /* Whether it's Tape device or not (0=Disk, 1=Tape) */
+	u8 is_tape_device;
+};
+
+/**
+ * @brief init_initiator_rw_fcoe_task - Initializes FCoE task context for
+ * read/write task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param sgl_task_params - Pointer to SGL task params
+ * @param sense_data_buffer_phys_addr - Physical address of sense data buffer
+ * @param task_retry_id - Retry identification - used only for tape devices
+ * @param fcp_cmd_payload - FCP CMD payload
+ */
+int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
+	struct scsi_sgl_task_params *sgl_task_params,
+	struct regpair sense_data_buffer_phys_addr,
+	u32 task_retry_id,
+	u8 fcp_cmd_payload[32]);
+
+/**
+ * @brief init_initiator_midpath_unsolicited_fcoe_task - Initializes FCoE task
+ * context for midpath/unsolicited task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param mid_path_fc_header - FC header
+ * @param tx_sgl_task_params - Pointer to Tx SGL task params
+ * @param rx_sgl_task_params - Pointer to Rx SGL task params
+ * @param fw_to_place_fc_header - Indication whether the FW should place the
+ * FC header in addition to the data that arrives.
+ */
+int init_initiator_midpath_unsolicited_fcoe_task(
+	struct fcoe_task_params *task_params,
+	struct fcoe_tx_mid_path_params *mid_path_fc_header,
+	struct scsi_sgl_task_params *tx_sgl_task_params,
+	struct scsi_sgl_task_params *rx_sgl_task_params,
+	u8 fw_to_place_fc_header);
+
+/**
+ * @brief init_initiator_abort_fcoe_task - Initializes FCoE task context for
+ * abort task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_abort_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_cleanup_fcoe_task - Initializes FCoE task context for
+ * cleanup task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params);
+
+/**
+ * @brief init_initiator_sequence_recovery_fcoe_task - Initializes FCoE task
+ * context for sequence recovery task types and initializes the fcoe_sqe
+ *
+ * @param task_params - Pointer to task parameters struct
+ * @param desired_offset - The desired offset the task will be re-sent from
+ */
+int init_initiator_sequence_recovery_fcoe_task(
+	struct fcoe_task_params *task_params,
+	u32 desired_offset);
+#endif
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.c b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
new file mode 100644
index 0000000..7332eb4
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.c
@@ -0,0 +1,45 @@ 
+/*
+ *  QLogic FCoE Offload Driver
+ *  Copyright (c) 2016 Cavium Inc.
+ *
+ *  This software is available under the terms of the GNU General Public License
+ *  (GPL) Version 2, available from the file COPYING in the main directory of
+ *  this source tree.
+ */
+#include "drv_scsi_fw_funcs.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+	return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
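
scsi_is_slow_sgl() above only reports a slow SGL when both conditions hold: the SGE count exceeds SCSI_NUM_SGES_SLOW_SGL_THR and there is a small mid-list SGE. A standalone sketch of the decision (the threshold value 8 is an assumption for illustration; the real constant comes from the common storage HSI headers):

#include <stdbool.h>
#include <stdio.h>

#define SCSI_NUM_SGES_SLOW_SGL_THR 8	/* assumed value for illustration */

static bool is_slow_sgl(unsigned short num_sges, bool small_mid_sge)
{
	return num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge;
}

int main(void)
{
	printf("%d\n", is_slow_sgl(16, true));	/* 1: many SGEs + small mid SGE */
	printf("%d\n", is_slow_sgl(16, false));	/* 0: no small mid SGE */
	printf("%d\n", is_slow_sgl(4, true));	/* 0: few SGEs */
	return 0;
}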
+
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+			   struct scsi_cached_sges *ctx_data_desc,
+			   struct scsi_sgl_task_params *sgl_task_params)
+{
+	/* no need to check for sgl_task_params->sgl validity */
+	u8 num_sges_to_init = sgl_task_params->num_sges >
+			      SCSI_NUM_SGES_IN_CACHE ? SCSI_NUM_SGES_IN_CACHE :
+			      sgl_task_params->num_sges;
+	u8 sge_index;
+	u32 val;
+
+	val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+	ctx_sgl_params->sgl_addr.lo = val;
+	val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+	ctx_sgl_params->sgl_addr.hi = val;
+	val = cpu_to_le32(sgl_task_params->total_buffer_size);
+	ctx_sgl_params->sgl_total_length = val;
+	ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+	for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+		ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+		ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+		ctx_data_desc->sge[sge_index].sge_len = val;
+	}
+}
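
init_scsi_sgl_context() always programs the full SGL address/length, but additionally copies at most SCSI_NUM_SGES_IN_CACHE (4) leading SGEs into the context's cached array; presumably the firmware fetches any remainder from the SGL in host memory via sgl_addr. The clamp itself is just a min():

#include <stdio.h>

#define SCSI_NUM_SGES_IN_CACHE 0x4

int main(void)
{
	unsigned int num_sges;

	for (num_sges = 1; num_sges <= 6; num_sges++) {
		unsigned int cached = num_sges > SCSI_NUM_SGES_IN_CACHE ?
				      SCSI_NUM_SGES_IN_CACHE : num_sges;
		printf("num_sges=%u -> %u cached\n", num_sges, cached);
	}
	return 0;
}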
diff --git a/drivers/scsi/qedf/drv_scsi_fw_funcs.h b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
new file mode 100644
index 0000000..e42e6ae
--- /dev/null
+++ b/drivers/scsi/qedf/drv_scsi_fw_funcs.h
@@ -0,0 +1,86 @@ 
+/*
+ *  QLogic FCoE Offload Driver
+ *  Copyright (c) 2016 Cavium Inc.
+ *
+ *  This software is available under the terms of the GNU General Public License
+ *  (GPL) Version 2, available from the file COPYING in the main directory of
+ *  this source tree.
+ */
+#ifndef _SCSI_FW_FUNCS_H
+#define _SCSI_FW_FUNCS_H
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/fcoe_common.h>
+
+struct scsi_sgl_task_params {
+	struct scsi_sge *sgl;
+	struct regpair sgl_phys_addr;
+	u32 total_buffer_size;
+	u16 num_sges;
+
+	 /* True if the SGL contains a small (< 4KB) SGE in the middle (not
+	  * the 1st or last SGE) -> relevant for Tx only
+	  */
+	bool small_mid_sge;
+};
+
+struct scsi_dif_task_params {
+	u32 initial_ref_tag;
+	bool initial_ref_tag_is_valid;
+	u16 application_tag;
+	u16 application_tag_mask;
+	u16 dif_block_size_log;
+	bool dif_on_network;
+	bool dif_on_host;
+	u8 host_guard_type;
+	u8 protection_type;
+	u8 ref_tag_mask;
+	bool crc_seed;
+
+	 /* Enable Connection error upon DIF error (segments with DIF errors are
+	  * dropped)
+	  */
+	bool tx_dif_conn_err_en;
+	bool ignore_app_tag;
+	bool keep_ref_tag_const;
+	bool validate_guard;
+	bool validate_app_tag;
+	bool validate_ref_tag;
+	bool forward_guard;
+	bool forward_app_tag;
+	bool forward_ref_tag;
+	bool forward_app_tag_with_mask;
+	bool forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+	 /* for cdb_size > default CDB size (extended CDB > 16 bytes) ->
+	  * pointer to the CDB buffer SGE
+	  */
+	struct scsi_sge extended_cdb_sge;
+
+	/* Physical address of the 256B sense data buffer */
+	struct regpair sense_data_buffer_phys_addr;
+};
+
+/**
+ * @brief scsi_is_slow_sgl - checks for slow SGL
+ *
+ * @param num_sges - number of sges in SGL
+ * @param small_mid_sge - True if the SGL contains an SGE which is smaller than
+ * 4KB and is not the 1st or last SGE in the SGL
+ */
+bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge);
+
+/**
+ * @brief init_scsi_sgl_context - initializes SGL task context
+ *
+ * @param sgl_params - SGL context parameters to initialize (output parameter)
+ * @param ctx_data_desc - context struct containing the SGEs array to set
+ * (output parameter)
+ * @param sgl_task_params - SGL parameters (input)
+ */
+void init_scsi_sgl_context(struct scsi_sgl_params *sgl_params,
+	struct scsi_cached_sges *ctx_data_desc,
+	struct scsi_sgl_task_params *sgl_task_params);
+#endif
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index 96346a1..40aeb6b 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -26,6 +26,7 @@ 
 #include <linux/qed/qed_ll2_if.h>
 #include "qedf_version.h"
 #include "qedf_dbg.h"
+#include "drv_fcoe_fw_funcs.h"
 
 /* Helpers to extract upper and lower 32-bits of pointer */
 #define U64_HI(val) ((u32)(((u64)(val)) >> 32))
@@ -59,19 +60,17 @@ 
 #define UPSTREAM_KEEP		1
 
 struct qedf_mp_req {
-	uint8_t tm_flags;
-
 	uint32_t req_len;
 	void *req_buf;
 	dma_addr_t req_buf_dma;
-	struct fcoe_sge *mp_req_bd;
+	struct scsi_sge *mp_req_bd;
 	dma_addr_t mp_req_bd_dma;
 	struct fc_frame_header req_fc_hdr;
 
 	uint32_t resp_len;
 	void *resp_buf;
 	dma_addr_t resp_buf_dma;
-	struct fcoe_sge *mp_resp_bd;
+	struct scsi_sge *mp_resp_bd;
 	dma_addr_t mp_resp_bd_dma;
 	struct fc_frame_header resp_fc_hdr;
 };
@@ -119,6 +118,7 @@  struct qedf_ioreq {
 #define QEDF_CMD_IN_CLEANUP		0x2
 #define QEDF_CMD_SRR_SENT		0x3
 	u8 io_req_flags;
+	uint8_t tm_flags;
 	struct qedf_rport *fcport;
 	unsigned long flags;
 	enum qedf_ioreq_event event;
@@ -130,6 +130,8 @@  struct qedf_ioreq {
 	struct completion tm_done;
 	struct completion abts_done;
 	struct fcoe_task_context *task;
+	struct fcoe_task_params *task_params;
+	struct scsi_sgl_task_params *sgl_task_params;
 	int idx;
 /*
  * Need to allocate enough room for both sense data and FCP response data
@@ -199,8 +201,8 @@  struct qedf_rport {
 	dma_addr_t sq_pbl_dma;
 	u32 sq_pbl_size;
 	u32 sid;
-#define	QEDF_RPORT_TYPE_DISK		1
-#define	QEDF_RPORT_TYPE_TAPE		2
+#define	QEDF_RPORT_TYPE_DISK		0
+#define	QEDF_RPORT_TYPE_TAPE		1
 	uint dev_type; /* Disk or tape */
 	struct list_head peers;
 };
@@ -391,7 +393,7 @@  struct qedf_ctx {
 
 struct io_bdt {
 	struct qedf_ioreq *io_req;
-	struct fcoe_sge *bd_tbl;
+	struct scsi_sge *bd_tbl;
 	dma_addr_t bd_tbl_dma;
 	u16 bd_valid;
 };
@@ -400,7 +402,7 @@  struct qedf_cmd_mgr {
 	struct qedf_ctx *qedf;
 	u16 idx;
 	struct io_bdt **io_bdt_pool;
-#define FCOE_PARAMS_NUM_TASKS		4096
+#define FCOE_PARAMS_NUM_TASKS		2048
 	struct qedf_ioreq cmds[FCOE_PARAMS_NUM_TASKS];
 	spinlock_t lock;
 	atomic_t free_list_cnt;
@@ -465,9 +467,8 @@  extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
 	unsigned int timer_msec);
 extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
 extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
-	struct fcoe_task_context *task_ctx);
-extern void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid,
-	u32 ptu_invalidate, enum fcoe_task_type req_type, u32 offset);
+	struct fcoe_task_context *task_ctx, struct fcoe_wqe *wqe);
+extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
 extern void qedf_ring_doorbell(struct qedf_rport *fcport);
 extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	struct qedf_ioreq *els_req);
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 59f3e5c..9a5c149 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -25,6 +25,9 @@  static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
 	uint16_t xid;
 	uint32_t start_time = jiffies / HZ;
 	uint32_t current_time;
+	struct fcoe_wqe *sqe;
+	unsigned long flags;
+	u16 sqe_idx;
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");
 
@@ -113,20 +116,25 @@  static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
 	/* Obtain exchange id */
 	xid = els_req->xid;
 
+	spin_lock_irqsave(&fcport->rport_lock, flags);
+
+	sqe_idx = qedf_get_sqe_idx(fcport);
+	sqe = &fcport->sq[sqe_idx];
+	memset(sqe, 0, sizeof(struct fcoe_wqe));
+
 	/* Initialize task context for this IO request */
 	task = qedf_get_task_mem(&qedf->tasks, xid);
-	qedf_init_mp_task(els_req, task);
+	qedf_init_mp_task(els_req, task, sqe);
 
 	/* Put timer on original I/O request */
 	if (timer_msec)
 		qedf_cmd_timer_set(qedf, els_req, timer_msec);
 
-	qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
-
 	/* Ring doorbell */
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
 		   "req\n");
 	qedf_ring_doorbell(fcport);
+	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 els_err:
 	return rc;
 }
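
With qedf_add_to_sq() gone, the ELS path now spells out the SQ handling: take the rport lock, claim the next SQE slot, zero it, let the task-init helper fill it, ring the doorbell, and only then drop the lock. A standalone sketch of that sequence with a toy ring (assuming qedf_get_sqe_idx() is essentially a wrapping producer index, which this patch does not show):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define SQ_DEPTH 16

struct wqe { int task_id; };

static struct wqe sq[SQ_DEPTH];
static unsigned int sq_prod;
static pthread_mutex_t sq_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int get_sqe_idx(void)
{
	/* Wrapping producer index; called only under sq_lock. */
	return sq_prod++ % SQ_DEPTH;
}

static void submit(int task_id)
{
	struct wqe *sqe;

	pthread_mutex_lock(&sq_lock);
	sqe = &sq[get_sqe_idx()];
	memset(sqe, 0, sizeof(*sqe));
	sqe->task_id = task_id;	/* the task-init helper fills the WQE here */
	/* ring_doorbell() would go here, still under the lock */
	pthread_mutex_unlock(&sq_lock);
}

int main(void)
{
	submit(0x11);
	printf("sqe[0].task_id = 0x%x\n", sq[0].task_id);
	return 0;
}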
@@ -604,6 +612,8 @@  static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
 	struct qedf_rport *fcport;
 	unsigned long flags;
 	struct qedf_els_cb_arg *cb_arg;
+	struct fcoe_wqe *sqe;
+	u16 sqe_idx;
 
 	fcport = orig_io_req->fcport;
 
@@ -631,8 +641,13 @@  static void qedf_initiate_seq_cleanup(struct qedf_ioreq *orig_io_req,
 
 	spin_lock_irqsave(&fcport->rport_lock, flags);
 
-	qedf_add_to_sq(fcport, orig_io_req->xid, 0,
-	    FCOE_TASK_TYPE_SEQUENCE_CLEANUP, offset);
+	sqe_idx = qedf_get_sqe_idx(fcport);
+	sqe = &fcport->sq[sqe_idx];
+	memset(sqe, 0, sizeof(struct fcoe_wqe));
+	orig_io_req->task_params->sqe = sqe;
+
+	init_initiator_sequence_recovery_fcoe_task(orig_io_req->task_params,
+	    offset);
 	qedf_ring_doorbell(fcport);
 
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ee0dcf9..34ef066 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -96,7 +96,7 @@  void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
 	if (!cmgr->io_bdt_pool)
 		goto free_cmd_pool;
 
-	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge);
+	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
 	for (i = 0; i < num_ios; i++) {
 		bdt_info = cmgr->io_bdt_pool[i];
 		if (bdt_info->bd_tbl) {
@@ -119,6 +119,8 @@  void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
 
 	for (i = 0; i < num_ios; i++) {
 		io_req = &cmgr->cmds[i];
+		kfree(io_req->sgl_task_params);
+		kfree(io_req->task_params);
 		/* Make sure we free per command sense buffer */
 		if (io_req->sense_buffer)
 			dma_free_coherent(&qedf->pdev->dev,
@@ -178,7 +180,7 @@  struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
 	spin_lock_init(&cmgr->lock);
 
 	/*
-	 * Initialize list of qedf_ioreq.
+	 * Initialize I/O request fields.
 	 */
 	xid = QEDF_MIN_XID;
 
@@ -196,6 +198,29 @@  struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
 		    GFP_KERNEL);
 		if (!io_req->sense_buffer)
 			goto mem_err;
+
+		/* Allocate task parameters to pass to f/w init functions */
+		io_req->task_params = kzalloc(sizeof(struct fcoe_task_params),
+		    GFP_KERNEL);
+		if (!io_req->task_params) {
+			QEDF_ERR(&(qedf->dbg_ctx),
+				 "Failed to allocate task_params for xid=0x%x\n",
+				 i);
+			goto mem_err;
+		}
+
+		/*
+		 * Allocate scatter/gather list info to pass to f/w init
+		 * functions.
+		 */
+		io_req->sgl_task_params = kzalloc(
+		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
+		if (!io_req->sgl_task_params) {
+			QEDF_ERR(&(qedf->dbg_ctx),
+				 "Failed to allocate sgl_task_params for xid=0x%x\n",
+				 i);
+			goto mem_err;
+		}
 	}
 
 	/* Allocate pool of io_bdts - one for each qedf_ioreq */
@@ -211,8 +236,8 @@  struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
 		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
 		    GFP_KERNEL);
 		if (!cmgr->io_bdt_pool[i]) {
-			QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
-				   "io_bdt_pool[%d].\n", i);
+			QEDF_WARN(&(qedf->dbg_ctx),
+				  "Failed to alloc io_bdt_pool[%d].\n", i);
 			goto mem_err;
 		}
 	}
@@ -220,11 +245,11 @@  struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
 	for (i = 0; i < num_ios; i++) {
 		bdt_info = cmgr->io_bdt_pool[i];
 		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
-		    QEDF_MAX_BDS_PER_CMD * sizeof(struct fcoe_sge),
+		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
 		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
 		if (!bdt_info->bd_tbl) {
-			QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc "
-				   "bdt_tbl[%d].\n", i);
+			QEDF_WARN(&(qedf->dbg_ctx),
+				  "Failed to alloc bdt_tbl[%d].\n", i);
 			goto mem_err;
 		}
 	}
@@ -318,6 +343,7 @@  struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
 	}
 	bd_tbl->io_req = io_req;
 	io_req->cmd_type = cmd_type;
+	io_req->tm_flags = 0;
 
 	/* Reset sequence offset data */
 	io_req->rx_buf_off = 0;
@@ -336,10 +362,9 @@  static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
 {
 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
 	struct qedf_ctx *qedf = io_req->fcport->qedf;
-	uint64_t sz = sizeof(struct fcoe_sge);
+	uint64_t sz = sizeof(struct scsi_sge);
 
 	/* clear tm flags */
-	mp_req->tm_flags = 0;
 	if (mp_req->mp_req_bd) {
 		dma_free_coherent(&qedf->pdev->dev, sz,
 		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
@@ -387,7 +412,7 @@  void qedf_release_cmd(struct kref *ref)
 static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
 	int bd_index)
 {
-	struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
 	int frag_size, sg_frags;
 
 	sg_frags = 0;
@@ -398,7 +423,7 @@  static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
 			frag_size = sg_len;
 		bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
 		bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
-		bd[bd_index + sg_frags].size = (uint16_t)frag_size;
+		bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
 
 		addr += (u64)frag_size;
 		sg_frags++;
@@ -413,7 +438,7 @@  static int qedf_map_sg(struct qedf_ioreq *io_req)
 	struct Scsi_Host *host = sc->device->host;
 	struct fc_lport *lport = shost_priv(host);
 	struct qedf_ctx *qedf = lport_priv(lport);
-	struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
 	struct scatterlist *sg;
 	int byte_count = 0;
 	int sg_count = 0;
@@ -439,7 +464,7 @@  static int qedf_map_sg(struct qedf_ioreq *io_req)
 
 		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
 		bd[bd_count].sge_addr.hi = (addr >> 32);
-		bd[bd_count].size = (u16)sg_len;
+		bd[bd_count].sge_len = (u16)sg_len;
 
 		return ++bd_count;
 	}
@@ -480,7 +505,7 @@  static int qedf_map_sg(struct qedf_ioreq *io_req)
 			sg_frags = 1;
 			bd[bd_count].sge_addr.lo = U64_LO(addr);
 			bd[bd_count].sge_addr.hi  = U64_HI(addr);
-			bd[bd_count].size = (uint16_t)sg_len;
+			bd[bd_count].sge_len = (uint16_t)sg_len;
 		}
 
 		bd_count += sg_frags;
@@ -498,7 +523,7 @@  static int qedf_map_sg(struct qedf_ioreq *io_req)
 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
 {
 	struct scsi_cmnd *sc = io_req->sc_cmd;
-	struct fcoe_sge *bd = io_req->bd_tbl->bd_tbl;
+	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
 	int bd_count;
 
 	if (scsi_sg_count(sc)) {
@@ -508,7 +533,7 @@  static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
 	} else {
 		bd_count = 0;
 		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
-		bd[0].size = 0;
+		bd[0].sge_len = 0;
 	}
 	io_req->bd_tbl->bd_valid = bd_count;
 
@@ -529,430 +554,224 @@  static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
 
 	/* 4 bytes: flag info */
 	fcp_cmnd->fc_pri_ta = 0;
-	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
+	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
 	fcp_cmnd->fc_flags = io_req->io_req_flags;
 	fcp_cmnd->fc_cmdref = 0;
 
 	/* Populate data direction */
-	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
-		fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
-	else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD)
 		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+	else {
+		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
+			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
+		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
+			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
+	}
 
 	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
 
 	/* 16 bytes: CDB information */
-	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
+	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
+		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
 
 	/* 4 bytes: FCP data length */
 	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
-
 }
 
 static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
-	struct qedf_ioreq *io_req, u32 *ptu_invalidate,
-	struct fcoe_task_context *task_ctx)
+	struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
+	struct fcoe_wqe *sqe)
 {
 	enum fcoe_task_type task_type;
 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
 	struct io_bdt *bd_tbl = io_req->bd_tbl;
-	union fcoe_data_desc_ctx *data_desc;
-	u32 *fcp_cmnd;
+	u8 fcp_cmnd[32];
 	u32 tmp_fcp_cmnd[8];
-	int cnt, i;
-	int bd_count;
+	int bd_count = 0;
 	struct qedf_ctx *qedf = fcport->qedf;
 	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
-	u8 tmp_sgl_mode = 0;
-	u8 mst_sgl_mode = 0;
+	struct regpair sense_data_buffer_phys_addr;
+	u32 tx_io_size = 0;
+	u32 rx_io_size = 0;
+	int i, cnt;
 
-	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+	/* Note init_initiator_rw_fcoe_task memsets the task context */
 	io_req->task = task_ctx;
+	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
+	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 
-	if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
-		task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
-	else
+	/* Set task type based on the DMA direction of the command */
+	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
 		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
-
-	/* Y Storm context */
-	task_ctx->ystorm_st_context.expect_first_xfer = 1;
-	task_ctx->ystorm_st_context.data_2_trns_rem = io_req->data_xfer_len;
-	/* Check if this is required */
-	task_ctx->ystorm_st_context.ox_id = io_req->xid;
-	task_ctx->ystorm_st_context.task_rety_identifier =
-	    io_req->task_retry_identifier;
-
-	/* T Storm ag context */
-	SET_FIELD(task_ctx->tstorm_ag_context.flags0,
-	    TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, PROTOCOLID_FCOE);
-	task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
-
-	/* T Storm st context */
-	SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
-	    FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
-	    1);
-	task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
-
-	task_ctx->tstorm_st_context.read_only.dev_type =
-	    FCOE_TASK_DEV_TYPE_DISK;
-	task_ctx->tstorm_st_context.read_only.conf_supported = 0;
-	task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
-
-	/* Completion queue for response. */
-	task_ctx->tstorm_st_context.read_only.glbl_q_num = cq_idx;
-	task_ctx->tstorm_st_context.read_only.fcp_cmd_trns_size =
-	    io_req->data_xfer_len;
-	task_ctx->tstorm_st_context.read_write.e_d_tov_exp_timeout_val =
-	    lport->e_d_tov;
-
-	task_ctx->ustorm_ag_context.global_cq_num = cq_idx;
-	io_req->fp_idx = cq_idx;
-
-	bd_count = bd_tbl->bd_valid;
-	if (task_type == FCOE_TASK_TYPE_WRITE_INITIATOR) {
-		/* Setup WRITE task */
-		struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
-		task_ctx->ystorm_st_context.task_type =
-		    FCOE_TASK_TYPE_WRITE_INITIATOR;
-		data_desc = &task_ctx->ystorm_st_context.data_desc;
-
-		if (io_req->use_slowpath) {
-			SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
-			    YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
-			    FCOE_SLOW_SGL);
-			data_desc->slow.base_sgl_addr.lo =
-			    U64_LO(bd_tbl->bd_tbl_dma);
-			data_desc->slow.base_sgl_addr.hi =
-			    U64_HI(bd_tbl->bd_tbl_dma);
-			data_desc->slow.remainder_num_sges = bd_count;
-			data_desc->slow.curr_sge_off = 0;
-			data_desc->slow.curr_sgl_index = 0;
-			qedf->slow_sge_ios++;
-			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
-		} else {
-			SET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
-			    YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
-			    (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
-			    FCOE_MUL_FAST_SGES);
-
-			if (bd_count == 1) {
-				data_desc->single_sge.sge_addr.lo =
-				    fcoe_bd_tbl->sge_addr.lo;
-				data_desc->single_sge.sge_addr.hi =
-				    fcoe_bd_tbl->sge_addr.hi;
-				data_desc->single_sge.size =
-				    fcoe_bd_tbl->size;
-				data_desc->single_sge.is_valid_sge = 0;
-				qedf->single_sge_ios++;
-				io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
-			} else {
-				data_desc->fast.sgl_start_addr.lo =
-				    U64_LO(bd_tbl->bd_tbl_dma);
-				data_desc->fast.sgl_start_addr.hi =
-				    U64_HI(bd_tbl->bd_tbl_dma);
-				data_desc->fast.sgl_byte_offset =
-				    data_desc->fast.sgl_start_addr.lo &
-				    (QEDF_PAGE_SIZE - 1);
-				if (data_desc->fast.sgl_byte_offset > 0)
-					QEDF_ERR(&(qedf->dbg_ctx),
-					    "byte_offset=%u for xid=0x%x.\n",
-					    io_req->xid,
-					    data_desc->fast.sgl_byte_offset);
-				data_desc->fast.task_reuse_cnt =
-				    io_req->reuse_count;
-				io_req->reuse_count++;
-				if (io_req->reuse_count == QEDF_MAX_REUSE) {
-					*ptu_invalidate = 1;
-					io_req->reuse_count = 0;
-				}
-				qedf->fast_sge_ios++;
-				io_req->sge_type = QEDF_IOREQ_FAST_SGE;
-			}
-		}
-
-		/* T Storm context */
-		task_ctx->tstorm_st_context.read_only.task_type =
-		    FCOE_TASK_TYPE_WRITE_INITIATOR;
-
-		/* M Storm context */
-		tmp_sgl_mode = GET_FIELD(task_ctx->ystorm_st_context.sgl_mode,
-		    YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE);
-		SET_FIELD(task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
-		    FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE,
-		    tmp_sgl_mode);
-
 	} else {
-		/* Setup READ task */
-
-		/* M Storm context */
-		struct fcoe_sge *fcoe_bd_tbl = bd_tbl->bd_tbl;
-
-		data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
-		task_ctx->mstorm_st_context.fp.data_2_trns_rem =
-		    io_req->data_xfer_len;
-
-		if (io_req->use_slowpath) {
-			SET_FIELD(
-			    task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
-			    FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
-			    FCOE_SLOW_SGL);
-			data_desc->slow.base_sgl_addr.lo =
-			    U64_LO(bd_tbl->bd_tbl_dma);
-			data_desc->slow.base_sgl_addr.hi =
-			    U64_HI(bd_tbl->bd_tbl_dma);
-			data_desc->slow.remainder_num_sges =
-			    bd_count;
-			data_desc->slow.curr_sge_off = 0;
-			data_desc->slow.curr_sgl_index = 0;
-			qedf->slow_sge_ios++;
-			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
+			tx_io_size = io_req->data_xfer_len;
 		} else {
-			SET_FIELD(
-			    task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
-			    FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE,
-			    (bd_count <= 4) ? (enum fcoe_sgl_mode)bd_count :
-			    FCOE_MUL_FAST_SGES);
-
-			if (bd_count == 1) {
-				data_desc->single_sge.sge_addr.lo =
-				    fcoe_bd_tbl->sge_addr.lo;
-				data_desc->single_sge.sge_addr.hi =
-				    fcoe_bd_tbl->sge_addr.hi;
-				data_desc->single_sge.size =
-				    fcoe_bd_tbl->size;
-				data_desc->single_sge.is_valid_sge = 0;
-				qedf->single_sge_ios++;
-				io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
-			} else {
-				data_desc->fast.sgl_start_addr.lo =
-				    U64_LO(bd_tbl->bd_tbl_dma);
-				data_desc->fast.sgl_start_addr.hi =
-				    U64_HI(bd_tbl->bd_tbl_dma);
-				data_desc->fast.sgl_byte_offset = 0;
-				data_desc->fast.task_reuse_cnt =
-				    io_req->reuse_count;
-				io_req->reuse_count++;
-				if (io_req->reuse_count == QEDF_MAX_REUSE) {
-					*ptu_invalidate = 1;
-					io_req->reuse_count = 0;
-				}
-				qedf->fast_sge_ios++;
-				io_req->sge_type = QEDF_IOREQ_FAST_SGE;
-			}
+			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
+			rx_io_size = io_req->data_xfer_len;
 		}
-
-		/* Y Storm context */
-		task_ctx->ystorm_st_context.expect_first_xfer = 0;
-		task_ctx->ystorm_st_context.task_type =
-		    FCOE_TASK_TYPE_READ_INITIATOR;
-
-		/* T Storm context */
-		task_ctx->tstorm_st_context.read_only.task_type =
-		    FCOE_TASK_TYPE_READ_INITIATOR;
-		mst_sgl_mode = GET_FIELD(
-		    task_ctx->mstorm_st_context.non_fp.tx_rx_sgl_mode,
-		    FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE);
-		SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
-		    FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE,
-		    mst_sgl_mode);
 	}
 
+	/* Setup the fields for fcoe_task_params */
+	io_req->task_params->context = task_ctx;
+	io_req->task_params->sqe = sqe;
+	io_req->task_params->task_type = task_type;
+	io_req->task_params->tx_io_size = tx_io_size;
+	io_req->task_params->rx_io_size = rx_io_size;
+	io_req->task_params->conn_cid = fcport->fw_cid;
+	io_req->task_params->itid = io_req->xid;
+	io_req->task_params->cq_rss_number = cq_idx;
+	io_req->task_params->is_tape_device = fcport->dev_type;
+
+	/* Fill in information for scatter/gather list */
+	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
+		bd_count = bd_tbl->bd_valid;
+		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
+		io_req->sgl_task_params->sgl_phys_addr.lo =
+			U64_LO(bd_tbl->bd_tbl_dma);
+		io_req->sgl_task_params->sgl_phys_addr.hi =
+			U64_HI(bd_tbl->bd_tbl_dma);
+		io_req->sgl_task_params->num_sges = bd_count;
+		io_req->sgl_task_params->total_buffer_size =
+		    scsi_bufflen(io_req->sc_cmd);
+		io_req->sgl_task_params->small_mid_sge =
+			io_req->use_slowpath;
+	}
+
+	/* Fill in physical address of sense buffer */
+	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
+	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
+
 	/* fill FCP_CMND IU */
-	fcp_cmnd = (u32 *)task_ctx->ystorm_st_context.tx_info_union.fcp_cmd_payload.opaque;
-	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
+	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
 
 	/* Swap fcp_cmnd since FC is big endian */
 	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
-
 	for (i = 0; i < cnt; i++) {
-		*fcp_cmnd = cpu_to_be32(tmp_fcp_cmnd[i]);
-		fcp_cmnd++;
+		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
 	}
+	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
+
+	init_initiator_rw_fcoe_task(io_req->task_params,
+	    io_req->sgl_task_params, sense_data_buffer_phys_addr,
+	    io_req->task_retry_identifier, fcp_cmnd);
 
-	/* M Storm context - Sense buffer */
-	task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
-		U64_LO(io_req->sense_buffer_dma);
-	task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
-		U64_HI(io_req->sense_buffer_dma);
+	/* Increment SGL type counters */
+	if (bd_count == 1) {
+		qedf->single_sge_ios++;
+		io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
+	} else if (io_req->use_slowpath) {
+		qedf->slow_sge_ios++;
+		io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
+	} else {
+		qedf->fast_sge_ios++;
+		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
+	}
 }
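
The rewritten qedf_init_task() builds the FCP_CMND IU in CPU byte order and then converts it to FC wire order one 32-bit word at a time before copying it into the task context. The same swap in standalone form (htonl() plays the role of cpu_to_be32() on the wire path):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t iu[8];	/* 32-byte FCP_CMND, as in the driver */
	size_t i;

	memset(iu, 0, sizeof(iu));
	iu[0] = 0x12345678;

	/* FC is big endian: swap each dword before it goes on the wire. */
	for (i = 0; i < sizeof(iu) / sizeof(iu[0]); i++)
		iu[i] = htonl(iu[i]);

	printf("first dword on wire: 0x%08x\n", iu[0]);
	return 0;
}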
 
 void qedf_init_mp_task(struct qedf_ioreq *io_req,
-	struct fcoe_task_context *task_ctx)
+	struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
 {
 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
 	struct qedf_rport *fcport = io_req->fcport;
 	struct qedf_ctx *qedf = io_req->fcport->qedf;
 	struct fc_frame_header *fc_hdr;
-	enum fcoe_task_type task_type = 0;
-	union fcoe_data_desc_ctx *data_desc;
+	struct fcoe_tx_mid_path_params task_fc_hdr;
+	/* DMA memory info for request buffer */
+	struct scsi_sgl_task_params tx_sgl_task_params;
+	/* DMA memory info for response buffer */
+	struct scsi_sgl_task_params rx_sgl_task_params;
 
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Initializing MP task "
-		   "for cmd_type = %d\n", io_req->cmd_type);
+	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+		  "Initializing MP task for cmd_type=%d\n",
+		  io_req->cmd_type);
 
 	qedf->control_requests++;
 
-	/* Obtain task_type */
-	if ((io_req->cmd_type == QEDF_TASK_MGMT_CMD) ||
-	    (io_req->cmd_type == QEDF_ELS)) {
-		task_type = FCOE_TASK_TYPE_MIDPATH;
-	} else if (io_req->cmd_type == QEDF_ABTS) {
-		task_type = FCOE_TASK_TYPE_ABTS;
-	}
-
+	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
+	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
 	memset(task_ctx, 0, sizeof(struct fcoe_task_context));
+	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
 
 	/* Setup the task from io_req for easy reference */
 	io_req->task = task_ctx;
 
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "task type = %d\n",
-		   task_type);
-
-	/* YSTORM only */
-	{
-		/* Initialize YSTORM task context */
-		struct fcoe_tx_mid_path_params *task_fc_hdr =
-		    &task_ctx->ystorm_st_context.tx_info_union.tx_params.mid_path;
-		memset(task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
-		task_ctx->ystorm_st_context.task_rety_identifier =
-		    io_req->task_retry_identifier;
-
-		/* Init SGL parameters */
-		if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
-		    (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
-			data_desc = &task_ctx->ystorm_st_context.data_desc;
-			data_desc->slow.base_sgl_addr.lo =
-			    U64_LO(mp_req->mp_req_bd_dma);
-			data_desc->slow.base_sgl_addr.hi =
-			    U64_HI(mp_req->mp_req_bd_dma);
-			data_desc->slow.remainder_num_sges = 1;
-			data_desc->slow.curr_sge_off = 0;
-			data_desc->slow.curr_sgl_index = 0;
-		}
-
-		fc_hdr = &(mp_req->req_fc_hdr);
-		if (task_type == FCOE_TASK_TYPE_MIDPATH) {
-			fc_hdr->fh_ox_id = io_req->xid;
-			fc_hdr->fh_rx_id = htons(0xffff);
-		} else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
-			fc_hdr->fh_rx_id = io_req->xid;
-		}
+	/* Setup the fields for fcoe_task_params */
+	io_req->task_params->context = task_ctx;
+	io_req->task_params->sqe = sqe;
+	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
+	io_req->task_params->tx_io_size = io_req->data_xfer_len;
+	/* rx_io_size tells the f/w how large a response buffer we have */
+	io_req->task_params->rx_io_size = PAGE_SIZE;
+	io_req->task_params->conn_cid = fcport->fw_cid;
+	io_req->task_params->itid = io_req->xid;
+	/* Return middle path commands on CQ 0 */
+	io_req->task_params->cq_rss_number = 0;
+	io_req->task_params->is_tape_device = fcport->dev_type;
+
+	fc_hdr = &(mp_req->req_fc_hdr);
+	/* Set OX_ID and RX_ID based on driver task id */
+	fc_hdr->fh_ox_id = io_req->xid;
+	fc_hdr->fh_rx_id = htons(0xffff);
+
+	/* Set up FC header information */
+	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
+	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
+	task_fc_hdr.type = fc_hdr->fh_type;
+	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
+	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
+	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
+	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
+
+	/* Set up s/g list parameters for request buffer */
+	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
+	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
+	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
+	tx_sgl_task_params.num_sges = 1;
+	/* Transmit buffer size is the task's full data transfer length */
+	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
+	tx_sgl_task_params.small_mid_sge = 0;
+
+	/* Set up s/g list parameters for response buffer */
+	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
+	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
+	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
+	rx_sgl_task_params.num_sges = 1;
+	/* Response buffer is a single page, so cap the size at PAGE_SIZE */
+	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
+	rx_sgl_task_params.small_mid_sge = 0;
 
-		/* Fill FC Header into middle path buffer */
-		task_fc_hdr->parameter = fc_hdr->fh_parm_offset;
-		task_fc_hdr->r_ctl = fc_hdr->fh_r_ctl;
-		task_fc_hdr->type = fc_hdr->fh_type;
-		task_fc_hdr->cs_ctl = fc_hdr->fh_cs_ctl;
-		task_fc_hdr->df_ctl = fc_hdr->fh_df_ctl;
-		task_fc_hdr->rx_id = fc_hdr->fh_rx_id;
-		task_fc_hdr->ox_id = fc_hdr->fh_ox_id;
-
-		task_ctx->ystorm_st_context.data_2_trns_rem =
-		    io_req->data_xfer_len;
-		task_ctx->ystorm_st_context.task_type = task_type;
-	}
-
-	/* TSTORM ONLY */
-	{
-		task_ctx->tstorm_ag_context.icid = (u16)fcport->fw_cid;
-		task_ctx->tstorm_st_context.read_only.cid = fcport->fw_cid;
-		/* Always send middle-path repsonses on CQ #0 */
-		task_ctx->tstorm_st_context.read_only.glbl_q_num = 0;
-		io_req->fp_idx = 0;
-		SET_FIELD(task_ctx->tstorm_ag_context.flags0,
-		    TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE,
-		    PROTOCOLID_FCOE);
-		task_ctx->tstorm_st_context.read_only.task_type = task_type;
-		SET_FIELD(task_ctx->tstorm_st_context.read_write.flags,
-		    FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME,
-		    1);
-		task_ctx->tstorm_st_context.read_write.rx_id = 0xffff;
-	}
-
-	/* MSTORM only */
-	{
-		if (task_type == FCOE_TASK_TYPE_MIDPATH) {
-			/* Initialize task context */
-			data_desc = &task_ctx->mstorm_st_context.fp.data_desc;
-
-			/* Set cache sges address and length */
-			data_desc->slow.base_sgl_addr.lo =
-			    U64_LO(mp_req->mp_resp_bd_dma);
-			data_desc->slow.base_sgl_addr.hi =
-			    U64_HI(mp_req->mp_resp_bd_dma);
-			data_desc->slow.remainder_num_sges = 1;
-			data_desc->slow.curr_sge_off = 0;
-			data_desc->slow.curr_sgl_index = 0;
 
-			/*
-			 * Also need to fil in non-fastpath response address
-			 * for middle path commands.
-			 */
-			task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.lo =
-			    U64_LO(mp_req->mp_resp_bd_dma);
-			task_ctx->mstorm_st_context.non_fp.rsp_buf_addr.hi =
-			    U64_HI(mp_req->mp_resp_bd_dma);
-		}
-	}
+	/*
+	 * Pass 0 for the last argument, since the previous code did not
+	 * request the FC header information.
+	 */
+	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
+	    &task_fc_hdr, &tx_sgl_task_params, &rx_sgl_task_params, 0);
 
-	/* USTORM ONLY */
-	{
-		task_ctx->ustorm_ag_context.global_cq_num = 0;
-	}
+	/* Midpath requests always consume 1 SGE */
+	qedf->single_sge_ios++;
 
-	/* I/O stats. Middle path commands always use slow SGEs */
-	qedf->slow_sge_ios++;
-	io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
 }
 
-void qedf_add_to_sq(struct qedf_rport *fcport, u16 xid, u32 ptu_invalidate,
-	enum fcoe_task_type req_type, u32 offset)
+/* Caller must hold fcport->rport_lock */
+u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
 {
-	struct fcoe_wqe *sqe;
 	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
+	u16 rval;
 
-	sqe = &fcport->sq[fcport->sq_prod_idx];
+	rval = fcport->sq_prod_idx;
 
+	/* Adjust ring index */
 	fcport->sq_prod_idx++;
 	fcport->fw_sq_prod_idx++;
 	if (fcport->sq_prod_idx == total_sqe)
 		fcport->sq_prod_idx = 0;
 
-	switch (req_type) {
-	case FCOE_TASK_TYPE_WRITE_INITIATOR:
-	case FCOE_TASK_TYPE_READ_INITIATOR:
-		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_CMD);
-		if (ptu_invalidate)
-			SET_FIELD(sqe->flags, FCOE_WQE_INVALIDATE_PTU, 1);
-		break;
-	case FCOE_TASK_TYPE_MIDPATH:
-		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, SEND_FCOE_MIDPATH);
-		break;
-	case FCOE_TASK_TYPE_ABTS:
-		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
-		    SEND_FCOE_ABTS_REQUEST);
-		break;
-	case FCOE_TASK_TYPE_EXCHANGE_CLEANUP:
-		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
-		     FCOE_EXCHANGE_CLEANUP);
-		break;
-	case FCOE_TASK_TYPE_SEQUENCE_CLEANUP:
-		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE,
-		    FCOE_SEQUENCE_RECOVERY);
-		/* NOTE: offset param only used for sequence recovery */
-		sqe->additional_info_union.seq_rec_updated_offset = offset;
-		break;
-	case FCOE_TASK_TYPE_UNSOLICITED:
-		break;
-	default:
-		break;
-	}
-
-	sqe->task_id = xid;
-
-	/* Make sure SQ data is coherent */
-	wmb();
-
+	return rval;
 }
 
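The SQE index helpers introduced by this patch (qedf_get_sqe_idx() above, and qedi_get_wqe_idx() in the qedi hunks below) share one shape: hand back the current producer slot, then advance both the driver-visible index, which wraps at the ring size, and the free-running firmware producer that the doorbell path later reports to hardware. A compilable sketch, with the ring fields gathered into a local stand-in struct:

#include <stdint.h>

typedef uint16_t u16;

struct sq_ring {
	u16 prod_idx;		/* driver index, wraps at total_sqe */
	u16 fw_prod_idx;	/* free-running, reported via doorbell */
	u16 total_sqe;		/* ring capacity in WQEs */
};

static u16 sq_get_idx(struct sq_ring *sq)
{
	u16 rval = sq->prod_idx;

	/* Adjust ring index */
	if (++sq->prod_idx == sq->total_sqe)
		sq->prod_idx = 0;
	sq->fw_prod_idx++;

	return rval;
}

Returning the pre-increment value lets the caller zero and fill the reserved WQE before the doorbell makes it visible.
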
 void qedf_ring_doorbell(struct qedf_rport *fcport)
@@ -1029,7 +848,8 @@  int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
 	struct fcoe_task_context *task_ctx;
 	u16 xid;
 	enum fcoe_task_type req_type = 0;
-	u32 ptu_invalidate = 0;
+	struct fcoe_wqe *sqe;
+	u16 sqe_idx;
 
 	/* Initialize rest of io_req fields */
 	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
@@ -1061,6 +881,17 @@  int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
 		return -EAGAIN;
 	}
 
+	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
+		kref_put(&io_req->refcount, qedf_release_cmd);
+		return -EINVAL;
+	}
+
+	/* Obtain free SQE */
+	sqe_idx = qedf_get_sqe_idx(fcport);
+	sqe = &fcport->sq[sqe_idx];
+	memset(sqe, 0, sizeof(struct fcoe_wqe));
+
 	/* Get the task context */
 	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
 	if (!task_ctx) {
@@ -1070,15 +900,7 @@  int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
 		return -EINVAL;
 	}
 
-	qedf_init_task(fcport, lport, io_req, &ptu_invalidate, task_ctx);
-
-	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
-		kref_put(&io_req->refcount, qedf_release_cmd);
-	}
-
-	/* Obtain free SQ entry */
-	qedf_add_to_sq(fcport, xid, ptu_invalidate, req_type, 0);
+	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
 
 	/* Ring doorbell */
 	qedf_ring_doorbell(fcport);
@@ -1661,6 +1483,8 @@  int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 	u32 r_a_tov = 0;
 	int rc = 0;
 	unsigned long flags;
+	struct fcoe_wqe *sqe;
+	u16 sqe_idx;
 
 	r_a_tov = rdata->r_a_tov;
 	lport = qedf->lport;
@@ -1712,10 +1536,12 @@  int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 
 	spin_lock_irqsave(&fcport->rport_lock, flags);
 
-	/* Add ABTS to send queue */
-	qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_ABTS, 0);
+	sqe_idx = qedf_get_sqe_idx(fcport);
+	sqe = &fcport->sq[sqe_idx];
+	memset(sqe, 0, sizeof(struct fcoe_wqe));
+	io_req->task_params->sqe = sqe;
 
-	/* Ring doorbell */
+	init_initiator_abort_fcoe_task(io_req->task_params);
 	qedf_ring_doorbell(fcport);
 
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
@@ -1784,8 +1610,8 @@  void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 int qedf_init_mp_req(struct qedf_ioreq *io_req)
 {
 	struct qedf_mp_req *mp_req;
-	struct fcoe_sge *mp_req_bd;
-	struct fcoe_sge *mp_resp_bd;
+	struct scsi_sge *mp_req_bd;
+	struct scsi_sge *mp_resp_bd;
 	struct qedf_ctx *qedf = io_req->fcport->qedf;
 	dma_addr_t addr;
 	uint64_t sz;
@@ -1819,7 +1645,7 @@  int qedf_init_mp_req(struct qedf_ioreq *io_req)
 	}
 
 	/* Allocate and map mp_req_bd and mp_resp_bd */
-	sz = sizeof(struct fcoe_sge);
+	sz = sizeof(struct scsi_sge);
 	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
 	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
 	if (!mp_req->mp_req_bd) {
@@ -1841,7 +1667,7 @@  int qedf_init_mp_req(struct qedf_ioreq *io_req)
 	mp_req_bd = mp_req->mp_req_bd;
 	mp_req_bd->sge_addr.lo = U64_LO(addr);
 	mp_req_bd->sge_addr.hi = U64_HI(addr);
-	mp_req_bd->size = QEDF_PAGE_SIZE;
+	mp_req_bd->sge_len = QEDF_PAGE_SIZE;
 
 	/*
 	 * MP buffer is either a task mgmt command or an ELS.
@@ -1852,7 +1678,7 @@  int qedf_init_mp_req(struct qedf_ioreq *io_req)
 	addr = mp_req->resp_buf_dma;
 	mp_resp_bd->sge_addr.lo = U64_LO(addr);
 	mp_resp_bd->sge_addr.hi = U64_HI(addr);
-	mp_resp_bd->size = QEDF_PAGE_SIZE;
+	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
 
 	return 0;
 }
@@ -1895,6 +1721,8 @@  int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 	int tmo = 0;
 	int rc = SUCCESS;
 	unsigned long flags;
+	struct fcoe_wqe *sqe;
+	u16 sqe_idx;
 
 	fcport = io_req->fcport;
 	if (!fcport) {
@@ -1940,12 +1768,16 @@  int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 
 	init_completion(&io_req->tm_done);
 
-	/* Obtain free SQ entry */
 	spin_lock_irqsave(&fcport->rport_lock, flags);
-	qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_EXCHANGE_CLEANUP, 0);
 
-	/* Ring doorbell */
+	sqe_idx = qedf_get_sqe_idx(fcport);
+	sqe = &fcport->sq[sqe_idx];
+	memset(sqe, 0, sizeof(struct fcoe_wqe));
+	io_req->task_params->sqe = sqe;
+
+	init_initiator_cleanup_fcoe_task(io_req->task_params);
 	qedf_ring_doorbell(fcport);
+
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
 	tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -1991,16 +1823,15 @@  static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 	uint8_t tm_flags)
 {
 	struct qedf_ioreq *io_req;
-	struct qedf_mp_req *tm_req;
 	struct fcoe_task_context *task;
-	struct fc_frame_header *fc_hdr;
-	struct fcp_cmnd *fcp_cmnd;
 	struct qedf_ctx *qedf = fcport->qedf;
+	struct fc_lport *lport = qedf->lport;
 	int rc = 0;
 	uint16_t xid;
-	uint32_t sid, did;
 	int tmo = 0;
 	unsigned long flags;
+	struct fcoe_wqe *sqe;
+	u16 sqe_idx;
 
 	if (!sc_cmd) {
 		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
@@ -2031,36 +1862,14 @@  static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 	/* Set the return CPU to be the same as the request one */
 	io_req->cpu = smp_processor_id();
 
-	tm_req = (struct qedf_mp_req *)&(io_req->mp_req);
-
-	rc = qedf_init_mp_req(io_req);
-	if (rc == FAILED) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Task mgmt MP request init "
-			  "failed\n");
-		kref_put(&io_req->refcount, qedf_release_cmd);
-		goto reset_tmf_err;
-	}
-
 	/* Set TM flags */
-	io_req->io_req_flags = 0;
-	tm_req->tm_flags = tm_flags;
+	io_req->io_req_flags = QEDF_READ;
+	io_req->data_xfer_len = 0;
+	io_req->tm_flags = tm_flags;
 
 	/* Default is to return a SCSI command when an error occurs */
 	io_req->return_scsi_cmd_on_abts = true;
 
-	/* Fill FCP_CMND */
-	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
-	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
-	memset(fcp_cmnd->fc_cdb, 0, FCP_CMND_LEN);
-	fcp_cmnd->fc_dl = 0;
-
-	/* Fill FC header */
-	fc_hdr = &(tm_req->req_fc_hdr);
-	sid = fcport->sid;
-	did = fcport->rdata->ids.port_id;
-	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, sid, did,
-			   FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
-			   FC_FC_SEQ_INIT, 0);
 	/* Obtain exchange id */
 	xid = io_req->xid;
 
@@ -2069,16 +1878,18 @@  static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 
 	/* Initialize task context for this IO request */
 	task = qedf_get_task_mem(&qedf->tasks, xid);
-	qedf_init_mp_task(io_req, task);
 
 	init_completion(&io_req->tm_done);
 
-	/* Obtain free SQ entry */
 	spin_lock_irqsave(&fcport->rport_lock, flags);
-	qedf_add_to_sq(fcport, xid, 0, FCOE_TASK_TYPE_MIDPATH, 0);
 
-	/* Ring doorbell */
+	sqe_idx = qedf_get_sqe_idx(fcport);
+	sqe = &fcport->sq[sqe_idx];
+	memset(sqe, 0, sizeof(struct fcoe_wqe));
+
+	qedf_init_task(fcport, lport, io_req, task, sqe);
 	qedf_ring_doorbell(fcport);
+
 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
 
 	tmo = wait_for_completion_timeout(&io_req->tm_done,
@@ -2162,14 +1973,6 @@  void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	struct qedf_ioreq *io_req)
 {
 	struct fcoe_cqe_rsp_info *fcp_rsp;
-	struct fcoe_cqe_midpath_info *mp_info;
-
-
-	/* Get TMF response length from CQE */
-	mp_info = &cqe->cqe_info.midpath_info;
-	io_req->mp_req.resp_len = mp_info->data_placement_size;
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
-	    "Response len is %d.\n", io_req->mp_req.resp_len);
 
 	fcp_rsp = &cqe->cqe_info.rsp_info;
 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
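One detail worth calling out from the qedf_init_task() rework earlier in this file: the FCP_CMND IU is built in CPU byte order, swapped in place one 32-bit word at a time, and only then copied whole into the buffer handed to init_initiator_rw_fcoe_task(). A standalone sketch of that loop, using htonl() as a portable stand-in for the kernel's cpu_to_be32() and a length parameter in place of sizeof(struct fcp_cmnd), which is assumed to be a multiple of 4:

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

typedef uint32_t u32;

/* FC is big endian; swap the IU in place, then copy it out whole */
static void swap_and_copy_fcp_cmnd(u32 *dst, u32 *tmp, size_t len)
{
	size_t i, cnt = len / sizeof(u32);

	for (i = 0; i < cnt; i++)
		tmp[i] = htonl(tmp[i]);	/* cpu_to_be32() in the kernel */

	memcpy(dst, tmp, len);
}
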
diff --git a/drivers/scsi/qedi/Makefile b/drivers/scsi/qedi/Makefile
index 2b3e16b..90a6925 100644
--- a/drivers/scsi/qedi/Makefile
+++ b/drivers/scsi/qedi/Makefile
@@ -1,5 +1,5 @@ 
 obj-$(CONFIG_QEDI) := qedi.o
 qedi-y := qedi_main.o qedi_iscsi.o qedi_fw.o qedi_sysfs.o \
-	    qedi_dbg.o
+	    qedi_dbg.o qedi_fw_api.o
 
 qedi-$(CONFIG_DEBUG_FS) += qedi_debugfs.o
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index c9f0ef4..b0e2222 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -14,6 +14,8 @@ 
 #include "qedi.h"
 #include "qedi_iscsi.h"
 #include "qedi_gbl.h"
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
 
 static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
 			       struct iscsi_task *mtask);
@@ -53,8 +55,8 @@  static void qedi_process_logout_resp(struct qedi_ctx *qedi,
 	resp_hdr->exp_cmdsn = cpu_to_be32(cqe_logout_response->exp_cmd_sn);
 	resp_hdr->max_cmdsn = cpu_to_be32(cqe_logout_response->max_cmd_sn);
 
-	resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time2wait);
-	resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time2retain);
+	resp_hdr->t2wait = cpu_to_be32(cqe_logout_response->time_2_wait);
+	resp_hdr->t2retain = cpu_to_be32(cqe_logout_response->time_2_retain);
 
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_TID,
 		  "Freeing tid=0x%x for cid=0x%x\n",
@@ -975,81 +977,6 @@  void qedi_fp_process_cqes(struct qedi_work *work)
 	return;
 }
 
-static void qedi_add_to_sq(struct qedi_conn *qedi_conn, struct iscsi_task *task,
-			   u16 tid, uint16_t ptu_invalidate, int is_cleanup)
-{
-	struct iscsi_wqe *wqe;
-	struct iscsi_wqe_field *cont_field;
-	struct qedi_endpoint *ep;
-	struct scsi_cmnd *sc = task->sc;
-	struct iscsi_login_req *login_hdr;
-	struct qedi_cmd *cmd = task->dd_data;
-
-	login_hdr = (struct iscsi_login_req *)task->hdr;
-	ep = qedi_conn->ep;
-	wqe = &ep->sq[ep->sq_prod_idx];
-
-	memset(wqe, 0, sizeof(*wqe));
-
-	ep->sq_prod_idx++;
-	ep->fw_sq_prod_idx++;
-	if (ep->sq_prod_idx == QEDI_SQ_SIZE)
-		ep->sq_prod_idx = 0;
-
-	if (is_cleanup) {
-		SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
-			  ISCSI_WQE_TYPE_TASK_CLEANUP);
-		wqe->task_id = tid;
-		return;
-	}
-
-	if (ptu_invalidate) {
-		SET_FIELD(wqe->flags, ISCSI_WQE_PTU_INVALIDATE,
-			  ISCSI_WQE_SET_PTU_INVALIDATE);
-	}
-
-	cont_field = &wqe->cont_prevtid_union.cont_field;
-
-	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
-	case ISCSI_OP_LOGIN:
-	case ISCSI_OP_TEXT:
-		SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
-			  ISCSI_WQE_TYPE_MIDDLE_PATH);
-		SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
-			  1);
-		cont_field->contlen_cdbsize_field = ntoh24(login_hdr->dlength);
-		break;
-	case ISCSI_OP_LOGOUT:
-	case ISCSI_OP_NOOP_OUT:
-	case ISCSI_OP_SCSI_TMFUNC:
-		 SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
-			   ISCSI_WQE_TYPE_NORMAL);
-		break;
-	default:
-		if (!sc)
-			break;
-
-		SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
-			  ISCSI_WQE_TYPE_NORMAL);
-		cont_field->contlen_cdbsize_field =
-				(sc->sc_data_direction == DMA_TO_DEVICE) ?
-				scsi_bufflen(sc) : 0;
-		if (cmd->use_slowpath)
-			SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES, 0);
-		else
-			SET_FIELD(wqe->flags, ISCSI_WQE_NUM_FAST_SGES,
-				  (sc->sc_data_direction ==
-				   DMA_TO_DEVICE) ?
-				  min((u16)QEDI_FAST_SGE_COUNT,
-				      (u16)cmd->io_tbl.sge_valid) : 0);
-		break;
-	}
-
-	wqe->task_id = tid;
-	/* Make sure SQ data is coherent */
-	wmb();
-}
-
 static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
 {
 	struct iscsi_db_data dbell = { 0 };
@@ -1076,96 +1003,116 @@  static void qedi_ring_doorbell(struct qedi_conn *qedi_conn)
 		  qedi_conn->iscsi_conn_id);
 }
 
+static u16 qedi_get_wqe_idx(struct qedi_conn *qedi_conn)
+{
+	struct qedi_endpoint *ep;
+	u16 rval;
+
+	ep = qedi_conn->ep;
+	rval = ep->sq_prod_idx;
+
+	/* Increment SQ index */
+	ep->sq_prod_idx++;
+	ep->fw_sq_prod_idx++;
+	if (ep->sq_prod_idx == QEDI_SQ_SIZE)
+		ep->sq_prod_idx = 0;
+
+	return rval;
+}
+
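Every qedi send path below (login, logout, TMF, text, NOP-Out, SCSI command) now follows the same sequence, with the per-PDU differences confined to the header struct and the matching init_initiator_*_task() helper from the new qedi_fw_api.c. A compilable skeleton of that shared sequence, with all driver and firmware calls replaced by hypothetical stubs:

#include <stdint.h>
#include <string.h>

/* Hypothetical stubs standing in for qedi_get_task_idx(),
 * qedi_get_task_mem(), qedi_get_wqe_idx()/&ep->sq[idx],
 * init_initiator_*_request_task() and qedi_ring_doorbell().
 */
struct fw_wqe { unsigned char raw[32]; };
static char task_mem[256];

static int get_task_idx(void) { return 0; }
static void *get_task_mem(int tid) { (void)tid; return task_mem; }
static struct fw_wqe *reserve_wqe(void) { static struct fw_wqe w; return &w; }
static int fw_init_task(void *ctx, struct fw_wqe *wqe) { (void)ctx; (void)wqe; return 0; }
static void ring_doorbell(void) { }

static int send_pdu(void)
{
	struct fw_wqe *wqe;
	void *fw_task_ctx;
	int tid;

	tid = get_task_idx();			/* may fail: -ENOMEM */
	if (tid == -1)
		return -1;

	fw_task_ctx = get_task_mem(tid);	/* per-task fw context */
	memset(fw_task_ctx, 0, sizeof(task_mem));

	wqe = reserve_wqe();			/* SQ producer slot */
	memset(wqe, 0, sizeof(*wqe));

	/* The PDU-specific header and SGL params are built here, then
	 * handed to the matching init_initiator_*_task() helper.
	 */
	if (fw_init_task(fw_task_ctx, wqe))
		return -1;

	ring_doorbell();
	return 0;
}
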
 int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
 			  struct iscsi_task *task)
 {
-	struct qedi_ctx *qedi = qedi_conn->qedi;
+	struct iscsi_login_req_hdr login_req_pdu_header;
+	struct scsi_sgl_task_params tx_sgl_task_params;
+	struct scsi_sgl_task_params rx_sgl_task_params;
+	struct iscsi_task_params task_params;
 	struct iscsi_task_context *fw_task_ctx;
+	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct iscsi_login_req *login_hdr;
-	struct iscsi_login_req_hdr *fw_login_req = NULL;
-	struct iscsi_cached_sge_ctx *cached_sge = NULL;
-	struct iscsi_sge *single_sge = NULL;
-	struct iscsi_sge *req_sge = NULL;
-	struct iscsi_sge *resp_sge = NULL;
+	struct scsi_sge *req_sge = NULL;
+	struct scsi_sge *resp_sge = NULL;
 	struct qedi_cmd *qedi_cmd;
-	s16 ptu_invalidate = 0;
+	struct qedi_endpoint *ep;
 	s16 tid = 0;
+	u16 sq_idx = 0;
+	int rval = 0;
 
-	req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
-	resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+	req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 	qedi_cmd = (struct qedi_cmd *)task->dd_data;
+	ep = qedi_conn->ep;
 	login_hdr = (struct iscsi_login_req *)task->hdr;
 
 	tid = qedi_get_task_idx(qedi);
 	if (tid == -1)
 		return -ENOMEM;
 
-	fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+	fw_task_ctx =
+	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
-	/* Ystorm context */
-	fw_login_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.login_req;
-	fw_login_req->opcode = login_hdr->opcode;
-	fw_login_req->version_min = login_hdr->min_version;
-	fw_login_req->version_max = login_hdr->max_version;
-	fw_login_req->flags_attr = login_hdr->flags;
-	fw_login_req->isid_tabc = *((u16 *)login_hdr->isid + 2);
-	fw_login_req->isid_d = *((u32 *)login_hdr->isid);
-	fw_login_req->tsih = login_hdr->tsih;
-	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
-	fw_login_req->itt = qedi_set_itt(tid, get_itt(task->itt));
-	fw_login_req->cid = qedi_conn->iscsi_conn_id;
-	fw_login_req->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
-	fw_login_req->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
-	fw_login_req->exp_stat_sn = 0;
-
-	if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-		ptu_invalidate = 1;
-		qedi->tid_reuse_count[tid] = 0;
-	}
+	memset(&task_params, 0, sizeof(task_params));
+	memset(&login_req_pdu_header, 0, sizeof(login_req_pdu_header));
+	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+	/* Update header info */
+	login_req_pdu_header.opcode = login_hdr->opcode;
+	login_req_pdu_header.version_min = login_hdr->min_version;
+	login_req_pdu_header.version_max = login_hdr->max_version;
+	login_req_pdu_header.flags_attr = login_hdr->flags;
+	login_req_pdu_header.isid_tabc = swab32p((u32 *)login_hdr->isid);
+	login_req_pdu_header.isid_d = swab16p((u16 *)&login_hdr->isid[4]);
+
+	login_req_pdu_header.tsih = login_hdr->tsih;
+	login_req_pdu_header.hdr_second_dword = ntoh24(login_hdr->dlength);
 
-	fw_task_ctx->ystorm_st_context.state.reuse_count =
-						qedi->tid_reuse_count[tid];
-	fw_task_ctx->mstorm_st_context.reuse_count =
-						qedi->tid_reuse_count[tid]++;
-	cached_sge =
-	       &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
-	cached_sge->sge.sge_len = req_sge->sge_len;
-	cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
-	cached_sge->sge.sge_addr.hi =
-			     (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
-	/* Mstorm context */
-	single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
-	fw_task_ctx->mstorm_st_context.task_type = 0x2;
-	fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-	single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
-	single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
-	single_sge->sge_len = resp_sge->sge_len;
-
-	SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-		  ISCSI_MFLAGS_SINGLE_SGE, 1);
-	SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-		  ISCSI_MFLAGS_SLOW_IO, 0);
-	fw_task_ctx->mstorm_st_context.sgl_size = 1;
-	fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
-	/* Ustorm context */
-	fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
-	fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
-						ntoh24(login_hdr->dlength);
-	fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
-	fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-	fw_task_ctx->ustorm_st_context.task_type = 0x2;
-	fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-	fw_task_ctx->ustorm_ag_context.exp_data_acked =
-						 ntoh24(login_hdr->dlength);
-	SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-	SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-		  USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
+	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+	login_req_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+	login_req_pdu_header.cid = qedi_conn->iscsi_conn_id;
+	login_req_pdu_header.cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+	login_req_pdu_header.exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+	login_req_pdu_header.exp_stat_sn = 0;
+
+	/* Fill tx AHS and rx buffer */
+	tx_sgl_task_params.sgl =
+			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+	tx_sgl_task_params.sgl_phys_addr.lo =
+					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
+	tx_sgl_task_params.sgl_phys_addr.hi =
+			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+	tx_sgl_task_params.total_buffer_size = ntoh24(login_hdr->dlength);
+	tx_sgl_task_params.num_sges = 1;
+
+	rx_sgl_task_params.sgl =
+			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+	rx_sgl_task_params.sgl_phys_addr.lo =
+					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
+	rx_sgl_task_params.sgl_phys_addr.hi =
+			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+	rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+	rx_sgl_task_params.num_sges = 1;
+
+	/* Fill fw input params */
+	task_params.context = fw_task_ctx;
+	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+	task_params.itid = tid;
+	task_params.cq_rss_number = 0;
+	task_params.tx_io_size = ntoh24(login_hdr->dlength);
+	task_params.rx_io_size = resp_sge->sge_len;
+
+	sq_idx = qedi_get_wqe_idx(qedi_conn);
+	task_params.sqe = &ep->sq[sq_idx];
+
+	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+	rval = init_initiator_login_request_task(&task_params,
+						 &login_req_pdu_header,
+						 &tx_sgl_task_params,
+						 &rx_sgl_task_params);
+	if (rval)
+		return -1;
 
 	spin_lock(&qedi_conn->list_lock);
 	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1173,7 +1120,6 @@  int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
 	qedi_conn->active_cmd_count++;
 	spin_unlock(&qedi_conn->list_lock);
 
-	qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
 	qedi_ring_doorbell(qedi_conn);
 	return 0;
 }
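The ISID handling in qedi_send_iscsi_login() above is easy to misread: the 6-byte ISID from the iSCSI header is split into a byte-swapped 4-byte isid_tabc and a byte-swapped 2-byte isid_d, the layout the firmware header wants. A standalone sketch with local swab helpers (the kernel's swab32p()/swab16p() are unconditional byte swaps, not endian-conditional conversions), using memcpy to sidestep the alignment-punning casts the driver uses:

#include <stdint.h>
#include <string.h>

typedef uint16_t u16;
typedef uint32_t u32;

static u32 swab32_(u32 v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

static u16 swab16_(u16 v)
{
	return (u16)((v >> 8) | (v << 8));
}

/* Split the 6-byte ISID into the firmware's isid_tabc/isid_d fields */
static void fill_isid(const uint8_t isid[6], u32 *isid_tabc, u16 *isid_d)
{
	u32 tabc;
	u16 d;

	memcpy(&tabc, isid, sizeof(tabc));	/* first 4 bytes */
	memcpy(&d, &isid[4], sizeof(d));	/* last 2 bytes */
	*isid_tabc = swab32_(tabc);
	*isid_d = swab16_(d);
}
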
@@ -1181,65 +1127,64 @@  int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
 int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
 			   struct iscsi_task *task)
 {
-	struct qedi_ctx *qedi = qedi_conn->qedi;
-	struct iscsi_logout_req_hdr *fw_logout_req = NULL;
-	struct iscsi_task_context *fw_task_ctx = NULL;
+	struct iscsi_logout_req_hdr logout_pdu_header;
+	struct scsi_sgl_task_params tx_sgl_task_params;
+	struct scsi_sgl_task_params rx_sgl_task_params;
+	struct iscsi_task_params task_params;
+	struct iscsi_task_context *fw_task_ctx;
 	struct iscsi_logout *logout_hdr = NULL;
-	struct qedi_cmd *qedi_cmd = NULL;
-	s16  tid = 0;
-	s16 ptu_invalidate = 0;
+	struct qedi_ctx *qedi = qedi_conn->qedi;
+	struct qedi_cmd *qedi_cmd;
+	struct qedi_endpoint *ep;
+	s16 tid = 0;
+	u16 sq_idx = 0;
+	int rval = 0;
 
 	qedi_cmd = (struct qedi_cmd *)task->dd_data;
 	logout_hdr = (struct iscsi_logout *)task->hdr;
+	ep = qedi_conn->ep;
 
 	tid = qedi_get_task_idx(qedi);
 	if (tid == -1)
 		return -ENOMEM;
 
-	fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
-
+	fw_task_ctx =
+	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
 	qedi_cmd->task_id = tid;
 
-	/* Ystorm context */
-	fw_logout_req = &fw_task_ctx->ystorm_st_context.pdu_hdr.logout_req;
-	fw_logout_req->opcode = ISCSI_OPCODE_LOGOUT_REQUEST;
-	fw_logout_req->reason_code = 0x80 | logout_hdr->flags;
-	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
-	fw_logout_req->itt = qedi_set_itt(tid, get_itt(task->itt));
-	fw_logout_req->exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
-	fw_logout_req->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+	memset(&task_params, 0, sizeof(task_params));
+	memset(&logout_pdu_header, 0, sizeof(logout_pdu_header));
+	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
 
-	if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-		ptu_invalidate = 1;
-		qedi->tid_reuse_count[tid] = 0;
-	}
-	fw_task_ctx->ystorm_st_context.state.reuse_count =
-						  qedi->tid_reuse_count[tid];
-	fw_task_ctx->mstorm_st_context.reuse_count =
-						qedi->tid_reuse_count[tid]++;
-	fw_logout_req->cid = qedi_conn->iscsi_conn_id;
-	fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
-
-	/* Mstorm context */
-	fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
-	fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
-	/* Ustorm context */
-	fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
-	fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
-	fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
-	fw_task_ctx->ustorm_st_context.task_type =  ISCSI_TASK_TYPE_MIDPATH;
-	fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
-	SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-		  USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
-	SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-		  ISCSI_REG1_NUM_FAST_SGES, 0);
-
-	fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-	SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+	/* Update header info */
+	logout_pdu_header.opcode = logout_hdr->opcode;
+	logout_pdu_header.reason_code = 0x80 | logout_hdr->flags;
+	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
+	logout_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+	logout_pdu_header.exp_stat_sn = be32_to_cpu(logout_hdr->exp_statsn);
+	logout_pdu_header.cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+	logout_pdu_header.cid = qedi_conn->iscsi_conn_id;
+
+	/* Fill fw input params */
+	task_params.context = fw_task_ctx;
+	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+	task_params.itid = tid;
+	task_params.cq_rss_number = 0;
+	task_params.tx_io_size = 0;
+	task_params.rx_io_size = 0;
+
+	sq_idx = qedi_get_wqe_idx(qedi_conn);
+	task_params.sqe = &ep->sq[sq_idx];
+	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
+	rval = init_initiator_logout_request_task(&task_params,
+						  &logout_pdu_header,
+						  NULL, NULL);
+	if (rval)
+		return -1;
 
 	spin_lock(&qedi_conn->list_lock);
 	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1247,9 +1192,7 @@  int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
 	qedi_conn->active_cmd_count++;
 	spin_unlock(&qedi_conn->list_lock);
 
-	qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
 	qedi_ring_doorbell(qedi_conn);
-
 	return 0;
 }
 
@@ -1533,47 +1476,46 @@  static void qedi_tmf_work(struct work_struct *work)
 static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
 			       struct iscsi_task *mtask)
 {
-	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
+	struct iscsi_tmf_request_hdr tmf_pdu_header;
+	struct iscsi_task_params task_params;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct iscsi_task_context *fw_task_ctx;
-	struct iscsi_tmf_request_hdr *fw_tmf_request;
-	struct iscsi_sge *single_sge;
-	struct qedi_cmd *qedi_cmd;
-	struct qedi_cmd *cmd;
+	struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
 	struct iscsi_task *ctask;
 	struct iscsi_tm *tmf_hdr;
-	struct iscsi_sge *req_sge;
-	struct iscsi_sge *resp_sge;
-	u32 lun[2];
-	s16 tid = 0, ptu_invalidate = 0;
+	struct qedi_cmd *qedi_cmd;
+	struct qedi_cmd *cmd;
+	struct qedi_endpoint *ep;
+	u32 scsi_lun[2];
+	s16 tid = 0;
+	u16 sq_idx = 0;
+	int rval = 0;
 
-	req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
-	resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
-	qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
 	tmf_hdr = (struct iscsi_tm *)mtask->hdr;
+	qedi_cmd = (struct qedi_cmd *)mtask->dd_data;
+	ep = qedi_conn->ep;
 
-	tid = qedi_cmd->task_id;
-	qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+	tid = qedi_get_task_idx(qedi);
+	if (tid == -1)
+		return -ENOMEM;
 
-	fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+	fw_task_ctx =
+	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 
-	fw_tmf_request = &fw_task_ctx->ystorm_st_context.pdu_hdr.tmf_request;
-	fw_tmf_request->itt = qedi_set_itt(tid, get_itt(mtask->itt));
-	fw_tmf_request->cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+	qedi_cmd->task_id = tid;
 
-	memcpy(lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
-	fw_tmf_request->lun.lo = be32_to_cpu(lun[0]);
-	fw_tmf_request->lun.hi = be32_to_cpu(lun[1]);
+	memset(&task_params, 0, sizeof(task_params));
+	memset(&tmf_pdu_header, 0, sizeof(tmf_pdu_header));
 
-	if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-		ptu_invalidate = 1;
-		qedi->tid_reuse_count[tid] = 0;
-	}
-	fw_task_ctx->ystorm_st_context.state.reuse_count =
-						qedi->tid_reuse_count[tid];
-	fw_task_ctx->mstorm_st_context.reuse_count =
-						qedi->tid_reuse_count[tid]++;
+	/* Update header info */
+	qedi_update_itt_map(qedi, tid, mtask->itt, qedi_cmd);
+	tmf_pdu_header.itt = qedi_set_itt(tid, get_itt(mtask->itt));
+	tmf_pdu_header.cmd_sn = be32_to_cpu(tmf_hdr->cmdsn);
+
+	memcpy(scsi_lun, &tmf_hdr->lun, sizeof(struct scsi_lun));
+	tmf_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+	tmf_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
 
 	if ((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
 	     ISCSI_TM_FUNC_ABORT_TASK) {
@@ -1584,53 +1526,34 @@  static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
 			return 0;
 		}
 		cmd = (struct qedi_cmd *)ctask->dd_data;
-		fw_tmf_request->rtt =
+		tmf_pdu_header.rtt =
 				qedi_set_itt(cmd->task_id,
 					     get_itt(tmf_hdr->rtt));
 	} else {
-		fw_tmf_request->rtt = ISCSI_RESERVED_TAG;
+		tmf_pdu_header.rtt = ISCSI_RESERVED_TAG;
 	}
 
-	fw_tmf_request->opcode = tmf_hdr->opcode;
-	fw_tmf_request->function = tmf_hdr->flags;
-	fw_tmf_request->hdr_second_dword = ntoh24(tmf_hdr->dlength);
-	fw_tmf_request->ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
-
-	single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
-	fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
-	fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-	single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
-	single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
-	single_sge->sge_len = resp_sge->sge_len;
-
-	SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-		  ISCSI_MFLAGS_SINGLE_SGE, 1);
-	SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-		  ISCSI_MFLAGS_SLOW_IO, 0);
-	fw_task_ctx->mstorm_st_context.sgl_size = 1;
-	fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
-	/* Ustorm context */
-	fw_task_ctx->ustorm_st_context.rem_rcv_len = 0;
-	fw_task_ctx->ustorm_st_context.exp_data_transfer_len = 0;
-	fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
-	fw_task_ctx->ustorm_st_context.task_type =  ISCSI_TASK_TYPE_MIDPATH;
-	fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
-	SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-		  USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
-	SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-		  ISCSI_REG1_NUM_FAST_SGES, 0);
-
-	fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-	SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-	fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
-	fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
+	tmf_pdu_header.opcode = tmf_hdr->opcode;
+	tmf_pdu_header.function = tmf_hdr->flags;
+	tmf_pdu_header.hdr_second_dword = ntoh24(tmf_hdr->dlength);
+	tmf_pdu_header.ref_cmd_sn = be32_to_cpu(tmf_hdr->refcmdsn);
 
-	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
-		  "Add TMF to SQ, tmf tid=0x%x, itt=0x%x, cid=0x%x\n",
-		  tid,  mtask->itt, qedi_conn->iscsi_conn_id);
+	/* Fill fw input params */
+	task_params.context = fw_task_ctx;
+	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+	task_params.itid = tid;
+	task_params.cq_rss_number = 0;
+	task_params.tx_io_size = 0;
+	task_params.rx_io_size = 0;
+
+	sq_idx = qedi_get_wqe_idx(qedi_conn);
+	task_params.sqe = &ep->sq[sq_idx];
+
+	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+	rval = init_initiator_tmf_request_task(&task_params,
+					       &tmf_pdu_header);
+	if (rval)
+		return -1;
 
 	spin_lock(&qedi_conn->list_lock);
 	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1638,7 +1561,6 @@  static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn,
 	qedi_conn->active_cmd_count++;
 	spin_unlock(&qedi_conn->list_lock);
 
-	qedi_add_to_sq(qedi_conn, mtask, tid, ptu_invalidate, false);
 	qedi_ring_doorbell(qedi_conn);
 	return 0;
 }
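The LUN plumbing repeated in the TMF, NOP-Out and SCSI-command paths also deserves a note: the 8-byte wire-format LUN is copied into two 32-bit words and converted with be32_to_cpu() before landing in the firmware header's lo/hi pair. A standalone sketch, using ntohl() as a portable stand-in for be32_to_cpu() and a plain byte array in place of struct scsi_lun:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

typedef uint32_t u32;

struct fw_lun {
	u32 lo;
	u32 hi;
};

static void fill_fw_lun(struct fw_lun *fw_lun, const uint8_t lun[8])
{
	u32 scsi_lun[2];

	/* Copy the big-endian wire LUN, then convert each word */
	memcpy(scsi_lun, lun, sizeof(scsi_lun));
	fw_lun->lo = ntohl(scsi_lun[0]);	/* be32_to_cpu() */
	fw_lun->hi = ntohl(scsi_lun[1]);
}
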
@@ -1689,101 +1611,98 @@  int qedi_iscsi_abort_work(struct qedi_conn *qedi_conn,
 int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
 			 struct iscsi_task *task)
 {
-	struct qedi_ctx *qedi = qedi_conn->qedi;
+	struct iscsi_text_request_hdr text_request_pdu_header;
+	struct scsi_sgl_task_params tx_sgl_task_params;
+	struct scsi_sgl_task_params rx_sgl_task_params;
+	struct iscsi_task_params task_params;
 	struct iscsi_task_context *fw_task_ctx;
-	struct iscsi_text_request_hdr *fw_text_request;
-	struct iscsi_cached_sge_ctx *cached_sge;
-	struct iscsi_sge *single_sge;
-	struct qedi_cmd *qedi_cmd;
-	/* For 6.5 hdr iscsi_hdr */
+	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct iscsi_text *text_hdr;
-	struct iscsi_sge *req_sge;
-	struct iscsi_sge *resp_sge;
-	s16 ptu_invalidate = 0;
+	struct scsi_sge *req_sge = NULL;
+	struct scsi_sge *resp_sge = NULL;
+	struct qedi_cmd *qedi_cmd;
+	struct qedi_endpoint *ep;
 	s16 tid = 0;
+	u16 sq_idx = 0;
+	int rval = 0;
 
-	req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
-	resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+	req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 	qedi_cmd = (struct qedi_cmd *)task->dd_data;
 	text_hdr = (struct iscsi_text *)task->hdr;
+	ep = qedi_conn->ep;
 
 	tid = qedi_get_task_idx(qedi);
 	if (tid == -1)
 		return -ENOMEM;
 
-	fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+	fw_task_ctx =
+	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
 
 	qedi_cmd->task_id = tid;
 
-	/* Ystorm context */
-	fw_text_request =
-			&fw_task_ctx->ystorm_st_context.pdu_hdr.text_request;
-	fw_text_request->opcode = text_hdr->opcode;
-	fw_text_request->flags_attr = text_hdr->flags;
+	memset(&task_params, 0, sizeof(task_params));
+	memset(&text_request_pdu_header, 0, sizeof(text_request_pdu_header));
+	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+	/* Update header info */
+	text_request_pdu_header.opcode = text_hdr->opcode;
+	text_request_pdu_header.flags_attr = text_hdr->flags;
 
 	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
-	fw_text_request->itt = qedi_set_itt(tid, get_itt(task->itt));
-	fw_text_request->ttt = text_hdr->ttt;
-	fw_text_request->cmd_sn = be32_to_cpu(text_hdr->cmdsn);
-	fw_text_request->exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
-	fw_text_request->hdr_second_dword = ntoh24(text_hdr->dlength);
-
-	if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-		ptu_invalidate = 1;
-		qedi->tid_reuse_count[tid] = 0;
-	}
-	fw_task_ctx->ystorm_st_context.state.reuse_count =
-						     qedi->tid_reuse_count[tid];
-	fw_task_ctx->mstorm_st_context.reuse_count =
-						   qedi->tid_reuse_count[tid]++;
-
-	cached_sge =
-	       &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
-	cached_sge->sge.sge_len = req_sge->sge_len;
-	cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
-	cached_sge->sge.sge_addr.hi =
+	text_request_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+	text_request_pdu_header.ttt = text_hdr->ttt;
+	text_request_pdu_header.cmd_sn = be32_to_cpu(text_hdr->cmdsn);
+	text_request_pdu_header.exp_stat_sn = be32_to_cpu(text_hdr->exp_statsn);
+	text_request_pdu_header.hdr_second_dword = ntoh24(text_hdr->dlength);
+
+	/* Fill tx AHS and rx buffer */
+	tx_sgl_task_params.sgl =
+			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+	tx_sgl_task_params.sgl_phys_addr.lo =
+					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
+	tx_sgl_task_params.sgl_phys_addr.hi =
 			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+	tx_sgl_task_params.total_buffer_size = req_sge->sge_len;
+	tx_sgl_task_params.num_sges = 1;
+
+	rx_sgl_task_params.sgl =
+			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+	rx_sgl_task_params.sgl_phys_addr.lo =
+					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
+	rx_sgl_task_params.sgl_phys_addr.hi =
+			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+	rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+	rx_sgl_task_params.num_sges = 1;
+
+	/* Fill fw input params */
+	task_params.context = fw_task_ctx;
+	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+	task_params.itid = tid;
+	task_params.cq_rss_number = 0;
+	task_params.tx_io_size = ntoh24(text_hdr->dlength);
+	task_params.rx_io_size = resp_sge->sge_len;
+
+	sq_idx = qedi_get_wqe_idx(qedi_conn);
+	task_params.sqe = &ep->sq[sq_idx];
+
+	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+	rval = init_initiator_text_request_task(&task_params,
+						&text_request_pdu_header,
+						&tx_sgl_task_params,
+						&rx_sgl_task_params);
+	if (rval)
+		return -1;
 
-	/* Mstorm context */
-	single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
-	fw_task_ctx->mstorm_st_context.task_type = 0x2;
-	fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-	single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
-	single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
-	single_sge->sge_len = resp_sge->sge_len;
-
-	SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-		  ISCSI_MFLAGS_SINGLE_SGE, 1);
-	SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-		  ISCSI_MFLAGS_SLOW_IO, 0);
-	fw_task_ctx->mstorm_st_context.sgl_size = 1;
-	fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
-	/* Ustorm context */
-	fw_task_ctx->ustorm_ag_context.exp_data_acked =
-						      ntoh24(text_hdr->dlength);
-	fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
-	fw_task_ctx->ustorm_st_context.exp_data_transfer_len =
-						      ntoh24(text_hdr->dlength);
-	fw_task_ctx->ustorm_st_context.exp_data_sn =
-					      be32_to_cpu(text_hdr->exp_statsn);
-	fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-	fw_task_ctx->ustorm_st_context.task_type = 0x2;
-	fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-	SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
-	/*  Add command in active command list */
 	spin_lock(&qedi_conn->list_lock);
 	list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
 	qedi_cmd->io_cmd_in_list = true;
 	qedi_conn->active_cmd_count++;
 	spin_unlock(&qedi_conn->list_lock);
 
-	qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
 	qedi_ring_doorbell(qedi_conn);
-
 	return 0;
 }
 
@@ -1791,58 +1710,62 @@  int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
 			   struct iscsi_task *task,
 			   char *datap, int data_len, int unsol)
 {
+	struct iscsi_nop_out_hdr nop_out_pdu_header;
+	struct scsi_sgl_task_params tx_sgl_task_params;
+	struct scsi_sgl_task_params rx_sgl_task_params;
+	struct iscsi_task_params task_params;
 	struct qedi_ctx *qedi = qedi_conn->qedi;
 	struct iscsi_task_context *fw_task_ctx;
-	struct iscsi_nop_out_hdr *fw_nop_out;
-	struct qedi_cmd *qedi_cmd;
-	/* For 6.5 hdr iscsi_hdr */
 	struct iscsi_nopout *nopout_hdr;
-	struct iscsi_cached_sge_ctx *cached_sge;
-	struct iscsi_sge *single_sge;
-	struct iscsi_sge *req_sge;
-	struct iscsi_sge *resp_sge;
-	u32 lun[2];
-	s16 ptu_invalidate = 0;
+	struct scsi_sge *req_sge = NULL;
+	struct scsi_sge *resp_sge = NULL;
+	struct qedi_cmd *qedi_cmd;
+	struct qedi_endpoint *ep;
+	u32 scsi_lun[2];
 	s16 tid = 0;
+	u16 sq_idx = 0;
+	int rval = 0;
 
-	req_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
-	resp_sge = (struct iscsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+	req_sge = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+	resp_sge = (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
 	qedi_cmd = (struct qedi_cmd *)task->dd_data;
 	nopout_hdr = (struct iscsi_nopout *)task->hdr;
+	ep = qedi_conn->ep;
 
 	tid = qedi_get_task_idx(qedi);
-	if (tid == -1) {
-		QEDI_WARN(&qedi->dbg_ctx, "Invalid tid\n");
+	if (tid == -1)
 		return -ENOMEM;
-	}
-
-	fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
 
+	fw_task_ctx =
+	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
+
 	qedi_cmd->task_id = tid;
 
-	/* Ystorm context */
-	fw_nop_out = &fw_task_ctx->ystorm_st_context.pdu_hdr.nop_out;
-	SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
-	SET_FIELD(fw_nop_out->flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
+	memset(&task_params, 0, sizeof(task_params));
+	memset(&nop_out_pdu_header, 0, sizeof(nop_out_pdu_header));
+	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+
+	/* Update header info */
+	nop_out_pdu_header.opcode = nopout_hdr->opcode;
+	SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_CONST1, 1);
+	SET_FIELD(nop_out_pdu_header.flags_attr, ISCSI_NOP_OUT_HDR_RSRV, 0);
 
-	memcpy(lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
-	fw_nop_out->lun.lo = be32_to_cpu(lun[0]);
-	fw_nop_out->lun.hi = be32_to_cpu(lun[1]);
+	memcpy(scsi_lun, &nopout_hdr->lun, sizeof(struct scsi_lun));
+	nop_out_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+	nop_out_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
+	nop_out_pdu_header.cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+	nop_out_pdu_header.exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
 
 	qedi_update_itt_map(qedi, tid, task->itt, qedi_cmd);
 
 	if (nopout_hdr->ttt != ISCSI_TTT_ALL_ONES) {
-		fw_nop_out->itt = be32_to_cpu(nopout_hdr->itt);
-		fw_nop_out->ttt = be32_to_cpu(nopout_hdr->ttt);
-		fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
-		fw_task_ctx->ystorm_st_context.state.local_comp = 1;
-		SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-			  USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+		nop_out_pdu_header.itt = be32_to_cpu(nopout_hdr->itt);
+		nop_out_pdu_header.ttt = be32_to_cpu(nopout_hdr->ttt);
 	} else {
-		fw_nop_out->itt = qedi_set_itt(tid, get_itt(task->itt));
-		fw_nop_out->ttt = ISCSI_TTT_ALL_ONES;
-		fw_task_ctx->ystorm_st_context.state.buffer_offset[0] = 0;
+		nop_out_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+		nop_out_pdu_header.ttt = ISCSI_TTT_ALL_ONES;
 
 		spin_lock(&qedi_conn->list_lock);
 		list_add_tail(&qedi_cmd->io_cmd, &qedi_conn->active_cmd_list);
@@ -1851,53 +1774,46 @@  int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
 		spin_unlock(&qedi_conn->list_lock);
 	}
 
-	fw_nop_out->opcode = ISCSI_OPCODE_NOP_OUT;
-	fw_nop_out->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
-	fw_nop_out->exp_stat_sn = be32_to_cpu(nopout_hdr->exp_statsn);
-
-	cached_sge =
-	       &fw_task_ctx->ystorm_st_context.state.sgl_ctx_union.cached_sge;
-	cached_sge->sge.sge_len = req_sge->sge_len;
-	cached_sge->sge.sge_addr.lo = (u32)(qedi_conn->gen_pdu.req_dma_addr);
-	cached_sge->sge.sge_addr.hi =
-			(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
-
-	/* Mstorm context */
-	fw_task_ctx->mstorm_st_context.task_type = ISCSI_TASK_TYPE_MIDPATH;
-	fw_task_ctx->mstorm_ag_context.task_cid = (u16)qedi_conn->iscsi_conn_id;
-
-	single_sge = &fw_task_ctx->mstorm_st_context.sgl_union.single_sge;
-	single_sge->sge_addr.lo = resp_sge->sge_addr.lo;
-	single_sge->sge_addr.hi = resp_sge->sge_addr.hi;
-	single_sge->sge_len = resp_sge->sge_len;
-	fw_task_ctx->mstorm_st_context.rem_task_size = resp_sge->sge_len;
-
-	if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-		ptu_invalidate = 1;
-		qedi->tid_reuse_count[tid] = 0;
-	}
-	fw_task_ctx->ystorm_st_context.state.reuse_count =
-						qedi->tid_reuse_count[tid];
-	fw_task_ctx->mstorm_st_context.reuse_count =
-						qedi->tid_reuse_count[tid]++;
-	/* Ustorm context */
-	fw_task_ctx->ustorm_st_context.rem_rcv_len = resp_sge->sge_len;
-	fw_task_ctx->ustorm_st_context.exp_data_transfer_len = data_len;
-	fw_task_ctx->ustorm_st_context.exp_data_sn = 0;
-	fw_task_ctx->ustorm_st_context.task_type =  ISCSI_TASK_TYPE_MIDPATH;
-	fw_task_ctx->ustorm_st_context.cq_rss_number = 0;
-
-	SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-		  ISCSI_REG1_NUM_FAST_SGES, 0);
-
-	fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-	SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-
-	fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
-	fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
-	qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
+	/* Fill tx AHS and rx buffer */
+	if (data_len) {
+		tx_sgl_task_params.sgl =
+			       (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+		tx_sgl_task_params.sgl_phys_addr.lo =
+					 (u32)(qedi_conn->gen_pdu.req_dma_addr);
+		tx_sgl_task_params.sgl_phys_addr.hi =
+			      (u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
+		tx_sgl_task_params.total_buffer_size = data_len;
+		tx_sgl_task_params.num_sges = 1;
+
+		rx_sgl_task_params.sgl =
+			      (struct scsi_sge *)qedi_conn->gen_pdu.resp_bd_tbl;
+		rx_sgl_task_params.sgl_phys_addr.lo =
+					(u32)(qedi_conn->gen_pdu.resp_dma_addr);
+		rx_sgl_task_params.sgl_phys_addr.hi =
+			     (u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
+		rx_sgl_task_params.total_buffer_size = resp_sge->sge_len;
+		rx_sgl_task_params.num_sges = 1;
+	}
+
+	/* Fill fw input params */
+	task_params.context = fw_task_ctx;
+	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+	task_params.itid = tid;
+	task_params.cq_rss_number = 0;
+	task_params.tx_io_size = data_len;
+	task_params.rx_io_size = resp_sge->sge_len;
+
+	sq_idx = qedi_get_wqe_idx(qedi_conn);
+	task_params.sqe = &ep->sq[sq_idx];
+
+	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+	rval = init_initiator_nop_out_task(&task_params,
+					   &nop_out_pdu_header,
+					   &tx_sgl_task_params,
+					   &rx_sgl_task_params);
+	if (rval)
+		return -1;
+
 	qedi_ring_doorbell(qedi_conn);
 	return 0;
 }
@@ -1905,7 +1821,7 @@  int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
 static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
 			 int bd_index)
 {
-	struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+	struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
 	int frag_size, sg_frags;
 
 	sg_frags = 0;
@@ -1938,7 +1854,7 @@  static int qedi_split_bd(struct qedi_cmd *cmd, u64 addr, int sg_len,
 static int qedi_map_scsi_sg(struct qedi_ctx *qedi, struct qedi_cmd *cmd)
 {
 	struct scsi_cmnd *sc = cmd->scsi_cmd;
-	struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+	struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
 	struct scatterlist *sg;
 	int byte_count = 0;
 	int bd_count = 0;
@@ -2040,7 +1956,7 @@  static void qedi_iscsi_map_sg_list(struct qedi_cmd *cmd)
 		if (bd_count == 0)
 			return;
 	} else {
-		struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
+		struct scsi_sge *bd = cmd->io_tbl.sge_tbl;
 
 		bd[0].sge_addr.lo = 0;
 		bd[0].sge_addr.hi = 0;
@@ -2136,244 +2052,182 @@  int qedi_iscsi_send_ioreq(struct iscsi_task *task)
 	struct qedi_conn *qedi_conn = conn->dd_data;
 	struct qedi_cmd *cmd = task->dd_data;
 	struct scsi_cmnd *sc = task->sc;
+	struct iscsi_cmd_hdr cmd_pdu_header;
+	struct scsi_sgl_task_params tx_sgl_task_params;
+	struct scsi_sgl_task_params rx_sgl_task_params;
+	struct scsi_sgl_task_params *prx_sgl = NULL;
+	struct scsi_sgl_task_params *ptx_sgl = NULL;
+	struct iscsi_task_params task_params;
+	struct iscsi_conn_params conn_params;
+	struct scsi_initiator_cmd_params cmd_params;
 	struct iscsi_task_context *fw_task_ctx;
-	struct iscsi_cached_sge_ctx *cached_sge;
-	struct iscsi_phys_sgl_ctx *phys_sgl;
-	struct iscsi_virt_sgl_ctx *virt_sgl;
-	struct ystorm_iscsi_task_st_ctx *yst_cxt;
-	struct mstorm_iscsi_task_st_ctx *mst_cxt;
-	struct iscsi_sgl *sgl_struct;
-	struct iscsi_sge *single_sge;
+	struct iscsi_cls_conn *cls_conn;
 	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
-	struct iscsi_sge *bd = cmd->io_tbl.sge_tbl;
-	enum iscsi_task_type task_type;
-	struct iscsi_cmd_hdr *fw_cmd;
-	u32 lun[2];
-	u32 exp_data;
-	u16 cq_idx = smp_processor_id() % qedi->num_queues;
-	s16 ptu_invalidate = 0;
+	enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
+	struct qedi_endpoint *ep;
+	u32 scsi_lun[2];
 	s16 tid = 0;
-	u8 num_fast_sgs;
+	u16 sq_idx = 0;
+	u16 cq_idx;
+	int rval = 0;
 
-	tid = qedi_get_task_idx(qedi);
-	if (tid == -1)
-		return -ENOMEM;
+	ep = qedi_conn->ep;
+	cls_conn = qedi_conn->cls_conn;
+	conn = cls_conn->dd_data;
 
 	qedi_iscsi_map_sg_list(cmd);
+	int_to_scsilun(sc->device->lun, (struct scsi_lun *)scsi_lun);
 
-	int_to_scsilun(sc->device->lun, (struct scsi_lun *)lun);
-	fw_task_ctx = qedi_get_task_mem(&qedi->tasks, tid);
+	tid = qedi_get_task_idx(qedi);
+	if (tid == -1)
+		return -ENOMEM;
 
+	fw_task_ctx =
+	     (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks, tid);
 	memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
-	cmd->task_id = tid;
 
-	/* Ystorm context */
-	fw_cmd = &fw_task_ctx->ystorm_st_context.pdu_hdr.cmd;
-	SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_ATTR, ISCSI_ATTR_SIMPLE);
+	cmd->task_id = tid;
 
+	memset(&task_params, 0, sizeof(task_params));
+	memset(&cmd_pdu_header, 0, sizeof(cmd_pdu_header));
+	memset(&tx_sgl_task_params, 0, sizeof(tx_sgl_task_params));
+	memset(&rx_sgl_task_params, 0, sizeof(rx_sgl_task_params));
+	memset(&conn_params, 0, sizeof(conn_params));
+	memset(&cmd_params, 0, sizeof(cmd_params));
+
+	cq_idx = smp_processor_id() % qedi->num_queues;
+	/* Update header info */
+	SET_FIELD(cmd_pdu_header.flags_attr, ISCSI_CMD_HDR_ATTR,
+		  ISCSI_ATTR_SIMPLE);
 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
-		if (conn->session->initial_r2t_en) {
-			exp_data = min((conn->session->imm_data_en *
-					conn->max_xmit_dlength),
-				       conn->session->first_burst);
-			exp_data = min(exp_data, scsi_bufflen(sc));
-			fw_task_ctx->ustorm_ag_context.exp_data_acked =
-							  cpu_to_le32(exp_data);
-		} else {
-			fw_task_ctx->ustorm_ag_context.exp_data_acked =
-			      min(conn->session->first_burst, scsi_bufflen(sc));
-		}
-
-		SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_WRITE, 1);
+		SET_FIELD(cmd_pdu_header.flags_attr,
+			  ISCSI_CMD_HDR_WRITE, 1);
 		task_type = ISCSI_TASK_TYPE_INITIATOR_WRITE;
 	} else {
-		if (scsi_bufflen(sc))
-			SET_FIELD(fw_cmd->flags_attr, ISCSI_CMD_HDR_READ, 1);
+		SET_FIELD(cmd_pdu_header.flags_attr,
+			  ISCSI_CMD_HDR_READ, 1);
 		task_type = ISCSI_TASK_TYPE_INITIATOR_READ;
 	}
 
-	fw_cmd->lun.lo = be32_to_cpu(lun[0]);
-	fw_cmd->lun.hi = be32_to_cpu(lun[1]);
+	cmd_pdu_header.lun.lo = be32_to_cpu(scsi_lun[0]);
+	cmd_pdu_header.lun.hi = be32_to_cpu(scsi_lun[1]);
 
 	qedi_update_itt_map(qedi, tid, task->itt, cmd);
-	fw_cmd->itt = qedi_set_itt(tid, get_itt(task->itt));
-	fw_cmd->expected_transfer_length = scsi_bufflen(sc);
-	fw_cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
-	fw_cmd->opcode = hdr->opcode;
-	qedi_cpy_scsi_cdb(sc, (u32 *)fw_cmd->cdb);
-
-	/* Mstorm context */
-	fw_task_ctx->mstorm_st_context.sense_db.lo = (u32)cmd->sense_buffer_dma;
-	fw_task_ctx->mstorm_st_context.sense_db.hi =
-					(u32)((u64)cmd->sense_buffer_dma >> 32);
-	fw_task_ctx->mstorm_ag_context.task_cid = qedi_conn->iscsi_conn_id;
-	fw_task_ctx->mstorm_st_context.task_type = task_type;
-
-	if (qedi->tid_reuse_count[tid] == QEDI_MAX_TASK_NUM) {
-		ptu_invalidate = 1;
-		qedi->tid_reuse_count[tid] = 0;
-	}
-	fw_task_ctx->ystorm_st_context.state.reuse_count =
-						     qedi->tid_reuse_count[tid];
-	fw_task_ctx->mstorm_st_context.reuse_count =
-						   qedi->tid_reuse_count[tid]++;
-
-	/* Ustorm context */
-	fw_task_ctx->ustorm_st_context.rem_rcv_len = scsi_bufflen(sc);
-	fw_task_ctx->ustorm_st_context.exp_data_transfer_len = scsi_bufflen(sc);
-	fw_task_ctx->ustorm_st_context.exp_data_sn =
-						   be32_to_cpu(hdr->exp_statsn);
-	fw_task_ctx->ustorm_st_context.task_type = task_type;
-	fw_task_ctx->ustorm_st_context.cq_rss_number = cq_idx;
-	fw_task_ctx->ustorm_ag_context.icid = (u16)qedi_conn->iscsi_conn_id;
-
-	SET_FIELD(fw_task_ctx->ustorm_ag_context.flags1,
-		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
-	SET_FIELD(fw_task_ctx->ustorm_st_context.flags,
-		  USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 0);
-
-	num_fast_sgs = (cmd->io_tbl.sge_valid ?
-			min((u16)QEDI_FAST_SGE_COUNT,
-			    (u16)cmd->io_tbl.sge_valid) : 0);
-	SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-		  ISCSI_REG1_NUM_FAST_SGES, num_fast_sgs);
-
-	fw_task_ctx->ustorm_st_context.lun.lo = be32_to_cpu(lun[0]);
-	fw_task_ctx->ustorm_st_context.lun.hi = be32_to_cpu(lun[1]);
-
-	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO, "Total sge count [%d]\n",
-		  cmd->io_tbl.sge_valid);
-
-	yst_cxt = &fw_task_ctx->ystorm_st_context;
-	mst_cxt = &fw_task_ctx->mstorm_st_context;
-	/* Tx path */
+	cmd_pdu_header.itt = qedi_set_itt(tid, get_itt(task->itt));
+	cmd_pdu_header.expected_transfer_length = cpu_to_be32(hdr->data_length);
+	cmd_pdu_header.hdr_second_dword = ntoh24(hdr->dlength);
+	cmd_pdu_header.cmd_sn = be32_to_cpu(hdr->cmdsn);
+	cmd_pdu_header.opcode = hdr->opcode;
+	qedi_cpy_scsi_cdb(sc, (u32 *)cmd_pdu_header.cdb);
+
+	/* Fill the Tx/Rx SGL task params */
 	if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
-		/* not considering  superIO or FastIO */
-		if (cmd->io_tbl.sge_valid == 1) {
-			cached_sge = &yst_cxt->state.sgl_ctx_union.cached_sge;
-			cached_sge->sge.sge_addr.lo = bd[0].sge_addr.lo;
-			cached_sge->sge.sge_addr.hi = bd[0].sge_addr.hi;
-			cached_sge->sge.sge_len = bd[0].sge_len;
-			qedi->cached_sgls++;
-		} else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
-			SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-				  ISCSI_MFLAGS_SLOW_IO, 1);
-			SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-				  ISCSI_REG1_NUM_FAST_SGES, 0);
-			phys_sgl = &yst_cxt->state.sgl_ctx_union.phys_sgl;
-			phys_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
-			phys_sgl->sgl_base.hi =
-				     (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
-			phys_sgl->sgl_size = cmd->io_tbl.sge_valid;
-			qedi->slow_sgls++;
-		} else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
-			SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-				  ISCSI_MFLAGS_SLOW_IO, 0);
-			SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-				  ISCSI_REG1_NUM_FAST_SGES,
-				  min((u16)QEDI_FAST_SGE_COUNT,
-				      (u16)cmd->io_tbl.sge_valid));
-			virt_sgl = &yst_cxt->state.sgl_ctx_union.virt_sgl;
-			virt_sgl->sgl_base.lo = (u32)(cmd->io_tbl.sge_tbl_dma);
-			virt_sgl->sgl_base.hi =
+		tx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+		tx_sgl_task_params.sgl_phys_addr.lo =
+						 (u32)(cmd->io_tbl.sge_tbl_dma);
+		tx_sgl_task_params.sgl_phys_addr.hi =
 				      (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
-			virt_sgl->sgl_initial_offset =
-				 (u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
-			qedi->fast_sgls++;
-		}
-		fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
-		fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
-	} else {
-	/* Rx path */
-		if (cmd->io_tbl.sge_valid == 1) {
-			SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-				  ISCSI_MFLAGS_SLOW_IO, 0);
-			SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-				  ISCSI_MFLAGS_SINGLE_SGE, 1);
-			single_sge = &mst_cxt->sgl_union.single_sge;
-			single_sge->sge_addr.lo = bd[0].sge_addr.lo;
-			single_sge->sge_addr.hi = bd[0].sge_addr.hi;
-			single_sge->sge_len = bd[0].sge_len;
-			qedi->cached_sgls++;
-		} else if ((cmd->io_tbl.sge_valid != 1) && cmd->use_slowpath) {
-			sgl_struct = &mst_cxt->sgl_union.sgl_struct;
-			sgl_struct->sgl_addr.lo =
-						(u32)(cmd->io_tbl.sge_tbl_dma);
-			sgl_struct->sgl_addr.hi =
-				     (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
-			SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-				  ISCSI_MFLAGS_SLOW_IO, 1);
-			SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-				  ISCSI_REG1_NUM_FAST_SGES, 0);
-			sgl_struct->updated_sge_size = 0;
-			sgl_struct->updated_sge_offset = 0;
-			qedi->slow_sgls++;
-		} else if ((cmd->io_tbl.sge_valid != 1) && !cmd->use_slowpath) {
-			sgl_struct = &mst_cxt->sgl_union.sgl_struct;
-			sgl_struct->sgl_addr.lo =
-						(u32)(cmd->io_tbl.sge_tbl_dma);
-			sgl_struct->sgl_addr.hi =
-				     (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
-			sgl_struct->byte_offset =
-				(u32)bd[0].sge_addr.lo & (QEDI_PAGE_SIZE - 1);
-			SET_FIELD(fw_task_ctx->mstorm_st_context.flags.mflags,
-				  ISCSI_MFLAGS_SLOW_IO, 0);
-			SET_FIELD(fw_task_ctx->ustorm_st_context.reg1.reg1_map,
-				  ISCSI_REG1_NUM_FAST_SGES, 0);
-			sgl_struct->updated_sge_size = 0;
-			sgl_struct->updated_sge_offset = 0;
-			qedi->fast_sgls++;
-		}
-		fw_task_ctx->mstorm_st_context.sgl_size = cmd->io_tbl.sge_valid;
-		fw_task_ctx->mstorm_st_context.rem_task_size = scsi_bufflen(sc);
-	}
-
-	if (cmd->io_tbl.sge_valid == 1)
-		/* Singel-SGL */
-		qedi->use_cached_sge = true;
-	else {
+		tx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+		tx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
 		if (cmd->use_slowpath)
-			qedi->use_slow_sge = true;
-		else
-			qedi->use_fast_sge = true;
-	}
+			tx_sgl_task_params.small_mid_sge = true;
+	} else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+		rx_sgl_task_params.sgl = cmd->io_tbl.sge_tbl;
+		rx_sgl_task_params.sgl_phys_addr.lo =
+						 (u32)(cmd->io_tbl.sge_tbl_dma);
+		rx_sgl_task_params.sgl_phys_addr.hi =
+				      (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32);
+		rx_sgl_task_params.total_buffer_size = scsi_bufflen(sc);
+		rx_sgl_task_params.num_sges = cmd->io_tbl.sge_valid;
+	}
+
+	/* Connection parameters; the firmware derives the allowed
+	 * unsolicited data for this command from these.
+	 */
+	conn_params.first_burst_length = conn->session->first_burst;
+	conn_params.max_send_pdu_length = conn->max_xmit_dlength;
+	conn_params.max_burst_length = conn->session->max_burst;
+	if (conn->session->initial_r2t_en)
+		conn_params.initial_r2t = true;
+	if (conn->session->imm_data_en)
+		conn_params.immediate_data = true;
+
+	/* Add cmd params */
+	cmd_params.sense_data_buffer_phys_addr.lo = (u32)cmd->sense_buffer_dma;
+	cmd_params.sense_data_buffer_phys_addr.hi =
+					(u32)((u64)cmd->sense_buffer_dma >> 32);
+	/* Fill fw input params */
+	task_params.context = fw_task_ctx;
+	task_params.conn_icid = (u16)qedi_conn->iscsi_conn_id;
+	task_params.itid = tid;
+	task_params.cq_rss_number = cq_idx;
+	if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE)
+		task_params.tx_io_size = scsi_bufflen(sc);
+	else if (task_type == ISCSI_TASK_TYPE_INITIATOR_READ)
+		task_params.rx_io_size = scsi_bufflen(sc);
+
+	sq_idx = qedi_get_wqe_idx(qedi_conn);
+	task_params.sqe = &ep->sq[sq_idx];
+
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
-		  "%s: %s-SGL: num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
+		  "%s: %s-SGL: sg_len=0x%x num_sges=0x%x first-sge-lo=0x%x first-sge-hi=0x%x",
 		  (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) ?
 		  "Write " : "Read ", (cmd->io_tbl.sge_valid == 1) ?
 		  "Single" : (cmd->use_slowpath ? "SLOW" : "FAST"),
-		  (u16)cmd->io_tbl.sge_valid, (u32)(cmd->io_tbl.sge_tbl_dma),
+		  scsi_bufflen(sc), (u16)cmd->io_tbl.sge_valid,
+		  (u32)(cmd->io_tbl.sge_tbl_dma),
 		  (u32)((u64)cmd->io_tbl.sge_tbl_dma >> 32));
 
-	/*  Add command in active command list */
+	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+
+	if (task_params.tx_io_size != 0)
+		ptx_sgl = &tx_sgl_task_params;
+	if (task_params.rx_io_size != 0)
+		prx_sgl = &rx_sgl_task_params;
+
+	rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
+					    &cmd_params, &cmd_pdu_header,
+					    ptx_sgl, prx_sgl,
+					    NULL);
+	if (rval)
+		return -1;
+
 	spin_lock(&qedi_conn->list_lock);
 	list_add_tail(&cmd->io_cmd, &qedi_conn->active_cmd_list);
 	cmd->io_cmd_in_list = true;
 	qedi_conn->active_cmd_count++;
 	spin_unlock(&qedi_conn->list_lock);
 
-	qedi_add_to_sq(qedi_conn, task, tid, ptu_invalidate, false);
 	qedi_ring_doorbell(qedi_conn);
-	if (qedi_io_tracing)
-		qedi_trace_io(qedi, task, tid, QEDI_IO_TRACE_REQ);
-
 	return 0;
 }
 
 int qedi_iscsi_cleanup_task(struct iscsi_task *task, bool mark_cmd_node_deleted)
 {
+	struct iscsi_task_params task_params;
+	struct qedi_endpoint *ep;
 	struct iscsi_conn *conn = task->conn;
 	struct qedi_conn *qedi_conn = conn->dd_data;
 	struct qedi_cmd *cmd = task->dd_data;
-	s16 ptu_invalidate = 0;
+	u16 sq_idx = 0;
+	int rval = 0;
 
 	QEDI_INFO(&qedi_conn->qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
 		  "issue cleanup tid=0x%x itt=0x%x task_state=%d cmd_state=0%x cid=0x%x\n",
 		  cmd->task_id, get_itt(task->itt), task->state,
 		  cmd->state, qedi_conn->iscsi_conn_id);
 
-	qedi_add_to_sq(qedi_conn, task, cmd->task_id, ptu_invalidate, true);
-	qedi_ring_doorbell(qedi_conn);
+	memset(&task_params, 0, sizeof(task_params));
+	ep = qedi_conn->ep;
+
+	sq_idx = qedi_get_wqe_idx(qedi_conn);
+
+	task_params.sqe = &ep->sq[sq_idx];
+	memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
+	task_params.itid = cmd->task_id;
 
+	rval = init_cleanup_task(&task_params);
+	if (rval)
+		return rval;
+
+	qedi_ring_doorbell(qedi_conn);
 	return 0;
 }
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
new file mode 100644
index 0000000..c2b6144
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -0,0 +1,782 @@ 
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
+#include "qedi_fw_iscsi.h"
+#include "qedi_fw_scsi.h"
+
+#define SCSI_NUM_SGES_IN_CACHE 0x4
+
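+/* An SGL takes the firmware slow path only when it is both longer
+ * than the fast-SGE threshold and flagged as having small middle
+ * SGEs; either condition alone still permits fast I/O.
+ */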
+static bool scsi_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+	return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
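+/* Program the SGL descriptor into the task context and mirror the
+ * first (up to) SCSI_NUM_SGES_IN_CACHE SGEs into the context's
+ * cached-SGE area, converting all fields to little-endian.
+ */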
+static
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+			   struct scsi_cached_sges *ctx_data_desc,
+			   struct scsi_sgl_task_params *sgl_task_params)
+{
+	u8 sge_index;
+	u8 num_sges;
+	u32 val;
+
+	num_sges = (sgl_task_params->num_sges > SCSI_NUM_SGES_IN_CACHE) ?
+			     SCSI_NUM_SGES_IN_CACHE : sgl_task_params->num_sges;
+
+	/* sgl params */
+	val = cpu_to_le32(sgl_task_params->sgl_phys_addr.lo);
+	ctx_sgl_params->sgl_addr.lo = val;
+	val = cpu_to_le32(sgl_task_params->sgl_phys_addr.hi);
+	ctx_sgl_params->sgl_addr.hi = val;
+	val = cpu_to_le32(sgl_task_params->total_buffer_size);
+	ctx_sgl_params->sgl_total_length = val;
+	ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_task_params->num_sges);
+
+	for (sge_index = 0; sge_index < num_sges; sge_index++) {
+		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.lo);
+		ctx_data_desc->sge[sge_index].sge_addr.lo = val;
+		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_addr.hi);
+		ctx_data_desc->sge[sge_index].sge_addr.hi = val;
+		val = cpu_to_le32(sgl_task_params->sgl[sge_index].sge_len);
+		ctx_data_desc->sge[sge_index].sge_len = val;
+	}
+}
+
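+/* The task size is the I/O size for the data direction in use; when
+ * DIF travels on the wire (dif_on_network) the full buffer size,
+ * which also covers the protection data, is used instead.
+ */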
+static u32 calc_rw_task_size(struct iscsi_task_params *task_params,
+			     enum iscsi_task_type task_type,
+			     struct scsi_sgl_task_params *sgl_task_params,
+			     struct scsi_dif_task_params *dif_task_params)
+{
+	u32 io_size;
+
+	if (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+	    task_type == ISCSI_TASK_TYPE_TARGET_READ)
+		io_size = task_params->tx_io_size;
+	else
+		io_size = task_params->rx_io_size;
+
+	if (!io_size)
+		return 0;
+
+	if (!dif_task_params)
+		return io_size;
+
+	return !dif_task_params->dif_on_network ?
+	       io_size : sgl_task_params->total_buffer_size;
+}
+
+static void
+init_dif_context_flags(struct iscsi_dif_flags *ctx_dif_flags,
+		       struct scsi_dif_task_params *dif_task_params)
+{
+	if (!dif_task_params)
+		return;
+
+	SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG,
+		  dif_task_params->dif_block_size_log);
+	SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_DIF_TO_PEER,
+		  dif_task_params->dif_on_network ? 1 : 0);
+	SET_FIELD(ctx_dif_flags->flags, ISCSI_DIF_FLAGS_HOST_INTERFACE,
+		  dif_task_params->dif_on_host ? 1 : 0);
+}
+
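+/* Build the SQE for the task. Cleanup WQEs carry only the task id and
+ * WQE type; normal and mid-path WQEs additionally encode the SGE
+ * count, the continuation length and, when an AHS is present, the
+ * extended CDB size.
+ */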
+static void init_sqe(struct iscsi_task_params *task_params,
+		     struct scsi_sgl_task_params *sgl_task_params,
+		     struct scsi_dif_task_params *dif_task_params,
+		     struct iscsi_common_hdr *pdu_header,
+		     struct scsi_initiator_cmd_params *cmd_params,
+		     enum iscsi_task_type task_type,
+		     bool is_cleanup)
+{
+	if (!task_params->sqe)
+		return;
+
+	memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+	task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+	if (is_cleanup) {
+		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+			  ISCSI_WQE_TYPE_TASK_CLEANUP);
+		return;
+	}
+
+	switch (task_type) {
+	case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+	{
+		u32 buf_size = 0;
+		u32 num_sges = 0;
+
+		init_dif_context_flags(&task_params->sqe->prot_flags,
+				       dif_task_params);
+
+		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+			  ISCSI_WQE_TYPE_NORMAL);
+
+		if (task_params->tx_io_size) {
+			buf_size = calc_rw_task_size(task_params, task_type,
+						     sgl_task_params,
+						     dif_task_params);
+
+			if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+					     sgl_task_params->small_mid_sge))
+				num_sges = ISCSI_WQE_NUM_SGES_SLOWIO;
+			else
+				num_sges = min(sgl_task_params->num_sges,
+					       (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+		}
+
+		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges);
+		SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
+			  buf_size);
+
+		if (GET_FIELD(pdu_header->hdr_second_dword,
+			      ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+			SET_FIELD(task_params->sqe->contlen_cdbsize,
+				  ISCSI_WQE_CDB_SIZE,
+				  cmd_params->extended_cdb_sge.sge_len);
+	}
+		break;
+	case ISCSI_TASK_TYPE_INITIATOR_READ:
+		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+			  ISCSI_WQE_TYPE_NORMAL);
+
+		if (GET_FIELD(pdu_header->hdr_second_dword,
+			      ISCSI_CMD_HDR_TOTAL_AHS_LEN))
+			SET_FIELD(task_params->sqe->contlen_cdbsize,
+				  ISCSI_WQE_CDB_SIZE,
+				  cmd_params->extended_cdb_sge.sge_len);
+		break;
+	case ISCSI_TASK_TYPE_LOGIN_RESPONSE:
+	case ISCSI_TASK_TYPE_MIDPATH:
+	{
+		bool advance_statsn = true;
+
+		if (task_type == ISCSI_TASK_TYPE_LOGIN_RESPONSE)
+			SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+				  ISCSI_WQE_TYPE_LOGIN);
+		else
+			SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
+				  ISCSI_WQE_TYPE_MIDDLE_PATH);
+
+		if (task_type == ISCSI_TASK_TYPE_MIDPATH) {
+			u8 opcode = GET_FIELD(pdu_header->hdr_first_byte,
+					      ISCSI_COMMON_HDR_OPCODE);
+
+			if (opcode != ISCSI_OPCODE_TEXT_RESPONSE &&
+			    (opcode != ISCSI_OPCODE_NOP_IN ||
+			    pdu_header->itt == ISCSI_TTT_ALL_ONES))
+				advance_statsn = false;
+		}
+
+		SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
+			  advance_statsn ? 1 : 0);
+
+		if (task_params->tx_io_size) {
+			SET_FIELD(task_params->sqe->contlen_cdbsize,
+				  ISCSI_WQE_CONT_LEN, task_params->tx_io_size);
+
+			if (scsi_is_slow_sgl(sgl_task_params->num_sges,
+					     sgl_task_params->small_mid_sge))
+				SET_FIELD(task_params->sqe->flags,
+					  ISCSI_WQE_NUM_SGES,
+					  ISCSI_WQE_NUM_SGES_SLOWIO);
+			else
+				SET_FIELD(task_params->sqe->flags,
+					  ISCSI_WQE_NUM_SGES,
+					  min(sgl_task_params->num_sges,
+					      (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+		}
+	}
+		break;
+	default:
+		break;
+	}
+}
+
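+/* Initialization common to all task types: zero the context, copy the
+ * PDU header dwords into the Y-Storm context and set the connection
+ * icid and the CQ on which the task completes.
+ */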
+static void init_default_iscsi_task(struct iscsi_task_params *task_params,
+				    struct data_hdr *pdu_header,
+				    enum iscsi_task_type task_type)
+{
+	struct iscsi_task_context *context;
+	u16 index;
+	u32 val;
+
+	context = task_params->context;
+	memset(context, 0, sizeof(*context));
+
+	for (index = 0; index <
+	     ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
+	     index++) {
+		val = cpu_to_le32(pdu_header->data[index]);
+		context->ystorm_st_context.pdu_hdr.data.data[index] = val;
+	}
+
+	context->mstorm_st_context.task_type = task_type;
+	context->mstorm_ag_context.task_cid =
+					    cpu_to_le16(task_params->conn_icid);
+
+	SET_FIELD(context->ustorm_ag_context.flags1,
+		  USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+
+	context->ustorm_st_context.task_type = task_type;
+	context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+	context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
+
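+/* When the command uses an extended CDB, it is referenced from the
+ * Y-Storm PDU header as an SGE instead of being copied inline.
+ */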
+static
+void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
+					  struct scsi_initiator_cmd_params *cmd)
+{
+	union iscsi_task_hdr *ctx_pdu_hdr = &ystc->pdu_hdr;
+	u32 val;
+
+	if (!cmd->extended_cdb_sge.sge_len)
+		return;
+
+	SET_FIELD(ctx_pdu_hdr->ext_cdb_cmd.hdr_second_dword,
+		  ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE,
+		  cmd->extended_cdb_sge.sge_len);
+	val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.lo);
+	ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.lo = val;
+	val = cpu_to_le32(cmd->extended_cdb_sge.sge_addr.hi);
+	ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_addr.hi = val;
+	val = cpu_to_le32(cmd->extended_cdb_sge.sge_len);
+	ctx_pdu_hdr->ext_cdb_cmd.cdb_sge.sge_len  = val;
+}
+
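+/* Program the U-Storm contexts with the remaining receive length, the
+ * expected transfer length and the SGE count, and optionally arm the
+ * Tx-DIF connection error flag.
+ */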
+static
+void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
+			       struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+			       u32 remaining_recv_len,
+			       u32 expected_data_transfer_len,
+			       u8 num_sges, bool tx_dif_conn_err_en)
+{
+	u32 val;
+
+	ustorm_st_cxt->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+	ustorm_ag_cxt->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+	val = cpu_to_le32(expected_data_transfer_len);
+	ustorm_st_cxt->exp_data_transfer_len = val;
+	SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
+	SET_FIELD(ustorm_ag_cxt->flags2,
+		  USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+		  tx_dif_conn_err_en ? 1 : 0);
+}
+
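+/* exp_data_acked is the data the initiator may send unsolicited: the
+ * full first burst without InitialR2T, or min(first burst, max send
+ * PDU) with ImmediateData only. When an AHS is present, only the AHS
+ * bytes are expected up front.
+ */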
+static
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
+					struct iscsi_conn_params  *conn_params,
+					enum iscsi_task_type task_type,
+					u32 task_size,
+					u32 exp_data_transfer_len,
+					u8 total_ahs_length)
+{
+	u32 max_unsolicited_data = 0, val;
+
+	if (total_ahs_length &&
+	    (task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE ||
+	     task_type == ISCSI_TASK_TYPE_INITIATOR_READ))
+		SET_FIELD(context->ustorm_st_context.flags2,
+			  USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST, 1);
+
+	switch (task_type) {
+	case ISCSI_TASK_TYPE_INITIATOR_WRITE:
+		if (!conn_params->initial_r2t)
+			max_unsolicited_data = conn_params->first_burst_length;
+		else if (conn_params->immediate_data)
+			max_unsolicited_data =
+					  min(conn_params->first_burst_length,
+					      conn_params->max_send_pdu_length);
+
+		context->ustorm_ag_context.exp_data_acked =
+				   cpu_to_le32(total_ahs_length == 0 ?
+						min(exp_data_transfer_len,
+						    max_unsolicited_data) :
+						((u32)(total_ahs_length +
+						       ISCSI_AHS_CNTL_SIZE)));
+		break;
+	case ISCSI_TASK_TYPE_TARGET_READ:
+		val = cpu_to_le32(exp_data_transfer_len);
+		context->ustorm_ag_context.exp_data_acked = val;
+		break;
+	case ISCSI_TASK_TYPE_INITIATOR_READ:
+		context->ustorm_ag_context.exp_data_acked =
+					cpu_to_le32((total_ahs_length == 0 ? 0 :
+						     total_ahs_length +
+						     ISCSI_AHS_CNTL_SIZE));
+		break;
+	case ISCSI_TASK_TYPE_TARGET_WRITE:
+		val = cpu_to_le32(task_size);
+		context->ustorm_ag_context.exp_cont_len = val;
+		break;
+	default:
+		break;
+	}
+}
+
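+/* DIF is programmed per direction: the RDIF context covers data the
+ * chip receives (initiator reads, target writes) and the TDIF context
+ * covers data it transmits (initiator writes, target reads).
+ */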
+static
+void init_rtdif_task_context(struct rdif_task_context *rdif_context,
+			     struct tdif_task_context *tdif_context,
+			     struct scsi_dif_task_params *dif_task_params,
+			     enum iscsi_task_type task_type)
+{
+	u32 val;
+
+	if (!dif_task_params->dif_on_network || !dif_task_params->dif_on_host)
+		return;
+
+	if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE ||
+	    task_type == ISCSI_TASK_TYPE_INITIATOR_READ) {
+		rdif_context->app_tag_value =
+				  cpu_to_le16(dif_task_params->application_tag);
+		rdif_context->partial_crc_value = cpu_to_le16(0xffff);
+		val = cpu_to_le32(dif_task_params->initial_ref_tag);
+		rdif_context->initial_ref_tag = val;
+		rdif_context->app_tag_mask =
+			     cpu_to_le16(dif_task_params->application_tag_mask);
+		SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
+			  dif_task_params->crc_seed ? 1 : 0);
+		SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+			  dif_task_params->host_guard_type);
+		SET_FIELD(rdif_context->flags0,
+			  RDIF_TASK_CONTEXT_PROTECTIONTYPE,
+			  dif_task_params->protection_type);
+		SET_FIELD(rdif_context->flags0,
+			  RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
+		SET_FIELD(rdif_context->flags0,
+			  RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+			  dif_task_params->keep_ref_tag_const ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+			  (dif_task_params->validate_app_tag &&
+			  dif_task_params->dif_on_network) ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_VALIDATEGUARD,
+			  (dif_task_params->validate_guard &&
+			  dif_task_params->dif_on_network) ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_VALIDATEREFTAG,
+			  (dif_task_params->validate_ref_tag &&
+			  dif_task_params->dif_on_network) ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_HOSTINTERFACE,
+			  dif_task_params->dif_on_host ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_NETWORKINTERFACE,
+			  dif_task_params->dif_on_network ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_FORWARDGUARD,
+			  dif_task_params->forward_guard ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_FORWARDAPPTAG,
+			  dif_task_params->forward_app_tag ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_FORWARDREFTAG,
+			  dif_task_params->forward_ref_tag ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+		SET_FIELD(rdif_context->flags1,
+			  RDIF_TASK_CONTEXT_INTERVALSIZE,
+			  dif_task_params->dif_block_size_log - 9);
+		SET_FIELD(rdif_context->state,
+			  RDIF_TASK_CONTEXT_REFTAGMASK,
+			  dif_task_params->ref_tag_mask);
+		SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
+			  dif_task_params->ignore_app_tag);
+	}
+
+	if (task_type == ISCSI_TASK_TYPE_TARGET_READ ||
+	    task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
+		tdif_context->app_tag_value =
+				  cpu_to_le16(dif_task_params->application_tag);
+		tdif_context->partial_crc_valueB =
+		       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+		tdif_context->partial_crc_value_a =
+		       cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
+		SET_FIELD(tdif_context->flags0, TDIF_TASK_CONTEXT_CRC_SEED,
+			  dif_task_params->crc_seed ? 1 : 0);
+
+		SET_FIELD(tdif_context->flags0,
+			  TDIF_TASK_CONTEXT_SETERRORWITHEOP,
+			  dif_task_params->tx_dif_conn_err_en ? 1 : 0);
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
+			  dif_task_params->forward_guard   ? 1 : 0);
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
+			  dif_task_params->forward_app_tag ? 1 : 0);
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
+			  dif_task_params->forward_ref_tag ? 1 : 0);
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
+			  dif_task_params->dif_block_size_log - 9);
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
+			  dif_task_params->dif_on_host    ? 1 : 0);
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_NETWORKINTERFACE,
+			  dif_task_params->dif_on_network ? 1 : 0);
+		val = cpu_to_le32(dif_task_params->initial_ref_tag);
+		tdif_context->initial_ref_tag = val;
+		tdif_context->app_tag_mask =
+			     cpu_to_le16(dif_task_params->application_tag_mask);
+		SET_FIELD(tdif_context->flags0,
+			  TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
+			  dif_task_params->host_guard_type);
+		SET_FIELD(tdif_context->flags0,
+			  TDIF_TASK_CONTEXT_PROTECTIONTYPE,
+			  dif_task_params->protection_type);
+		SET_FIELD(tdif_context->flags0,
+			  TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
+			  dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
+		SET_FIELD(tdif_context->flags0,
+			  TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
+			  dif_task_params->keep_ref_tag_const ? 1 : 0);
+		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
+			  (dif_task_params->validate_guard &&
+			   dif_task_params->dif_on_host) ? 1 : 0);
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
+			  (dif_task_params->validate_app_tag &&
+			  dif_task_params->dif_on_host) ? 1 : 0);
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_VALIDATEREFTAG,
+			  (dif_task_params->validate_ref_tag &&
+			   dif_task_params->dif_on_host) ? 1 : 0);
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
+			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
+			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
+		SET_FIELD(tdif_context->flags1,
+			  TDIF_TASK_CONTEXT_REFTAGMASK,
+			  dif_task_params->ref_tag_mask);
+		SET_FIELD(tdif_context->flags0,
+			  TDIF_TASK_CONTEXT_IGNOREAPPTAG,
+			  dif_task_params->ignore_app_tag ? 1 : 0);
+	}
+}
+
+static void set_local_completion_context(struct iscsi_task_context *context)
+{
+	SET_FIELD(context->ystorm_st_context.state.flags,
+		  YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
+	SET_FIELD(context->ustorm_st_context.flags,
+		  USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
+static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
+			      enum iscsi_task_type task_type,
+			      struct iscsi_conn_params *conn_params,
+			      struct iscsi_common_hdr *pdu_header,
+			      struct scsi_sgl_task_params *sgl_task_params,
+			      struct scsi_initiator_cmd_params *cmd_params,
+			      struct scsi_dif_task_params *dif_task_params)
+{
+	u32 exp_data_transfer_len = conn_params->max_burst_length;
+	struct iscsi_task_context *cxt;
+	bool slow_io = false;
+	u32 task_size, val;
+	u8 num_sges = 0;
+
+	task_size = calc_rw_task_size(task_params, task_type, sgl_task_params,
+				      dif_task_params);
+
+	init_default_iscsi_task(task_params, (struct data_hdr *)pdu_header,
+				task_type);
+
+	cxt = task_params->context;
+
+	val = cpu_to_le32(task_size);
+	cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
+	init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
+					     cmd_params);
+	val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
+	cxt->mstorm_st_context.sense_db.lo = val;
+
+	val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
+	cxt->mstorm_st_context.sense_db.hi = val;
+
+	if (task_params->tx_io_size) {
+		init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
+				       dif_task_params);
+		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+				      &cxt->ystorm_st_context.state.data_desc,
+				      sgl_task_params);
+
+		slow_io = scsi_is_slow_sgl(sgl_task_params->num_sges,
+					   sgl_task_params->small_mid_sge);
+
+		num_sges = !slow_io ? min_t(u16, sgl_task_params->num_sges,
+					    (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+				      ISCSI_WQE_NUM_SGES_SLOWIO;
+
+		if (slow_io) {
+			SET_FIELD(cxt->ystorm_st_context.state.flags,
+				  YSTORM_ISCSI_TASK_STATE_SLOW_IO, 1);
+		}
+	} else if (task_params->rx_io_size) {
+		init_dif_context_flags(&cxt->mstorm_st_context.dif_flags,
+				       dif_task_params);
+		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+				      &cxt->mstorm_st_context.data_desc,
+				      sgl_task_params);
+		num_sges = !scsi_is_slow_sgl(sgl_task_params->num_sges,
+				sgl_task_params->small_mid_sge) ?
+				min_t(u16, sgl_task_params->num_sges,
+				      (u16)SCSI_NUM_SGES_SLOW_SGL_THR) :
+				ISCSI_WQE_NUM_SGES_SLOWIO;
+		cxt->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+	}
+
+	if (exp_data_transfer_len > task_size ||
+	    task_type != ISCSI_TASK_TYPE_TARGET_WRITE)
+		exp_data_transfer_len = task_size;
+
+	init_ustorm_task_contexts(&task_params->context->ustorm_st_context,
+				  &task_params->context->ustorm_ag_context,
+				  task_size, exp_data_transfer_len, num_sges,
+				  dif_task_params ?
+				  dif_task_params->tx_dif_conn_err_en : false);
+
+	set_rw_exp_data_acked_and_cont_len(task_params->context, conn_params,
+					   task_type, task_size,
+					   exp_data_transfer_len,
+					GET_FIELD(pdu_header->hdr_second_dword,
+						  ISCSI_CMD_HDR_TOTAL_AHS_LEN));
+
+	if (dif_task_params)
+		init_rtdif_task_context(&task_params->context->rdif_context,
+					&task_params->context->tdif_context,
+					dif_task_params, task_type);
+
+	init_sqe(task_params, sgl_task_params, dif_task_params, pdu_header,
+		 cmd_params, task_type, false);
+
+	return 0;
+}
+
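+/* Entry point used by qedi for SCSI commands: the WRITE/READ flag in
+ * the command PDU header selects the direction, so only the matching
+ * SGL is consumed. A typical call from the I/O path:
+ *
+ *	rval = init_initiator_rw_iscsi_task(&task_params, &conn_params,
+ *					    &cmd_params, &cmd_pdu_header,
+ *					    ptx_sgl, prx_sgl, NULL);
+ */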
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+				 struct iscsi_conn_params *conn_params,
+				 struct scsi_initiator_cmd_params *cmd_params,
+				 struct iscsi_cmd_hdr *cmd_header,
+				 struct scsi_sgl_task_params *tx_sgl_params,
+				 struct scsi_sgl_task_params *rx_sgl_params,
+				 struct scsi_dif_task_params *dif_task_params)
+{
+	if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_WRITE))
+		return init_rw_iscsi_task(task_params,
+					  ISCSI_TASK_TYPE_INITIATOR_WRITE,
+					  conn_params,
+					  (struct iscsi_common_hdr *)cmd_header,
+					  tx_sgl_params, cmd_params,
+					  dif_task_params);
+	else if (GET_FIELD(cmd_header->flags_attr, ISCSI_CMD_HDR_READ))
+		return init_rw_iscsi_task(task_params,
+					  ISCSI_TASK_TYPE_INITIATOR_READ,
+					  conn_params,
+					  (struct iscsi_common_hdr *)cmd_header,
+					  rx_sgl_params, cmd_params,
+					  dif_task_params);
+	else
+		return -1;
+}
+
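+/* The mid-path initializers below follow one pattern: default task
+ * init, optional Tx/Rx SGL programming, U-Storm lengths taken from
+ * the buffer sizes, and finally the SQE.
+ */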
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+				      struct iscsi_login_req_hdr  *login_header,
+				      struct scsi_sgl_task_params *tx_params,
+				      struct scsi_sgl_task_params *rx_params)
+{
+	struct iscsi_task_context *cxt;
+
+	cxt = task_params->context;
+
+	init_default_iscsi_task(task_params,
+				(struct data_hdr *)login_header,
+				ISCSI_TASK_TYPE_MIDPATH);
+
+	init_ustorm_task_contexts(&cxt->ustorm_st_context,
+				  &cxt->ustorm_ag_context,
+				  task_params->rx_io_size ?
+				  rx_params->total_buffer_size : 0,
+				  task_params->tx_io_size ?
+				  tx_params->total_buffer_size : 0, 0,
+				  0);
+
+	if (task_params->tx_io_size)
+		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+				      &cxt->ystorm_st_context.state.data_desc,
+				      tx_params);
+
+	if (task_params->rx_io_size)
+		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+				      &cxt->mstorm_st_context.data_desc,
+				      rx_params);
+
+	cxt->mstorm_st_context.rem_task_size =
+			cpu_to_le32(task_params->rx_io_size ?
+				    rx_params->total_buffer_size : 0);
+
+	init_sqe(task_params, tx_params, NULL,
+		 (struct iscsi_common_hdr *)login_header, NULL,
+		 ISCSI_TASK_TYPE_MIDPATH, false);
+
+	return 0;
+}
+
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+				struct iscsi_nop_out_hdr *nop_out_pdu_header,
+				struct scsi_sgl_task_params *tx_sgl_task_params,
+				struct scsi_sgl_task_params *rx_sgl_task_params)
+{
+	struct iscsi_task_context *cxt;
+
+	cxt = task_params->context;
+
+	init_default_iscsi_task(task_params,
+				(struct data_hdr *)nop_out_pdu_header,
+				ISCSI_TASK_TYPE_MIDPATH);
+
+	if (nop_out_pdu_header->itt == ISCSI_ITT_ALL_ONES)
+		set_local_completion_context(task_params->context);
+
+	if (task_params->tx_io_size)
+		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+				      &cxt->ystorm_st_context.state.data_desc,
+				      tx_sgl_task_params);
+
+	if (task_params->rx_io_size)
+		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+				      &cxt->mstorm_st_context.data_desc,
+				      rx_sgl_task_params);
+
+	init_ustorm_task_contexts(&cxt->ustorm_st_context,
+				  &cxt->ustorm_ag_context,
+				  task_params->rx_io_size ?
+				  rx_sgl_task_params->total_buffer_size : 0,
+				  task_params->tx_io_size ?
+				  tx_sgl_task_params->total_buffer_size : 0,
+				  0, 0);
+
+	cxt->mstorm_st_context.rem_task_size =
+				cpu_to_le32(task_params->rx_io_size ?
+					rx_sgl_task_params->total_buffer_size :
+					0);
+
+	init_sqe(task_params, tx_sgl_task_params, NULL,
+		 (struct iscsi_common_hdr *)nop_out_pdu_header, NULL,
+		 ISCSI_TASK_TYPE_MIDPATH, false);
+
+	return 0;
+}
+
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+				       struct iscsi_logout_req_hdr *logout_hdr,
+				       struct scsi_sgl_task_params *tx_params,
+				       struct scsi_sgl_task_params *rx_params)
+{
+	struct iscsi_task_context *cxt;
+
+	cxt = task_params->context;
+
+	init_default_iscsi_task(task_params,
+				(struct data_hdr *)logout_hdr,
+				ISCSI_TASK_TYPE_MIDPATH);
+
+	if (task_params->tx_io_size)
+		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+				      &cxt->ystorm_st_context.state.data_desc,
+				      tx_params);
+
+	if (task_params->rx_io_size)
+		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+				      &cxt->mstorm_st_context.data_desc,
+				      rx_params);
+
+	init_ustorm_task_contexts(&cxt->ustorm_st_context,
+				  &cxt->ustorm_ag_context,
+				  task_params->rx_io_size ?
+				  rx_params->total_buffer_size : 0,
+				  task_params->tx_io_size ?
+				  tx_params->total_buffer_size : 0,
+				  0, 0);
+
+	cxt->mstorm_st_context.rem_task_size =
+					cpu_to_le32(task_params->rx_io_size ?
+					rx_params->total_buffer_size : 0);
+
+	init_sqe(task_params, tx_params, NULL,
+		 (struct iscsi_common_hdr *)logout_hdr, NULL,
+		 ISCSI_TASK_TYPE_MIDPATH, false);
+
+	return 0;
+}
+
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+				    struct iscsi_tmf_request_hdr *tmf_header)
+{
+	init_default_iscsi_task(task_params, (struct data_hdr *)tmf_header,
+				ISCSI_TASK_TYPE_MIDPATH);
+
+	init_sqe(task_params, NULL, NULL,
+		 (struct iscsi_common_hdr *)tmf_header, NULL,
+		 ISCSI_TASK_TYPE_MIDPATH, false);
+
+	return 0;
+}
+
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+				     struct iscsi_text_request_hdr *text_header,
+				     struct scsi_sgl_task_params *tx_params,
+				     struct scsi_sgl_task_params *rx_params)
+{
+	struct iscsi_task_context *cxt;
+
+	cxt = task_params->context;
+
+	init_default_iscsi_task(task_params,
+				(struct data_hdr *)text_header,
+				ISCSI_TASK_TYPE_MIDPATH);
+
+	if (task_params->tx_io_size)
+		init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
+				      &cxt->ystorm_st_context.state.data_desc,
+				      tx_params);
+
+	if (task_params->rx_io_size)
+		init_scsi_sgl_context(&cxt->mstorm_st_context.sgl_params,
+				      &cxt->mstorm_st_context.data_desc,
+				      rx_params);
+
+	cxt->mstorm_st_context.rem_task_size =
+				cpu_to_le32(task_params->rx_io_size ?
+					rx_params->total_buffer_size : 0);
+
+	init_ustorm_task_contexts(&cxt->ustorm_st_context,
+				  &cxt->ustorm_ag_context,
+				  task_params->rx_io_size ?
+				  rx_params->total_buffer_size : 0,
+				  task_params->tx_io_size ?
+				  tx_params->total_buffer_size : 0, 0, 0);
+
+	init_sqe(task_params, tx_params, NULL,
+		 (struct iscsi_common_hdr *)text_header, NULL,
+		 ISCSI_TASK_TYPE_MIDPATH, false);
+
+	return 0;
+}
+
+int init_cleanup_task(struct iscsi_task_params *task_params)
+{
+	init_sqe(task_params, NULL, NULL, NULL, NULL, ISCSI_TASK_TYPE_MIDPATH,
+		 true);
+	return 0;
+}
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
new file mode 100644
index 0000000..b6f24f9
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -0,0 +1,117 @@ 
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_ISCSI_H_
+#define _QEDI_FW_ISCSI_H_
+
+#include "qedi_fw_scsi.h"
+
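+/* Everything needed to program one task: the context and SQE to fill,
+ * the I/O size per direction, the connection/task ids and the CQ on
+ * which the firmware will complete the task.
+ */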
+struct iscsi_task_params {
+	struct iscsi_task_context *context;
+	struct iscsi_wqe	  *sqe;
+	u32			  tx_io_size;
+	u32			  rx_io_size;
+	u16			  conn_icid;
+	u16			  itid;
+	u8			  cq_rss_number;
+};
+
+struct iscsi_conn_params {
+	u32	first_burst_length;
+	u32	max_send_pdu_length;
+	u32	max_burst_length;
+	bool	initial_r2t;
+	bool	immediate_data;
+};
+
+/* @brief init_initiator_rw_iscsi_task - initializes iSCSI Initiator
+ * Read/Write task context.
+ *
+ * @param task_params	  - Pointer to task parameters struct
+ * @param conn_params	  - Connection Parameters
+ * @param cmd_params	  - command specific parameters
+ * @param cmd_pdu_header  - PDU Header Parameters
+ * @param tx_sgl_params   - Pointer to Tx SGL task params
+ * @param rx_sgl_params   - Pointer to Rx SGL task params
+ * @param dif_task_params - Pointer to DIF parameters struct
+ */
+int init_initiator_rw_iscsi_task(struct iscsi_task_params *task_params,
+				 struct iscsi_conn_params *conn_params,
+				 struct scsi_initiator_cmd_params *cmd_params,
+				 struct iscsi_cmd_hdr *cmd_pdu_header,
+				 struct scsi_sgl_task_params *tx_sgl_params,
+				 struct scsi_sgl_task_params *rx_sgl_params,
+				 struct scsi_dif_task_params *dif_task_params);
+
+/* @brief init_initiator_login_request_task - initializes iSCSI Initiator Login
+ * Request task context.
+ *
+ * @param task_params		  - Pointer to task parameters struct
+ * @param login_header	  - PDU Header Parameters
+ * @param tx_params	  - Pointer to Tx SGL task params
+ * @param rx_params	  - Pointer to Rx SGL task params
+ */
+int init_initiator_login_request_task(struct iscsi_task_params *task_params,
+				      struct iscsi_login_req_hdr *login_header,
+				      struct scsi_sgl_task_params *tx_params,
+				      struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_nop_out_task - initializes iSCSI Initiator NOP Out
+ * task context.
+ *
+ * @param task_params		- Pointer to task parameters struct
+ * @param nop_out_pdu_header	- PDU Header Parameters
+ * @param tx_sgl_params		- Pointer to Tx SGL task params
+ * @param rx_sgl_params		- Pointer to Rx SGL task params
+ */
+int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
+				struct iscsi_nop_out_hdr *nop_out_pdu_header,
+				struct scsi_sgl_task_params *tx_sgl_params,
+				struct scsi_sgl_task_params *rx_sgl_params);
+
+/* @brief init_initiator_logout_request_task - initializes iSCSI Initiator
+ * Logout Request task context.
+ *
+ * @param task_params		- Pointer to task parameters struct
+ * @param logout_hdr		- PDU Header Parameters
+ * @param tx_params		- Pointer to Tx SGL task params
+ * @param rx_params		- Pointer to Rx SGL task params
+ */
+int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
+				       struct iscsi_logout_req_hdr *logout_hdr,
+				       struct scsi_sgl_task_params *tx_params,
+				       struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_initiator_tmf_request_task - initializes iSCSI Initiator TMF
+ * task context.
+ *
+ * @param task_params	- Pointer to task parameters struct
+ * @param tmf_header	- PDU Header Parameters
+ */
+int init_initiator_tmf_request_task(struct iscsi_task_params *task_params,
+				    struct iscsi_tmf_request_hdr *tmf_header);
+
+/* @brief init_initiator_text_request_task - initializes iSCSI Initiator Text
+ * Request task context.
+ *
+ * @param task_params		     - Pointer to task parameters struct
+ * @param text_header	     - PDU Header Parameters
+ * @param tx_params	     - Pointer to Tx SGL task params
+ * @param rx_params	     - Pointer to Rx SGL task params
+ */
+int init_initiator_text_request_task(struct iscsi_task_params *task_params,
+				     struct iscsi_text_request_hdr *text_header,
+				     struct scsi_sgl_task_params *tx_params,
+				     struct scsi_sgl_task_params *rx_params);
+
+/* @brief init_cleanup_task - initializes a Cleanup task (SQE only)
+ *
+ * @param task_params - Pointer to task parameters struct
+ */
+int init_cleanup_task(struct iscsi_task_params *task_params);
+#endif
diff --git a/drivers/scsi/qedi/qedi_fw_scsi.h b/drivers/scsi/qedi/qedi_fw_scsi.h
new file mode 100644
index 0000000..cdaf918
--- /dev/null
+++ b/drivers/scsi/qedi/qedi_fw_scsi.h
@@ -0,0 +1,55 @@ 
+/*
+ * QLogic iSCSI Offload Driver
+ * Copyright (c) 2016 Cavium Inc.
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QEDI_FW_SCSI_H_
+#define _QEDI_FW_SCSI_H_
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include "qedi_hsi.h"
+#include <linux/qed/qed_if.h>
+
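+/* One SGL as the firmware code consumes it: a virtual pointer for
+ * walking the table, its DMA address for the chip, and the
+ * small_mid_sge hint used when selecting the slow SGL path.
+ */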
+struct scsi_sgl_task_params {
+	struct scsi_sge	*sgl;
+	struct regpair	sgl_phys_addr;
+	u32		total_buffer_size;
+	u16		num_sges;
+	bool		small_mid_sge;
+};
+
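+/* T10 DIF controls for a task: tag values and masks, guard and
+ * protection types, which interfaces (host/network) carry protection
+ * data, and per-field validate/forward switches.
+ */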
+struct scsi_dif_task_params {
+	u32	initial_ref_tag;
+	bool	initial_ref_tag_is_valid;
+	u16	application_tag;
+	u16	application_tag_mask;
+	u16	dif_block_size_log;
+	bool	dif_on_network;
+	bool	dif_on_host;
+	u8	host_guard_type;
+	u8	protection_type;
+	u8	ref_tag_mask;
+	bool	crc_seed;
+	bool	tx_dif_conn_err_en;
+	bool	ignore_app_tag;
+	bool	keep_ref_tag_const;
+	bool	validate_guard;
+	bool	validate_app_tag;
+	bool	validate_ref_tag;
+	bool	forward_guard;
+	bool	forward_app_tag;
+	bool	forward_ref_tag;
+	bool	forward_app_tag_with_mask;
+	bool	forward_ref_tag_with_mask;
+};
+
+struct scsi_initiator_cmd_params {
+	struct scsi_sge	extended_cdb_sge;
+	struct regpair	sense_data_buffer_phys_addr;
+};
+#endif
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index b9f79d3..d5eff68 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -175,7 +175,7 @@  static void qedi_destroy_cmd_pool(struct qedi_ctx *qedi,
 		if (cmd->io_tbl.sge_tbl)
 			dma_free_coherent(&qedi->pdev->dev,
 					  QEDI_ISCSI_MAX_BDS_PER_CMD *
-					  sizeof(struct iscsi_sge),
+					  sizeof(struct scsi_sge),
 					  cmd->io_tbl.sge_tbl,
 					  cmd->io_tbl.sge_tbl_dma);
 
@@ -191,7 +191,7 @@  static int qedi_alloc_sget(struct qedi_ctx *qedi, struct iscsi_session *session,
 			   struct qedi_cmd *cmd)
 {
 	struct qedi_io_bdt *io = &cmd->io_tbl;
-	struct iscsi_sge *sge;
+	struct scsi_sge *sge;
 
 	io->sge_tbl = dma_alloc_coherent(&qedi->pdev->dev,
 					 QEDI_ISCSI_MAX_BDS_PER_CMD *
@@ -708,22 +708,20 @@  static void qedi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
 
 static void qedi_iscsi_prep_generic_pdu_bd(struct qedi_conn *qedi_conn)
 {
-	struct iscsi_sge *bd_tbl;
+	struct scsi_sge *bd_tbl;
 
-	bd_tbl = (struct iscsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
+	bd_tbl = (struct scsi_sge *)qedi_conn->gen_pdu.req_bd_tbl;
 
 	bd_tbl->sge_addr.hi =
 		(u32)((u64)qedi_conn->gen_pdu.req_dma_addr >> 32);
 	bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.req_dma_addr;
 	bd_tbl->sge_len = qedi_conn->gen_pdu.req_wr_ptr -
 				qedi_conn->gen_pdu.req_buf;
-	bd_tbl->reserved0 = 0;
-	bd_tbl = (struct iscsi_sge  *)qedi_conn->gen_pdu.resp_bd_tbl;
+	bd_tbl = (struct scsi_sge  *)qedi_conn->gen_pdu.resp_bd_tbl;
 	bd_tbl->sge_addr.hi =
 			(u32)((u64)qedi_conn->gen_pdu.resp_dma_addr >> 32);
 	bd_tbl->sge_addr.lo = (u32)qedi_conn->gen_pdu.resp_dma_addr;
 	bd_tbl->sge_len = ISCSI_DEF_MAX_RECV_SEG_LEN;
-	bd_tbl->reserved0 = 0;
 }
 
 static int qedi_iscsi_send_generic_request(struct iscsi_task *task)
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index d3c06bb..3247287 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -102,7 +102,7 @@  struct qedi_endpoint {
 #define QEDI_SQ_WQES_MIN	16
 
 struct qedi_io_bdt {
-	struct iscsi_sge *sge_tbl;
+	struct scsi_sge *sge_tbl;
 	dma_addr_t sge_tbl_dma;
 	u16 sge_valid;
 };
diff --git a/drivers/scsi/qedi/qedi_version.h b/drivers/scsi/qedi/qedi_version.h
index 9543a1b..d61e3ac 100644
--- a/drivers/scsi/qedi/qedi_version.h
+++ b/drivers/scsi/qedi/qedi_version.h
@@ -7,8 +7,8 @@ 
  * this source tree.
  */
 
-#define QEDI_MODULE_VERSION	"8.10.3.0"
+#define QEDI_MODULE_VERSION	"8.10.4.0"
 #define QEDI_DRIVER_MAJOR_VER		8
 #define QEDI_DRIVER_MINOR_VER		10
-#define QEDI_DRIVER_REV_VER		3
+#define QEDI_DRIVER_REV_VER		4
 #define QEDI_DRIVER_ENG_VER		0
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 52966b9..4269bff 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -100,8 +100,8 @@ 
 #define MAX_NUM_LL2_TX_STATS_COUNTERS	32
 
 #define FW_MAJOR_VERSION	8
-#define FW_MINOR_VERSION	10
-#define FW_REVISION_VERSION	10
+#define FW_MINOR_VERSION	15
+#define FW_REVISION_VERSION	3
 #define FW_ENGINEERING_VERSION	0
 
 /***********************/
@@ -187,6 +187,9 @@ 
 
 /* DEMS */
 #define DQ_DEMS_LEGACY			0
+#define DQ_DEMS_TOE_MORE_TO_SEND	3
+#define DQ_DEMS_TOE_LOCAL_ADV_WND	4
+#define DQ_DEMS_ROCE_CQ_CONS		7
 
 /* XCM agg val selection */
 #define DQ_XCM_AGG_VAL_SEL_WORD2  0
@@ -214,6 +217,10 @@ 
 #define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
 #define DQ_XCM_ISCSI_EXP_STAT_SN_CMD	DQ_XCM_AGG_VAL_SEL_REG6
 #define DQ_XCM_ROCE_SQ_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD	DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD	DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+
 
 /* UCM agg val selection (HW) */
 #define	DQ_UCM_AGG_VAL_SEL_WORD0	0
@@ -269,6 +276,9 @@ 
 #define DQ_XCM_ISCSI_DQ_FLUSH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
 #define DQ_XCM_ISCSI_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
 #define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD		BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD	BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
+
 
 /* UCM agg counter flag selection (HW) */
 #define	DQ_UCM_AGG_FLG_SHIFT_CF0	0
@@ -285,6 +295,9 @@ 
 #define DQ_UCM_ETH_PMD_RX_ARM_CMD	BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
 #define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD	BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
 #define DQ_UCM_ROCE_CQ_ARM_CF_CMD	BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD	BIT(DQ_UCM_AGG_FLG_SHIFT_CF3)
+#define DQ_UCM_TOE_SLOW_PATH_CF_CMD	BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_TOE_DQ_CF_CMD		BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
 
 /* TCM agg counter flag selection (HW) */
 #define DQ_TCM_AGG_FLG_SHIFT_CF0	0
@@ -301,6 +314,10 @@ 
 #define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD      BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
 #define DQ_TCM_ISCSI_FLUSH_Q0_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
 #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_TOE_FLUSH_Q0_CMD		BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_IWARP_POST_RQ_CF_CMD	BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+
 
 /* PWM address mapping */
 #define DQ_PWM_OFFSET_DPM_BASE	0x0
@@ -689,6 +706,16 @@  struct iscsi_eqe_data {
 #define ISCSI_EQE_DATA_RESERVED0_SHIFT			7
 };
 
+struct rdma_eqe_destroy_qp {
+	__le32 cid;
+	u8 reserved[4];
+};
+
+union rdma_eqe_data {
+	struct regpair async_handle;
+	struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
 struct malicious_vf_eqe_data {
 	u8 vf_id;
 	u8 err_id;
@@ -705,9 +732,9 @@  struct initial_cleanup_eqe_data {
 	u8 bytes[8];
 	struct vf_pf_channel_eqe_data vf_pf_channel;
 	struct iscsi_eqe_data iscsi_info;
+	union rdma_eqe_data rdma_data;
 	struct malicious_vf_eqe_data malicious_vf;
 	struct initial_cleanup_eqe_data vf_init_cleanup;
-	struct regpair roce_handle;
 };
 
 /* Event Ring Entry */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 4b402fb..34d93eb 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -49,6 +49,9 @@ 
 #define ETH_RX_CQE_PAGE_SIZE_BYTES                      4096
 #define ETH_RX_NUM_NEXT_PAGE_BDS                        2
 
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET          253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET          251
+
 #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT                          1
 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET                       18
 #define ETH_TX_MAX_BDS_PER_LSO_PACKET	255
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 2e417a4..947a635 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -109,13 +109,6 @@  struct fcoe_conn_terminate_ramrod_data {
 	struct regpair terminate_params_addr;
 };
 
-struct fcoe_fast_sgl_ctx {
-	struct regpair sgl_start_addr;
-	__le32 sgl_byte_offset;
-	__le16 task_reuse_cnt;
-	__le16 init_offset_in_first_sge;
-};
-
 struct fcoe_slow_sgl_ctx {
 	struct regpair base_sgl_addr;
 	__le16 curr_sge_off;
@@ -124,23 +117,16 @@  struct fcoe_slow_sgl_ctx {
 	__le16 reserved;
 };
 
-struct fcoe_sge {
-	struct regpair sge_addr;
-	__le16 size;
-	__le16 reserved0;
-	u8 reserved1[3];
-	u8 is_valid_sge;
-};
-
-union fcoe_data_desc_ctx {
-	struct fcoe_fast_sgl_ctx fast;
-	struct fcoe_slow_sgl_ctx slow;
-	struct fcoe_sge single_sge;
-};
-
 union fcoe_dix_desc_ctx {
 	struct fcoe_slow_sgl_ctx dix_sgl;
-	struct fcoe_sge cached_dix_sge;
+	struct scsi_sge cached_dix_sge;
+};
+
+struct fcoe_fast_sgl_ctx {
+	struct regpair sgl_start_addr;
+	__le32 sgl_byte_offset;
+	__le16 task_reuse_cnt;
+	__le16 init_offset_in_first_sge;
 };
 
 struct fcoe_fcp_cmd_payload {
@@ -172,57 +158,6 @@  enum fcoe_mode_type {
 	MAX_FCOE_MODE_TYPE
 };
 
-struct fcoe_mstorm_fcoe_task_st_ctx_fp {
-	__le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK                 0x7FFF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT                0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK  0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
-	__le16 difDataResidue;
-	__le16 parent_id;
-	__le16 single_sge_saved_offset;
-	__le32 data_2_trns_rem;
-	__le32 offset_in_io;
-	union fcoe_dix_desc_ctx dix_desc;
-	union fcoe_data_desc_ctx data_desc;
-};
-
-struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
-	__le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK            0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT           0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK               0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT              2
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK      0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT     3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK         0xF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT        4
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK            0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT           8
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK                  0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT                 10
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK  0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK      0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT     12
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK        0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT       13
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK        0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT       14
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK             0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT            15
-	u8 tx_rx_sgl_mode;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK               0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT              0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK               0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT              3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK                     0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT                    6
-	u8 rsrv2;
-	__le32 num_prm_zero_read;
-	struct regpair rsp_buf_addr;
-};
-
 struct fcoe_rx_stat {
 	struct regpair fcoe_rx_byte_cnt;
 	struct regpair fcoe_rx_data_pkt_cnt;
@@ -236,16 +171,6 @@  struct fcoe_rx_stat {
 	__le32 rsrv;
 };
 
-enum fcoe_sgl_mode {
-	FCOE_SLOW_SGL,
-	FCOE_SINGLE_FAST_SGE,
-	FCOE_2_FAST_SGE,
-	FCOE_3_FAST_SGE,
-	FCOE_4_FAST_SGE,
-	FCOE_MUL_FAST_SGES,
-	MAX_FCOE_SGL_MODE
-};
-
 struct fcoe_stat_ramrod_data {
 	struct regpair stat_params_addr;
 };
@@ -328,22 +253,24 @@  struct fcoe_tx_params {
 struct ystorm_fcoe_task_st_ctx {
 	u8 task_type;
 	u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK  0x7
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK  0x1
 #define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK         0x1F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT        3
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK         0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT        1
 	u8 cached_dix_sge;
 	u8 expect_first_xfer;
 	__le32 num_pbf_zero_write;
 	union protection_info_union_ctx protection_info_union;
 	__le32 data_2_trns_rem;
+	struct scsi_sgl_params sgl_params;
+	u8 reserved1[12];
 	union fcoe_tx_info_union_ctx tx_info_union;
 	union fcoe_dix_desc_ctx dix_desc;
-	union fcoe_data_desc_ctx data_desc;
+	struct scsi_cached_sges data_desc;
 	__le16 ox_id;
 	__le16 rx_id;
 	__le32 task_rety_identifier;
-	__le32 reserved1[2];
+	u8 reserved2[8];
 };
 
 struct ystorm_fcoe_task_ag_ctx {
@@ -484,22 +411,22 @@  struct tstorm_fcoe_task_ag_ctx {
 struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
 	union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
 	__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK       0x7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK       0x1
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT      0
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT  3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT  1
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK        0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT       4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT       2
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK       0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT      5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT      3
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK  0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK   0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT  7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT  5
 #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK        0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT       8
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK             0x3F
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT            10
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT       6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK             0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT            8
 	__le16 seq_cnt;
 	u8 seq_id;
 	u8 ooo_rx_seq_id;
@@ -582,8 +509,34 @@  struct mstorm_fcoe_task_ag_ctx {
 };
 
 struct mstorm_fcoe_task_st_ctx {
-	struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp;
-	struct fcoe_mstorm_fcoe_task_st_ctx_fp fp;
+	struct regpair rsp_buf_addr;
+	__le32 rsrv[2];
+	struct scsi_sgl_params sgl_params;
+	__le32 data_2_trns_rem;
+	__le32 data_buffer_offset;
+	__le16 parent_id;
+	__le16 flags;
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK     0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT    0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK        0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT       4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK           0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT          6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK  0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK        0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT       8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK  0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK    0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT   11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK         0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT        12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK           0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT          13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK              0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT             14
+	struct scsi_cached_sges data_desc;
 };
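
[The mstorm task context above loses the old fp/non-fp split and instead
embeds the generic scsi_sgl_params plus a four-entry scsi_cached_sges
array - this is the 4-Cached SGE change mentioned in the commit message.
A minimal sketch of the fast-path setup follows; the helper name and its
arguments are illustrative, not taken from the driver sources:

	/* Illustrative sketch only - assumes SET_FIELD() from common_hsi.h */
	static void init_mstorm_sketch(struct mstorm_fcoe_task_st_ctx *m,
				       u32 task_size, bool slow_sgl)
	{
		m->data_2_trns_rem = cpu_to_le32(task_size);
		SET_FIELD(m->flags, MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
			  slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL);
		/* m->sgl_params and m->data_desc would be filled by the
		 * common SCSI SGL helpers (see storage_common.h below).
		 */
	}
]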
 
 struct ustorm_fcoe_task_ag_ctx {
@@ -646,6 +599,7 @@  struct ustorm_fcoe_task_ag_ctx {
 
 struct fcoe_task_context {
 	struct ystorm_fcoe_task_st_ctx ystorm_st_context;
+	struct regpair ystorm_st_padding[2];
 	struct tdif_task_context tdif_context;
 	struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
 	struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
@@ -668,20 +622,20 @@  struct fcoe_tx_stat {
 struct fcoe_wqe {
 	__le16 task_id;
 	__le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK        0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT       0
-#define FCOE_WQE_SGL_MODE_MASK        0x7
-#define FCOE_WQE_SGL_MODE_SHIFT       4
-#define FCOE_WQE_CONTINUATION_MASK    0x1
-#define FCOE_WQE_CONTINUATION_SHIFT   7
-#define FCOE_WQE_INVALIDATE_PTU_MASK  0x1
-#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8
-#define FCOE_WQE_SUPER_IO_MASK        0x1
-#define FCOE_WQE_SUPER_IO_SHIFT       9
-#define FCOE_WQE_SEND_AUTO_RSP_MASK   0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT  10
-#define FCOE_WQE_RESERVED0_MASK       0x1F
-#define FCOE_WQE_RESERVED0_SHIFT      11
+#define FCOE_WQE_REQ_TYPE_MASK       0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT      0
+#define FCOE_WQE_SGL_MODE_MASK       0x1
+#define FCOE_WQE_SGL_MODE_SHIFT      4
+#define FCOE_WQE_CONTINUATION_MASK   0x1
+#define FCOE_WQE_CONTINUATION_SHIFT  5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK  0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
+#define FCOE_WQE_RESERVED_MASK       0x1
+#define FCOE_WQE_RESERVED_SHIFT      7
+#define FCOE_WQE_NUM_SGES_MASK       0xF
+#define FCOE_WQE_NUM_SGES_SHIFT      8
+#define FCOE_WQE_RESERVED1_MASK      0xF
+#define FCOE_WQE_RESERVED1_SHIFT     12
 	union fcoe_additional_info_union additional_info_union;
 };
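
[The reworked fcoe_wqe flags shrink SGL_MODE to a single bit (matching
the new two-value scsi_sgl_mode), drop PTU_INVALIDATE/SUPER_IO, and add
an explicit 4-bit NUM_SGES. For illustration, a minimal sketch of
filling such a WQE, assuming the usual SET_FIELD() helper from
common_hsi.h; the helper name and arguments are made up for the example:

	/* Illustrative sketch only - not taken from the driver sources */
	static void fcoe_wqe_fill_sketch(struct fcoe_wqe *sqe, u16 itid,
					 u8 req_type, u8 num_sges,
					 bool slow_sgl)
	{
		memset(sqe, 0, sizeof(*sqe));
		sqe->task_id = cpu_to_le16(itid);
		SET_FIELD(sqe->flags, FCOE_WQE_REQ_TYPE, req_type);
		/* SGL_MODE is one bit now: SCSI_TX_SLOW_SGL or SCSI_FAST_SGL */
		SET_FIELD(sqe->flags, FCOE_WQE_SGL_MODE,
			  slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL);
		/* new in 8.15.3.0: the SGE count rides in the WQE itself */
		SET_FIELD(sqe->flags, FCOE_WQE_NUM_SGES, num_sges);
	}
]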
 
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 4c5747b..69949f8 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -39,17 +39,9 @@ 
 /* iSCSI HSI constants */
 #define ISCSI_DEFAULT_MTU       (1500)
 
-/* Current iSCSI HSI version number composed of two fields (16 bit) */
-#define ISCSI_HSI_MAJOR_VERSION (0)
-#define ISCSI_HSI_MINOR_VERSION (0)
-
 /* KWQ (kernel work queue) layer codes */
 #define ISCSI_SLOW_PATH_LAYER_CODE   (6)
 
-/* CQE completion status */
-#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
-#define ISCSI_EQE_RST_CONN_RCVD (0x1)
-
 /* iSCSI parameter defaults */
 #define ISCSI_DEFAULT_HEADER_DIGEST         (0)
 #define ISCSI_DEFAULT_DATA_DIGEST           (0)
@@ -68,6 +60,10 @@ 
 #define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T   (1)
 #define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T   (0xff)
 
+#define ISCSI_AHS_CNTL_SIZE 4
+
+#define ISCSI_WQE_NUM_SGES_SLOWIO           (0xf)
+
 /* iSCSI reserved params */
 #define ISCSI_ITT_ALL_ONES	(0xffffffff)
 #define ISCSI_TTT_ALL_ONES	(0xffffffff)
@@ -173,19 +169,6 @@  struct iscsi_async_msg_hdr {
 	__le32 reserved7;
 };
 
-struct iscsi_sge {
-	struct regpair sge_addr;
-	__le16 sge_len;
-	__le16 reserved0;
-	__le32 reserved1;
-};
-
-struct iscsi_cached_sge_ctx {
-	struct iscsi_sge sge;
-	struct regpair reserved;
-	__le32 dsgl_curr_offset[2];
-};
-
 struct iscsi_cmd_hdr {
 	__le16 reserved1;
 	u8 flags_attr;
@@ -229,8 +212,13 @@  struct iscsi_common_hdr {
 #define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT  0
 #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK  0xFF
 #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
-	__le32 lun_reserved[4];
-	__le32 data[6];
+	struct regpair lun_reserved;
+	__le32 itt;
+	__le32 ttt;
+	__le32 cmdstat_sn;
+	__le32 exp_statcmd_sn;
+	__le32 max_cmd_sn;
+	__le32 data[3];
 };
 
 struct iscsi_conn_offload_params {
@@ -246,8 +234,10 @@  struct iscsi_conn_offload_params {
 #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK     0x1
 #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT    1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK       0x3F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT      2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK  0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK        0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT       3

 	u8 pbl_page_size_log;
 	u8 pbe_page_size_log;
 	u8 default_cq;
@@ -278,8 +268,12 @@  struct iscsi_conn_update_ramrod_params {
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT    2
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK  0x1
 #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0xF
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK  0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK  0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK       0x3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT      6
 	u8 reserved0[3];
 	__le32 max_seq_size;
 	__le32 max_send_pdu_length;
@@ -312,7 +306,7 @@  struct iscsi_ext_cdb_cmd_hdr {
 	__le32 expected_transfer_length;
 	__le32 cmd_sn;
 	__le32 exp_stat_sn;
-	struct iscsi_sge cdb_sge;
+	struct scsi_sge cdb_sge;
 };
 
 struct iscsi_login_req_hdr {
@@ -519,8 +513,8 @@  struct iscsi_logout_response_hdr {
 	__le32 exp_cmd_sn;
 	__le32 max_cmd_sn;
 	__le32 reserved4;
-	__le16 time2retain;
-	__le16 time2wait;
+	__le16 time_2_retain;
+	__le16 time_2_wait;
 	__le32 reserved5[1];
 };
 
@@ -602,7 +596,7 @@  struct iscsi_tmf_response_hdr {
 #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
 	struct regpair reserved0;
 	__le32 itt;
-	__le32 rtt;
+	__le32 reserved1;
 	__le32 stat_sn;
 	__le32 exp_cmd_sn;
 	__le32 max_cmd_sn;
@@ -641,7 +635,7 @@  struct iscsi_reject_hdr {
 #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK  0xFF
 #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
 	struct regpair reserved0;
-	__le32 reserved1;
+	__le32 all_ones;
 	__le32 reserved2;
 	__le32 stat_sn;
 	__le32 exp_cmd_sn;
@@ -688,7 +682,9 @@  struct iscsi_cqe_solicited {
 	__le16 itid;
 	u8 task_type;
 	u8 fw_dbg_field;
-	__le32 reserved1[2];
+	u8 caused_conn_err;
+	u8 reserved0[3];
+	__le32 reserved1[1];
 	union iscsi_task_hdr iscsi_hdr;
 };
 
@@ -727,35 +723,6 @@  enum iscsi_cqe_unsolicited_type {
 	MAX_ISCSI_CQE_UNSOLICITED_TYPE
 };
 
-struct iscsi_virt_sgl_ctx {
-	struct regpair sgl_base;
-	struct regpair dsgl_base;
-	__le32 sgl_initial_offset;
-	__le32 dsgl_initial_offset;
-	__le32 dsgl_curr_offset[2];
-};
-
-struct iscsi_sgl_var_params {
-	u8 sgl_ptr;
-	u8 dsgl_ptr;
-	__le16 sge_offset;
-	__le16 dsge_offset;
-};
-
-struct iscsi_phys_sgl_ctx {
-	struct regpair sgl_base;
-	struct regpair dsgl_base;
-	u8 sgl_size;
-	u8 dsgl_size;
-	__le16 reserved;
-	struct iscsi_sgl_var_params var_params[2];
-};
-
-union iscsi_data_desc_ctx {
-	struct iscsi_virt_sgl_ctx virt_sgl;
-	struct iscsi_phys_sgl_ctx phys_sgl;
-	struct iscsi_cached_sge_ctx cached_sge;
-};
 
 struct iscsi_debug_modes {
 	u8 flags;
@@ -771,8 +738,10 @@  struct iscsi_debug_modes {
 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK              0x1
 #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT             5
-#define ISCSI_DEBUG_MODES_RESERVED0_MASK                       0x3
-#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT                      6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK     0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT    6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK             0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT            7
 };
 
 struct iscsi_dif_flags {
@@ -806,7 +775,6 @@  enum iscsi_eqe_opcode {
 	ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
 	ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
 	ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
-	ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
 	MAX_ISCSI_EQE_OPCODE
 };
 
@@ -856,31 +824,11 @@  enum iscsi_error_types {
 	ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
 	ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
 	ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
+	ISCSI_CONN_ERROR_INVALID_ITT,
 	ISCSI_ERROR_UNKNOWN,
 	MAX_ISCSI_ERROR_TYPES
 };
 
-struct iscsi_mflags {
-	u8 mflags;
-#define ISCSI_MFLAGS_SLOW_IO_MASK     0x1
-#define ISCSI_MFLAGS_SLOW_IO_SHIFT    0
-#define ISCSI_MFLAGS_SINGLE_SGE_MASK  0x1
-#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
-#define ISCSI_MFLAGS_RESERVED_MASK    0x3F
-#define ISCSI_MFLAGS_RESERVED_SHIFT   2
-};
-
-struct iscsi_sgl {
-	struct regpair sgl_addr;
-	__le16 updated_sge_size;
-	__le16 updated_sge_offset;
-	__le32 byte_offset;
-};
-
-union iscsi_mstorm_sgl {
-	struct iscsi_sgl sgl_struct;
-	struct iscsi_sge single_sge;
-};
 
 enum iscsi_ramrod_cmd_id {
 	ISCSI_RAMROD_CMD_ID_UNUSED = 0,
@@ -896,10 +844,10 @@  enum iscsi_ramrod_cmd_id {
 
 struct iscsi_reg1 {
 	__le32 reg1_map;
-#define ISCSI_REG1_NUM_FAST_SGES_MASK  0x7
-#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_REG1_RESERVED1_MASK      0x1FFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT     3
+#define ISCSI_REG1_NUM_SGES_MASK   0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT  0
+#define ISCSI_REG1_RESERVED1_MASK  0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 4
 };
 
 union iscsi_seq_num {
@@ -967,22 +915,33 @@  struct iscsi_spe_func_init {
 };
 
 struct ystorm_iscsi_task_state {
-	union iscsi_data_desc_ctx sgl_ctx_union;
-	__le32 buffer_offset[2];
-	__le16 bytes_nxt_dif;
-	__le16 rxmit_bytes_nxt_dif;
-	union iscsi_seq_num seq_num_union;
-	u8 dif_bytes_leftover;
-	u8 rxmit_dif_bytes_leftover;
-	__le16 reuse_count;
-	struct iscsi_dif_flags dif_flags;
-	u8 local_comp;
+	struct scsi_cached_sges data_desc;
+	struct scsi_sgl_params sgl_params;
 	__le32 exp_r2t_sn;
-	__le32 sgl_offset[2];
+	__le32 buffer_offset;
+	union iscsi_seq_num seq_num;
+	struct iscsi_dif_flags dif_flags;
+	u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK  0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK     0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT    1
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK   0x3F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT  2
+};
+
+struct ystorm_iscsi_task_rxmit_opt {
+	__le32 fast_rxmit_sge_offset;
+	__le32 scan_start_buffer_offset;
+	__le32 fast_rxmit_buffer_offset;
+	u8 scan_start_sgl_index;
+	u8 fast_rxmit_sgl_index;
+	__le16 reserved;
 };
 
 struct ystorm_iscsi_task_st_ctx {
 	struct ystorm_iscsi_task_state state;
+	struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
 	union iscsi_task_hdr pdu_hdr;
 };
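
[ystorm_iscsi_task_state now folds local_comp and the old
ISCSI_MFLAGS_SLOW_IO bit into a single flags byte, and the new
ystorm_iscsi_task_rxmit_opt block gives firmware a cached SGL position
to resume retransmit scans from instead of rewalking the SGL. A sketch
of how the slow-I/O decision might be recorded, assuming SET_FIELD()
and the SCSI_NUM_SGES_SLOW_SGL_THR threshold added to storage_common.h
further down; the helper itself is illustrative:

	static void
	iscsi_set_slow_io_sketch(struct ystorm_iscsi_task_state *state,
				 u16 num_sges)
	{
		/* SGLs longer than the threshold take the slow path */
		bool slow_io = num_sges > SCSI_NUM_SGES_SLOW_SGL_THR;

		SET_FIELD(state->flags, YSTORM_ISCSI_TASK_STATE_SLOW_IO,
			  slow_io ? 1 : 0);
	}
]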
 
@@ -1152,25 +1111,16 @@  struct ustorm_iscsi_task_ag_ctx {
 };
 
 struct mstorm_iscsi_task_st_ctx {
-	union iscsi_mstorm_sgl sgl_union;
-	struct iscsi_dif_flags dif_flags;
-	struct iscsi_mflags flags;
-	u8 sgl_size;
-	u8 host_sge_index;
-	__le16 dix_cur_sge_offset;
-	__le16 dix_cur_sge_size;
-	__le32 data_offset_rtid;
-	u8 dif_offset;
-	u8 dix_sgl_size;
-	u8 dix_sge_index;
+	struct scsi_cached_sges data_desc;
+	struct scsi_sgl_params sgl_params;
+	__le32 rem_task_size;
+	__le32 data_buffer_offset;
 	u8 task_type;
+	struct iscsi_dif_flags dif_flags;
+	u8 reserved0[2];
 	struct regpair sense_db;
-	struct regpair dix_sgl_cur_sge;
-	__le32 rem_task_size;
-	__le16 reuse_count;
-	__le16 dif_data_residue;
-	u8 reserved0[4];
-	__le32 reserved1[1];
+	__le32 expected_itt;
+	__le32 reserved1;
 };
 
 struct ustorm_iscsi_task_st_ctx {
@@ -1184,7 +1134,7 @@  struct ustorm_iscsi_task_st_ctx {
 #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT            0
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK             0x7F
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT            1
-	u8 reserved2;
+	struct iscsi_dif_flags dif_flags;
 	__le16 reserved3;
 	__le32 reserved4;
 	__le32 reserved5;
@@ -1207,10 +1157,10 @@  struct ustorm_iscsi_task_st_ctx {
 #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT           2
 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK        0x1
 #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT       3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK   0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT  4
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK        0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT       5
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK  0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK        0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT       5
 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK         0x1
 #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT        6
 #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK             0x1
@@ -1220,7 +1170,6 @@  struct ustorm_iscsi_task_st_ctx {
 
 struct iscsi_task_context {
 	struct ystorm_iscsi_task_st_ctx ystorm_st_context;
-	struct regpair ystorm_st_padding[2];
 	struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
 	struct regpair ystorm_ag_padding[2];
 	struct tdif_task_context tdif_context;
@@ -1272,32 +1221,22 @@  struct iscsi_uhqe {
 #define ISCSI_UHQE_TASK_ID_LO_SHIFT         24
 };
 
-struct iscsi_wqe_field {
-	__le32 contlen_cdbsize_field;
-#define ISCSI_WQE_FIELD_CONT_LEN_MASK  0xFFFFFF
-#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_FIELD_CDB_SIZE_MASK  0xFF
-#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
-};
-
-union iscsi_wqe_field_union {
-	struct iscsi_wqe_field cont_field;
-	__le32 prev_tid;
-};
 
 struct iscsi_wqe {
 	__le16 task_id;
 	u8 flags;
 #define ISCSI_WQE_WQE_TYPE_MASK        0x7
 #define ISCSI_WQE_WQE_TYPE_SHIFT       0
-#define ISCSI_WQE_NUM_FAST_SGES_MASK   0x7
-#define ISCSI_WQE_NUM_FAST_SGES_SHIFT  3
-#define ISCSI_WQE_PTU_INVALIDATE_MASK  0x1
-#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
+#define ISCSI_WQE_NUM_SGES_MASK        0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT       3
 #define ISCSI_WQE_RESPONSE_MASK        0x1
 #define ISCSI_WQE_RESPONSE_SHIFT       7
 	struct iscsi_dif_flags prot_flags;
-	union iscsi_wqe_field_union cont_prevtid_union;
+	__le32 contlen_cdbsize;
+#define ISCSI_WQE_CONT_LEN_MASK  0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_CDB_SIZE_MASK  0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT 24
 };
 
 enum iscsi_wqe_type {
@@ -1318,17 +1257,15 @@  struct iscsi_xhqe {
 	u8 total_ahs_length;
 	u8 opcode;
 	u8 flags;
-#define ISCSI_XHQE_NUM_FAST_SGES_MASK  0x7
-#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_XHQE_FINAL_MASK          0x1
-#define ISCSI_XHQE_FINAL_SHIFT         3
-#define ISCSI_XHQE_SUPER_IO_MASK       0x1
-#define ISCSI_XHQE_SUPER_IO_SHIFT      4
-#define ISCSI_XHQE_STATUS_BIT_MASK     0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT    5
-#define ISCSI_XHQE_RESERVED_MASK       0x3
-#define ISCSI_XHQE_RESERVED_SHIFT      6
-	union iscsi_seq_num seq_num_union;
+#define ISCSI_XHQE_FINAL_MASK       0x1
+#define ISCSI_XHQE_FINAL_SHIFT      0
+#define ISCSI_XHQE_STATUS_BIT_MASK  0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
+#define ISCSI_XHQE_NUM_SGES_MASK    0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT   2
+#define ISCSI_XHQE_RESERVED0_MASK   0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT  6
+	union iscsi_seq_num seq_num;
 	__le16 reserved1;
 };
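
[With the wqe_field union gone, iscsi_wqe carries continuation length
and CDB size in a plain dword, and NUM_SGES moves into the flags byte
(0xf, ISCSI_WQE_NUM_SGES_SLOWIO, marks a slow-I/O SGL). Roughly,
filling a command WQE would look like the sketch below;
ISCSI_WQE_TYPE_NORMAL and the helper shape are assumptions made for
the example:

	/* Illustrative sketch only */
	static void iscsi_wqe_fill_sketch(struct iscsi_wqe *wqe, u16 itid,
					  u32 cont_len, u8 num_sges,
					  bool slow_io)
	{
		memset(wqe, 0, sizeof(*wqe));
		wqe->task_id = cpu_to_le16(itid);
		SET_FIELD(wqe->flags, ISCSI_WQE_WQE_TYPE,
			  ISCSI_WQE_TYPE_NORMAL);
		/* 0xf is the "too many SGEs for the fast path" sentinel */
		SET_FIELD(wqe->flags, ISCSI_WQE_NUM_SGES,
			  slow_io ? ISCSI_WQE_NUM_SGES_SLOWIO : num_sges);
		SET_FIELD(wqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN, cont_len);
	}
]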
 
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index bad02df..866f063 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -38,4 +38,21 @@ 
 
 #define ROCE_MAX_QPS	(32 * 1024)
 
+enum roce_async_events_type {
+	ROCE_ASYNC_EVENT_NONE = 0,
+	ROCE_ASYNC_EVENT_COMM_EST = 1,
+	ROCE_ASYNC_EVENT_SQ_DRAINED,
+	ROCE_ASYNC_EVENT_SRQ_LIMIT,
+	ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+	ROCE_ASYNC_EVENT_CQ_ERR,
+	ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+	ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+	ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+	ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+	ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+	ROCE_ASYNC_EVENT_SRQ_EMPTY,
+	ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+	MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
 #endif /* __ROCE_COMMON__ */
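
[These async event codes let the RoCE driver demultiplex firmware
notifications without private defines. A hedged sketch of a consumer -
the callback shape is illustrative; only the enum values come from
this header:

	static void roce_async_sketch(u8 fw_event_code)
	{
		switch (fw_event_code) {
		case ROCE_ASYNC_EVENT_COMM_EST:
			/* connection established - notify the QP owner */
			break;
		case ROCE_ASYNC_EVENT_SQ_DRAINED:
		case ROCE_ASYNC_EVENT_LAST_WQE_REACHED:
			/* QP-scoped notifications */
			break;
		case ROCE_ASYNC_EVENT_CQ_ERR:
		case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
			/* CQ-scoped errors */
			break;
		default:
			break;
		}
	}
]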
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 03f3e37..08df82a 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -40,6 +40,8 @@ 
 #define BDQ_ID_IMM_DATA          (1)
 #define BDQ_NUM_IDS          (2)
 
+#define SCSI_NUM_SGES_SLOW_SGL_THR      8
+
 #define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
 
 struct scsi_bd {
@@ -52,6 +54,16 @@  struct scsi_bdq_ram_drv_data {
 	__le16 reserved0[3];
 };
 
+struct scsi_sge {
+	struct regpair sge_addr;
+	__le32 sge_len;
+	__le32 reserved;
+};
+
+struct scsi_cached_sges {
+	struct scsi_sge sge[4];
+};
+
 struct scsi_drv_cmdq {
 	__le16 cmdq_cons;
 	__le16 reserved0;
@@ -99,11 +111,19 @@  struct scsi_ram_per_bdq_resource_drv_data {
 	struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
 };
 
-struct scsi_sge {
-	struct regpair sge_addr;
-	__le16 sge_len;
-	__le16 reserved0;
-	__le32 reserved1;
+enum scsi_sgl_mode {
+	SCSI_TX_SLOW_SGL,
+	SCSI_FAST_SGL,
+	MAX_SCSI_SGL_MODE
+};
+
+struct scsi_sgl_params {
+	struct regpair sgl_addr;
+	__le32 sgl_total_length;
+	__le32 sge_offset;
+	__le16 sgl_num_sges;
+	u8 sgl_index;
+	u8 reserved;
 };
 
 struct scsi_terminate_extra_params {
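
[scsi_sge grows to a 32-bit length, and the new
scsi_sgl_params/scsi_cached_sges pair is the common SGL description
shared by qedi and qedf. A sketch of populating them, assuming
regpair's lo/hi layout; the helper and its arguments are illustrative:

	static void init_scsi_sgl_sketch(struct scsi_sgl_params *params,
					 struct scsi_cached_sges *cached,
					 struct scsi_sge *sgl,
					 dma_addr_t sgl_phys,
					 u16 num_sges, u32 total_len)
	{
		u8 i;

		params->sgl_addr.lo = cpu_to_le32(lower_32_bits(sgl_phys));
		params->sgl_addr.hi = cpu_to_le32(upper_32_bits(sgl_phys));
		params->sgl_total_length = cpu_to_le32(total_len);
		params->sgl_num_sges = cpu_to_le16(num_sges);
		params->sge_offset = 0;
		params->sgl_index = 0;

		/* fast path: mirror the leading SGEs into the cached array */
		for (i = 0; i < min_t(u16, num_sges, 4); i++)
			cached->sge[i] = sgl[i];
	}
]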
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index 46fe785..a5e8432 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -173,6 +173,7 @@  enum tcp_seg_placement_event {
 	TCP_EVENT_ADD_ISLE_RIGHT,
 	TCP_EVENT_ADD_ISLE_LEFT,
 	TCP_EVENT_JOIN,
+	TCP_EVENT_DELETE_ISLES,
 	TCP_EVENT_NOP,
 	MAX_TCP_SEG_PLACEMENT_EVENT
 };
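
[TCP_EVENT_DELETE_ISLES appears to replace the removed
ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES EQE, reporting isle deletion
through the normal segment-placement path instead. A sketch of a
consumer switch; the handler shape is illustrative and only uses enum
values visible in this hunk:

	static void ooo_placement_sketch(enum tcp_seg_placement_event ev)
	{
		switch (ev) {
		case TCP_EVENT_ADD_ISLE_RIGHT:
		case TCP_EVENT_ADD_ISLE_LEFT:
		case TCP_EVENT_JOIN:
			/* buffer extends or merges out-of-order isles */
			break;
		case TCP_EVENT_DELETE_ISLES:
			/* new in 8.15.3.0: drop all isles for this cid */
			break;
		case TCP_EVENT_NOP:
		default:
			break;
		}
	}
]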