
[RFC,v5,17/27] qedn: Add qedn probe

Message ID 20210519111340.20613-18-smalin@marvell.com (mailing list archive)
State Superseded
Series NVMeTCP Offload ULP and QEDN Device Driver

Checks

Context Check Description
netdev/cover_letter success
netdev/fixes_present success
netdev/patch_count fail Series longer than 15 patches
netdev/tree_selection success Guessed tree name to be net-next
netdev/subject_prefix success
netdev/cc_maintainers warning 1 maintainers not CCed: hare@suse.de
netdev/source_inline fail Was 0 now: 4
netdev/verify_signedoff success
netdev/module_param success Was 0 now: 0
netdev/build_32bit fail Errors and warnings before: 4 this patch: 53
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 256 lines checked
netdev/build_allmodconfig_warn fail Errors and warnings before: 4 this patch: 53
netdev/header_inline success

Commit Message

Shai Malin May 19, 2021, 11:13 a.m. UTC
This patch introduces the functionality of loading and unloading the
physical function (PF).
qedn_probe() loads the offload device PF and initializes the HW and the
FW with the PF parameters, using the HW ops (qed_nvmetcp_ops), which are
similar to the other "qed_*_ops" used by the qede, qedr, qedf and qedi
device drivers.
qedn_remove() unloads the offload device PF and re-initializes the HW
and the FW with the PF parameters.

The struct qedn_ctx is a per-PF container for PF-specific attributes
and resources.

Acked-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Dean Balandin <dbalandin@marvell.com>
Signed-off-by: Prabhakar Kushwaha <pkushwaha@marvell.com>
Signed-off-by: Omkar Kulkarni <okulkarni@marvell.com>
Signed-off-by: Michal Kalderon <mkalderon@marvell.com>
Signed-off-by: Ariel Elior <aelior@marvell.com>
Signed-off-by: Shai Malin <smalin@marvell.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
 drivers/nvme/hw/Kconfig          |   1 +
 drivers/nvme/hw/qedn/qedn.h      |  35 +++++++
 drivers/nvme/hw/qedn/qedn_main.c | 159 ++++++++++++++++++++++++++++++-
 3 files changed, 190 insertions(+), 5 deletions(-)
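
For orientation before the diff and the review below: the probe path added
by this patch boils down to the ordered steps sketched here. This is a
condensed illustration only (error handling and the qedn->state bookkeeping
are stripped, and qedn_probe_sketch() is not a function in the series); the
other names are taken from the diff further down.

/* Condensed view of __qedn_probe(); see the full diff for error handling
 * and for the qedn->state bits that let __qedn_remove() unwind each step.
 */
static int qedn_probe_sketch(struct qedn_ctx *qedn)
{
	qedn_core_probe(qedn);                    /* qed_ops->common->probe() on this PF */
	qed_ops->fill_dev_info(qedn->cdev, &qedn->dev_info);
	qedn_set_nvmetcp_pf_param(qedn);          /* CQ/connection/task sizing for the FW */
	qed_ops->common->update_pf_params(qedn->cdev, &qedn->pf_params);
	qedn_slowpath_start(qedn);                /* start the slowpath, MSI-X interrupts */
	qed_ops->common->update_drv_state(qedn->cdev, true);  /* report driver state to the MFW */

	return nvme_tcp_ofld_register_dev(&qedn->qedn_ofld_dev);
}

__qedn_remove() walks the same steps in reverse, gated on the state bits
that the real probe sets after each successful step.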

Comments

Leon Romanovsky May 19, 2021, 12:31 p.m. UTC | #1
On Wed, May 19, 2021 at 02:13:30PM +0300, Shai Malin wrote:
> [...]
> 
> diff --git a/drivers/nvme/hw/Kconfig b/drivers/nvme/hw/Kconfig
> index 374f1f9dbd3d..91b1bd6f07d8 100644
> --- a/drivers/nvme/hw/Kconfig
> +++ b/drivers/nvme/hw/Kconfig
> @@ -2,6 +2,7 @@
>  config NVME_QEDN
>  	tristate "Marvell NVM Express over Fabrics TCP offload"
>  	depends on NVME_TCP_OFFLOAD
> +	select QED_NVMETCP
>  	help
>  	  This enables the Marvell NVMe TCP offload support (qedn).
>  
> diff --git a/drivers/nvme/hw/qedn/qedn.h b/drivers/nvme/hw/qedn/qedn.h
> index bcd0748a10fd..f13073afbced 100644
> --- a/drivers/nvme/hw/qedn/qedn.h
> +++ b/drivers/nvme/hw/qedn/qedn.h
> @@ -6,14 +6,49 @@
>  #ifndef _QEDN_H_
>  #define _QEDN_H_
>  
> +#include <linux/qed/qed_if.h>
> +#include <linux/qed/qed_nvmetcp_if.h>
> +
>  /* Driver includes */
>  #include "../../host/tcp-offload.h"
>  
> +#define QEDN_MAJOR_VERSION		8
> +#define QEDN_MINOR_VERSION		62
> +#define QEDN_REVISION_VERSION		10
> +#define QEDN_ENGINEERING_VERSION	0
> +#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."	\
> +		__stringify(QEDE_MINOR_VERSION) "."		\
> +		__stringify(QEDE_REVISION_VERSION) "."		\
> +		__stringify(QEDE_ENGINEERING_VERSION)
> +

This driver module version is not used in this series and, more
importantly, the module version has no meaning upstream at all; the
community is strongly against the addition of new code like this.
>  #define QEDN_MODULE_NAME "qedn"

And a general note: it would be great if you converted your probe/remove
flows to use the auxiliary bus, like other drivers that cross subsystems.

Thanks
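
For context, the auxiliary bus split suggested above would look roughly like
the sketch below: the qed core would create one auxiliary device per PF and
protocol, and qedn would bind to it with an auxiliary_driver instead of
registering its own PCI probe. Everything here is hypothetical - the
"qed.nvmetcp" match string, qedn_aux_probe() and friends are illustrations,
not code from this series.

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static const struct auxiliary_device_id qedn_aux_id_table[] = {
	{ .name = "qed.nvmetcp" },	/* hypothetical "<parent module>.<device>" name */
	{ },
};
MODULE_DEVICE_TABLE(auxiliary, qedn_aux_id_table);

static int qedn_aux_probe(struct auxiliary_device *adev,
			  const struct auxiliary_device_id *id)
{
	/* container_of(adev, ...) back to the qed-side per-PF context, then run
	 * the same PF/FW initialization that __qedn_probe() performs today.
	 */
	return 0;
}

static void qedn_aux_remove(struct auxiliary_device *adev)
{
	/* Mirror of __qedn_remove(): unregister from the offload layer and
	 * tear down the PF state.
	 */
}

static struct auxiliary_driver qedn_aux_driver = {
	.name		= "nvmetcp",
	.probe		= qedn_aux_probe,
	.remove		= qedn_aux_remove,
	.id_table	= qedn_aux_id_table,
};
module_auxiliary_driver(qedn_aux_driver);
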
Shai Malin May 19, 2021, 2:29 p.m. UTC | #2
On Wed, 19 May 2021 at 15:31, Leon Romanovsky wrote:
> On Wed, May 19, 2021 at 02:13:30PM +0300, Shai Malin wrote:
> > [...]
> >
> > +#define QEDN_MAJOR_VERSION           8
> > +#define QEDN_MINOR_VERSION           62
> > +#define QEDN_REVISION_VERSION                10
> > +#define QEDN_ENGINEERING_VERSION     0
> > +#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."       \
> > +             __stringify(QEDE_MINOR_VERSION) "."             \
> > +             __stringify(QEDE_REVISION_VERSION) "."          \
> > +             __stringify(QEDE_ENGINEERING_VERSION)
> > +
>
> This driver module version is not used in this series and, more
> importantly, the module version has no meaning upstream at all; the
> community is strongly against the addition of new code like this.

Will be fixed.

>
> >  #define QEDN_MODULE_NAME "qedn"
>
> And a general note: it would be great if you converted your probe/remove
> flows to use the auxiliary bus, like other drivers that cross subsystems.

qedn is simply fitting in with the existing design of qed/qede/qedr/qedf/qedi.
Changing the entire multi-protocol design to auxiliary bus is being studied.
Leon Romanovsky May 19, 2021, 3:31 p.m. UTC | #3
On Wed, May 19, 2021 at 05:29:32PM +0300, Shai Malin wrote:
> On Wed, 19 May 2021 at 15:31, Leon Romanovsky wrote:
> > On Wed, May 19, 2021 at 02:13:30PM +0300, Shai Malin wrote:
> > > [...]
> > >
> > > +#define QEDN_MAJOR_VERSION           8
> > > +#define QEDN_MINOR_VERSION           62
> > > +#define QEDN_REVISION_VERSION                10
> > > +#define QEDN_ENGINEERING_VERSION     0
> > > +#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."       \
> > > +             __stringify(QEDE_MINOR_VERSION) "."             \
> > > +             __stringify(QEDE_REVISION_VERSION) "."          \
> > > +             __stringify(QEDE_ENGINEERING_VERSION)
> > > +
> >
> > This driver module version is not used in this series and, more
> > importantly, the module version has no meaning upstream at all; the
> > community is strongly against the addition of new code like this.
> 
> Will be fixed.
> 
> >
> > >  #define QEDN_MODULE_NAME "qedn"
> >
> > And a general note: it would be great if you converted your probe/remove
> > flows to use the auxiliary bus, like other drivers that cross subsystems.
> 
> qedn is simply fitting in with the existing design of qed/qede/qedr/qedf/qedi.

I know.

> Changing the entire multi-protocol design to auxiliary bus is being studied.

It will be required at some point in time.

Thanks

Patch

diff --git a/drivers/nvme/hw/Kconfig b/drivers/nvme/hw/Kconfig
index 374f1f9dbd3d..91b1bd6f07d8 100644
--- a/drivers/nvme/hw/Kconfig
+++ b/drivers/nvme/hw/Kconfig
@@ -2,6 +2,7 @@ 
 config NVME_QEDN
 	tristate "Marvell NVM Express over Fabrics TCP offload"
 	depends on NVME_TCP_OFFLOAD
+	select QED_NVMETCP
 	help
 	  This enables the Marvell NVMe TCP offload support (qedn).
 
diff --git a/drivers/nvme/hw/qedn/qedn.h b/drivers/nvme/hw/qedn/qedn.h
index bcd0748a10fd..f13073afbced 100644
--- a/drivers/nvme/hw/qedn/qedn.h
+++ b/drivers/nvme/hw/qedn/qedn.h
@@ -6,14 +6,49 @@ 
 #ifndef _QEDN_H_
 #define _QEDN_H_
 
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+
 /* Driver includes */
 #include "../../host/tcp-offload.h"
 
+#define QEDN_MAJOR_VERSION		8
+#define QEDN_MINOR_VERSION		62
+#define QEDN_REVISION_VERSION		10
+#define QEDN_ENGINEERING_VERSION	0
+#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "."	\
+		__stringify(QEDE_MINOR_VERSION) "."		\
+		__stringify(QEDE_REVISION_VERSION) "."		\
+		__stringify(QEDE_ENGINEERING_VERSION)
+
 #define QEDN_MODULE_NAME "qedn"
 
+#define QEDN_MAX_TASKS_PER_PF (16 * 1024)
+#define QEDN_MAX_CONNS_PER_PF (4 * 1024)
+#define QEDN_FW_CQ_SIZE (4 * 1024)
+#define QEDN_PROTO_CQ_PROD_IDX	0
+#define QEDN_NVMETCP_NUM_FW_CONN_QUEUE_PAGES 2
+
+enum qedn_state {
+	QEDN_STATE_CORE_PROBED = 0,
+	QEDN_STATE_CORE_OPEN,
+	QEDN_STATE_MFW_STATE,
+	QEDN_STATE_REGISTERED_OFFLOAD_DEV,
+	QEDN_STATE_MODULE_REMOVE_ONGOING,
+};
+
 struct qedn_ctx {
 	struct pci_dev *pdev;
+	struct qed_dev *cdev;
+	struct qed_dev_nvmetcp_info dev_info;
 	struct nvme_tcp_ofld_dev qedn_ofld_dev;
+	struct qed_pf_params pf_params;
+
+	/* Accessed with atomic bit ops, used with enum qedn_state */
+	unsigned long state;
+
+	/* Fast path queues */
+	u8 num_fw_cqs;
 };
 
 #endif /* _QEDN_H_ */
diff --git a/drivers/nvme/hw/qedn/qedn_main.c b/drivers/nvme/hw/qedn/qedn_main.c
index f892383c7494..46110b5a779b 100644
--- a/drivers/nvme/hw/qedn/qedn_main.c
+++ b/drivers/nvme/hw/qedn/qedn_main.c
@@ -14,6 +14,9 @@ 
 
 #define CHIP_NUM_AHP_NVMETCP 0x8194
 
+const struct qed_nvmetcp_ops *qed_ops;
+
+/* Global context instance */
 static struct pci_device_id qedn_pci_tbl[] = {
 	{ PCI_VDEVICE(QLOGIC, CHIP_NUM_AHP_NVMETCP), 0 },
 	{0, 0},
@@ -115,12 +118,113 @@  static struct nvme_tcp_ofld_ops qedn_ofld_ops = {
 	.commit_rqs = qedn_commit_rqs,
 };
 
+static inline void qedn_init_pf_struct(struct qedn_ctx *qedn)
+{
+	/* Placeholder - Initialize qedn fields */
+}
+
+static inline void
+qedn_init_core_probe_params(struct qed_probe_params *probe_params)
+{
+	memset(probe_params, 0, sizeof(*probe_params));
+	probe_params->protocol = QED_PROTOCOL_NVMETCP;
+	probe_params->is_vf = false;
+	probe_params->recov_in_prog = 0;
+}
+
+static inline int qedn_core_probe(struct qedn_ctx *qedn)
+{
+	struct qed_probe_params probe_params;
+	int rc = 0;
+
+	qedn_init_core_probe_params(&probe_params);
+	pr_info("Starting QED probe\n");
+	qedn->cdev = qed_ops->common->probe(qedn->pdev, &probe_params);
+	if (!qedn->cdev) {
+		rc = -ENODEV;
+		pr_err("QED probe failed\n");
+	}
+
+	return rc;
+}
+
+static int qedn_set_nvmetcp_pf_param(struct qedn_ctx *qedn)
+{
+	u32 fw_conn_queue_pages = QEDN_NVMETCP_NUM_FW_CONN_QUEUE_PAGES;
+	struct qed_nvmetcp_pf_params *pf_params;
+
+	pf_params = &qedn->pf_params.nvmetcp_pf_params;
+	memset(pf_params, 0, sizeof(*pf_params));
+	qedn->num_fw_cqs = min_t(u8, qedn->dev_info.num_cqs, num_online_cpus());
+
+	pf_params->num_cons = QEDN_MAX_CONNS_PER_PF;
+	pf_params->num_tasks = QEDN_MAX_TASKS_PER_PF;
+
+	/* Placeholder - Initialize function level queues */
+
+	/* Placeholder - Initialize TCP params */
+
+	/* Queues */
+	pf_params->num_sq_pages_in_ring = fw_conn_queue_pages;
+	pf_params->num_r2tq_pages_in_ring = fw_conn_queue_pages;
+	pf_params->num_uhq_pages_in_ring = fw_conn_queue_pages;
+	pf_params->num_queues = qedn->num_fw_cqs;
+	pf_params->cq_num_entries = QEDN_FW_CQ_SIZE;
+
+	/* the CQ SB pi */
+	pf_params->gl_rq_pi = QEDN_PROTO_CQ_PROD_IDX;
+
+	return 0;
+}
+
+static inline int qedn_slowpath_start(struct qedn_ctx *qedn)
+{
+	struct qed_slowpath_params sp_params = {};
+	int rc = 0;
+
+	/* Start the Slowpath-process */
+	sp_params.int_mode = QED_INT_MODE_MSIX;
+	sp_params.drv_major = QEDN_MAJOR_VERSION;
+	sp_params.drv_minor = QEDN_MINOR_VERSION;
+	sp_params.drv_rev = QEDN_REVISION_VERSION;
+	sp_params.drv_eng = QEDN_ENGINEERING_VERSION;
+	strscpy(sp_params.name, "qedn NVMeTCP", QED_DRV_VER_STR_SIZE);
+	rc = qed_ops->common->slowpath_start(qedn->cdev, &sp_params);
+	if (rc)
+		pr_err("Cannot start slowpath\n");
+
+	return rc;
+}
+
 static void __qedn_remove(struct pci_dev *pdev)
 {
 	struct qedn_ctx *qedn = pci_get_drvdata(pdev);
+	int rc;
+
+	pr_notice("qedn remove started: abs PF id=%u\n",
+		  qedn->dev_info.common.abs_pf_id);
+
+	if (test_and_set_bit(QEDN_STATE_MODULE_REMOVE_ONGOING, &qedn->state)) {
+		pr_err("Remove already ongoing\n");
+
+		return;
+	}
+
+	if (test_and_clear_bit(QEDN_STATE_REGISTERED_OFFLOAD_DEV, &qedn->state))
+		nvme_tcp_ofld_unregister_dev(&qedn->qedn_ofld_dev);
+
+	if (test_and_clear_bit(QEDN_STATE_MFW_STATE, &qedn->state)) {
+		rc = qed_ops->common->update_drv_state(qedn->cdev, false);
+		if (rc)
+			pr_err("Failed to send drv state to MFW\n");
+	}
+
+	if (test_and_clear_bit(QEDN_STATE_CORE_OPEN, &qedn->state))
+		qed_ops->common->slowpath_stop(qedn->cdev);
+
+	if (test_and_clear_bit(QEDN_STATE_CORE_PROBED, &qedn->state))
+		qed_ops->common->remove(qedn->cdev);
 
-	pr_notice("Starting qedn_remove\n");
-	nvme_tcp_ofld_unregister_dev(&qedn->qedn_ofld_dev);
 	kfree(qedn);
 	pr_notice("Ending qedn_remove successfully\n");
 }
@@ -160,16 +264,53 @@  static int __qedn_probe(struct pci_dev *pdev)
 	if (!qedn)
 		return -ENODEV;
 
+	qedn_init_pf_struct(qedn);
+
+	/* QED probe */
+	rc = qedn_core_probe(qedn);
+	if (rc)
+		goto exit_probe_and_release_mem;
+
+	set_bit(QEDN_STATE_CORE_PROBED, &qedn->state);
+
+	rc = qed_ops->fill_dev_info(qedn->cdev, &qedn->dev_info);
+	if (rc) {
+		pr_err("fill_dev_info failed\n");
+		goto exit_probe_and_release_mem;
+	}
+
+	rc = qedn_set_nvmetcp_pf_param(qedn);
+	if (rc)
+		goto exit_probe_and_release_mem;
+
+	qed_ops->common->update_pf_params(qedn->cdev, &qedn->pf_params);
+	rc = qedn_slowpath_start(qedn);
+	if (rc)
+		goto exit_probe_and_release_mem;
+
+	set_bit(QEDN_STATE_CORE_OPEN, &qedn->state);
+
+	rc = qed_ops->common->update_drv_state(qedn->cdev, true);
+	if (rc) {
+		pr_err("Failed to send drv state to MFW\n");
+		goto exit_probe_and_release_mem;
+	}
+
+	set_bit(QEDN_STATE_MFW_STATE, &qedn->state);
+
 	qedn->qedn_ofld_dev.private_data = (void *)qedn;
 	qedn->qedn_ofld_dev.ops = &qedn_ofld_ops;
 	INIT_LIST_HEAD(&qedn->qedn_ofld_dev.entry);
 	rc = nvme_tcp_ofld_register_dev(&qedn->qedn_ofld_dev);
 	if (rc)
-		goto release_qedn;
+		goto exit_probe_and_release_mem;
+
+	set_bit(QEDN_STATE_REGISTERED_OFFLOAD_DEV, &qedn->state);
 
 	return 0;
-release_qedn:
-	kfree(qedn);
+exit_probe_and_release_mem:
+	__qedn_remove(pdev);
+	pr_err("probe ended with error\n");
 
 	return rc;
 }
@@ -191,6 +332,13 @@  static int __init qedn_init(void)
 {
 	int rc;
 
+	qed_ops = qed_get_nvmetcp_ops();
+	if (!qed_ops) {
+		pr_err("Failed to get QED NVMeTCP ops\n");
+
+		return -EINVAL;
+	}
+
 	rc = pci_register_driver(&qedn_pci_driver);
 	if (rc) {
 		pr_err("Failed to register pci driver\n");
@@ -206,6 +354,7 @@  static int __init qedn_init(void)
 static void __exit qedn_cleanup(void)
 {
 	pci_unregister_driver(&qedn_pci_driver);
+	qed_put_nvmetcp_ops();
 	pr_notice("Unloading qedn ended\n");
 }
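
A closing note on the error handling added above: every step that
__qedn_probe() completes is recorded as a bit in qedn->state, and
__qedn_remove() undoes only the steps that test_and_clear_bit() confirms
were actually done. That is what lets the new exit_probe_and_release_mem
label funnel every probe failure into __qedn_remove() without tracking how
far the probe got. A stripped-down illustration of the pattern follows;
do_step_a()/do_step_b() and their undo counterparts are hypothetical
placeholders, not code from this series.

#include <linux/bitops.h>

static int do_step_a(void)    { return 0; }	/* hypothetical placeholder */
static int do_step_b(void)    { return 0; }	/* hypothetical placeholder */
static void undo_step_a(void) { }		/* hypothetical placeholder */
static void undo_step_b(void) { }		/* hypothetical placeholder */

enum example_state {
	EXAMPLE_STATE_A = 0,	/* set once step A has completed */
	EXAMPLE_STATE_B,	/* set once step B has completed */
};

static void example_teardown(unsigned long *state)
{
	/* Undo in reverse order, and only what actually completed */
	if (test_and_clear_bit(EXAMPLE_STATE_B, state))
		undo_step_b();

	if (test_and_clear_bit(EXAMPLE_STATE_A, state))
		undo_step_a();
}

static int example_setup(unsigned long *state)
{
	int rc;

	rc = do_step_a();
	if (rc)
		goto out_unwind;
	set_bit(EXAMPLE_STATE_A, state);

	rc = do_step_b();
	if (rc)
		goto out_unwind;
	set_bit(EXAMPLE_STATE_B, state);

	return 0;

out_unwind:
	example_teardown(state);	/* safe: clears only the bits that were set */

	return rc;
}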