
[v2,1/5] qla2xxx_nvmet: Add files for FC-NVMe Target support

Message ID 20180926040339.9715-2-himanshu.madhani@cavium.com (mailing list archive)
State Superseded
Series qla2xxx: Add FC-NVMe Target support

Commit Message

Madhani, Himanshu Sept. 26, 2018, 4:03 a.m. UTC
From: Anil Gurumurthy <anil.gurumurthy@cavium.com>

This patch adds the initial files to enable NVMe Target support.

Signed-off-by: Anil Gurumurthy <anil.gurumurthy@cavium.com>
Signed-off-by: Giridhar Malavali <giridhar.malavali@cavium.com>
Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
---
 drivers/scsi/qla2xxx/Makefile    |   3 +-
 drivers/scsi/qla2xxx/qla_nvmet.c | 798 +++++++++++++++++++++++++++++++++++++++
 drivers/scsi/qla2xxx/qla_nvmet.h | 129 +++++++
 3 files changed, 929 insertions(+), 1 deletion(-)
 create mode 100644 drivers/scsi/qla2xxx/qla_nvmet.c
 create mode 100644 drivers/scsi/qla2xxx/qla_nvmet.h

Comments

kernel test robot Sept. 26, 2018, 6:14 p.m. UTC | #1
Hi Anil,

I love your patch! Yet something to improve:

[auto build test ERROR on mkp-scsi/for-next]
[also build test ERROR on v4.19-rc5 next-20180926]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Himanshu-Madhani/qla2xxx-Add-FC-NVMe-Target-support/20180926-132955
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git for-next
config: powerpc-pseries_defconfig (attached as .config)
compiler: powerpc64-linux-gnu-gcc (Debian 7.2.0-11) 7.2.0
reproduce:
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        GCC_VERSION=7.2.0 make.cross ARCH=powerpc 

Note: the linux-review/Himanshu-Madhani/qla2xxx-Add-FC-NVMe-Target-support/20180926-132955 HEAD ac552b3775986d6ba46be8dfa15cc47511c2e47c builds fine.
      It only hurts bisectability.

All errors (new ones prefixed by >>):

   In file included from drivers/scsi/qla2xxx/qla_nvmet.c:14:0:
>> drivers/scsi/qla2xxx/qla_nvmet.h:30:25: error: field 'nvme_cmd_iu' has incomplete type
     struct atio7_nvme_cmnd nvme_cmd_iu;
                            ^~~~~~~~~~~
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qlt_nvmet_ls_done':
>> drivers/scsi/qla2xxx/qla_nvmet.c:48:46: error: 'struct <anonymous>' has no member named 'cmd'
     struct qla_nvmet_cmd *tgt_cmd = nvme->u.nvme.cmd;
                                                 ^
   drivers/scsi/qla2xxx/qla_nvmet.c:58:47: error: 'struct <anonymous>' has no member named 'cmd'
      sp, sp->vha, nvme->u.nvme.desc, nvme->u.nvme.cmd);
                                                  ^
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qla_nvmet_ls_rsp':
>> drivers/scsi/qla2xxx/qla_nvmet.c:95:13: error: 'SRB_NVMET_LS' undeclared (first use in this function); did you mean 'SRB_NVME_LS'?
     sp->type = SRB_NVMET_LS;
                ^~~~~~~~~~~~
                SRB_NVME_LS
   drivers/scsi/qla2xxx/qla_nvmet.c:95:13: note: each undeclared identifier is reported only once for each function it appears in
>> drivers/scsi/qla2xxx/qla_nvmet.c:103:14: error: 'struct <anonymous>' has no member named 'exchange_address'
     nvme->u.nvme.exchange_address = tgt_cmd->atio.u.pt_ls4.exchange_address;
                 ^
>> drivers/scsi/qla2xxx/qla_nvmet.c:103:49: error: 'union <anonymous>' has no member named 'pt_ls4'
     nvme->u.nvme.exchange_address = tgt_cmd->atio.u.pt_ls4.exchange_address;
                                                    ^
>> drivers/scsi/qla2xxx/qla_nvmet.c:104:14: error: 'struct <anonymous>' has no member named 'nport_handle'
     nvme->u.nvme.nport_handle = tgt_cmd->atio.u.pt_ls4.nport_handle;
                 ^
   drivers/scsi/qla2xxx/qla_nvmet.c:104:45: error: 'union <anonymous>' has no member named 'pt_ls4'
     nvme->u.nvme.nport_handle = tgt_cmd->atio.u.pt_ls4.nport_handle;
                                                ^
>> drivers/scsi/qla2xxx/qla_nvmet.c:105:14: error: 'struct <anonymous>' has no member named 'vp_index'
     nvme->u.nvme.vp_index = tgt_cmd->atio.u.pt_ls4.vp_index;
                 ^
   drivers/scsi/qla2xxx/qla_nvmet.c:105:41: error: 'union <anonymous>' has no member named 'pt_ls4'
     nvme->u.nvme.vp_index = tgt_cmd->atio.u.pt_ls4.vp_index;
                                            ^
   drivers/scsi/qla2xxx/qla_nvmet.c:107:14: error: 'struct <anonymous>' has no member named 'cmd'
     nvme->u.nvme.cmd = tgt_cmd; /* To be freed */
                 ^
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qla_nvmet_fcp_abort':
   drivers/scsi/qla2xxx/qla_nvmet.c:178:13: error: 'SRB_NVMET_SEND_ABTS' undeclared (first use in this function); did you mean 'CF_NVME_ENABLE'?
     sp->type = SRB_NVMET_SEND_ABTS;
                ^~~~~~~~~~~~~~~~~~~
                CF_NVME_ENABLE
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qla_nvmet_create_targetport':
>> drivers/scsi/qla2xxx/qla_nvmet.c:240:9: error: 'ql_dbg_nvme' undeclared (first use in this function); did you mean 'ql_dbg_timer'?
     ql_dbg(ql_dbg_nvme, vha, 0xe081,
            ^~~~~~~~~~~
            ql_dbg_timer
>> drivers/scsi/qla2xxx/qla_nvmet.c:250:10: error: 'struct scsi_qla_host' has no member named 'targetport'
         &vha->targetport);
             ^~
   drivers/scsi/qla2xxx/qla_nvmet.c:257:41: error: 'struct scsi_qla_host' has no member named 'targetport'
     tport = (struct qla_nvmet_tgtport *)vha->targetport->private;
                                            ^~
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qla_nvmet_delete':
>> drivers/scsi/qla2xxx/qla_nvmet.c:276:18: error: 'volatile struct <anonymous>' has no member named 'nvmet_enabled'; did you mean 'nvme_enabled'?
     if (!vha->flags.nvmet_enabled)
                     ^~~~~~~~~~~~~
                     nvme_enabled
   drivers/scsi/qla2xxx/qla_nvmet.c:278:9: error: 'struct scsi_qla_host' has no member named 'targetport'
     if (vha->targetport) {
            ^~
   drivers/scsi/qla2xxx/qla_nvmet.c:279:42: error: 'struct scsi_qla_host' has no member named 'targetport'
      tport = (struct qla_nvmet_tgtport *)vha->targetport->private;
                                             ^~
   drivers/scsi/qla2xxx/qla_nvmet.c:281:10: error: 'ql_dbg_nvme' undeclared (first use in this function); did you mean 'ql_dbg_timer'?
      ql_dbg(ql_dbg_nvme, vha, 0xe083,
             ^~~~~~~~~~~
             ql_dbg_timer
   drivers/scsi/qla2xxx/qla_nvmet.c:284:37: error: 'struct scsi_qla_host' has no member named 'targetport'
      nvmet_fc_unregister_targetport(vha->targetport);
                                        ^~
>> drivers/scsi/qla2xxx/qla_nvmet.c:287:3: error: implicit declaration of function 'nvmet_release_sessions'; did you mean 'pci_release_regions'? [-Werror=implicit-function-declaration]
      nvmet_release_sessions(vha);
      ^~~~~~~~~~~~~~~~~~~~~~
      pci_release_regions
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qla_nvmet_handle_ls':
>> drivers/scsi/qla2xxx/qla_nvmet.c:316:9: error: implicit declaration of function 'qla_nvmet_find_sess_by_s_id'; did you mean 'qla_nvmet_send_resp_ctio'? [-Werror=implicit-function-declaration]
     sess = qla_nvmet_find_sess_by_s_id(vha, look_up_sid);
            ^~~~~~~~~~~~~~~~~~~~~~~~~~~
            qla_nvmet_send_resp_ctio
   drivers/scsi/qla2xxx/qla_nvmet.c:316:7: warning: assignment makes pointer from integer without a cast [-Wint-conversion]
     sess = qla_nvmet_find_sess_by_s_id(vha, look_up_sid);
          ^
   drivers/scsi/qla2xxx/qla_nvmet.c:330:25: error: 'union <anonymous>' has no member named 'pt_ls4'
     memcpy(&tgt_cmd->atio.u.pt_ls4, pt_ls4, sizeof(struct pt_ls4_rx_unsol));
                            ^
   drivers/scsi/qla2xxx/qla_nvmet.c:341:31: error: 'struct scsi_qla_host' has no member named 'targetport'
     ret = nvmet_fc_rcv_ls_req(vha->targetport,
                                  ^~
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qla_nvmet_process_cmd':
   drivers/scsi/qla2xxx/qla_nvmet.c:373:32: error: 'struct scsi_qla_host' has no member named 'targetport'
     ret = nvmet_fc_rcv_fcp_req(vha->targetport, &tgt_cmd->cmd.fcp_req,
                                   ^~
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qla_nvmet_handle_abts':
   drivers/scsi/qla2xxx/qla_nvmet.c:414:28: error: 'struct scsi_qla_host' has no member named 'targetport'
     nvmet_fc_rcv_fcp_abort(vha->targetport, &cmd->cmd.fcp_req);
                               ^~
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qla_nvmet_send_resp_ctio':
>> drivers/scsi/qla2xxx/qla_nvmet.c:472:34: error: 'union <anonymous>' has no member named 'nvme_isp27'
     struct fcp_hdr *fchdr = &atio->u.nvme_isp27.fcp_hdr;
                                     ^
   drivers/scsi/qla2xxx/qla_nvmet.c:493:13: error: 'SRB_NVMET_FCP' undeclared (first use in this function); did you mean 'SRB_NVME_LS'?
     sp->type = SRB_NVMET_FCP;
                ^~~~~~~~~~~~~
                SRB_NVME_LS
   drivers/scsi/qla2xxx/qla_nvmet.c:497:23: error: 'struct <anonymous>' has no member named 'cmd'
     sp->u.iocb_cmd.u.nvme.cmd = cmd;
                          ^
   drivers/scsi/qla2xxx/qla_nvmet.c:501:10: error: 'ql_dbg_nvme' undeclared (first use in this function); did you mean 'ql_dbg_timer'?
      ql_dbg(ql_dbg_nvme, vha, 0x3067,
             ^~~~~~~~~~~
             ql_dbg_timer
   drivers/scsi/qla2xxx/qla_nvmet.c:517:31: error: 'union <anonymous>' has no member named 'nvme_isp27'
     ctio->exchange_addr = atio->u.nvme_isp27.exchange_addr;
                                  ^
   drivers/scsi/qla2xxx/qla_nvmet.c:521:19: error: 'union <anonymous>' has no member named 'nvme_isp27'
     c_flags = atio->u.nvme_isp27.attr << 9;
                      ^
   drivers/scsi/qla2xxx/qla_nvmet.c: In function 'qla_nvmet_send_abts_ctio':
>> drivers/scsi/qla2xxx/qla_nvmet.c:747:13: error: 'SRB_NVMET_ABTS' undeclared (first use in this function); did you mean 'SRB_NVME_LS'?
     sp->type = SRB_NVMET_ABTS;
                ^~~~~~~~~~~~~~
                SRB_NVME_LS
   drivers/scsi/qla2xxx/qla_nvmet.c:753:10: error: 'ql_dbg_nvme' undeclared (first use in this function); did you mean 'ql_dbg_timer'?
      ql_dbg(ql_dbg_nvme, vha, 0x3067,
             ^~~~~~~~~~~
             ql_dbg_timer
   cc1: some warnings being treated as errors

vim +/nvme_cmd_iu +30 drivers/scsi/qla2xxx/qla_nvmet.h

    21	
    22	struct qla_nvmet_cmd {
    23		union {
    24			struct nvmefc_tgt_ls_req ls_req;
    25			struct nvmefc_tgt_fcp_req fcp_req;
    26		} cmd;
    27		struct scsi_qla_host *vha;
    28		void *buf;
    29		struct atio_from_isp atio;
  > 30		struct atio7_nvme_cmnd nvme_cmd_iu;
    31		uint16_t cmd_len;
    32		spinlock_t nvme_cmd_lock;
    33		struct list_head cmd_list; /* List of cmds */
    34		struct work_struct work;
    35	
    36		struct scatterlist *sg;	/* cmd data buffer SG vector */
    37		int sg_cnt;		/* SG segments count */
    38		int bufflen;		/* cmd buffer length */
    39		int offset;
    40		enum dma_data_direction dma_data_direction;
    41		uint16_t ox_id;
    42		struct fc_port *fcport;
    43	};
    44	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
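
The identifiers flagged as undeclared above (SRB_NVMET_LS, SRB_NVMET_FCP, SRB_NVMET_ABTS, SRB_NVMET_SEND_ABTS, ql_dbg_nvme, plus the vha->targetport pointer and the vha->flags.nvmet_enabled bit) are expected to come from later patches in this series, which is why the series HEAD builds while this patch alone does not. A minimal sketch of the kind of declarations the error output implies follows; the names are taken straight from the log, while the values, exact types and their placement in qla_def.h/qla_dbg.h are assumptions, not the series' actual definitions.

    /*
     * Sketch only: reconstructed from the build log above, not taken from
     * the actual series. Values and placement are placeholders.
     */

    /* New SRB types used by qla_nvmet.c, alongside the existing SRB_NVME_LS. */
    enum {
            SRB_NVMET_LS = 75,              /* placeholder values */
            SRB_NVMET_FCP,
            SRB_NVMET_ABTS,
            SRB_NVMET_SEND_ABTS,
    };

    /* Debug mask for NVMe target messages, next to the existing ql_dbg_* masks. */
    #define ql_dbg_nvme     0x00000010      /* placeholder bit */

    /* Members the errors show must exist on struct scsi_qla_host: */
    struct scsi_qla_host {
            /* ... existing members ... */
            struct nvmet_fc_target_port *targetport; /* from nvmet_fc_register_targetport() */
            volatile struct {
                    /* ... existing flag bits ... */
                    uint32_t nvmet_enabled:1;       /* FC-NVMe target mode enabled */
            } flags;
    };

    /*
     * The log also implies additions to struct srb_iocb's nvme member
     * (cmd, exchange_address, nport_handle, vp_index), new atio union
     * members (pt_ls4, nvme_isp27), struct atio7_nvme_cmnd, and the
     * helpers qla_nvmet_find_sess_by_s_id() / nvmet_release_sessions(),
     * all presumably supplied by later patches in the series.
     */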
kernel test robot Sept. 26, 2018, 6:53 p.m. UTC | #2
Hi Anil,

I love your patch! Yet something to improve:

[auto build test ERROR on mkp-scsi/for-next]
[also build test ERROR on v4.19-rc5 next-20180926]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Himanshu-Madhani/qla2xxx-Add-FC-NVMe-Target-support/20180926-132955
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git for-next
config: x86_64-randconfig-s0-09270145 (attached as .config)
compiler: gcc-6 (Debian 6.4.0-9) 6.4.0 20171026
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64 

Note: the linux-review/Himanshu-Madhani/qla2xxx-Add-FC-NVMe-Target-support/20180926-132955 HEAD ac552b3775986d6ba46be8dfa15cc47511c2e47c builds fine.
      It only hurts bisectability.

All errors (new ones prefixed by >>):

   In file included from drivers/scsi//qla2xxx/qla_nvmet.c:14:0:
   drivers/scsi//qla2xxx/qla_nvmet.h:30:25: error: field 'nvme_cmd_iu' has incomplete type
     struct atio7_nvme_cmnd nvme_cmd_iu;
                            ^~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qlt_nvmet_ls_done':
   drivers/scsi//qla2xxx/qla_nvmet.c:48:46: error: 'struct <anonymous>' has no member named 'cmd'
     struct qla_nvmet_cmd *tgt_cmd = nvme->u.nvme.cmd;
                                                 ^
   drivers/scsi//qla2xxx/qla_nvmet.c:58:47: error: 'struct <anonymous>' has no member named 'cmd'
      sp, sp->vha, nvme->u.nvme.desc, nvme->u.nvme.cmd);
                                                  ^
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qla_nvmet_ls_rsp':
>> drivers/scsi//qla2xxx/qla_nvmet.c:95:13: error: 'SRB_NVMET_LS' undeclared (first use in this function)
     sp->type = SRB_NVMET_LS;
                ^~~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c:95:13: note: each undeclared identifier is reported only once for each function it appears in
   drivers/scsi//qla2xxx/qla_nvmet.c:103:14: error: 'struct <anonymous>' has no member named 'exchange_address'
     nvme->u.nvme.exchange_address = tgt_cmd->atio.u.pt_ls4.exchange_address;
                 ^
   drivers/scsi//qla2xxx/qla_nvmet.c:103:49: error: 'union <anonymous>' has no member named 'pt_ls4'
     nvme->u.nvme.exchange_address = tgt_cmd->atio.u.pt_ls4.exchange_address;
                                                    ^
   drivers/scsi//qla2xxx/qla_nvmet.c:104:14: error: 'struct <anonymous>' has no member named 'nport_handle'
     nvme->u.nvme.nport_handle = tgt_cmd->atio.u.pt_ls4.nport_handle;
                 ^
   drivers/scsi//qla2xxx/qla_nvmet.c:104:45: error: 'union <anonymous>' has no member named 'pt_ls4'
     nvme->u.nvme.nport_handle = tgt_cmd->atio.u.pt_ls4.nport_handle;
                                                ^
   drivers/scsi//qla2xxx/qla_nvmet.c:105:14: error: 'struct <anonymous>' has no member named 'vp_index'
     nvme->u.nvme.vp_index = tgt_cmd->atio.u.pt_ls4.vp_index;
                 ^
   drivers/scsi//qla2xxx/qla_nvmet.c:105:41: error: 'union <anonymous>' has no member named 'pt_ls4'
     nvme->u.nvme.vp_index = tgt_cmd->atio.u.pt_ls4.vp_index;
                                            ^
   drivers/scsi//qla2xxx/qla_nvmet.c:107:14: error: 'struct <anonymous>' has no member named 'cmd'
     nvme->u.nvme.cmd = tgt_cmd; /* To be freed */
                 ^
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qla_nvmet_fcp_abort':
>> drivers/scsi//qla2xxx/qla_nvmet.c:178:13: error: 'SRB_NVMET_SEND_ABTS' undeclared (first use in this function)
     sp->type = SRB_NVMET_SEND_ABTS;
                ^~~~~~~~~~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qla_nvmet_create_targetport':
>> drivers/scsi//qla2xxx/qla_nvmet.c:240:9: error: 'ql_dbg_nvme' undeclared (first use in this function)
     ql_dbg(ql_dbg_nvme, vha, 0xe081,
            ^~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c:250:10: error: 'struct scsi_qla_host' has no member named 'targetport'
         &vha->targetport);
             ^~
   drivers/scsi//qla2xxx/qla_nvmet.c:257:41: error: 'struct scsi_qla_host' has no member named 'targetport'
     tport = (struct qla_nvmet_tgtport *)vha->targetport->private;
                                            ^~
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qla_nvmet_delete':
   drivers/scsi//qla2xxx/qla_nvmet.c:276:17: error: 'volatile struct <anonymous>' has no member named 'nvmet_enabled'; did you mean 'nvme_enabled'?
     if (!vha->flags.nvmet_enabled)
                    ^
   drivers/scsi//qla2xxx/qla_nvmet.c:278:9: error: 'struct scsi_qla_host' has no member named 'targetport'
     if (vha->targetport) {
            ^~
   drivers/scsi//qla2xxx/qla_nvmet.c:279:42: error: 'struct scsi_qla_host' has no member named 'targetport'
      tport = (struct qla_nvmet_tgtport *)vha->targetport->private;
                                             ^~
   drivers/scsi//qla2xxx/qla_nvmet.c:281:10: error: 'ql_dbg_nvme' undeclared (first use in this function)
      ql_dbg(ql_dbg_nvme, vha, 0xe083,
             ^~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c:284:37: error: 'struct scsi_qla_host' has no member named 'targetport'
      nvmet_fc_unregister_targetport(vha->targetport);
                                        ^~
>> drivers/scsi//qla2xxx/qla_nvmet.c:287:3: error: implicit declaration of function 'nvmet_release_sessions' [-Werror=implicit-function-declaration]
      nvmet_release_sessions(vha);
      ^~~~~~~~~~~~~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qla_nvmet_handle_ls':
>> drivers/scsi//qla2xxx/qla_nvmet.c:316:9: error: implicit declaration of function 'qla_nvmet_find_sess_by_s_id' [-Werror=implicit-function-declaration]
     sess = qla_nvmet_find_sess_by_s_id(vha, look_up_sid);
            ^~~~~~~~~~~~~~~~~~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c:316:7: warning: assignment makes pointer from integer without a cast [-Wint-conversion]
     sess = qla_nvmet_find_sess_by_s_id(vha, look_up_sid);
          ^
   drivers/scsi//qla2xxx/qla_nvmet.c:330:25: error: 'union <anonymous>' has no member named 'pt_ls4'
     memcpy(&tgt_cmd->atio.u.pt_ls4, pt_ls4, sizeof(struct pt_ls4_rx_unsol));
                            ^
   drivers/scsi//qla2xxx/qla_nvmet.c:341:31: error: 'struct scsi_qla_host' has no member named 'targetport'
     ret = nvmet_fc_rcv_ls_req(vha->targetport,
                                  ^~
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qla_nvmet_process_cmd':
   drivers/scsi//qla2xxx/qla_nvmet.c:373:32: error: 'struct scsi_qla_host' has no member named 'targetport'
     ret = nvmet_fc_rcv_fcp_req(vha->targetport, &tgt_cmd->cmd.fcp_req,
                                   ^~
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qla_nvmet_handle_abts':
   drivers/scsi//qla2xxx/qla_nvmet.c:414:28: error: 'struct scsi_qla_host' has no member named 'targetport'
     nvmet_fc_rcv_fcp_abort(vha->targetport, &cmd->cmd.fcp_req);
                               ^~
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qla_nvmet_send_resp_ctio':
   drivers/scsi//qla2xxx/qla_nvmet.c:472:34: error: 'union <anonymous>' has no member named 'nvme_isp27'
     struct fcp_hdr *fchdr = &atio->u.nvme_isp27.fcp_hdr;
                                     ^
>> drivers/scsi//qla2xxx/qla_nvmet.c:493:13: error: 'SRB_NVMET_FCP' undeclared (first use in this function)
     sp->type = SRB_NVMET_FCP;
                ^~~~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c:497:23: error: 'struct <anonymous>' has no member named 'cmd'
     sp->u.iocb_cmd.u.nvme.cmd = cmd;
                          ^
   drivers/scsi//qla2xxx/qla_nvmet.c:501:10: error: 'ql_dbg_nvme' undeclared (first use in this function)
      ql_dbg(ql_dbg_nvme, vha, 0x3067,
             ^~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c:517:31: error: 'union <anonymous>' has no member named 'nvme_isp27'
     ctio->exchange_addr = atio->u.nvme_isp27.exchange_addr;
                                  ^
   drivers/scsi//qla2xxx/qla_nvmet.c:521:19: error: 'union <anonymous>' has no member named 'nvme_isp27'
     c_flags = atio->u.nvme_isp27.attr << 9;
                      ^
   drivers/scsi//qla2xxx/qla_nvmet.c: In function 'qla_nvmet_send_abts_ctio':
>> drivers/scsi//qla2xxx/qla_nvmet.c:747:13: error: 'SRB_NVMET_ABTS' undeclared (first use in this function)
     sp->type = SRB_NVMET_ABTS;
                ^~~~~~~~~~~~~~
   drivers/scsi//qla2xxx/qla_nvmet.c:753:10: error: 'ql_dbg_nvme' undeclared (first use in this function)
      ql_dbg(ql_dbg_nvme, vha, 0x3067,
             ^~~~~~~~~~~
   cc1: some warnings being treated as errors

vim +/SRB_NVMET_LS +95 drivers/scsi//qla2xxx/qla_nvmet.c

    36	
    37	/*
    38	 * qlt_nvmet_ls_done -
    39	 * Invoked by the firmware interface to indicate the completion
    40	 * of an LS cmd
    41	 * Free all associated resources of the LS cmd
    42	 */
    43	static void qlt_nvmet_ls_done(void *ptr, int res)
    44	{
    45		struct srb *sp = ptr;
    46		struct srb_iocb   *nvme = &sp->u.iocb_cmd;
    47		struct nvmefc_tgt_ls_req *rsp = nvme->u.nvme.desc;
  > 48		struct qla_nvmet_cmd *tgt_cmd = nvme->u.nvme.cmd;
    49	
    50		if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
    51			return;
    52	
    53		ql_log(ql_log_info, sp->vha, 0x11000,
    54			"Done with NVME LS4 req\n");
    55	
    56		ql_log(ql_log_info, sp->vha, 0x11001,
    57			"sp: %p vha: %p, rsp: %p, cmd: %p\n",
    58			sp, sp->vha, nvme->u.nvme.desc, nvme->u.nvme.cmd);
    59	
    60		rsp->done(rsp);
    61		/* Free tgt_cmd */
    62		kfree(tgt_cmd->buf);
    63		kfree(tgt_cmd);
    64		qla2x00_rel_sp(sp);
    65	}
    66	
    67	/*
    68	 * qla_nvmet_ls_rsp -
    69	 * Invoked by the nvme-t to complete the LS req.
    70	 * Prepare and send a response CTIO to the firmware.
    71	 */
    72	static int
    73	qla_nvmet_ls_rsp(struct nvmet_fc_target_port *tgtport,
    74				struct nvmefc_tgt_ls_req *rsp)
    75	{
    76		struct qla_nvmet_cmd *tgt_cmd =
    77			container_of(rsp, struct qla_nvmet_cmd, cmd.ls_req);
    78		struct scsi_qla_host *vha = tgt_cmd->vha;
    79		struct srb_iocb   *nvme;
    80		int     rval = QLA_FUNCTION_FAILED;
    81		srb_t *sp;
    82	
    83		ql_log(ql_log_info, vha, 0x11002,
    84			"Dumping the NVMET-LS response buffer\n");
    85		ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
    86			(uint8_t *)rsp->rspbuf, rsp->rsplen);
    87	
    88		/* Alloc SRB structure */
    89		sp = qla2x00_get_sp(vha, NULL, GFP_ATOMIC);
    90		if (!sp) {
    91			ql_log(ql_log_info, vha, 0x11003, "Failed to allocate SRB\n");
    92			return -ENOMEM;
    93		}
    94	
  > 95		sp->type = SRB_NVMET_LS;
    96		sp->done = qlt_nvmet_ls_done;
    97		sp->vha = vha;
    98		sp->fcport = tgt_cmd->fcport;
    99	
   100		nvme = &sp->u.iocb_cmd;
   101		nvme->u.nvme.rsp_dma = rsp->rspdma;
   102		nvme->u.nvme.rsp_len = rsp->rsplen;
   103		nvme->u.nvme.exchange_address = tgt_cmd->atio.u.pt_ls4.exchange_address;
   104		nvme->u.nvme.nport_handle = tgt_cmd->atio.u.pt_ls4.nport_handle;
 > 105		nvme->u.nvme.vp_index = tgt_cmd->atio.u.pt_ls4.vp_index;
   106	
 > 107		nvme->u.nvme.cmd = tgt_cmd; /* To be freed */
   108		nvme->u.nvme.desc = rsp; /* Call back to nvmet */
   109	
   110		rval = qla2x00_start_sp(sp);
   111		if (rval != QLA_SUCCESS) {
   112			ql_log(ql_log_warn, vha, 0x11004,
   113				"qla2x00_start_sp failed = %d\n", rval);
   114			return rval;
   115		}
   116	
   117		return 0;
   118	}
   119	
   120	/*
   121	 * qla_nvmet_fcp_op -
   122	 * Invoked by the nvme-t to complete the IO.
   123	 * Prepare and send a response CTIO to the firmware.
   124	 */
   125	static int
   126	qla_nvmet_fcp_op(struct nvmet_fc_target_port *tgtport,
   127				struct nvmefc_tgt_fcp_req *rsp)
   128	{
   129		struct qla_nvmet_cmd *tgt_cmd =
   130			container_of(rsp, struct qla_nvmet_cmd, cmd.fcp_req);
   131		struct scsi_qla_host *vha = tgt_cmd->vha;
   132	
   133		if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
   134			return 0;
   135	
   136		/* Prepare and send CTIO 82h */
   137		qla_nvmet_send_resp_ctio(vha->qpair, tgt_cmd, rsp);
   138	
   139		return 0;
   140	}
   141	
   142	/*
   143	 * qla_nvmet_fcp_abort_done
   144	 * free up the used resources
   145	 */
   146	static void qla_nvmet_fcp_abort_done(void *ptr, int res)
   147	{
   148		srb_t *sp = ptr;
   149	
   150		qla2x00_rel_sp(sp);
   151	}
   152	
   153	/*
   154	 * qla_nvmet_fcp_abort -
   155	 * Invoked by the nvme-t to abort an IO
   156	 * Send an abort to the firmware
   157	 */
   158	static void
   159	qla_nvmet_fcp_abort(struct nvmet_fc_target_port *tgtport,
   160				struct nvmefc_tgt_fcp_req *req)
   161	{
   162		struct qla_nvmet_cmd *tgt_cmd =
   163			container_of(req, struct qla_nvmet_cmd, cmd.fcp_req);
   164		struct scsi_qla_host *vha = tgt_cmd->vha;
   165		struct qla_hw_data *ha = vha->hw;
   166		srb_t *sp;
   167	
   168		if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
   169			return;
   170	
   171		/* Alloc SRB structure */
   172		sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
   173		if (!sp) {
   174			ql_log(ql_log_info, vha, 0x11005, "Failed to allocate SRB\n");
   175			return;
   176		}
   177	
 > 178		sp->type = SRB_NVMET_SEND_ABTS;
   179		sp->done = qla_nvmet_fcp_abort_done;
   180		sp->vha = vha;
   181		sp->fcport = tgt_cmd->fcport;
   182	
   183		ha->isp_ops->abort_command(sp);
   184	
   185	}
   186	
   187	/*
   188	 * qla_nvmet_fcp_req_release -
   189	 * Delete the cmd from the list and free the cmd
   190	 */
   191	static void
   192	qla_nvmet_fcp_req_release(struct nvmet_fc_target_port *tgtport,
   193				struct nvmefc_tgt_fcp_req *rsp)
   194	{
   195		struct qla_nvmet_cmd *tgt_cmd =
   196			container_of(rsp, struct qla_nvmet_cmd, cmd.fcp_req);
   197		scsi_qla_host_t *vha = tgt_cmd->vha;
   198		unsigned long flags;
   199	
   200		if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
   201			return;
   202	
   203		spin_lock_irqsave(&vha->cmd_list_lock, flags);
   204		list_del(&tgt_cmd->cmd_list);
   205		spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
   206	
   207		kfree(tgt_cmd);
   208	}
   209	
   210	static struct nvmet_fc_target_template qla_nvmet_fc_transport = {
   211		.targetport_delete	= qla_nvmet_targetport_delete,
   212		.xmt_ls_rsp		= qla_nvmet_ls_rsp,
   213		.fcp_op			= qla_nvmet_fcp_op,
   214		.fcp_abort		= qla_nvmet_fcp_abort,
   215		.fcp_req_release	= qla_nvmet_fcp_req_release,
   216		.max_hw_queues		= 8,
   217		.max_sgl_segments	= 128,
   218		.max_dif_sgl_segments	= 64,
   219		.dma_boundary		= 0xFFFFFFFF,
   220		.target_features	= NVMET_FCTGTFEAT_READDATA_RSP |
   221						NVMET_FCTGTFEAT_CMD_IN_ISR |
   222						NVMET_FCTGTFEAT_OPDONE_IN_ISR,
   223		.target_priv_sz	= sizeof(struct nvme_private),
   224	};
   225	
   226	/*
   227	 * qla_nvmet_create_targetport -
   228	 * Create a targetport. Registers the template with the nvme-t
   229	 * layer
   230	 */
   231	int qla_nvmet_create_targetport(struct scsi_qla_host *vha)
   232	{
   233		struct nvmet_fc_port_info pinfo;
   234		struct qla_nvmet_tgtport *tport;
   235		int error = 0;
   236	
   237		if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
   238			return 0;
   239	
 > 240		ql_dbg(ql_dbg_nvme, vha, 0xe081,
   241			"Creating target port for :%p\n", vha);
   242	
   243		memset(&pinfo, 0, (sizeof(struct nvmet_fc_port_info)));
   244		pinfo.node_name = wwn_to_u64(vha->node_name);
   245		pinfo.port_name = wwn_to_u64(vha->port_name);
   246		pinfo.port_id	= vha->d_id.b24;
   247	
   248		error = nvmet_fc_register_targetport(&pinfo,
   249		    &qla_nvmet_fc_transport, &vha->hw->pdev->dev,
   250		    &vha->targetport);
   251	
   252		if (error) {
   253			ql_dbg(ql_dbg_nvme, vha, 0xe082,
   254				"Cannot register NVME transport:%d\n", error);
   255			return error;
   256		}
   257		tport = (struct qla_nvmet_tgtport *)vha->targetport->private;
   258		tport->vha = vha;
   259		ql_dbg(ql_dbg_nvme, vha, 0xe082,
   260			" Registered NVME transport:%p WWPN:%llx\n",
   261			tport, pinfo.port_name);
   262		return 0;
   263	}
   264	
   265	/*
   266	 * qla_nvmet_delete -
   267	 * Delete a targetport.
   268	 */
   269	int qla_nvmet_delete(struct scsi_qla_host *vha)
   270	{
   271		struct qla_nvmet_tgtport *tport;
   272	
   273		if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
   274			return 0;
   275	
   276		if (!vha->flags.nvmet_enabled)
   277			return 0;
   278		if (vha->targetport) {
 > 279			tport = (struct qla_nvmet_tgtport *)vha->targetport->private;
   280	
   281			ql_dbg(ql_dbg_nvme, vha, 0xe083,
   282				"Deleting target port :%p\n", tport);
   283			init_completion(&tport->tport_del);
 > 284			nvmet_fc_unregister_targetport(vha->targetport);
   285			wait_for_completion_timeout(&tport->tport_del, 5);
   286	
 > 287			nvmet_release_sessions(vha);
   288		}
   289		return 0;
   290	}
   291	

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Patch

diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 17d5bc1cc56b..ec924733c10e 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,7 +1,8 @@ 
 # SPDX-License-Identifier: GPL-2.0
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
 		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
+		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o \
+		qla_nvmet.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_nvmet.c b/drivers/scsi/qla2xxx/qla_nvmet.c
new file mode 100644
index 000000000000..5335c0618f00
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nvmet.c
@@ -0,0 +1,798 @@ 
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/nvme.h>
+#include <linux/nvme-fc.h>
+
+#include "qla_nvme.h"
+#include "qla_nvmet.h"
+
+static void qla_nvmet_send_resp_ctio(struct qla_qpair *qpair,
+	struct qla_nvmet_cmd *cmd, struct nvmefc_tgt_fcp_req *rsp);
+static void qla_nvmet_send_abts_ctio(struct scsi_qla_host *vha,
+		struct abts_recv_from_24xx *abts, bool flag);
+
+/*
+ * qla_nvmet_targetport_delete -
+ * Invoked by the nvmet to indicate that the target port has
+ * been deleted
+ */
+static void
+qla_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
+{
+	struct qla_nvmet_tgtport *tport = targetport->private;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return;
+
+	complete(&tport->tport_del);
+}
+
+/*
+ * qlt_nvmet_ls_done -
+ * Invoked by the firmware interface to indicate the completion
+ * of an LS cmd
+ * Free all associated resources of the LS cmd
+ */
+static void qlt_nvmet_ls_done(void *ptr, int res)
+{
+	struct srb *sp = ptr;
+	struct srb_iocb   *nvme = &sp->u.iocb_cmd;
+	struct nvmefc_tgt_ls_req *rsp = nvme->u.nvme.desc;
+	struct qla_nvmet_cmd *tgt_cmd = nvme->u.nvme.cmd;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return;
+
+	ql_log(ql_log_info, sp->vha, 0x11000,
+		"Done with NVME LS4 req\n");
+
+	ql_log(ql_log_info, sp->vha, 0x11001,
+		"sp: %p vha: %p, rsp: %p, cmd: %p\n",
+		sp, sp->vha, nvme->u.nvme.desc, nvme->u.nvme.cmd);
+
+	rsp->done(rsp);
+	/* Free tgt_cmd */
+	kfree(tgt_cmd->buf);
+	kfree(tgt_cmd);
+	qla2x00_rel_sp(sp);
+}
+
+/*
+ * qla_nvmet_ls_rsp -
+ * Invoked by the nvme-t to complete the LS req.
+ * Prepare and send a response CTIO to the firmware.
+ */
+static int
+qla_nvmet_ls_rsp(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_ls_req *rsp)
+{
+	struct qla_nvmet_cmd *tgt_cmd =
+		container_of(rsp, struct qla_nvmet_cmd, cmd.ls_req);
+	struct scsi_qla_host *vha = tgt_cmd->vha;
+	struct srb_iocb   *nvme;
+	int     rval = QLA_FUNCTION_FAILED;
+	srb_t *sp;
+
+	ql_log(ql_log_info, vha, 0x11002,
+		"Dumping the NVMET-LS response buffer\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+		(uint8_t *)rsp->rspbuf, rsp->rsplen);
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, NULL, GFP_ATOMIC);
+	if (!sp) {
+		ql_log(ql_log_info, vha, 0x11003, "Failed to allocate SRB\n");
+		return -ENOMEM;
+	}
+
+	sp->type = SRB_NVMET_LS;
+	sp->done = qlt_nvmet_ls_done;
+	sp->vha = vha;
+	sp->fcport = tgt_cmd->fcport;
+
+	nvme = &sp->u.iocb_cmd;
+	nvme->u.nvme.rsp_dma = rsp->rspdma;
+	nvme->u.nvme.rsp_len = rsp->rsplen;
+	nvme->u.nvme.exchange_address = tgt_cmd->atio.u.pt_ls4.exchange_address;
+	nvme->u.nvme.nport_handle = tgt_cmd->atio.u.pt_ls4.nport_handle;
+	nvme->u.nvme.vp_index = tgt_cmd->atio.u.pt_ls4.vp_index;
+
+	nvme->u.nvme.cmd = tgt_cmd; /* To be freed */
+	nvme->u.nvme.desc = rsp; /* Call back to nvmet */
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x11004,
+			"qla2x00_start_sp failed = %d\n", rval);
+		return rval;
+	}
+
+	return 0;
+}
+
+/*
+ * qla_nvmet_fcp_op -
+ * Invoked by the nvme-t to complete the IO.
+ * Prepare and send a response CTIO to the firmware.
+ */
+static int
+qla_nvmet_fcp_op(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_fcp_req *rsp)
+{
+	struct qla_nvmet_cmd *tgt_cmd =
+		container_of(rsp, struct qla_nvmet_cmd, cmd.fcp_req);
+	struct scsi_qla_host *vha = tgt_cmd->vha;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return 0;
+
+	/* Prepare and send CTIO 82h */
+	qla_nvmet_send_resp_ctio(vha->qpair, tgt_cmd, rsp);
+
+	return 0;
+}
+
+/*
+ * qla_nvmet_fcp_abort_done
+ * free up the used resources
+ */
+static void qla_nvmet_fcp_abort_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+
+	qla2x00_rel_sp(sp);
+}
+
+/*
+ * qla_nvmet_fcp_abort -
+ * Invoked by the nvme-t to abort an IO
+ * Send an abort to the firmware
+ */
+static void
+qla_nvmet_fcp_abort(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_fcp_req *req)
+{
+	struct qla_nvmet_cmd *tgt_cmd =
+		container_of(req, struct qla_nvmet_cmd, cmd.fcp_req);
+	struct scsi_qla_host *vha = tgt_cmd->vha;
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return;
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+	if (!sp) {
+		ql_log(ql_log_info, vha, 0x11005, "Failed to allocate SRB\n");
+		return;
+	}
+
+	sp->type = SRB_NVMET_SEND_ABTS;
+	sp->done = qla_nvmet_fcp_abort_done;
+	sp->vha = vha;
+	sp->fcport = tgt_cmd->fcport;
+
+	ha->isp_ops->abort_command(sp);
+
+}
+
+/*
+ * qla_nvmet_fcp_req_release -
+ * Delete the cmd from the list and free the cmd
+ */
+static void
+qla_nvmet_fcp_req_release(struct nvmet_fc_target_port *tgtport,
+			struct nvmefc_tgt_fcp_req *rsp)
+{
+	struct qla_nvmet_cmd *tgt_cmd =
+		container_of(rsp, struct qla_nvmet_cmd, cmd.fcp_req);
+	scsi_qla_host_t *vha = tgt_cmd->vha;
+	unsigned long flags;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return;
+
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_del(&tgt_cmd->cmd_list);
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+	kfree(tgt_cmd);
+}
+
+static struct nvmet_fc_target_template qla_nvmet_fc_transport = {
+	.targetport_delete	= qla_nvmet_targetport_delete,
+	.xmt_ls_rsp		= qla_nvmet_ls_rsp,
+	.fcp_op			= qla_nvmet_fcp_op,
+	.fcp_abort		= qla_nvmet_fcp_abort,
+	.fcp_req_release	= qla_nvmet_fcp_req_release,
+	.max_hw_queues		= 8,
+	.max_sgl_segments	= 128,
+	.max_dif_sgl_segments	= 64,
+	.dma_boundary		= 0xFFFFFFFF,
+	.target_features	= NVMET_FCTGTFEAT_READDATA_RSP |
+					NVMET_FCTGTFEAT_CMD_IN_ISR |
+					NVMET_FCTGTFEAT_OPDONE_IN_ISR,
+	.target_priv_sz	= sizeof(struct nvme_private),
+};
+
+/*
+ * qla_nvmet_create_targetport -
+ * Create a targetport. Registers the template with the nvme-t
+ * layer
+ */
+int qla_nvmet_create_targetport(struct scsi_qla_host *vha)
+{
+	struct nvmet_fc_port_info pinfo;
+	struct qla_nvmet_tgtport *tport;
+	int error = 0;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return 0;
+
+	ql_dbg(ql_dbg_nvme, vha, 0xe081,
+		"Creating target port for :%p\n", vha);
+
+	memset(&pinfo, 0, (sizeof(struct nvmet_fc_port_info)));
+	pinfo.node_name = wwn_to_u64(vha->node_name);
+	pinfo.port_name = wwn_to_u64(vha->port_name);
+	pinfo.port_id	= vha->d_id.b24;
+
+	error = nvmet_fc_register_targetport(&pinfo,
+	    &qla_nvmet_fc_transport, &vha->hw->pdev->dev,
+	    &vha->targetport);
+
+	if (error) {
+		ql_dbg(ql_dbg_nvme, vha, 0xe082,
+			"Cannot register NVME transport:%d\n", error);
+		return error;
+	}
+	tport = (struct qla_nvmet_tgtport *)vha->targetport->private;
+	tport->vha = vha;
+	ql_dbg(ql_dbg_nvme, vha, 0xe082,
+		" Registered NVME transport:%p WWPN:%llx\n",
+		tport, pinfo.port_name);
+	return 0;
+}
+
+/*
+ * qla_nvmet_delete -
+ * Delete a targetport.
+ */
+int qla_nvmet_delete(struct scsi_qla_host *vha)
+{
+	struct qla_nvmet_tgtport *tport;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return 0;
+
+	if (!vha->flags.nvmet_enabled)
+		return 0;
+	if (vha->targetport) {
+		tport = (struct qla_nvmet_tgtport *)vha->targetport->private;
+
+		ql_dbg(ql_dbg_nvme, vha, 0xe083,
+			"Deleting target port :%p\n", tport);
+		init_completion(&tport->tport_del);
+		nvmet_fc_unregister_targetport(vha->targetport);
+		wait_for_completion_timeout(&tport->tport_del, 5);
+
+		nvmet_release_sessions(vha);
+	}
+	return 0;
+}
+
+/*
+ * qla_nvmet_handle_ls -
+ * Handle a link service request from the initiator.
+ * Get the LS payload from the ATIO queue, invoke
+ * nvmet_fc_rcv_ls_req to pass the LS req to nvmet.
+ */
+int qla_nvmet_handle_ls(struct scsi_qla_host *vha,
+	struct pt_ls4_rx_unsol *pt_ls4, void *buf)
+{
+	struct qla_nvmet_cmd *tgt_cmd;
+	uint32_t size;
+	int ret;
+	uint32_t look_up_sid;
+	fc_port_t *sess = NULL;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return 0;
+
+	look_up_sid = pt_ls4->s_id[2] << 16 |
+	    pt_ls4->s_id[1] << 8 | pt_ls4->s_id[0];
+
+	ql_log(ql_log_info, vha, 0x11005,
+		"%s - Look UP sid: %#x\n", __func__, look_up_sid);
+
+	sess = qla_nvmet_find_sess_by_s_id(vha, look_up_sid);
+	if (unlikely(!sess))
+		WARN_ON(1);
+
+	size = cpu_to_le16(pt_ls4->desc_len) + 8;
+
+	tgt_cmd = kzalloc(sizeof(struct qla_nvmet_cmd), GFP_ATOMIC);
+	if (tgt_cmd == NULL)
+		return -ENOMEM;
+
+	tgt_cmd->vha = vha;
+	tgt_cmd->ox_id = pt_ls4->ox_id;
+	tgt_cmd->buf = buf;
+	/* Store the received nphdl, rx_exh_addr etc */
+	memcpy(&tgt_cmd->atio.u.pt_ls4, pt_ls4, sizeof(struct pt_ls4_rx_unsol));
+	tgt_cmd->fcport = sess;
+
+	ql_log(ql_log_info, vha, 0x11006,
+		"Dumping the PURLS-ATIO request\n");
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075,
+		(uint8_t *)pt_ls4, sizeof(struct pt_ls4_rx_unsol));
+
+	ql_log(ql_log_info, vha, 0x11007,
+		"Sending LS to nvmet buf: %p, len: %#x\n", buf, size);
+
+	ret = nvmet_fc_rcv_ls_req(vha->targetport,
+		&tgt_cmd->cmd.ls_req, buf, size);
+
+	if (ret == 0) {
+		ql_log(ql_log_info, vha, 0x11008,
+			"LS req handled successfully\n");
+		return 0;
+	}
+	ql_log(ql_log_warn, vha, 0x11009,
+		"LS req failed\n");
+
+	return ret;
+}
+
+/*
+ * qla_nvmet_process_cmd -
+ * Handle NVME cmd request from the initiator.
+ * Get the NVME payload from the ATIO queue, invoke
+ * nvmet_fc_rcv_ls_req to pass the LS req to nvmet.
+ * On a failure send an abts to the initiator?
+ */
+int qla_nvmet_process_cmd(struct scsi_qla_host *vha,
+	struct qla_nvmet_cmd *tgt_cmd)
+{
+	int ret;
+	struct atio7_nvme_cmnd *nvme_cmd;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return 0;
+
+	nvme_cmd = (struct atio7_nvme_cmnd *)&tgt_cmd->nvme_cmd_iu;
+
+	ret = nvmet_fc_rcv_fcp_req(vha->targetport, &tgt_cmd->cmd.fcp_req,
+			nvme_cmd, tgt_cmd->cmd_len);
+	if (ret != 0) {
+		ql_log(ql_log_warn, vha, 0x1100a,
+			"%s-%d - Failed (ret: %#x) to process NVME command\n",
+				__func__, __LINE__, ret);
+		/* Send ABTS to initator ? */
+	}
+	return 0;
+}
+
+/*
+ * qla_nvmet_handle_abts
+ * Handle an abort from the initiator
+ * Invoke nvmet_fc_rcv_fcp_abort to pass the abts to the nvmet
+ */
+int qla_nvmet_handle_abts(struct scsi_qla_host *vha,
+	struct abts_recv_from_24xx *abts)
+{
+	uint16_t ox_id = cpu_to_be16(abts->fcp_hdr_le.ox_id);
+	unsigned long flags;
+	struct qla_nvmet_cmd *cmd = NULL;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return 0;
+
+	/* Retrieve the cmd from cmd list */
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+		if (cmd->ox_id == ox_id)
+			break; /* Found the cmd */
+	}
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+	if (!cmd) {
+		ql_log(ql_log_warn, vha, 0x1100b,
+			"%s-%d - Command not found\n", __func__, __LINE__);
+		/* Send a RJT */
+		qla_nvmet_send_abts_ctio(vha, abts, 0);
+		return 0;
+	}
+
+	nvmet_fc_rcv_fcp_abort(vha->targetport, &cmd->cmd.fcp_req);
+	/* Send an ACC */
+	qla_nvmet_send_abts_ctio(vha, abts, 1);
+
+	return 0;
+}
+
+/*
+ * qla_nvmet_abts_done
+ * Complete the cmd back to the nvme-t and
+ * free up the used resources
+ */
+static void qla_nvmet_abts_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+
+	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
+		return;
+
+	qla2x00_rel_sp(sp);
+}
+/*
+ * qla_nvmet_fcp_done
+ * Complete the cmd back to the nvme-t and
+ * free up the used resources
+ */
+static void qla_nvmet_fcp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct nvmefc_tgt_fcp_req *rsp;
+
+	rsp = sp->u.iocb_cmd.u.nvme.desc;
+
+	if (res) {
+		rsp->fcp_error = NVME_SC_SUCCESS;
+		if (rsp->op == NVMET_FCOP_RSP)
+			rsp->transferred_length = 0;
+		else
+			rsp->transferred_length = rsp->transfer_length;
+	} else {
+		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
+		rsp->transferred_length = 0;
+	}
+	rsp->done(rsp);
+	qla2x00_rel_sp(sp);
+}
+
+/*
+ * qla_nvmet_send_resp_ctio
+ * Send the response CTIO to the firmware
+ */
+static void qla_nvmet_send_resp_ctio(struct qla_qpair *qpair,
+	struct qla_nvmet_cmd *cmd, struct nvmefc_tgt_fcp_req *rsp_buf)
+{
+	struct atio_from_isp *atio = &cmd->atio;
+	struct ctio_nvme_to_27xx *ctio;
+	struct scsi_qla_host *vha = cmd->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct fcp_hdr *fchdr = &atio->u.nvme_isp27.fcp_hdr;
+	srb_t *sp;
+	unsigned long flags;
+	uint16_t temp, c_flags = 0;
+	struct req_que *req = vha->hw->req_q_map[0];
+	uint32_t req_cnt = 1;
+	uint32_t *cur_dsd;
+	uint16_t avail_dsds;
+	uint16_t tot_dsds, i, cnt;
+	struct scatterlist *sgl, *sg;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, cmd->fcport, GFP_ATOMIC);
+	if (!sp) {
+		ql_log(ql_log_info, vha, 0x1100c, "Failed to allocate SRB\n");
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return;
+	}
+
+	sp->type = SRB_NVMET_FCP;
+	sp->name = "nvmet_fcp";
+	sp->done = qla_nvmet_fcp_done;
+	sp->u.iocb_cmd.u.nvme.desc = rsp_buf;
+	sp->u.iocb_cmd.u.nvme.cmd = cmd;
+
+	ctio = (struct ctio_nvme_to_27xx *)qla2x00_alloc_iocbs(vha, sp);
+	if (!ctio) {
+		ql_dbg(ql_dbg_nvme, vha, 0x3067,
+		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
+		    vha->host_no, __func__);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return;
+	}
+
+	ctio->entry_type = CTIO_NVME;
+	ctio->entry_count = 1;
+	ctio->handle = sp->handle;
+	ctio->nport_handle = cpu_to_le16(cmd->fcport->loop_id);
+	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->vp_index = vha->vp_idx;
+	ctio->initiator_id[0] = fchdr->s_id[2];
+	ctio->initiator_id[1] = fchdr->s_id[1];
+	ctio->initiator_id[2] = fchdr->s_id[0];
+	ctio->exchange_addr = atio->u.nvme_isp27.exchange_addr;
+	temp = be16_to_cpu(fchdr->ox_id);
+	ctio->ox_id = cpu_to_le16(temp);
+	tot_dsds = ctio->dseg_count = cpu_to_le16(rsp_buf->sg_cnt);
+	c_flags = atio->u.nvme_isp27.attr << 9;
+
+	if ((ctio->dseg_count > 1) && (rsp_buf->op != NVMET_FCOP_RSP)) {
+		/* Check for additional continuation IOCB space */
+		req_cnt = qla24xx_calc_iocbs(vha, ctio->dseg_count);
+		ctio->entry_count = req_cnt;
+
+		if (req->cnt < (req_cnt + 2)) {
+			cnt = (uint16_t)RD_REG_DWORD_RELAXED(req->req_q_out);
+
+			if  (req->ring_index < cnt)
+				req->cnt = cnt - req->ring_index;
+			else
+				req->cnt = req->length -
+				    (req->ring_index - cnt);
+
+			if (unlikely(req->cnt < (req_cnt + 2))) {
+				ql_log(ql_log_warn, vha, 0xfff,
+					"Running out of IOCB space for continuation IOCBs\n");
+				goto err_exit;
+			}
+		}
+	}
+
+	switch (rsp_buf->op) {
+	case NVMET_FCOP_READDATA:
+	case NVMET_FCOP_READDATA_RSP:
+		/* Populate the CTIO resp with the SGL present in the rsp */
+		ql_log(ql_log_info, vha, 0x1100c,
+			"op: %#x, ox_id=%x c_flags=%x transfer_length: %#x req_cnt: %#x, tot_dsds: %#x\n",
+			rsp_buf->op, ctio->ox_id, c_flags,
+			rsp_buf->transfer_length, req_cnt, tot_dsds);
+
+		avail_dsds = 1;
+		cur_dsd = (uint32_t *)
+				&ctio->u.nvme_status_mode0.dsd0[0];
+		sgl = rsp_buf->sg;
+
+		/* Load data segments */
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			dma_addr_t      sle_dma;
+			cont_a64_entry_t *cont_pkt;
+
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				/*
+				 * Five DSDs are available in the Cont
+				 * Type 1 IOCB.
+				 */
+
+				/* Adjust ring index */
+				req->ring_index++;
+				if (req->ring_index == req->length) {
+					req->ring_index = 0;
+					req->ring_ptr = req->ring;
+				} else {
+					req->ring_ptr++;
+				}
+				cont_pkt = (cont_a64_entry_t *)
+						req->ring_ptr;
+				*((uint32_t *)(&cont_pkt->entry_type)) =
+					cpu_to_le32(CONTINUE_A64_TYPE);
+
+				cur_dsd = (uint32_t *)
+						cont_pkt->dseg_0_address;
+				avail_dsds = 5;
+			}
+
+			sle_dma = sg_dma_address(sg);
+			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+			avail_dsds--;
+		}
+
+		ctio->u.nvme_status_mode0.transfer_len =
+			cpu_to_le32(rsp_buf->transfer_length);
+		ctio->u.nvme_status_mode0.relative_offset =
+			cpu_to_le32(rsp_buf->offset);
+		ctio->flags = cpu_to_le16(c_flags | 0x2);
+
+		if (rsp_buf->op == NVMET_FCOP_READDATA_RSP) {
+			if (rsp_buf->rsplen == 12) {
+				ctio->flags |=
+					NVMET_CTIO_STS_MODE0 |
+					NVMET_CTIO_SEND_STATUS;
+			} else if (rsp_buf->rsplen == 32) {
+				struct nvme_fc_ersp_iu *ersp =
+				    rsp_buf->rspaddr;
+				uint32_t iter = 4, *inbuf, *outbuf;
+
+				ctio->flags |=
+					NVMET_CTIO_STS_MODE1 |
+					NVMET_CTIO_SEND_STATUS;
+				inbuf = (uint32_t *)
+					&((uint8_t *)rsp_buf->rspaddr)[16];
+				outbuf = (uint32_t *)
+				    ctio->u.nvme_status_mode1.nvme_comp_q_entry;
+				for (; iter; iter--)
+					*outbuf++ = cpu_to_be32(*inbuf++);
+
+				ctio->u.nvme_status_mode1.rsp_seq_num =
+					cpu_to_be32(ersp->rsn);
+				ctio->u.nvme_status_mode1.transfer_len =
+					cpu_to_be32(ersp->xfrd_len);
+			} else
+				ql_log(ql_log_warn, vha, 0x1100d,
+						"unhandled resp len = %x\n",
+						rsp_buf->rsplen);
+		}
+		break;
+
+	case NVMET_FCOP_WRITEDATA:
+		/* Send transfer rdy */
+		ql_log(ql_log_info, vha, 0x1100e,
+			"FCOP_WRITE: ox_id=%x c_flags=%x transfer_length: %#x req_cnt: %#x, tot_dsds: %#x\n",
+			ctio->ox_id, c_flags, rsp_buf->transfer_length,
+			req_cnt, tot_dsds);
+
+		ctio->flags = cpu_to_le16(c_flags | 0x1);
+
+		avail_dsds = 1;
+		cur_dsd = (uint32_t *)&ctio->u.nvme_status_mode0.dsd0[0];
+		sgl = rsp_buf->sg;
+
+		/* Load data segments */
+		for_each_sg(sgl, sg, tot_dsds, i) {
+			dma_addr_t      sle_dma;
+			cont_a64_entry_t *cont_pkt;
+
+			/* Allocate additional continuation packets? */
+			if (avail_dsds == 0) {
+				/*
+				 * Five DSDs are available in the Continuation
+				 * Type 1 IOCB.
+				 */
+
+				/* Adjust ring index */
+				req->ring_index++;
+				if (req->ring_index == req->length) {
+					req->ring_index = 0;
+					req->ring_ptr = req->ring;
+				} else {
+					req->ring_ptr++;
+				}
+				cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+				*((uint32_t *)(&cont_pkt->entry_type)) =
+					cpu_to_le32(CONTINUE_A64_TYPE);
+
+				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+				avail_dsds = 5;
+			}
+
+			sle_dma = sg_dma_address(sg);
+			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+			*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+			avail_dsds--;
+		}
+
+		ctio->u.nvme_status_mode0.transfer_len =
+			cpu_to_le32(rsp_buf->transfer_length);
+		ctio->u.nvme_status_mode0.relative_offset =
+			cpu_to_le32(rsp_buf->offset);
+
+		break;
+	case NVMET_FCOP_RSP:
+		/* Send a response frame */
+		ctio->flags = cpu_to_le16(c_flags);
+		if (rsp_buf->rsplen == 12) {
+			ctio->flags |=
+			NVMET_CTIO_STS_MODE0 | NVMET_CTIO_SEND_STATUS;
+		} else if (rsp_buf->rsplen == 32) {
+			struct nvme_fc_ersp_iu *ersp = rsp_buf->rspaddr;
+			uint32_t iter = 4, *inbuf, *outbuf;
+
+			ctio->flags |=
+				NVMET_CTIO_STS_MODE1 | NVMET_CTIO_SEND_STATUS;
+			inbuf = (uint32_t *)
+				&((uint8_t *)rsp_buf->rspaddr)[16];
+			outbuf = (uint32_t *)
+				ctio->u.nvme_status_mode1.nvme_comp_q_entry;
+			for (; iter; iter--)
+				*outbuf++ = cpu_to_be32(*inbuf++);
+			ctio->u.nvme_status_mode1.rsp_seq_num =
+						cpu_to_be32(ersp->rsn);
+			ctio->u.nvme_status_mode1.transfer_len =
+						cpu_to_be32(ersp->xfrd_len);
+
+			ql_log(ql_log_info, vha, 0x1100f,
+				"op: %#x, rsplen: %#x\n", rsp_buf->op,
+				rsp_buf->rsplen);
+		} else
+			ql_log(ql_log_warn, vha, 0x11010,
+				"unhandled resp len = %x for op NVMET_FCOP_RSP\n",
+				rsp_buf->rsplen);
+		break;
+	}
+
+	/* Memory Barrier */
+	wmb();
+
+	qla2x00_start_iocbs(vha, vha->hw->req_q_map[0]);
+err_exit:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/*
+ * qla_nvmet_send_abts_ctio
+ * Send the abts CTIO to the firmware
+ */
+static void qla_nvmet_send_abts_ctio(struct scsi_qla_host *vha,
+		struct abts_recv_from_24xx *rabts, bool flag)
+{
+	struct abts_resp_to_24xx *resp;
+	srb_t *sp;
+	uint32_t f_ctl;
+	uint8_t *p;
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, NULL, GFP_ATOMIC);
+	if (!sp) {
+		ql_log(ql_log_info, vha, 0x11011, "Failed to allocate SRB\n");
+		return;
+	}
+
+	sp->type = SRB_NVMET_ABTS;
+	sp->name = "nvmet_abts";
+	sp->done = qla_nvmet_abts_done;
+
+	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, sp);
+	if (!resp) {
+		ql_dbg(ql_dbg_nvme, vha, 0x3067,
+		    "qla2x00t(%ld): %s failed: unable to allocate request packet",
+		    vha->host_no, __func__);
+		return;
+	}
+
+	resp->entry_type = ABTS_RESP_24XX;
+	resp->entry_count = 1;
+	resp->handle = sp->handle;
+
+	resp->nport_handle = rabts->nport_handle;
+	resp->vp_index = rabts->vp_index;
+	resp->exchange_address = rabts->exchange_addr_to_abort;
+	resp->fcp_hdr_le = rabts->fcp_hdr_le;
+	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+	    F_CTL_SEQ_INITIATIVE);
+	p = (uint8_t *)&f_ctl;
+	resp->fcp_hdr_le.f_ctl[0] = *p++;
+	resp->fcp_hdr_le.f_ctl[1] = *p++;
+	resp->fcp_hdr_le.f_ctl[2] = *p;
+
+	resp->fcp_hdr_le.d_id[0] = rabts->fcp_hdr_le.s_id[0];
+	resp->fcp_hdr_le.d_id[1] = rabts->fcp_hdr_le.s_id[1];
+	resp->fcp_hdr_le.d_id[2] = rabts->fcp_hdr_le.s_id[2];
+	resp->fcp_hdr_le.s_id[0] = rabts->fcp_hdr_le.d_id[0];
+	resp->fcp_hdr_le.s_id[1] = rabts->fcp_hdr_le.d_id[1];
+	resp->fcp_hdr_le.s_id[2] = rabts->fcp_hdr_le.d_id[2];
+
+	if (flag) { /* BA_ACC */
+		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+		resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+		resp->payload.ba_acct.low_seq_cnt = 0x0000;
+		resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+		resp->payload.ba_acct.ox_id = rabts->fcp_hdr_le.ox_id;
+		resp->payload.ba_acct.rx_id = rabts->fcp_hdr_le.rx_id;
+	} else {
+		resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+		resp->payload.ba_rjt.reason_code =
+			BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+	}
+	/* Memory Barrier */
+	wmb();
+
+	qla2x00_start_iocbs(vha, vha->hw->req_q_map[0]);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvmet.h b/drivers/scsi/qla2xxx/qla_nvmet.h
new file mode 100644
index 000000000000..188ad2c5e3f1
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nvmet.h
@@ -0,0 +1,129 @@ 
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NVMET_H
+#define __QLA_NVMET_H
+
+#include <linux/blk-mq.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+#include <linux/nvme-fc-driver.h>
+
+#include "qla_def.h"
+
+struct qla_nvmet_tgtport {
+	struct scsi_qla_host *vha;
+	struct completion tport_del;
+};
+
+struct qla_nvmet_cmd {
+	union {
+		struct nvmefc_tgt_ls_req ls_req;
+		struct nvmefc_tgt_fcp_req fcp_req;
+	} cmd;
+	struct scsi_qla_host *vha;
+	void *buf;
+	struct atio_from_isp atio;
+	struct atio7_nvme_cmnd nvme_cmd_iu;
+	uint16_t cmd_len;
+	spinlock_t nvme_cmd_lock;
+	struct list_head cmd_list; /* List of cmds */
+	struct work_struct work;
+
+	struct scatterlist *sg;	/* cmd data buffer SG vector */
+	int sg_cnt;		/* SG segments count */
+	int bufflen;		/* cmd buffer length */
+	int offset;
+	enum dma_data_direction dma_data_direction;
+	uint16_t ox_id;
+	struct fc_port *fcport;
+};
+
+#define CTIO_NVME    0x82            /* CTIO FC-NVMe IOCB */
+struct ctio_nvme_to_27xx {
+	uint8_t entry_type;             /* Entry type. */
+	uint8_t entry_count;            /* Entry count. */
+	uint8_t sys_define;             /* System defined. */
+	uint8_t entry_status;           /* Entry Status. */
+
+	uint32_t handle;                /* System handle. */
+	uint16_t nport_handle;          /* N_PORT handle. */
+	uint16_t timeout;               /* Command timeout. */
+
+	uint16_t dseg_count;            /* Data segment count. */
+	uint8_t	 vp_index;		/* vp_index */
+	uint8_t  addl_flags;		/* Additional flags */
+
+	uint8_t  initiator_id[3];	/* Initiator ID */
+	uint8_t	 rsvd1;
+
+	uint32_t exchange_addr;		/* Exch addr */
+
+	uint16_t ox_id;			/* Ox ID */
+	uint16_t flags;
+#define NVMET_CTIO_STS_MODE0 0
+#define NVMET_CTIO_STS_MODE1 BIT_6
+#define NVMET_CTIO_STS_MODE2 BIT_7
+#define NVMET_CTIO_SEND_STATUS BIT_15
+	union {
+		struct {
+			uint8_t reserved1[8];
+			uint32_t relative_offset;
+			uint8_t	reserved2[4];
+			uint32_t transfer_len;
+			uint8_t reserved3[4];
+			uint32_t dsd0[2];
+			uint32_t dsd0_len;
+		} nvme_status_mode0;
+		struct {
+			uint8_t nvme_comp_q_entry[16];
+			uint32_t transfer_len;
+			uint32_t rsp_seq_num;
+			uint32_t dsd0[2];
+			uint32_t dsd0_len;
+		} nvme_status_mode1;
+		struct {
+			uint32_t reserved4[4];
+			uint32_t transfer_len;
+			uint32_t reserved5;
+			uint32_t rsp_dsd[2];
+			uint32_t rsp_dsd_len;
+		} nvme_status_mode2;
+	} u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type FC NVMe from ISP to target driver
+ * returned entry structure.
+ */
+struct ctio_nvme_from_27xx {
+	uint8_t	 entry_type;		    /* Entry type. */
+	uint8_t	 entry_count;		    /* Entry count. */
+	uint8_t	 sys_define;		    /* System defined. */
+	uint8_t	 entry_status;		    /* Entry Status. */
+	uint32_t handle;		    /* System defined handle */
+	uint16_t status;
+	uint16_t timeout;
+	uint16_t dseg_count;		    /* Data segment count. */
+	uint8_t  vp_index;
+	uint8_t  reserved1[5];
+	uint32_t exchange_address;
+	uint16_t ox_id;
+	uint16_t flags;
+	uint32_t residual;
+	uint8_t  reserved2[32];
+} __packed;
+
+int qla_nvmet_handle_ls(struct scsi_qla_host *vha,
+	struct pt_ls4_rx_unsol *ls4, void *buf);
+int qla_nvmet_create_targetport(struct scsi_qla_host *vha);
+int qla_nvmet_delete(struct scsi_qla_host *vha);
+int qla_nvmet_handle_abts(struct scsi_qla_host *vha,
+	struct abts_recv_from_24xx *abts);
+int qla_nvmet_process_cmd(struct scsi_qla_host *vha,
+	struct qla_nvmet_cmd *cmd);
+
+#endif
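
Nothing in this patch calls the new entry points yet; the call sites arrive in later patches of the series (which is what the robot's bisectability note refers to). For orientation, a purely illustrative sketch of where the hooks declared in qla_nvmet.h would typically be invoked is shown below; only the qla_nvmet_*() names come from the header above, the surrounding example_* functions are invented for the example.

    /*
     * Illustrative only: the hook points are assumptions, not the series'
     * actual call sites.
     */

    /* On port bring-up, once FC-NVMe target mode is enabled for the vha: */
    static int example_nvmet_enable(struct scsi_qla_host *vha)
    {
            return qla_nvmet_create_targetport(vha); /* register with nvmet-fc */
    }

    /* On (v)port teardown: */
    static void example_nvmet_disable(struct scsi_qla_host *vha)
    {
            qla_nvmet_delete(vha); /* unregister target port, release sessions */
    }

    /*
     * From the ATIO/response-queue processing paths, unsolicited FC-NVMe
     * traffic would then be routed to:
     *   qla_nvmet_handle_ls()   - pass-through LS4 requests (e.g. Create
     *                             Association / Create Connection),
     *   qla_nvmet_process_cmd() - FC-NVMe command IUs,
     *   qla_nvmet_handle_abts() - ABTS frames from the initiator.
     */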