From: james.smart@avagotech.com (James Smart)
To: linux-scsi@vger.kernel.org
Date: Fri, 22 May 2015 10:42:38 -0400
Subject: [PATCH 4/9] lpfc: Add support for using block multi-queue
Message-ID: <555f405e.2o5/YwCIJ/19LVTb%james.smart@avagotech.com>

Add support for using block multi-queue

With blk-mq support in the SCSI mid-layer, lpfc can steer I/O based on the
information encoded in the request tag. This patch allows lpfc to use blk-mq
when it is enabled; when it is not, the driver falls back to the
Emulex-internal affinity mappings. The feature can be turned on by building
the kernel with CONFIG_SCSI_MQ_DEFAULT or by passing scsi_mod.use_blk_mq=Y on
the kernel command line.
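For readers unfamiliar with the scsi-mq tag layout, below is a minimal sketch
of how a hardware-queue index is recovered from a command's request tag.
example_scmd_to_hwq() is a hypothetical helper, not part of the patch, but the
blk_mq_unique_tag()/blk_mq_unique_tag_to_hwq() calls are the same ones used by
the new lpfc_sli4_scmd_to_wqidx_distr() routine added below.

/*
 * Hypothetical sketch, not part of the patch: map a SCSI command to a
 * blk-mq hardware queue index. Assumes a kernel that provides
 * shost_use_blk_mq(), blk_mq_unique_tag() and blk_mq_unique_tag_to_hwq().
 */
#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static u16 example_scmd_to_hwq(struct scsi_cmnd *cmnd)
{
	u32 tag;

	if (!shost_use_blk_mq(cmnd->device->host))
		return 0;	/* non-mq path: the driver picks its own queue */

	/* the unique tag encodes (hw queue index << 16) | per-queue tag */
	tag = blk_mq_unique_tag(cmnd->request);
	return blk_mq_unique_tag_to_hwq(tag);
}

The returned index selects one of the hardware queues that lpfc_create_port()
now advertises via shost->nr_hw_queues, so each I/O is issued on the SLI4 FCP
work queue that the block layer already associated with the submitting CPU.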
Signed-off-by: Dick Kennedy
Signed-off-by: James Smart
Reviewed-by: Hannes Reinecke
---
 drivers/scsi/lpfc/lpfc_init.c |  4 ++-
 drivers/scsi/lpfc/lpfc_scsi.c | 43 +++++++++++++++++++++++++
 drivers/scsi/lpfc/lpfc_scsi.h |  3 ++
 drivers/scsi/lpfc/lpfc_sli.c  | 74 ++++++++++++++-----------------------------
 4 files changed, 72 insertions(+), 52 deletions(-)

diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 14424e6..f962118 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3303,6 +3303,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	shost->max_lun = vport->cfg_max_luns;
 	shost->this_id = -1;
 	shost->max_cmd_len = 16;
+	shost->nr_hw_queues = phba->cfg_fcp_io_channel;
 	if (phba->sli_rev == LPFC_SLI_REV4) {
 		shost->dma_boundary =
 			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
@@ -8980,7 +8981,8 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 		phba->cfg_fcp_io_channel = vectors;
 	}
 
-	lpfc_sli4_set_affinity(phba, vectors);
+	if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
+		lpfc_sli4_set_affinity(phba, vectors);
 	return rc;
 
 cfg_fail_out:
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 116df9c..4a2a818 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3846,6 +3846,49 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 }
 
 /**
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
+ * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
+ * held.
+ * If scsi-mq is enabled, get the default block layer mapping of software queues
+ * to hardware queues. This information is saved in request tag.
+ *
+ * Return: index into SLI4 fast-path FCP queue index.
+ **/
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+				  struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
+	struct lpfc_vector_map_info *cpup;
+	int chann, cpu;
+	uint32_t tag;
+	uint16_t hwq;
+
+	if (shost_use_blk_mq(cmnd->device->host)) {
+		tag = blk_mq_unique_tag(cmnd->request);
+		hwq = blk_mq_unique_tag_to_hwq(tag);
+
+		return hwq;
+	}
+
+	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
+	    && phba->cfg_fcp_io_channel > 1) {
+		cpu = smp_processor_id();
+		if (cpu < phba->sli4_hba.num_present_cpu) {
+			cpup = phba->sli4_hba.cpu_map;
+			cpup += cpu;
+			return cpup->channel_id;
+		}
+	}
+	chann = atomic_add_return(1, &phba->fcp_qidx);
+	chann = (chann % phba->cfg_fcp_io_channel);
+	return chann;
+}
+
+
+/**
  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
  * @phba: The Hba for which this call is being executed.
  * @pIocbIn: The command IOCBQ for the scsi cmnd.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index 474e30c..18b9260 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -184,3 +184,6 @@ struct lpfc_scsi_buf {
 #define FIND_FIRST_OAS_LUN	 0
 #define NO_MORE_OAS_LUN		-1
 #define NOT_OAS_ENABLED_LUN	NO_MORE_OAS_LUN
+
+int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
+				  struct lpfc_scsi_buf *lpfc_cmd);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 41d3370..07df296 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8138,36 +8138,6 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 }
 
 /**
- * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
- * @phba: Pointer to HBA context object.
- *
- * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
- * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
- * held.
- *
- * Return: index into SLI4 fast-path FCP queue index.
- **/
-static inline int
-lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
-{
-	struct lpfc_vector_map_info *cpup;
-	int chann, cpu;
-
-	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
-	    && phba->cfg_fcp_io_channel > 1) {
-		cpu = smp_processor_id();
-		if (cpu < phba->sli4_hba.num_present_cpu) {
-			cpup = phba->sli4_hba.cpu_map;
-			cpup += cpu;
-			return cpup->channel_id;
-		}
-	}
-	chann = atomic_add_return(1, &phba->fcp_qidx);
-	chann = (chann % phba->cfg_fcp_io_channel);
-	return chann;
-}
-
-/**
  * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
  * @phba: Pointer to HBA context object.
  * @piocb: Pointer to command iocb.
@@ -8807,27 +8777,29 @@ int
 lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
 		    struct lpfc_iocbq *piocb)
 {
-	if (phba->sli_rev == LPFC_SLI_REV4) {
-		if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
-			if (!(phba->cfg_fof) ||
-			    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
-				if (unlikely(!phba->sli4_hba.fcp_wq))
-					return LPFC_HBA_ERROR;
-				/*
-				 * for abort iocb fcp_wqidx should already
-				 * be setup based on what work queue we used.
-				 */
-				if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
-					piocb->fcp_wqidx =
-						lpfc_sli4_scmd_to_wqidx_distr(phba);
-				ring_number = MAX_SLI3_CONFIGURED_RINGS +
-					piocb->fcp_wqidx;
-			} else {
-				if (unlikely(!phba->sli4_hba.oas_wq))
-					return LPFC_HBA_ERROR;
-				piocb->fcp_wqidx = 0;
-				ring_number = LPFC_FCP_OAS_RING;
-			}
+	if (phba->sli_rev < LPFC_SLI_REV4)
+		return ring_number;
+
+	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+		if (!(phba->cfg_fof) ||
+		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
+			if (unlikely(!phba->sli4_hba.fcp_wq))
+				return LPFC_HBA_ERROR;
+			/*
+			 * for abort iocb fcp_wqidx should already
+			 * be setup based on what work queue we used.
+			 */
+			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
+				piocb->fcp_wqidx =
+					lpfc_sli4_scmd_to_wqidx_distr(phba,
+							piocb->context1);
+			ring_number = MAX_SLI3_CONFIGURED_RINGS +
+				piocb->fcp_wqidx;
+		} else {
+			if (unlikely(!phba->sli4_hba.oas_wq))
+				return LPFC_HBA_ERROR;
+			piocb->fcp_wqidx = 0;
+			ring_number = LPFC_FCP_OAS_RING;
 		}
 	}
 	return ring_number;