From patchwork Sun Jun 18 15:21:37 2017
X-Patchwork-Submitter: Sagi Grimberg
X-Patchwork-Id: 9794853
From: Sagi Grimberg
To: linux-nvme@lists.infradead.org
Cc: Christoph Hellwig, Keith Busch, linux-block@vger.kernel.org
Subject: [PATCH rfc 03/30] nvme-rdma: reuse configure/destroy admin queue
Date: Sun, 18 Jun 2017 18:21:37 +0300
Message-Id: <1497799324-19598-4-git-send-email-sagi@grimberg.me>
In-Reply-To: <1497799324-19598-1-git-send-email-sagi@grimberg.me>
References: <1497799324-19598-1-git-send-email-sagi@grimberg.me>
X-Mailer: git-send-email 2.7.4

These functions now have everything they need, as they are aware of
whether we are doing a full instantiation/removal. To that end, move
nvme_rdma_configure_admin_queue earlier in the file so it no longer
requires a forward declaration itself, and instead add forward
declarations for the blk_mq_ops structures it references.
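
As background for the last point: C allows taking the address of a
file-scope static object before its initializer appears, so forward
declarations of the ops structures are enough for the relocated setup
code to reference them. A minimal standalone sketch of the pattern
(illustrative stand-in type and names, not the driver code):

/* toy stand-in for the real struct blk_mq_ops */
struct ops {
        void (*complete)(void);
};

/* forward (tentative) declaration, like the ones this patch adds */
static const struct ops admin_mq_ops;

/* defined before the ops initializer, yet free to take its address */
static const struct ops *configure_admin_queue(void)
{
        return &admin_mq_ops;
}

static void admin_complete(void)
{
        /* nothing to do in the sketch */
}

static const struct ops admin_mq_ops = {
        .complete       = admin_complete,
};

int main(void)
{
        configure_admin_queue()->complete();
        return 0;
}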
Signed-off-by: Sagi Grimberg
---
 drivers/nvme/host/rdma.c | 253 ++++++++++++++++++++++-------------------
 1 file changed, 119 insertions(+), 134 deletions(-)

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3e4c6aa119ee..5fef5545e365 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -140,6 +140,9 @@ static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(nvme_rdma_ctrl_list);
 static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);
 
+static const struct blk_mq_ops nvme_rdma_mq_ops;
+static const struct blk_mq_ops nvme_rdma_admin_mq_ops;
+
 /*
  * Disabling this option makes small I/O goes faster, but is fundamentally
  * unsafe.  With it turned off we will have to register a global rkey that
@@ -562,20 +565,22 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
 
 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
+        if (test_bit(NVME_RDMA_Q_DELETING, &queue->flags))
+                return;
         rdma_disconnect(queue->cm_id);
         ib_drain_qp(queue->qp);
 }
 
 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 {
+        if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
+                return;
         nvme_rdma_destroy_queue_ib(queue);
         rdma_destroy_id(queue->cm_id);
 }
 
 static void nvme_rdma_stop_and_free_queue(struct nvme_rdma_queue *queue)
 {
-        if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
-                return;
         nvme_rdma_stop_queue(queue);
         nvme_rdma_free_queue(queue);
 }
@@ -671,6 +676,116 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remo
         nvme_rdma_free_queue(&ctrl->queues[0]);
 }
 
+static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, bool new)
+{
+        int error;
+
+        error = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
+        if (error)
+                return error;
+
+        ctrl->device = ctrl->queues[0].device;
+        ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+                ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+
+        if (new) {
+                /*
+                 * We need a reference on the device as long as the tag_set is alive,
+                 * as the MRs in the request structures need a valid ib_device.
+                 */
+                error = -EINVAL;
+                if (!nvme_rdma_dev_get(ctrl->device))
+                        goto out_free_queue;
+
+                memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
+                ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops;
+                ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
+                ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
+                ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+                ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
+                        SG_CHUNK_SIZE * sizeof(struct scatterlist);
+                ctrl->admin_tag_set.driver_data = ctrl;
+                ctrl->admin_tag_set.nr_hw_queues = 1;
+                ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
+
+                error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+                if (error)
+                        goto out_put_dev;
+
+                ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+                if (IS_ERR(ctrl->ctrl.admin_q)) {
+                        error = PTR_ERR(ctrl->ctrl.admin_q);
+                        goto out_free_tagset;
+                }
+
+                ctrl->ctrl.admin_connect_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+                if (IS_ERR(ctrl->ctrl.admin_connect_q)) {
+                        error = PTR_ERR(ctrl->ctrl.admin_connect_q);
+                        goto out_cleanup_queue;
+                }
+        } else {
+                error = blk_mq_reinit_tagset(&ctrl->admin_tag_set);
+                if (error)
+                        goto out_free_queue;
+        }
+
+        error = nvmf_connect_admin_queue(&ctrl->ctrl);
+        if (error)
+                goto out_cleanup_connect_queue;
+
+        set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
+
+        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
+        if (error) {
+                dev_err(ctrl->ctrl.device,
+                        "prop_get NVME_REG_CAP failed\n");
+                goto out_cleanup_connect_queue;
+        }
+
+        ctrl->ctrl.sqsize =
+                min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
+
+        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+        if (error)
+                goto out_cleanup_connect_queue;
+
+        ctrl->ctrl.max_hw_sectors =
+                (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
+
+        error = nvme_init_identify(&ctrl->ctrl);
+        if (error)
+                goto out_cleanup_connect_queue;
+
+        error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
+                &ctrl->async_event_sqe, sizeof(struct nvme_command),
+                DMA_TO_DEVICE);
+        if (error)
+                goto out_cleanup_connect_queue;
+
+        nvme_start_keep_alive(&ctrl->ctrl);
+
+        return 0;
+
+out_cleanup_connect_queue:
+        if (new)
+                blk_cleanup_queue(ctrl->ctrl.admin_connect_q);
+out_cleanup_queue:
+        if (new)
+                blk_cleanup_queue(ctrl->ctrl.admin_q);
+out_free_tagset:
+        if (new) {
+                /* disconnect and drain the queue before freeing the tagset */
+                nvme_rdma_stop_queue(&ctrl->queues[0]);
+                blk_mq_free_tag_set(&ctrl->admin_tag_set);
+        }
+out_put_dev:
+        if (new)
+                nvme_rdma_dev_put(ctrl->device);
+out_free_queue:
+        nvme_rdma_free_queue(&ctrl->queues[0]);
+        return error;
+}
+
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 {
         struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -725,28 +840,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
                         goto requeue;
         }
 
-        nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
+        nvme_rdma_destroy_admin_queue(ctrl, false);
 
-        ret = blk_mq_reinit_tagset(&ctrl->admin_tag_set);
-        if (ret)
-                goto requeue;
-
-        ret = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
-        if (ret)
-                goto requeue;
-
-        ret = nvmf_connect_admin_queue(&ctrl->ctrl);
-        if (ret)
-                goto requeue;
-
-        set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
-
-        ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
+        ret = nvme_rdma_configure_admin_queue(ctrl, false);
         if (ret)
                 goto requeue;
 
-        nvme_start_keep_alive(&ctrl->ctrl);
-
         if (ctrl->queue_count > 1) {
                 ret = nvme_rdma_init_io_queues(ctrl);
                 if (ret)
@@ -760,12 +859,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
         changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
         WARN_ON_ONCE(!changed);
         ctrl->ctrl.nr_reconnects = 0;
-
-        if (ctrl->queue_count > 1) {
-                nvme_queue_scan(&ctrl->ctrl);
-                nvme_queue_async_events(&ctrl->ctrl);
-        }
-
         dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
 
         return;
@@ -1546,114 +1639,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
         .timeout        = nvme_rdma_timeout,
 };
 
-static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, bool new)
-{
-        int error;
-
-        error = nvme_rdma_init_queue(ctrl, 0, NVME_AQ_DEPTH);
-        if (error)
-                return error;
-
-        ctrl->device = ctrl->queues[0].device;
-        ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
-                ctrl->device->dev->attrs.max_fast_reg_page_list_len);
-
-        if (new) {
-                /*
-                 * We need a reference on the device as long as the tag_set is alive,
-                 * as the MRs in the request structures need a valid ib_device.
-                 */
-                error = -EINVAL;
-                if (!nvme_rdma_dev_get(ctrl->device))
-                        goto out_free_queue;
-
-                memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
-                ctrl->admin_tag_set.ops = &nvme_rdma_admin_mq_ops;
-                ctrl->admin_tag_set.queue_depth = NVME_RDMA_AQ_BLKMQ_DEPTH;
-                ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
-                ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
-                ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_rdma_request) +
-                        SG_CHUNK_SIZE * sizeof(struct scatterlist);
-                ctrl->admin_tag_set.driver_data = ctrl;
-                ctrl->admin_tag_set.nr_hw_queues = 1;
-                ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
-
-                error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
-                if (error)
-                        goto out_put_dev;
-
-                ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-                if (IS_ERR(ctrl->ctrl.admin_q)) {
-                        error = PTR_ERR(ctrl->ctrl.admin_q);
-                        goto out_free_tagset;
-                }
-
-                ctrl->ctrl.admin_connect_q = blk_mq_init_queue(&ctrl->admin_tag_set);
-                if (IS_ERR(ctrl->ctrl.admin_connect_q)) {
-                        error = PTR_ERR(ctrl->ctrl.admin_connect_q);
-                        goto out_cleanup_queue;
-                }
-        }
-
-        error = nvmf_connect_admin_queue(&ctrl->ctrl);
-        if (error)
-                goto out_cleanup_connect_queue;
-
-        set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags);
-
-        blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);
-
-        error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
-        if (error) {
-                dev_err(ctrl->ctrl.device,
-                        "prop_get NVME_REG_CAP failed\n");
-                goto out_cleanup_connect_queue;
-        }
-
-        ctrl->ctrl.sqsize =
-                min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
-
-        error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
-        if (error)
-                goto out_cleanup_connect_queue;
-
-        ctrl->ctrl.max_hw_sectors =
-                (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
-
-        error = nvme_init_identify(&ctrl->ctrl);
-        if (error)
-                goto out_cleanup_connect_queue;
-
-        error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
-                &ctrl->async_event_sqe, sizeof(struct nvme_command),
-                DMA_TO_DEVICE);
-        if (error)
-                goto out_cleanup_connect_queue;
-
-        nvme_start_keep_alive(&ctrl->ctrl);
-
-        return 0;
-
-out_cleanup_connect_queue:
-        if (new)
-                blk_cleanup_queue(ctrl->ctrl.admin_connect_q);
-out_cleanup_queue:
-        if (new)
-                blk_cleanup_queue(ctrl->ctrl.admin_q);
-out_free_tagset:
-        if (new) {
-                /* disconnect and drain the queue before freeing the tagset */
-                nvme_rdma_stop_queue(&ctrl->queues[0]);
-                blk_mq_free_tag_set(&ctrl->admin_tag_set);
-        }
-out_put_dev:
-        if (new)
-                nvme_rdma_dev_put(ctrl->device);
-out_free_queue:
-        nvme_rdma_free_queue(&ctrl->queues[0]);
-        return error;
-}
-
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
         nvme_stop_keep_alive(&ctrl->ctrl);
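
For readers following the refactoring, the shape of the reused setup
path boils down to the following. This is a simplified userspace sketch
with hypothetical stub helpers, not the driver code; it only shows how
the "new" flag selects between first-time allocation and reconnect-time
re-initialization, and how the error unwind frees only what the current
invocation created:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stubs standing in for the queue/tag-set helpers */
static int init_queue(void)     { return 0; }
static int alloc_tag_set(void)  { return 0; }
static int reinit_tag_set(void) { return 0; }
static int connect_admin(void)  { return 0; }
static void free_tag_set(void)  { puts("free tag set"); }
static void free_queue(void)    { puts("free queue"); }

static int configure_admin_queue(bool new)
{
        int error;

        error = init_queue();
        if (error)
                return error;

        if (new)
                error = alloc_tag_set();        /* first instantiation only */
        else
                error = reinit_tag_set();       /* reconnect reuses the tag set */
        if (error)
                goto out_free_queue;

        error = connect_admin();
        if (error)
                goto out_free_tagset;

        return 0;

out_free_tagset:
        if (new)        /* unwind only what this invocation created */
                free_tag_set();
out_free_queue:
        free_queue();
        return error;
}

int main(void)
{
        printf("create: %d, reconnect: %d\n",
               configure_admin_queue(true), configure_admin_queue(false));
        return 0;
}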