From patchwork Mon Jan 20 08:19:31 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Weihang Li X-Patchwork-Id: 11341267 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id F355913A4 for ; Mon, 20 Jan 2020 08:23:37 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id DC991207E0 for ; Mon, 20 Jan 2020 08:23:37 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726148AbgATIXh (ORCPT ); Mon, 20 Jan 2020 03:23:37 -0500 Received: from szxga04-in.huawei.com ([45.249.212.190]:9670 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726125AbgATIXg (ORCPT ); Mon, 20 Jan 2020 03:23:36 -0500 Received: from DGGEMS410-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id E847AEFFC24881C63FA6; Mon, 20 Jan 2020 16:23:31 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by DGGEMS410-HUB.china.huawei.com (10.3.19.210) with Microsoft SMTP Server id 14.3.439.0; Mon, 20 Jan 2020 16:23:25 +0800 From: Weihang Li To: , CC: , , Subject: [PATCH for-next 1/7] RDMA/hns: Optimize qp destroy flow Date: Mon, 20 Jan 2020 16:19:31 +0800 Message-ID: <1579508377-55818-2-git-send-email-liweihang@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1579508377-55818-1-git-send-email-liweihang@huawei.com> References: <1579508377-55818-1-git-send-email-liweihang@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-CFilter-Loop: Reflected Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Wrap the duplicate code in the hip08 and hip06 qp destruction processes into hns_roce_qp_destroy() to simplify the qp destroy flow.
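With the shared teardown factored out, both engine-specific destroy callbacks end up with roughly the shape below (a simplified illustration only, not the literal hip06/hip08 code: "vX" stands for v1/v2, and the hardware-specific modify-to-RESET and CQE-clean steps under the CQ locks are elided):

        static int hns_roce_vX_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
        {
                struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
                struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

                /* engine-specific teardown (unchanged by this patch) goes here:
                 * modify the QP to RESET and clean the CQEs under the CQ locks.
                 */

                /* common software teardown, now in one place */
                hns_roce_qp_destroy(hr_dev, hr_qp, udata);

                return 0;
        }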
Signed-off-by: Xi Wang Signed-off-by: Weihang Li --- drivers/infiniband/hw/hns/hns_roce_device.h | 5 ++-- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 19 ++----------- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 39 +-------------------------- drivers/infiniband/hw/hns/hns_roce_qp.c | 41 +++++++++++++++++++++++++++++ 4 files changed, 46 insertions(+), 58 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a7c4ff9..1f361e6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -1248,9 +1248,8 @@ void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq); void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); -void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); -void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, - int cnt); +void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_udata *udata); __be32 send_ieth(const struct ib_send_wr *wr); int to_hr_qp_type(int qp_type); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index c6e6658..fe37e6f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -3623,26 +3623,11 @@ int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) if (send_cq && send_cq != recv_cq) __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL); } - hns_roce_unlock_cqs(send_cq, recv_cq); - hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_qp_free(hr_dev, hr_qp); - - /* RC QP, release QPN */ - if (hr_qp->ibqp.qp_type == IB_QPT_RC) - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); - - hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt); - - ib_umem_release(hr_qp->umem); - if (!udata) { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); + hns_roce_unlock_cqs(send_cq, recv_cq); - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); - } + hns_roce_qp_destroy(hr_dev, hr_qp, udata); - kfree(hr_qp); return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 12c4cd8..5dbff86 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5032,43 +5032,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, hns_roce_unlock_cqs(send_cq, recv_cq); spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); - hns_roce_qp_free(hr_dev, hr_qp); - - /* Not special_QP, free their QPN */ - if ((hr_qp->ibqp.qp_type == IB_QPT_RC) || - (hr_qp->ibqp.qp_type == IB_QPT_UC) || - (hr_qp->ibqp.qp_type == IB_QPT_UD)) - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); - - hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); - - if (udata) { - struct hns_roce_ucontext *context = - rdma_udata_to_drv_context( - udata, - struct hns_roce_ucontext, - ibucontext); - - if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1)) - hns_roce_db_unmap_user(context, &hr_qp->sdb); - - if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) - hns_roce_db_unmap_user(context, &hr_qp->rdb); - } else { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); - if (hr_qp->rq.wqe_cnt) - hns_roce_free_db(hr_dev, &hr_qp->rdb); - } - ib_umem_release(hr_qp->umem); - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hr_qp->rq.wqe_cnt) { - 
kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); - kfree(hr_qp->rq_inl_buf.wqe_list); - } - return ret; } @@ -5083,7 +5046,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n", hr_qp->qpn, ret); - kfree(hr_qp); + hns_roce_qp_destroy(hr_dev, hr_qp, udata); return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 7c8de1e..748a867 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -1035,6 +1035,47 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, return ret; } +void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_udata *udata) +{ + hns_roce_qp_free(hr_dev, hr_qp); + + /* Not special_QP, free their QPN */ + if (hr_qp->ibqp.qp_type != IB_QPT_GSI) + hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); + + hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); + + if (udata) { + struct hns_roce_ucontext *context = + rdma_udata_to_drv_context( + udata, + struct hns_roce_ucontext, + ibucontext); + + if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1)) + hns_roce_db_unmap_user(context, &hr_qp->sdb); + + if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) + hns_roce_db_unmap_user(context, &hr_qp->rdb); + } else { + kfree(hr_qp->sq.wrid); + kfree(hr_qp->rq.wrid); + hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); + if (hr_qp->rq.wqe_cnt) + hns_roce_free_db(hr_dev, &hr_qp->rdb); + } + ib_umem_release(hr_qp->umem); + + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hr_qp->rq.wqe_cnt) { + kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); + kfree(hr_qp->rq_inl_buf.wqe_list); + } + + kfree(hr_qp); +} + struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) From patchwork Mon Jan 20 08:19:32 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Weihang Li X-Patchwork-Id: 11341271 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id C04FC13A4 for ; Mon, 20 Jan 2020 08:23:38 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 9F5FE207E0 for ; Mon, 20 Jan 2020 08:23:38 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726075AbgATIXh (ORCPT ); Mon, 20 Jan 2020 03:23:37 -0500 Received: from szxga04-in.huawei.com ([45.249.212.190]:9668 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726587AbgATIXh (ORCPT ); Mon, 20 Jan 2020 03:23:37 -0500 Received: from DGGEMS410-HUB.china.huawei.com (unknown [172.30.72.58]) by Forcepoint Email with ESMTP id D692DF08DFF874D0DE47; Mon, 20 Jan 2020 16:23:31 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by DGGEMS410-HUB.china.huawei.com (10.3.19.210) with Microsoft SMTP Server id 14.3.439.0; Mon, 20 Jan 2020 16:23:25 +0800 From: Weihang Li To: , CC: , , Subject: [PATCH for-next 2/7] RDMA/hns: Optimize qp context create and destroy flow Date: Mon, 20 Jan 2020 16:19:32 +0800 Message-ID: <1579508377-55818-3-git-send-email-liweihang@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1579508377-55818-1-git-send-email-liweihang@huawei.com> References: <1579508377-55818-1-git-send-email-liweihang@huawei.com> MIME-Version: 
1.0 X-Originating-IP: [10.67.165.24] X-CFilter-Loop: Reflected Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Rename the qp context related functions and adjust the code location to distinguish between the qp context and the entire qp. Signed-off-by: Xi Wang Signed-off-by: Weihang Li --- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 4 - drivers/infiniband/hw/hns/hns_roce_qp.c | 156 ++++++++++++++--------------- 2 files changed, 76 insertions(+), 84 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 5dbff86..83ac906 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -5011,10 +5011,6 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, spin_lock_irqsave(&hr_dev->qp_list_lock, flags); hns_roce_lock_cqs(send_cq, recv_cq); - list_del(&hr_qp->node); - list_del(&hr_qp->sq_node); - list_del(&hr_qp->rq_node); - if (!udata) { if (recv_cq) __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 748a867..c8f6985 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -139,38 +139,63 @@ enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) } } -static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, - struct hns_roce_qp *hr_qp) +static void add_qp_to_list(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct ib_cq *send_cq, struct ib_cq *recv_cq) +{ + struct hns_roce_cq *hr_send_cq, *hr_recv_cq; + unsigned long flags; + + hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; + hr_recv_cq = recv_cq ?
to_hr_cq(recv_cq) : NULL; + + spin_lock_irqsave(&hr_dev->qp_list_lock, flags); + hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); + + list_add_tail(&hr_qp->node, &hr_dev->qp_list); + if (hr_send_cq) + list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); + if (hr_recv_cq) + list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); + + hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); + spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); +} + +static int hns_roce_qp_store(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr) { struct xarray *xa = &hr_dev->qp_table_xa; int ret; - if (!qpn) + if (!hr_qp->qpn) return -EINVAL; - hr_qp->qpn = qpn; - atomic_set(&hr_qp->refcount, 1); - init_completion(&hr_qp->free); - - ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1), - hr_qp, GFP_KERNEL)); + ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); if (ret) dev_err(hr_dev->dev, "QPC xa_store failed\n"); + else + /* add qp to the roce device's qp list for softwc */ + add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, + init_attr->recv_cq); return ret; } -static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, - struct hns_roce_qp *hr_qp) +static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; struct device *dev = hr_dev->dev; int ret; - if (!qpn) + if (!hr_qp->qpn) return -EINVAL; - hr_qp->qpn = qpn; + /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ + if (hr_qp->ibqp.qp_type == IB_QPT_GSI && + hr_dev->hw_rev == HNS_ROCE_HW_VER1) + return 0; /* Alloc memory for QPC */ ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); @@ -206,17 +231,8 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, } } - ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp); - if (ret) - goto err_put_sccc; - return 0; -err_put_sccc: - if (hr_dev->caps.sccc_entry_sz) - hns_roce_table_put(hr_dev, &qp_table->sccc_table, - hr_qp->qpn); - err_put_trrl: if (hr_dev->caps.trrl_entry_sz) hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); @@ -236,25 +252,27 @@ void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) struct xarray *xa = &hr_dev->qp_table_xa; unsigned long flags; + list_del(&hr_qp->node); + list_del(&hr_qp->sq_node); + list_del(&hr_qp->rq_node); + xa_lock_irqsave(xa, flags); __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1)); xa_unlock_irqrestore(xa, flags); } -void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) +static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - if (atomic_dec_and_test(&hr_qp->refcount)) - complete(&hr_qp->free); - wait_for_completion(&hr_qp->free); + /* In v1 engine, GSI QP context is saved in the RoCE hw's register */ + if (hr_qp->ibqp.qp_type == IB_QPT_GSI && + hr_dev->hw_rev == HNS_ROCE_HW_VER1) + return; - if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) { - if (hr_dev->caps.trrl_entry_sz) - hns_roce_table_put(hr_dev, &qp_table->trrl_table, - hr_qp->qpn); - hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); - } + if (hr_dev->caps.trrl_entry_sz) + hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); + hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); } void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, @@ -677,29 +695,6 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) 
kfree(hr_qp->rq_inl_buf.wqe_list); } -static void add_qp_to_list(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - struct ib_cq *send_cq, struct ib_cq *recv_cq) -{ - struct hns_roce_cq *hr_send_cq, *hr_recv_cq; - unsigned long flags; - - hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL; - hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL; - - spin_lock_irqsave(&hr_dev->qp_list_lock, flags); - hns_roce_lock_cqs(hr_send_cq, hr_recv_cq); - - list_add_tail(&hr_qp->node, &hr_dev->qp_list); - if (hr_send_cq) - list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list); - if (hr_recv_cq) - list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list); - - hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq); - spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); -} - static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, @@ -923,6 +918,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, } } + hr_qp->qpn = qpn; + hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions, hr_qp->region_cnt); hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift, @@ -934,20 +931,16 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, goto err_mtr; } - if (init_attr->qp_type == IB_QPT_GSI && - hr_dev->hw_rev == HNS_ROCE_HW_VER1) { - /* In v1 engine, GSI QP context in RoCE engine's register */ - ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp); - if (ret) { - dev_err(dev, "hns_roce_qp_alloc failed!\n"); - goto err_qpn; - } - } else { - ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp); - if (ret) { - dev_err(dev, "hns_roce_qp_alloc failed!\n"); - goto err_qpn; - } + ret = alloc_qpc(hr_dev, hr_qp); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "alloc qpc failed!\n"); + goto err_qpn; + } + + ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "add qp failed!\n"); + goto err_qpc; } if (sqpn) @@ -959,29 +952,28 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); if (ret) - goto err_qp; + goto err_store; } if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); if (ret) - goto err_qp; + goto err_store; } hr_qp->event = hns_roce_ib_qp_event; - - add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, init_attr->recv_cq); + atomic_set(&hr_qp->refcount, 1); + init_completion(&hr_qp->free); hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); return 0; -err_qp: - if (init_attr->qp_type == IB_QPT_GSI && - hr_dev->hw_rev == HNS_ROCE_HW_VER1) - hns_roce_qp_remove(hr_dev, hr_qp); - else - hns_roce_qp_free(hr_dev, hr_qp); +err_store: + hns_roce_qp_remove(hr_dev, hr_qp); + +err_qpc: + free_qpc(hr_dev, hr_qp); err_qpn: if (!sqpn) @@ -1038,7 +1030,11 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_udata *udata) { - hns_roce_qp_free(hr_dev, hr_qp); + if (atomic_dec_and_test(&hr_qp->refcount)) + complete(&hr_qp->free); + wait_for_completion(&hr_qp->free); + + free_qpc(hr_dev, hr_qp); /* Not special_QP, free their QPN */ if (hr_qp->ibqp.qp_type != IB_QPT_GSI) From patchwork Mon Jan 20 08:19:33 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Weihang Li X-Patchwork-Id: 11341281 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.kernel.org 
(pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id E3B1C92A for ; Mon, 20 Jan 2020 08:23:40 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id C2E0F207E0 for ; Mon, 20 Jan 2020 08:23:40 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726619AbgATIXj (ORCPT ); Mon, 20 Jan 2020 03:23:39 -0500 Received: from szxga05-in.huawei.com ([45.249.212.191]:9207 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726635AbgATIXi (ORCPT ); Mon, 20 Jan 2020 03:23:38 -0500 Received: from DGGEMS410-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id E7E89FB1CBF06183969E; Mon, 20 Jan 2020 16:23:36 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by DGGEMS410-HUB.china.huawei.com (10.3.19.210) with Microsoft SMTP Server id 14.3.439.0; Mon, 20 Jan 2020 16:23:26 +0800 From: Weihang Li To: , CC: , , Subject: [PATCH for-next 3/7] RDMA/hns: Optimize qp number assign flow Date: Mon, 20 Jan 2020 16:19:33 +0800 Message-ID: <1579508377-55818-4-git-send-email-liweihang@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1579508377-55818-1-git-send-email-liweihang@huawei.com> References: <1579508377-55818-1-git-send-email-liweihang@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-CFilter-Loop: Reflected Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Encapsulate the code associated with the qp number assignment into alloc_qpn() and free_qpn(). Signed-off-by: Xi Wang Signed-off-by: Weihang Li --- drivers/infiniband/hw/hns/hns_roce_qp.c | 91 ++++++++++++++++----------------- 1 file changed, 44 insertions(+), 47 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index c8f6985..3bd5809 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -108,15 +108,34 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, } } -static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt, - int align, unsigned long *base) +static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { - struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; + unsigned long num = 0; + int ret; + + if (hr_qp->ibqp.qp_type == IB_QPT_GSI) { + /* when hw version is v1, the sqpn is allocated */ + if (hr_dev->caps.max_sq_sg <= HNS_ROCE_MAX_SGE_NUM) + num = HNS_ROCE_MAX_PORTS + + hr_dev->iboe.phy_port[hr_qp->port]; + else + num = 1; - return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, - base) ? 
- -ENOMEM : - 0; + hr_qp->doorbell_qpn = 1; + } else { + ret = hns_roce_bitmap_alloc_range(&hr_dev->qp_table.bitmap, + 1, 1, &num); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "bitmap map alloc error\n"); + return -ENOMEM; + } + + hr_qp->doorbell_qpn = (u32)num; + } + + hr_qp->qpn = num; + + return 0; } enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state) @@ -275,15 +294,17 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn); } -void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn, - int cnt) +static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; - if (base_qpn < hr_dev->caps.reserved_qps) + if (hr_qp->ibqp.qp_type == IB_QPT_GSI) + return; + + if (hr_qp->qpn < hr_dev->caps.reserved_qps) return; - hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR); + hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR); } static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, @@ -698,7 +719,7 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, - struct ib_udata *udata, unsigned long sqpn, + struct ib_udata *udata, struct hns_roce_qp *hr_qp) { dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL }; @@ -708,7 +729,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( udata, struct hns_roce_ucontext, ibucontext); struct hns_roce_buf_region *r; - unsigned long qpn = 0; u32 page_shift; int buf_count; int ret; @@ -907,19 +927,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, } } - if (sqpn) { - qpn = sqpn; - } else { - /* Get QPN */ - ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn); - if (ret) { - dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n"); - goto err_wrid; - } - } - - hr_qp->qpn = qpn; - hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions, hr_qp->region_cnt); hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift, @@ -928,6 +935,12 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp->regions, hr_qp->region_cnt); if (ret) { dev_err(dev, "mtr attach error for create qp\n"); + goto err_wrid; + } + + ret = alloc_qpn(hr_dev, hr_qp); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "alloc qpn error\n"); goto err_mtr; } @@ -943,11 +956,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, goto err_qpc; } - if (sqpn) - hr_qp->doorbell_qpn = 1; - else - hr_qp->doorbell_qpn = (u32)hr_qp->qpn; - if (udata) { ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); @@ -961,6 +969,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, goto err_store; } + hr_qp->ibqp.qp_num = hr_qp->qpn; hr_qp->event = hns_roce_ib_qp_event; atomic_set(&hr_qp->refcount, 1); init_completion(&hr_qp->free); @@ -976,8 +985,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, free_qpc(hr_dev, hr_qp); err_qpn: - if (!sqpn) - hns_roce_release_range_qp(hr_dev, qpn, 1); + free_qpn(hr_dev, hr_qp); err_mtr: hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); @@ -1036,9 +1044,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, free_qpc(hr_dev, hr_qp); - /* Not special_QP, free their QPN */ - if (hr_qp->ibqp.qp_type != 
IB_QPT_GSI) - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); + free_qpn(hr_dev, hr_qp); hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); @@ -1087,7 +1093,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, if (!hr_qp) return ERR_PTR(-ENOMEM); - ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0, + ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); if (ret) { ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n", @@ -1096,8 +1102,6 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, return ERR_PTR(ret); } - hr_qp->ibqp.qp_num = hr_qp->qpn; - break; } case IB_QPT_GSI: { @@ -1114,15 +1118,8 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd, hr_qp->port = init_attr->port_num - 1; hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; - /* when hw version is v1, the sqpn is allocated */ - if (hr_dev->caps.max_sq_sg <= 2) - hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS + - hr_dev->iboe.phy_port[hr_qp->port]; - else - hr_qp->ibqp.qp_num = 1; - ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, - hr_qp->ibqp.qp_num, hr_qp); + hr_qp); if (ret) { ibdev_err(ibdev, "Create GSI QP failed!\n"); kfree(hr_qp); From patchwork Mon Jan 20 08:19:34 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Weihang Li X-Patchwork-Id: 11341291 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 4993F188B for ; Mon, 20 Jan 2020 08:23:42 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 295BC207FF for ; Mon, 20 Jan 2020 08:23:42 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726650AbgATIXk (ORCPT ); Mon, 20 Jan 2020 03:23:40 -0500 Received: from szxga05-in.huawei.com ([45.249.212.191]:9209 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726642AbgATIXk (ORCPT ); Mon, 20 Jan 2020 03:23:40 -0500 Received: from DGGEMS410-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 02FA9C40D78002B2886E; Mon, 20 Jan 2020 16:23:37 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by DGGEMS410-HUB.china.huawei.com (10.3.19.210) with Microsoft SMTP Server id 14.3.439.0; Mon, 20 Jan 2020 16:23:26 +0800 From: Weihang Li To: , CC: , , Subject: [PATCH for-next 4/7] RDMA/hns: Optimize qp buffer allocation flow Date: Mon, 20 Jan 2020 16:19:34 +0800 Message-ID: <1579508377-55818-5-git-send-email-liweihang@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1579508377-55818-1-git-send-email-liweihang@huawei.com> References: <1579508377-55818-1-git-send-email-liweihang@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-CFilter-Loop: Reflected Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Encapsulate qp buffer allocation related code into 3 functions: alloc_qp_buf(), map_qp_buf() and free_qp_buf(). 
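In outline, the buffer handling ends up split as below (a condensed sketch derived from the diff that follows, with the new helpers' arguments kept but error handling and details trimmed; not the literal driver code):

        /* create path */
        alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, addr);
                /* optional RQ inline buffer via alloc_rq_inline_buf()      */
                /* user QP:   pin the WQE buffer with ib_umem_get()         */
                /* kernel QP: allocate hr_buf with hns_roce_buf_alloc()     */
                /* then: map_qp_buf(hr_dev, hr_qp, page_shift, is_user)     */
                /*   split_wqe_buf_region(), get umem/kmem bufs,            */
                /*   hns_roce_mtr_init() + hns_roce_mtr_attach()            */

        /* destroy path */
        free_qp_buf(hr_dev, hr_qp);
                /* hns_roce_mtr_cleanup(), ib_umem_release() or             */
                /* hns_roce_buf_free(), free_rq_inline_buf()                */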
Signed-off-by: Xi Wang Signed-off-by: Weihang Li --- drivers/infiniband/hw/hns/hns_roce_device.h | 1 - drivers/infiniband/hw/hns/hns_roce_qp.c | 268 +++++++++++++++------------- 2 files changed, 147 insertions(+), 122 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 1f361e6..9ddeb2b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -660,7 +660,6 @@ struct hns_roce_qp { /* this define must less than HNS_ROCE_MAX_BT_REGION */ #define HNS_ROCE_WQE_REGION_MAX 3 struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX]; - int region_cnt; int wqe_bt_pg_shift; u32 buff_size; diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 3bd5809..5184cb4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -716,23 +716,150 @@ static void free_rq_inline_buf(struct hns_roce_qp *hr_qp) kfree(hr_qp->rq_inl_buf.wqe_list); } +static int map_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + u32 page_shift, bool is_user) +{ + dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL }; + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_buf_region *r; + int region_count; + int buf_count; + int ret; + int i; + + region_count = split_wqe_buf_region(hr_dev, hr_qp, hr_qp->regions, + ARRAY_SIZE(hr_qp->regions), page_shift); + + /* alloc a tmp list for storing wqe buf address */ + ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, region_count); + if (ret) { + ibdev_err(ibdev, "alloc buf_list error for create qp\n"); + return ret; + } + + for (i = 0; i < region_count; i++) { + r = &hr_qp->regions[i]; + if (is_user) + buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i], + r->count, r->offset, hr_qp->umem, + page_shift); + else + buf_count = hns_roce_get_kmem_bufs(hr_dev, buf_list[i], + r->count, r->offset, &hr_qp->hr_buf); + + if (buf_count != r->count) { + ibdev_err(ibdev, "get %s qp buf err,expect %d,ret %d.\n", + is_user ? 
"user" : "kernel", + r->count, buf_count); + ret = -ENOBUFS; + goto done; + } + } + + hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions, + region_count); + hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift, + page_shift); + ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, hr_qp->regions, + region_count); + if (ret) + ibdev_err(ibdev, "mtr attatch error for create qp\n"); + + goto done; + + hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); +done: + hns_roce_free_buf_list(buf_list, region_count); + + return ret; +} + +static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, unsigned long addr) +{ + u32 page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; + struct ib_device *ibdev = &hr_dev->ib_dev; + bool is_rq_buf_inline; + int ret; + + is_rq_buf_inline = (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hns_roce_qp_has_rq(init_attr); + if (is_rq_buf_inline) { + ret = alloc_rq_inline_buf(hr_qp, init_attr); + if (ret) { + ibdev_err(ibdev, "alloc recv inline buffer error\n"); + return ret; + } + } + + if (udata) { + hr_qp->umem = ib_umem_get(udata, addr, hr_qp->buff_size, 0); + if (IS_ERR(hr_qp->umem)) { + ibdev_err(ibdev, "get umem error for qp buf\n"); + ret = PTR_ERR(hr_qp->umem); + goto err_inline; + } + } else { + ret = hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, + (1 << page_shift) * 2, + &hr_qp->hr_buf, page_shift); + if (ret) { + ibdev_err(ibdev, "alloc roce buf error\n"); + goto err_inline; + } + } + + ret = map_qp_buf(hr_dev, hr_qp, page_shift, udata); + if (ret) { + ibdev_err(ibdev, "map roce buf error\n"); + goto err_alloc; + } + + return 0; + +err_inline: + if (is_rq_buf_inline) + free_rq_inline_buf(hr_qp); + +err_alloc: + if (udata) { + ib_umem_release(hr_qp->umem); + hr_qp->umem = NULL; + } else { + hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); + } + + return ret; +} + +static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) +{ + hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); + if (hr_qp->umem) { + ib_umem_release(hr_qp->umem); + hr_qp->umem = NULL; + } + + if (hr_qp->hr_buf.nbufs > 0) + hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); + + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && + hr_qp->rq.wqe_cnt) + free_rq_inline_buf(hr_qp); +} static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct hns_roce_qp *hr_qp) { - dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL }; struct device *dev = hr_dev->dev; struct hns_roce_ib_create_qp ucmd; struct hns_roce_ib_create_qp_resp resp = {}; struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( udata, struct hns_roce_ucontext, ibucontext); - struct hns_roce_buf_region *r; - u32 page_shift; - int buf_count; int ret; - int i; mutex_init(&hr_qp->mutex); spin_lock_init(&hr_qp->sq.lock); @@ -754,59 +881,18 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, goto err_out; } - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hns_roce_qp_has_rq(init_attr)) { - ret = alloc_rq_inline_buf(hr_qp, init_attr); - if (ret) { - dev_err(dev, "allocate receive inline buffer failed\n"); - goto err_out; - } - } - - page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz; if (udata) { if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { dev_err(dev, "ib_copy_from_udata error for create qp\n"); ret = -EFAULT; - goto err_alloc_rq_inline_buf; 
+ goto err_out; } ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, &ucmd); if (ret) { dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); - goto err_alloc_rq_inline_buf; - } - - hr_qp->umem = ib_umem_get(udata, ucmd.buf_addr, - hr_qp->buff_size, 0); - if (IS_ERR(hr_qp->umem)) { - dev_err(dev, "ib_umem_get error for create qp\n"); - ret = PTR_ERR(hr_qp->umem); - goto err_alloc_rq_inline_buf; - } - hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, - hr_qp->regions, ARRAY_SIZE(hr_qp->regions), - page_shift); - ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, - hr_qp->region_cnt); - if (ret) { - dev_err(dev, "alloc buf_list error for create qp\n"); - goto err_alloc_list; - } - - for (i = 0; i < hr_qp->region_cnt; i++) { - r = &hr_qp->regions[i]; - buf_count = hns_roce_get_umem_bufs(hr_dev, - buf_list[i], r->count, r->offset, - hr_qp->umem, page_shift); - if (buf_count != r->count) { - dev_err(dev, - "get umem buf err, expect %d,ret %d.\n", - r->count, buf_count); - ret = -ENOBUFS; - goto err_get_bufs; - } + goto err_out; } if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && @@ -817,7 +903,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, &hr_qp->sdb); if (ret) { dev_err(dev, "sq record doorbell map failed!\n"); - goto err_get_bufs; + goto err_out; } /* indicate kernel supports sq record db */ @@ -844,13 +930,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { dev_err(dev, "init_attr->create_flags error!\n"); ret = -EINVAL; - goto err_alloc_rq_inline_buf; + goto err_out; } if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { dev_err(dev, "init_attr->create_flags error!\n"); ret = -EINVAL; - goto err_alloc_rq_inline_buf; + goto err_out; } /* Set SQ size */ @@ -858,7 +944,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp); if (ret) { dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); - goto err_alloc_rq_inline_buf; + goto err_out; } /* QP doorbell register address */ @@ -872,49 +958,17 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); if (ret) { dev_err(dev, "rq record doorbell alloc failed!\n"); - goto err_alloc_rq_inline_buf; + goto err_out; } *hr_qp->rdb.db_record = 0; hr_qp->rdb_en = 1; } - /* Allocate QP buf */ - if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size, - (1 << page_shift) * 2, - &hr_qp->hr_buf, page_shift)) { - dev_err(dev, "hns_roce_buf_alloc error!\n"); - ret = -ENOMEM; - goto err_db; - } - hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp, - hr_qp->regions, ARRAY_SIZE(hr_qp->regions), - page_shift); - ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, - hr_qp->region_cnt); - if (ret) { - dev_err(dev, "alloc buf_list error for create qp!\n"); - goto err_alloc_list; - } - - for (i = 0; i < hr_qp->region_cnt; i++) { - r = &hr_qp->regions[i]; - buf_count = hns_roce_get_kmem_bufs(hr_dev, - buf_list[i], r->count, r->offset, - &hr_qp->hr_buf); - if (buf_count != r->count) { - dev_err(dev, - "get kmem buf err, expect %d,ret %d.\n", - r->count, buf_count); - ret = -ENOBUFS; - goto err_get_bufs; - } - } - hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) { ret = -ENOMEM; - goto err_get_bufs; + goto err_db; } if (hr_qp->rq.wqe_cnt) { @@ -927,21 +981,16 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, } } - hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, 
hr_qp->regions, - hr_qp->region_cnt); - hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift, - page_shift); - ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, - hr_qp->regions, hr_qp->region_cnt); + ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); if (ret) { - dev_err(dev, "mtr attach error for create qp\n"); - goto err_wrid; + ibdev_err(&hr_dev->ib_dev, "alloc qp buf error\n"); + goto err_db; } ret = alloc_qpn(hr_dev, hr_qp); if (ret) { ibdev_err(&hr_dev->ib_dev, "alloc qpn error\n"); - goto err_mtr; + goto err_buf; } ret = alloc_qpc(hr_dev, hr_qp); @@ -974,8 +1023,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, atomic_set(&hr_qp->refcount, 1); init_completion(&hr_qp->free); - hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); - return 0; err_store: @@ -987,8 +1034,8 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, err_qpn: free_qpn(hr_dev, hr_qp); -err_mtr: - hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); +err_buf: + free_qp_buf(hr_dev, hr_qp); err_wrid: if (udata) { @@ -1013,24 +1060,11 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, if (!udata) kfree(hr_qp->sq.wrid); -err_get_bufs: - hns_roce_free_buf_list(buf_list, hr_qp->region_cnt); - -err_alloc_list: - if (!hr_qp->umem) - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); - ib_umem_release(hr_qp->umem); - err_db: if (!udata && hns_roce_qp_has_rq(init_attr) && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) hns_roce_free_db(hr_dev, &hr_qp->rdb); -err_alloc_rq_inline_buf: - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hns_roce_qp_has_rq(init_attr)) - free_rq_inline_buf(hr_qp); - err_out: return ret; } @@ -1046,7 +1080,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, free_qpn(hr_dev, hr_qp); - hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr); + free_qp_buf(hr_dev, hr_qp); if (udata) { struct hns_roce_ucontext *context = @@ -1063,17 +1097,9 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } else { kfree(hr_qp->sq.wrid); kfree(hr_qp->rq.wrid); - hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf); if (hr_qp->rq.wqe_cnt) hns_roce_free_db(hr_dev, &hr_qp->rdb); } - ib_umem_release(hr_qp->umem); - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) && - hr_qp->rq.wqe_cnt) { - kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list); - kfree(hr_qp->rq_inl_buf.wqe_list); - } kfree(hr_qp); } From patchwork Mon Jan 20 08:19:35 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Weihang Li X-Patchwork-Id: 11341287 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id A7C5292A for ; Mon, 20 Jan 2020 08:23:41 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 90184207E0 for ; Mon, 20 Jan 2020 08:23:41 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726635AbgATIXk (ORCPT ); Mon, 20 Jan 2020 03:23:40 -0500 Received: from szxga05-in.huawei.com ([45.249.212.191]:9206 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726039AbgATIXj (ORCPT ); Mon, 20 Jan 2020 03:23:39 -0500 Received: from DGGEMS410-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id E29D19F8345AAC2BD45E; Mon, 20 Jan 
2020 16:23:36 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by DGGEMS410-HUB.china.huawei.com (10.3.19.210) with Microsoft SMTP Server id 14.3.439.0; Mon, 20 Jan 2020 16:23:26 +0800 From: Weihang Li To: , CC: , , Subject: [PATCH for-next 5/7] RDMA/hns: Optimize qp param setup flow Date: Mon, 20 Jan 2020 16:19:35 +0800 Message-ID: <1579508377-55818-6-git-send-email-liweihang@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1579508377-55818-1-git-send-email-liweihang@huawei.com> References: <1579508377-55818-1-git-send-email-liweihang@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-CFilter-Loop: Reflected Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Encapsulate the qp param setup related code into set_qp_param(). Signed-off-by: Xi Wang Signed-off-by: Weihang Li --- drivers/infiniband/hw/hns/hns_roce_qp.c | 114 +++++++++++++++++--------------- 1 file changed, 61 insertions(+), 53 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 5184cb4..baa3f48 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -307,7 +307,7 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) hns_roce_bitmap_free_range(&qp_table->bitmap, hr_qp->qpn, 1, BITMAP_RR); } -static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev, +static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, bool is_user, int has_rq, struct hns_roce_qp *hr_qp) { @@ -386,10 +386,9 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, return 0; } -static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, - struct hns_roce_qp *hr_qp, - struct hns_roce_ib_create_qp *ucmd) +static int set_user_sq_size(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp, + struct hns_roce_ib_create_qp *ucmd) { u32 ex_sge_num; u32 page_size; @@ -584,9 +583,8 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev, return 0; } -static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, - struct ib_qp_cap *cap, - struct hns_roce_qp *hr_qp) +static int set_kernel_sq_size(struct hns_roce_dev *hr_dev, + struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp) { struct device *dev = hr_dev->dev; u32 page_size; @@ -848,6 +846,58 @@ static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) hr_qp->rq.wqe_cnt) free_rq_inline_buf(hr_qp); } + +static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, + struct hns_roce_ib_create_qp *ucmd) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + int ret; + + hr_qp->ibqp.qp_type = init_attr->qp_type; + + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) + hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; + else + hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; + + ret = set_rq_size(hr_dev, &init_attr->cap, udata, + hns_roce_qp_has_rq(init_attr), hr_qp); + if (ret) { + ibdev_err(ibdev, "set user rq size failed\n"); + return ret; + } + + if (udata) { + if (ib_copy_from_udata(ucmd, udata, sizeof(*ucmd))) { + ibdev_err(ibdev, "copy create qp ucmd error\n"); + return -EFAULT; + } + + ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd); + if (ret) + ibdev_err(ibdev, "set user sq size error\n"); + } else { + if (init_attr->create_flags & + IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { + 
ibdev_err(ibdev, "invalid BLOCK_MULTICAST_LOOPBACK!\n"); + return -EINVAL; + } + + if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { + ibdev_err(ibdev, "invalid IPOIB_UD_LSO!\n"); + return -EINVAL; + } + + ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp); + if (ret) + ibdev_err(ibdev, "set kernel sq size error\n"); + } + + return ret; +} + static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, @@ -867,34 +917,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp->state = IB_QPS_RESET; - hr_qp->ibqp.qp_type = init_attr->qp_type; - - if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) - hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR; - else - hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR; - - ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata, - hns_roce_qp_has_rq(init_attr), hr_qp); + ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); if (ret) { - dev_err(dev, "hns_roce_set_rq_size failed\n"); - goto err_out; + ibdev_err(&hr_dev->ib_dev, "set qp param error!\n"); + return ret; } if (udata) { - if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) { - dev_err(dev, "ib_copy_from_udata error for create qp\n"); - ret = -EFAULT; - goto err_out; - } - - ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, - &ucmd); - if (ret) { - dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n"); - goto err_out; - } - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && (udata->inlen >= sizeof(ucmd)) && (udata->outlen >= sizeof(resp)) && @@ -926,27 +955,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp->rdb_en = 1; } } else { - if (init_attr->create_flags & - IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { - dev_err(dev, "init_attr->create_flags error!\n"); - ret = -EINVAL; - goto err_out; - } - - if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) { - dev_err(dev, "init_attr->create_flags error!\n"); - ret = -EINVAL; - goto err_out; - } - - /* Set SQ size */ - ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap, - hr_qp); - if (ret) { - dev_err(dev, "hns_roce_set_kernel_sq_size error!\n"); - goto err_out; - } - /* QP doorbell register address */ hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset + DB_REG_OFFSET * hr_dev->priv_uar.index; From patchwork Mon Jan 20 08:19:36 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Weihang Li X-Patchwork-Id: 11341283 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 1061A13A4 for ; Mon, 20 Jan 2020 08:23:41 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id ED82A2073D for ; Mon, 20 Jan 2020 08:23:40 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726587AbgATIXj (ORCPT ); Mon, 20 Jan 2020 03:23:39 -0500 Received: from szxga05-in.huawei.com ([45.249.212.191]:9208 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726650AbgATIXj (ORCPT ); Mon, 20 Jan 2020 03:23:39 -0500 Received: from DGGEMS410-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id ECA8839547BECDF8E92E; Mon, 20 Jan 2020 16:23:36 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by DGGEMS410-HUB.china.huawei.com (10.3.19.210) with Microsoft 
SMTP Server id 14.3.439.0; Mon, 20 Jan 2020 16:23:27 +0800 From: Weihang Li To: , CC: , , Subject: [PATCH for-next 6/7] RDMA/hns: Optimize kernel qp wrid allocation flow Date: Mon, 20 Jan 2020 16:19:36 +0800 Message-ID: <1579508377-55818-7-git-send-email-liweihang@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1579508377-55818-1-git-send-email-liweihang@huawei.com> References: <1579508377-55818-1-git-send-email-liweihang@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-CFilter-Loop: Reflected Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Encapsulate the kernel qp wrid allocation related code into 2 functions: alloc_kernel_wrid() and free_kernel_wrid(). Signed-off-by: Xi Wang Signed-off-by: Weihang Li --- drivers/infiniband/hw/hns/hns_roce_qp.c | 73 ++++++++++++++++++++------------- 1 file changed, 45 insertions(+), 28 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index baa3f48..c51d4d4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -847,6 +847,45 @@ static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) free_rq_inline_buf(hr_qp); } +static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) +{ + struct ib_device *ibdev = &hr_dev->ib_dev; + u64 *sq_wrid = NULL; + u64 *rq_wrid = NULL; + int ret; + + sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(sq_wrid)) { + ibdev_err(ibdev, "sq wrid alloc failed!\n"); + return -ENOMEM; + } + + if (hr_qp->rq.wqe_cnt) { + rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(rq_wrid)) { + ibdev_err(ibdev, "rq wrid alloc failed!\n"); + ret = -ENOMEM; + goto err_sq; + } + } + + hr_qp->sq.wrid = sq_wrid; + hr_qp->rq.wrid = rq_wrid; + return 0; +err_sq: + kfree(sq_wrid); + + return ret; +} + +static void free_kernel_wrid(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) +{ + kfree(hr_qp->rq.wrid); + kfree(hr_qp->sq.wrid); +} + static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, @@ -972,21 +1011,11 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, hr_qp->rdb_en = 1; } - hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), - GFP_KERNEL); - if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) { - ret = -ENOMEM; + ret = alloc_kernel_wrid(hr_dev, hr_qp); + if (ret) { + ibdev_err(&hr_dev->ib_dev, "alloc wrid error!\n"); goto err_db; } - - if (hr_qp->rq.wqe_cnt) { - hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), - GFP_KERNEL); - if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) { - ret = -ENOMEM; - goto err_sq_wrid; - } - } } ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); @@ -1035,25 +1064,20 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, err_store: hns_roce_qp_remove(hr_dev, hr_qp); - err_qpc: free_qpc(hr_dev, hr_qp); - err_qpn: free_qpn(hr_dev, hr_qp); - err_buf: free_qp_buf(hr_dev, hr_qp); - err_wrid: + free_kernel_wrid(hr_dev, hr_qp); + if (udata) { if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && (udata->outlen >= sizeof(resp)) && hns_roce_qp_has_rq(init_attr)) hns_roce_db_unmap_user(uctx, &hr_qp->rdb); - } else { - if (hr_qp->rq.wqe_cnt) - kfree(hr_qp->rq.wrid); } err_sq_dbmap: @@ -1064,10 +1088,6 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, 
hns_roce_qp_has_sq(init_attr)) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); -err_sq_wrid: - if (!udata) - kfree(hr_qp->sq.wrid); - err_db: if (!udata && hns_roce_qp_has_rq(init_attr) && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) @@ -1085,10 +1105,9 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, wait_for_completion(&hr_qp->free); free_qpc(hr_dev, hr_qp); - free_qpn(hr_dev, hr_qp); - free_qp_buf(hr_dev, hr_qp); + free_kernel_wrid(hr_dev, hr_qp); if (udata) { struct hns_roce_ucontext *context = @@ -1103,8 +1122,6 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) hns_roce_db_unmap_user(context, &hr_qp->rdb); } else { - kfree(hr_qp->sq.wrid); - kfree(hr_qp->rq.wrid); if (hr_qp->rq.wqe_cnt) hns_roce_free_db(hr_dev, &hr_qp->rdb); } From patchwork Mon Jan 20 08:19:37 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Weihang Li X-Patchwork-Id: 11341279 X-Patchwork-Delegate: leon@leon.nu Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 3733A92A for ; Mon, 20 Jan 2020 08:23:40 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 16283207E0 for ; Mon, 20 Jan 2020 08:23:40 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1726761AbgATIXj (ORCPT ); Mon, 20 Jan 2020 03:23:39 -0500 Received: from szxga05-in.huawei.com ([45.249.212.191]:9210 "EHLO huawei.com" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1726728AbgATIXj (ORCPT ); Mon, 20 Jan 2020 03:23:39 -0500 Received: from DGGEMS410-HUB.china.huawei.com (unknown [172.30.72.60]) by Forcepoint Email with ESMTP id 239C5A10CEA0160CF9DB; Mon, 20 Jan 2020 16:23:37 +0800 (CST) Received: from localhost.localdomain (10.67.165.24) by DGGEMS410-HUB.china.huawei.com (10.3.19.210) with Microsoft SMTP Server id 14.3.439.0; Mon, 20 Jan 2020 16:23:27 +0800 From: Weihang Li To: , CC: , , Subject: [PATCH for-next 7/7] RDMA/hns: Optimize qp doorbell allocation flow Date: Mon, 20 Jan 2020 16:19:37 +0800 Message-ID: <1579508377-55818-8-git-send-email-liweihang@huawei.com> X-Mailer: git-send-email 2.8.1 In-Reply-To: <1579508377-55818-1-git-send-email-liweihang@huawei.com> References: <1579508377-55818-1-git-send-email-liweihang@huawei.com> MIME-Version: 1.0 X-Originating-IP: [10.67.165.24] X-CFilter-Loop: Reflected Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org From: Xi Wang Encapsulate the kernel qp doorbell allocation related code into 2 functions: alloc_qp_db() and free_qp_db(). 
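The doorbell cases end up concentrated in one helper pair; roughly (a condensed sketch derived from the diff that follows, conditions abbreviated to the new helper macros and error handling dropped; not the literal driver code):

        alloc_qp_db():
                user QP:
                        map hr_qp->sdb if user_qp_has_sdb()   /* SQ record doorbell */
                        map hr_qp->rdb if user_qp_has_rdb()   /* RQ record doorbell */
                kernel QP:
                        point sq/rq db_reg_l at reg_base plus the sdb/odb offsets
                        alloc hr_qp->rdb if kernel_qp_has_rdb()

        free_qp_db():
                user QP:   unmap rdb/sdb if rdb_en/sdb_en were set
                kernel QP: free rdb if rdb_en was set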
Signed-off-by: Xi Wang Signed-off-by: Weihang Li --- drivers/infiniband/hw/hns/hns_roce_qp.c | 212 +++++++++++++++++--------------- 1 file changed, 112 insertions(+), 100 deletions(-) diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index c51d4d4..4158d6e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -847,6 +847,96 @@ static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) free_rq_inline_buf(hr_qp); } +#define user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd) \ + ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && \ + udata->outlen >= sizeof(*resp) && \ + hns_roce_qp_has_sq(init_attr) && udata->inlen >= sizeof(*ucmd)) + +#define user_qp_has_rdb(hr_dev, init_attr, udata, resp) \ + ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && \ + udata->outlen >= sizeof(*resp) && \ + hns_roce_qp_has_rq(init_attr)) + +#define kernel_qp_has_rdb(hr_dev, init_attr) \ + ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && \ + hns_roce_qp_has_rq(init_attr)) + +static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata, + struct hns_roce_ib_create_qp *ucmd, + struct hns_roce_ib_create_qp_resp *resp) +{ + struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( + udata, struct hns_roce_ucontext, ibucontext); + struct ib_device *ibdev = &hr_dev->ib_dev; + int ret; + + if (udata) { + if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { + ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr, + &hr_qp->sdb); + if (ret) { + ibdev_err(ibdev, "sq doorbell map failed!\n"); + goto err_out; + } + hr_qp->sdb_en = 1; + resp->cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB; + } + + if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { + ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr, + &hr_qp->rdb); + if (ret) { + ibdev_err(ibdev, "rq doorbell map failed!\n"); + goto err_sdb; + } + hr_qp->rdb_en = 1; + resp->cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB; + } + } else { + /* QP doorbell register address */ + hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset + + DB_REG_OFFSET * hr_dev->priv_uar.index; + hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset + + DB_REG_OFFSET * hr_dev->priv_uar.index; + + if (kernel_qp_has_rdb(hr_dev, init_attr)) { + ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); + if (ret) { + ibdev_err(ibdev, "rq doorbell alloc failed!\n"); + goto err_out; + } + *hr_qp->rdb.db_record = 0; + hr_qp->rdb_en = 1; + } + } + + return 0; +err_sdb: + if (udata && hr_qp->sdb_en) + hns_roce_db_unmap_user(uctx, &hr_qp->sdb); +err_out: + return ret; +} + +static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, + struct ib_udata *udata) +{ + struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context( + udata, struct hns_roce_ucontext, ibucontext); + + if (udata) { + if (hr_qp->rdb_en) + hns_roce_db_unmap_user(uctx, &hr_qp->rdb); + if (hr_qp->sdb_en) + hns_roce_db_unmap_user(uctx, &hr_qp->sdb); + } else { + if (hr_qp->rdb_en) + hns_roce_free_db(hr_dev, &hr_qp->rdb); + } +} + static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { @@ -943,11 +1033,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, struct ib_udata *udata, struct hns_roce_qp *hr_qp) { - struct device *dev = hr_dev->dev; - struct hns_roce_ib_create_qp ucmd; struct hns_roce_ib_create_qp_resp resp = {}; - struct hns_roce_ucontext *uctx = 
rdma_udata_to_drv_context( - udata, struct hns_roce_ucontext, ibucontext); + struct ib_device *ibdev = &hr_dev->ib_dev; + struct hns_roce_ib_create_qp ucmd; int ret; mutex_init(&hr_qp->mutex); @@ -958,95 +1046,55 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd); if (ret) { - ibdev_err(&hr_dev->ib_dev, "set qp param error!\n"); + ibdev_err(ibdev, "set qp param error!\n"); return ret; } - if (udata) { - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && - (udata->inlen >= sizeof(ucmd)) && - (udata->outlen >= sizeof(resp)) && - hns_roce_qp_has_sq(init_attr)) { - ret = hns_roce_db_map_user(uctx, udata, ucmd.sdb_addr, - &hr_qp->sdb); - if (ret) { - dev_err(dev, "sq record doorbell map failed!\n"); - goto err_out; - } - - /* indicate kernel supports sq record db */ - resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB; - hr_qp->sdb_en = 1; - } - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(resp)) && - hns_roce_qp_has_rq(init_attr)) { - ret = hns_roce_db_map_user(uctx, udata, ucmd.db_addr, - &hr_qp->rdb); - if (ret) { - dev_err(dev, "rq record doorbell map failed!\n"); - goto err_sq_dbmap; - } - - /* indicate kernel supports rq record db */ - resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB; - hr_qp->rdb_en = 1; - } - } else { - /* QP doorbell register address */ - hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset + - DB_REG_OFFSET * hr_dev->priv_uar.index; - hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset + - DB_REG_OFFSET * hr_dev->priv_uar.index; - - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - hns_roce_qp_has_rq(init_attr)) { - ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0); - if (ret) { - dev_err(dev, "rq record doorbell alloc failed!\n"); - goto err_out; - } - *hr_qp->rdb.db_record = 0; - hr_qp->rdb_en = 1; - } - + if (!udata) { ret = alloc_kernel_wrid(hr_dev, hr_qp); if (ret) { - ibdev_err(&hr_dev->ib_dev, "alloc wrid error!\n"); - goto err_db; + ibdev_err(ibdev, "alloc wrid error!\n"); + return ret; } } + ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp); + if (ret) { + ibdev_err(ibdev, "alloc qp db error\n"); + goto err_wrid; + } + ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr); if (ret) { - ibdev_err(&hr_dev->ib_dev, "alloc qp buf error\n"); + ibdev_err(ibdev, "alloc qp buf error\n"); goto err_db; } ret = alloc_qpn(hr_dev, hr_qp); if (ret) { - ibdev_err(&hr_dev->ib_dev, "alloc qpn error\n"); + ibdev_err(ibdev, "alloc qpn error\n"); goto err_buf; } ret = alloc_qpc(hr_dev, hr_qp); if (ret) { - ibdev_err(&hr_dev->ib_dev, "alloc qpc failed!\n"); + ibdev_err(ibdev, "alloc qpc failed!\n"); goto err_qpn; } ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr); if (ret) { - ibdev_err(&hr_dev->ib_dev, "add qp failed!\n"); + ibdev_err(ibdev, "store qp failed!\n"); goto err_qpc; } if (udata) { ret = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp))); - if (ret) + if (ret) { + ibdev_err(ibdev, "copy qp resp failed!\n"); goto err_store; + } } if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { @@ -1070,30 +1118,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, free_qpn(hr_dev, hr_qp); err_buf: free_qp_buf(hr_dev, hr_qp); +err_db: + free_qp_db(hr_dev, hr_qp, udata); err_wrid: free_kernel_wrid(hr_dev, hr_qp); - - if (udata) { - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) && - (udata->outlen >= sizeof(resp)) && - hns_roce_qp_has_rq(init_attr)) - 
hns_roce_db_unmap_user(uctx, &hr_qp->rdb); - } - -err_sq_dbmap: - if (udata) - if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) && - (udata->inlen >= sizeof(ucmd)) && - (udata->outlen >= sizeof(resp)) && - hns_roce_qp_has_sq(init_attr)) - hns_roce_db_unmap_user(uctx, &hr_qp->sdb); - -err_db: - if (!udata && hns_roce_qp_has_rq(init_attr) && - (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) - hns_roce_free_db(hr_dev, &hr_qp->rdb); - -err_out: return ret; } @@ -1108,23 +1136,7 @@ void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, free_qpn(hr_dev, hr_qp); free_qp_buf(hr_dev, hr_qp); free_kernel_wrid(hr_dev, hr_qp); - - if (udata) { - struct hns_roce_ucontext *context = - rdma_udata_to_drv_context( - udata, - struct hns_roce_ucontext, - ibucontext); - - if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1)) - hns_roce_db_unmap_user(context, &hr_qp->sdb); - - if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1)) - hns_roce_db_unmap_user(context, &hr_qp->rdb); - } else { - if (hr_qp->rq.wqe_cnt) - hns_roce_free_db(hr_dev, &hr_qp->rdb); - } + free_qp_db(hr_dev, hr_qp, udata); kfree(hr_qp); }
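Taken together, the series leaves hns_roce_create_qp_common() and hns_roce_qp_destroy() with mirrored resource handling. A simplified sketch of the resulting ordering, derived from the diffs above (helper arguments and error unwinding omitted):

        hns_roce_create_qp_common():
                set_qp_param()              /* rq/sq sizes, ucmd copy           */
                alloc_kernel_wrid()         /* kernel QPs only                  */
                alloc_qp_db()               /* record doorbells or db_reg_l     */
                alloc_qp_buf()              /* WQE buffer + MTR                 */
                alloc_qpn()                 /* QPN bitmap, or fixed GSI number  */
                alloc_qpc()                 /* QPC/IRRL/TRRL/SCCC table entries */
                hns_roce_qp_store()         /* xarray + per-device/CQ lists     */
                then udata copy, flow control init, refcount/completion setup

        hns_roce_qp_destroy():
                wait for the refcount to drop, then
                free_qpc(), free_qpn(), free_qp_buf(), free_kernel_wrid(),
                free_qp_db(), kfree(hr_qp)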