From patchwork Tue Dec 18 13:21:54 2018
X-Patchwork-Submitter: Lijun Ou
X-Patchwork-Id: 10735631
X-Patchwork-Delegate: jgg@ziepe.ca
From: Lijun Ou
Subject: [PATCH V5 jgg-for-next 2/3] RDMA/hns: Add SCC context clr support for hip08
Date: Tue, 18 Dec 2018 21:21:54 +0800
Message-ID: <1545139315-86371-3-git-send-email-oulijun@huawei.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1545139315-86371-1-git-send-email-oulijun@huawei.com>
References: <1545139315-86371-1-git-send-email-oulijun@huawei.com>
X-Mailing-List: linux-rdma@vger.kernel.org

From: Yangyang Li

This patch adds SCC context clear support for DCQCN in the kernel-space driver.

Signed-off-by: Yangyang Li
---
v4->v5:
1. Delete redundant memset operations in hns_roce_v2_qp_flow_control_init(),
   because these fields are already cleared by hns_roce_cmq_setup_basic_desc().
2. Add a mutex to serialize concurrent operations in
   hns_roce_v2_qp_flow_control_init().
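For reviewers, the stand-alone sketch below condenses the control flow this patch
adds: reset the SCC clear-done flag, issue the clear for the target QPN, then poll
the done flag a bounded number of times before giving up. It is a mock, not driver
code: send_cmd(), scc_ctx_clear() and fake_clr_done are hypothetical names standing
in for hns_roce_cmq_send() and the firmware state, and the constants mirror
HNS_ROCE_CMQ_SCC_CLR_DONE_CNT and the 20 ms sleep used in the patch.

/* Stand-alone mock of the SCC context clear flow (illustration only). */
#include <stdio.h>
#include <unistd.h>

#define SCC_CLR_DONE_CNT	5	/* mirrors HNS_ROCE_CMQ_SCC_CLR_DONE_CNT */

enum opcode { OPC_RESET_SCCC, OPC_CLR_SCCC, OPC_QUERY_SCCC };

static int fake_clr_done;		/* pretend firmware state */

/* Stub for the command queue; stands in for hns_roce_cmq_send(). */
static int send_cmd(enum opcode op, unsigned int qpn, unsigned int *clr_done)
{
	(void)qpn;			/* unused in this mock */

	switch (op) {
	case OPC_RESET_SCCC:
		fake_clr_done = 0;
		return 0;
	case OPC_CLR_SCCC:
		fake_clr_done = 1;	/* pretend the clear completes at once */
		return 0;
	case OPC_QUERY_SCCC:
		*clr_done = fake_clr_done;
		return 0;
	}
	return -1;
}

/* Same shape as hns_roce_v2_qp_flow_control_init(): reset, clear, poll. */
static int scc_ctx_clear(unsigned int qpn)
{
	unsigned int clr_done = 0;
	int ret, i;

	ret = send_cmd(OPC_RESET_SCCC, qpn, NULL);	/* 1. reset done flag */
	if (ret)
		return ret;

	ret = send_cmd(OPC_CLR_SCCC, qpn, NULL);	/* 2. clear ctx for QPN */
	if (ret)
		return ret;

	for (i = 0; i <= SCC_CLR_DONE_CNT; i++) {	/* 3. poll, bounded */
		ret = send_cmd(OPC_QUERY_SCCC, qpn, &clr_done);
		if (ret)
			return ret;
		if (clr_done)
			return 0;
		usleep(20 * 1000);			/* ~msleep(20) */
	}

	return -1;					/* ~-ETIMEDOUT */
}

int main(void)
{
	printf("scc_ctx_clear(qpn=8) -> %d\n", scc_ctx_clear(8));
	return 0;
}

With HNS_ROCE_CMQ_SCC_CLR_DONE_CNT = 5 the loop issues at most six queries and
sleeps 20 ms after each unsuccessful one, so the worst case is roughly 120 ms of
waiting before -ETIMEDOUT is returned.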
---
 drivers/infiniband/hw/hns/hns_roce_device.h |  4 ++
 drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 59 ++++++++++++++++++++++++++++-
 drivers/infiniband/hw/hns/hns_roce_hw_v2.h  | 15 ++++++++
 drivers/infiniband/hw/hns/hns_roce_qp.c     |  8 ++++
 4 files changed, 85 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 9c414b7..124219c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -202,6 +202,7 @@ enum {
 	HNS_ROCE_CAP_FLAG_SRQ			= BIT(5),
 	HNS_ROCE_CAP_FLAG_MW			= BIT(7),
 	HNS_ROCE_CAP_FLAG_FRMR			= BIT(8),
+	HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL		= BIT(9),
 	HNS_ROCE_CAP_FLAG_ATOMIC		= BIT(10),
 };
 
@@ -483,6 +484,7 @@ struct hns_roce_qp_table {
 	struct hns_roce_hem_table	irrl_table;
 	struct hns_roce_hem_table	trrl_table;
 	struct hns_roce_hem_table	sccc_table;
+	struct mutex			scc_mutex;
 };
 
 struct hns_roce_cq_table {
@@ -867,6 +869,8 @@ struct hns_roce_hw {
 			 int attr_mask, enum ib_qp_state cur_state,
 			 enum ib_qp_state new_state);
 	int (*destroy_qp)(struct ib_qp *ibqp);
+	int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
+				    struct hns_roce_qp *hr_qp);
 	int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 			 const struct ib_send_wr **bad_wr);
 	int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index cd3784b..f77ff93 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1436,7 +1436,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
 
 	if (hr_dev->pci_dev->revision == 0x21) {
 		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
-			       HNS_ROCE_CAP_FLAG_SRQ;
+			       HNS_ROCE_CAP_FLAG_SRQ |
+			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+
 		caps->sccc_entry_sz	= HNS_ROCE_V2_SCCC_ENTRY_SZ;
 		caps->sccc_ba_pg_sz	= 0;
 		caps->sccc_buf_pg_sz	= 0;
@@ -4275,6 +4277,60 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp)
 	return 0;
 }
 
+static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
+					    struct hns_roce_qp *hr_qp)
+{
+	struct hns_roce_sccc_clr_done *rst, *resp;
+	struct hns_roce_sccc_clr *clr;
+	struct hns_roce_cmq_desc desc;
+	int ret, i;
+
+	mutex_lock(&hr_dev->qp_table.scc_mutex);
+
+	/* set scc ctx clear done flag */
+	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
+	rst = (struct hns_roce_sccc_clr_done *)desc.data;
+	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+	if (ret) {
+		dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
+		goto out;
+	}
+
+	/* clear scc context */
+	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
+	clr = (struct hns_roce_sccc_clr *)desc.data;
+	clr->qpn = cpu_to_le32(hr_qp->qpn);
+	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+	if (ret) {
+		dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
+		goto out;
+	}
+
+	/* query scc context clear is done or not */
+	resp = (struct hns_roce_sccc_clr_done *)desc.data;
+	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
+		hns_roce_cmq_setup_basic_desc(&desc,
+					      HNS_ROCE_OPC_QUERY_SCCC, true);
+		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+		if (ret) {
+			dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
+			goto out;
+		}
+
+		if (resp->clr_done)
+			goto out;
+
+		msleep(20);
+	}
+
+	dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
+	ret = -ETIMEDOUT;
+
+out:
+	mutex_unlock(&hr_dev->qp_table.scc_mutex);
+	return ret;
+}
+
 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count,
				 u16 cq_period)
 {
 	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
@@ -5822,6 +5878,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 	.modify_qp = hns_roce_v2_modify_qp,
 	.query_qp = hns_roce_v2_query_qp,
 	.destroy_qp = hns_roce_v2_destroy_qp,
+	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
 	.modify_cq = hns_roce_v2_modify_cq,
 	.post_send = hns_roce_v2_post_send,
 	.post_recv = hns_roce_v2_post_recv,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 007caa2..4da247d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -123,6 +123,8 @@
 #define HNS_ROCE_CMQ_EN_B		16
 #define HNS_ROCE_CMQ_ENABLE		BIT(HNS_ROCE_CMQ_EN_B)
 
+#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT	5
+
 #define check_whether_last_step(hop_num, step_idx) \
 	((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
 	(step_idx == 1 && hop_num == 1) || \
@@ -232,6 +234,9 @@ enum hns_roce_opcode_type {
 	HNS_ROCE_OPC_POST_MB			= 0x8504,
 	HNS_ROCE_OPC_QUERY_MB_ST		= 0x8505,
 	HNS_ROCE_OPC_CFG_BT_ATTR		= 0x8506,
+	HNS_ROCE_OPC_CLR_SCCC			= 0x8509,
+	HNS_ROCE_OPC_QUERY_SCCC			= 0x850a,
+	HNS_ROCE_OPC_RESET_SCCC			= 0x850b,
 	HNS_SWITCH_PARAMETER_CFG		= 0x1033,
 };
 
@@ -1757,4 +1762,14 @@ struct hns_roce_wqe_atomic_seg {
 	__le64  cmp_data;
 };
 
+struct hns_roce_sccc_clr {
+	__le32 qpn;
+	__le32 rsv[5];
+};
+
+struct hns_roce_sccc_clr_done {
+	__le32 clr_done;
+	__le32 rsv[5];
+};
+
 #endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index c9da7c1..301f840 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -811,6 +811,13 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 		if (ret)
 			goto err_qp;
 	}
+
+	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
+		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
+		if (ret)
+			goto err_qp;
+	}
+
 	hr_qp->event = hns_roce_ib_qp_event;
 
 	return 0;
@@ -1152,6 +1159,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 	int reserved_from_bot;
 	int ret;
 
+	mutex_init(&qp_table->scc_mutex);
 	spin_lock_init(&qp_table->lock);
 	INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);