From patchwork Wed Jun 1 15:37:57 2016
X-Patchwork-Submitter: Lijun Ou
X-Patchwork-Id: 9147681
From: Lijun Ou
Subject: [PATCH v9 15/22] IB/hns: Add PD operations support
Date: Wed, 1 Jun 2016 23:37:57 +0800
Message-ID: <1464795484-77395-16-git-send-email-oulijun@huawei.com>
In-Reply-To: <1464795484-77395-1-git-send-email-oulijun@huawei.com>
References: <1464795484-77395-1-git-send-email-oulijun@huawei.com>
X-Mailing-List: linux-rdma@vger.kernel.org

This patch adds the verbs to operate on a protection domain (PD),
mainly the functions for allocating a PD and deallocating a PD.
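For context, a minimal userspace sketch (not part of this patch; device
selection and error handling are illustrative only) showing how these verbs
are ultimately exercised: the standard libibverbs calls ibv_alloc_pd() and
ibv_dealloc_pd() dispatch to the alloc_pd/dealloc_pd hooks this patch
registers on the ib_device.

/* Minimal sketch, illustrative only: allocate and free a PD through
 * libibverbs, which reaches the driver hooks added by this patch. */
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	if (!dev_list || !dev_list[0])
		return 1;

	struct ibv_context *ctx = ibv_open_device(dev_list[0]);
	if (!ctx)
		return 1;

	/* Issues IB_USER_VERBS_CMD_ALLOC_PD -> hns_roce_alloc_pd(). */
	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	if (!pd)
		return 1;
	printf("allocated PD, handle %u\n", pd->handle);

	/* Issues IB_USER_VERBS_CMD_DEALLOC_PD -> hns_roce_dealloc_pd(). */
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	return 0;
}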
Signed-off-by: Wei Hu
Signed-off-by: Nenglong Zhao
Signed-off-by: Lijun Ou
---
 drivers/infiniband/hw/hns/hns_roce_device.h | 17 ++++++++
 drivers/infiniband/hw/hns/hns_roce_main.c   |  8 +++-
 drivers/infiniband/hw/hns/hns_roce_pd.c     | 62 +++++++++++++++++++++++++++++
 3 files changed, 86 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 99f2653..36fd4f3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -134,6 +134,11 @@ struct hns_roce_ucontext {
 	struct hns_roce_uar	uar;
 };
 
+struct hns_roce_pd {
+	struct ib_pd		ibpd;
+	unsigned long		pdn;
+};
+
 struct hns_roce_bitmap {
 	/* Bitmap Traversal last a bit which is 1 */
 	unsigned long		last;
@@ -399,6 +404,11 @@ static inline struct hns_roce_ucontext
 	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
 }
 
+static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct hns_roce_pd, ibpd);
+}
+
 static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
 {
 	__raw_writeq(*(u64 *) val, dest);
@@ -446,6 +456,13 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
 void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
 				unsigned long obj, int cnt);
 
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+				struct ib_ucontext *context,
+				struct ib_udata *udata);
+int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn);
+void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn);
+int hns_roce_dealloc_pd(struct ib_pd *pd);
+
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 64cf5c8..2cebbc8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -604,7 +604,9 @@ int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->uverbs_cmd_mask =
 		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
 		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
-		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT);
+		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
+		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
+		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD);
 
 	/* HCA||device||port */
 	ib_dev->modify_device		= hns_roce_modify_device;
@@ -618,6 +620,10 @@ int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->dealloc_ucontext	= hns_roce_dealloc_ucontext;
 	ib_dev->mmap			= hns_roce_mmap;
 
+	/* PD */
+	ib_dev->alloc_pd		= hns_roce_alloc_pd;
+	ib_dev->dealloc_pd		= hns_roce_dealloc_pd;
+
 	ret = ib_register_device(ib_dev, NULL);
 	if (ret) {
 		dev_err(dev, "ib_register_device failed!\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 6ad38f2..f7f8fc0 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -40,6 +40,28 @@
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 
+int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	unsigned long pd_number;
+	int ret = 0;
+
+	ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
+	if (ret == -1) {
+		dev_err(dev, "alloc pdn from pdbitmap failed\n");
+		return -ENOMEM;
+	}
+
+	*pdn = pd_number;
+
+	return 0;
+}
+
+void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
+{
+	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+}
+
 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
 {
 	return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
@@ -52,6 +74,46 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
 	hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
 }
 
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+				struct ib_ucontext *context,
+				struct ib_udata *udata)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_pd *pd;
+	int ret;
+
+	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
+	if (ret) {
+		kfree(pd);
+		dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
+		return ERR_PTR(ret);
+	}
+
+	if (context) {
+		if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
+			hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
+			dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
+			kfree(pd);
+			return ERR_PTR(-EFAULT);
+		}
+	}
+
+	return &pd->ibpd;
+}
+
+int hns_roce_dealloc_pd(struct ib_pd *pd)
+{
+	hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+	kfree(to_hr_pd(pd));
+
+	return 0;
+}
+
 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
 	struct resource *res;
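A note on the udata contract above (a sketch for reviewers, not part of this
patch): ib_copy_to_udata(udata, &pd->pdn, sizeof(u64)) hands the allocated
PDN back to userspace as a single 64-bit value, so a provider library would
read the response with a layout like the one below. The struct name is
hypothetical; this patch does not define a uapi header for it.

/* Hypothetical userspace-side view of the alloc_pd response written by
 * ib_copy_to_udata() above; the struct name is illustrative only. */
struct hns_roce_alloc_pd_resp {
	__u64 pdn;	/* PD number allocated from hr_dev->pd_bitmap */
};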