From patchwork Wed May 4 12:21:12 2016
From: Lijun Ou
Subject: [PATCH v7 15/21] IB/hns: Add PD operations support
Date: Wed, 4 May 2016 20:21:12 +0800
Message-ID: <1462364478-10808-16-git-send-email-oulijun@huawei.com>
In-Reply-To: <1462364478-10808-1-git-send-email-oulijun@huawei.com>
References: <1462364478-10808-1-git-send-email-oulijun@huawei.com>
X-Mailing-List: linux-rdma@vger.kernel.org

This patch adds the verbs for operating on a protection domain (PD).
It mainly implements PD allocation and deallocation.
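For reference: user space reaches these verbs through the
IB_USER_VERBS_CMD_ALLOC_PD / IB_USER_VERBS_CMD_DEALLOC_PD commands
enabled below, via the standard libibverbs API. A minimal sketch of
that round trip (generic verbs code, not part of this patch; picking
the first device is illustrative only):

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list;
	struct ibv_context *ctx;
	struct ibv_pd *pd;

	dev_list = ibv_get_device_list(NULL);
	if (!dev_list || !dev_list[0])
		return 1;

	/* Opening an hns device routes verbs calls to the hns driver. */
	ctx = ibv_open_device(dev_list[0]);
	if (!ctx)
		return 1;

	/* Issues IB_USER_VERBS_CMD_ALLOC_PD -> hns_roce_alloc_pd() */
	pd = ibv_alloc_pd(ctx);
	if (!pd) {
		fprintf(stderr, "ibv_alloc_pd failed\n");
		return 1;
	}

	/* ... register MRs and create QPs against this pd ... */

	/* Issues IB_USER_VERBS_CMD_DEALLOC_PD -> hns_roce_dealloc_pd() */
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	return 0;
}

The ib_copy_to_udata() call in hns_roce_alloc_pd() is what hands the
allocated pdn back to the user-space provider library during this
exchange.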
Signed-off-by: Wei Hu
Signed-off-by: Nenglong Zhao
Signed-off-by: Lijun Ou
---
 drivers/infiniband/hw/hns/hns_roce_device.h | 17 ++++++++
 drivers/infiniband/hw/hns/hns_roce_main.c   |  8 +++-
 drivers/infiniband/hw/hns/hns_roce_pd.c     | 62 +++++++++++++++++++++++++++++
 3 files changed, 86 insertions(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index d135e7f..eff7339 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -116,6 +116,11 @@ struct hns_roce_ucontext {
 	struct hns_roce_uar	uar;
 };
 
+struct hns_roce_pd {
+	struct ib_pd		ibpd;
+	unsigned long		pdn;
+};
+
 struct hns_roce_bitmap {
 	/* Bitmap Traversal last a bit which is 1 */
 	unsigned long		last;
@@ -384,6 +389,11 @@ static inline struct hns_roce_ucontext
 	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
 }
 
+static inline struct hns_roce_pd *to_hr_pd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct hns_roce_pd, ibpd);
+}
+
 static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
 {
 	__raw_writeq(*(u64 *) val, dest);
@@ -431,6 +441,13 @@ int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
 void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
 				unsigned long obj, int cnt);
 
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+				struct ib_ucontext *context,
+				struct ib_udata *udata);
+int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn);
+void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn);
+int hns_roce_dealloc_pd(struct ib_pd *pd);
+
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 6a1fd38..edb2cfe 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -585,7 +585,9 @@ int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->uverbs_cmd_mask =
 		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
 		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
-		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT);
+		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
+		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
+		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD);
 
 	/* HCA||device||port */
 	ib_dev->modify_device		= hns_roce_modify_device;
@@ -599,6 +601,10 @@ int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->dealloc_ucontext	= hns_roce_dealloc_ucontext;
 	ib_dev->mmap			= hns_roce_mmap;
 
+	/* PD */
+	ib_dev->alloc_pd		= hns_roce_alloc_pd;
+	ib_dev->dealloc_pd		= hns_roce_dealloc_pd;
+
 	ret = ib_register_device(ib_dev, NULL);
 	if (ret) {
 		dev_err(dev, "ib_register_device failed!\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 90b0f33..6485378 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -17,6 +17,28 @@
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 
+int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
+{
+	struct device *dev = &hr_dev->pdev->dev;
+	unsigned long pd_number;
+	int ret = 0;
+
+	ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
+	if (ret == -1) {
+		dev_err(dev, "alloc pdn from pdbitmap failed\n");
+		return -ENOMEM;
+	}
+
+	*pdn = pd_number;
+
+	return 0;
+}
+
+void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
+{
+	hns_roce_bitmap_free(&hr_dev->pd_bitmap, pdn);
+}
+
 int hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
 {
 	return hns_roce_bitmap_init(&hr_dev->pd_bitmap, hr_dev->caps.num_pds,
@@ -29,6 +51,46 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
 	hns_roce_bitmap_cleanup(&hr_dev->pd_bitmap);
 }
 
+struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
+				struct ib_ucontext *context,
+				struct ib_udata *udata)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct device *dev = &hr_dev->pdev->dev;
+	struct hns_roce_pd *pd;
+	int ret;
+
+	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
+	if (ret) {
+		kfree(pd);
+		dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n");
+		return ERR_PTR(ret);
+	}
+
+	if (context) {
+		if (ib_copy_to_udata(udata, &pd->pdn, sizeof(u64))) {
+			hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
+			dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n");
+			kfree(pd);
+			return ERR_PTR(-EFAULT);
+		}
+	}
+
+	return &pd->ibpd;
+}
+
+int hns_roce_dealloc_pd(struct ib_pd *pd)
+{
+	hns_roce_pd_free(to_hr_dev(pd->device), to_hr_pd(pd)->pdn);
+	kfree(to_hr_pd(pd));
+
+	return 0;
+}
+
 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
 	struct resource *res;
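A note on the allocator underneath: hns_roce_pd_alloc() draws PD
numbers from the pd_bitmap that hns_roce_init_pd_table() sizes from
caps.num_pds, and hns_roce_pd_free() returns them. The
hns_roce_bitmap_* helpers themselves are outside this patch; purely
to illustrate the technique, a hypothetical user-space stand-in
(invented names, no locking, not the driver's implementation) could
look roughly like this:

#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#define BITS_PER_WORD (CHAR_BIT * sizeof(unsigned long))

struct id_bitmap {
	unsigned long num;	/* total number of allocatable IDs */
	unsigned long last;	/* hint: where the next search starts */
	unsigned long *bits;	/* one bit per ID; set = in use */
};

static int id_bitmap_init(struct id_bitmap *bm, unsigned long num)
{
	bm->num = num;
	bm->last = 0;
	bm->bits = calloc((num + BITS_PER_WORD - 1) / BITS_PER_WORD,
			  sizeof(unsigned long));
	return bm->bits ? 0 : -ENOMEM;
}

/* Scan from the hint for a clear bit, wrapping around once. */
static int id_bitmap_alloc(struct id_bitmap *bm, unsigned long *id)
{
	unsigned long i = bm->last, scanned;

	for (scanned = 0; scanned < bm->num;
	     scanned++, i = (i + 1) % bm->num) {
		if (!(bm->bits[i / BITS_PER_WORD] &
		      (1UL << (i % BITS_PER_WORD)))) {
			bm->bits[i / BITS_PER_WORD] |=
				1UL << (i % BITS_PER_WORD);
			bm->last = (i + 1) % bm->num;
			*id = i;
			return 0;
		}
	}
	return -ENOMEM;	/* every ID is in use */
}

static void id_bitmap_free(struct id_bitmap *bm, unsigned long id)
{
	bm->bits[id / BITS_PER_WORD] &= ~(1UL << (id % BITS_PER_WORD));
}

The search hint makes allocation roughly round-robin, which delays
reuse of a just-freed ID; freeing is a single bit clear. The in-kernel
helpers would additionally need to serialize against concurrent verbs
calls.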