From patchwork Sun May 17 05:51:06 2015 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Haggai Eran X-Patchwork-Id: 6422401 Return-Path: X-Original-To: patchwork-linux-rdma@patchwork.kernel.org Delivered-To: patchwork-parsemail@patchwork1.web.kernel.org Received: from mail.kernel.org (mail.kernel.org [198.145.29.136]) by patchwork1.web.kernel.org (Postfix) with ESMTP id E2ABC9F1CC for ; Sun, 17 May 2015 05:52:32 +0000 (UTC) Received: from mail.kernel.org (localhost [127.0.0.1]) by mail.kernel.org (Postfix) with ESMTP id D921820567 for ; Sun, 17 May 2015 05:52:31 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id C18D420549 for ; Sun, 17 May 2015 05:52:30 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1751949AbbEQFw1 (ORCPT ); Sun, 17 May 2015 01:52:27 -0400 Received: from ns1327.ztomy.com ([193.47.165.129]:34502 "EHLO mellanox.co.il" rhost-flags-OK-FAIL-OK-FAIL) by vger.kernel.org with ESMTP id S1751922AbbEQFw0 (ORCPT ); Sun, 17 May 2015 01:52:26 -0400 Received: from Internal Mail-Server by MTLPINE1 (envelope-from haggaie@mellanox.com) with ESMTPS (AES256-SHA encrypted); 17 May 2015 08:51:03 +0300 Received: from gen-l-vrt-034.mtl.labs.mlnx (gen-l-vrt-034.mtl.labs.mlnx [10.137.34.1]) by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id t4H5pBE3006062; Sun, 17 May 2015 08:51:11 +0300 From: Haggai Eran To: Doug Ledford Cc: linux-rdma@vger.kernel.org, netdev@vger.kernel.org, Liran Liss , Guy Shapiro , Shachar Raindel , Yotam Kenneth , Haggai Eran Subject: [PATCH v4 for-next 10/12] IB/cma: Share CM IDs between namespaces Date: Sun, 17 May 2015 08:51:06 +0300 Message-Id: <1431841868-28063-11-git-send-email-haggaie@mellanox.com> X-Mailer: git-send-email 1.7.11.2 In-Reply-To: <1431841868-28063-1-git-send-email-haggaie@mellanox.com> References: <1431841868-28063-1-git-send-email-haggaie@mellanox.com> 
Sender: linux-rdma-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-rdma@vger.kernel.org X-Spam-Status: No, score=-6.9 required=5.0 tests=BAYES_00, RCVD_IN_DNSWL_HI, T_RP_MATCHES_RCVD, UNPARSEABLE_RELAY autolearn=ham version=3.3.1 X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail.kernel.org X-Virus-Scanned: ClamAV using ClamSMTP Use ib_cm_id_create_and_listen to create listening IB CM IDs or share existing ones if needed. When given a request on a specific CM ID, the code now needs to find the namespace matching the request, and find the RDMA CM ID based on the namespace and the request parameters, instead of using the context field of ib_cm_id as was previously done. Signed-off-by: Haggai Eran Signed-off-by: Guy Shapiro Signed-off-by: Yotam Kenneth Signed-off-by: Shachar Raindel --- drivers/infiniband/core/cma.c | 136 +++++++++++++++++++++++++++++++++++------- 1 file changed, 115 insertions(+), 21 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index a4645a16c9f9..e5d389ffa497 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1012,6 +1012,112 @@ static int cma_save_net_info(struct sockaddr *src_addr, return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id); } +struct cma_req_info { + struct ib_device *device; + int port; + __be64 service_id; + u16 pkey; +}; + +static int cma_save_req_info(struct ib_cm_event *ib_event, + struct cma_req_info *req) +{ + struct ib_cm_req_event_param *req_param = &ib_event->param.req_rcvd; + struct ib_cm_sidr_req_event_param *sidr_param = + &ib_event->param.sidr_req_rcvd; + + switch (ib_event->event) { + case IB_CM_REQ_RECEIVED: + req->device = req_param->listen_id->device; + req->port = req_param->port; + req->service_id = req_param->primary_path->service_id; + req->pkey = be16_to_cpu(req_param->primary_path->pkey); + break; + case IB_CM_SIDR_REQ_RECEIVED: + req->device = sidr_param->listen_id->device; + req->port = 
sidr_param->port; + req->service_id = sidr_param->service_id; + req->pkey = sidr_param->pkey; + break; + default: + return -EINVAL; + } + + return 0; +} + +static struct net *cma_get_net_ns(struct ib_cm_event *ib_event, + struct cma_req_info *req) +{ + struct sockaddr_storage addr_storage; + struct sockaddr *listen_addr; + int err = 0; + + listen_addr = (struct sockaddr *)&addr_storage; + err = cma_save_ip_info(listen_addr, NULL, ib_event, req->service_id); + if (err) + return ERR_PTR(err); + + return ib_get_net_ns_by_port_pkey_ip(req->device, req->port, + req->pkey, listen_addr); +} + +static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id) +{ + return (be64_to_cpu(service_id) >> 16) & 0xffff; +} + +static struct rdma_id_private *cma_find_listener( + struct rdma_bind_list *bind_list, + struct ib_cm_id *cm_id, + struct ib_cm_event *ib_event) +{ + struct rdma_id_private *id_priv, *id_priv_dev; + + if (!bind_list) + return ERR_PTR(-EINVAL); + + hlist_for_each_entry(id_priv, &bind_list->owners, node) { + if (!cm_compare_private_data(ib_event->private_data, + &id_priv->compare_data)) { + if (id_priv->id.device == cm_id->device) + return id_priv; + list_for_each_entry(id_priv_dev, + &id_priv->listen_list, + listen_list) { + if (id_priv_dev->id.device == cm_id->device) + return id_priv_dev; + } + } + } + + return ERR_PTR(-EINVAL); +} + +static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, + struct ib_cm_event *ib_event) +{ + struct cma_req_info req; + struct net *net; + struct rdma_bind_list *bind_list; + struct rdma_id_private *id_priv; + int err; + + err = cma_save_req_info(ib_event, &req); + if (err) + return ERR_PTR(err); + + net = cma_get_net_ns(ib_event, &req); + if (IS_ERR(net)) + return ERR_PTR(PTR_ERR(net)); + + bind_list = cma_ps_find(net, rdma_ps_from_service_id(req.service_id), + cma_port_from_service_id(req.service_id)); + id_priv = cma_find_listener(bind_list, cm_id, ib_event); + put_net(net); + return id_priv; +} + static 
inline int cma_user_data_offset(struct rdma_id_private *id_priv)
 {
 	return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
@@ -1371,10 +1477,9 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_cm_event event;
 	int offset, ret;
 
-	listen_id = cm_id->context;
-	if (cm_compare_private_data(ib_event->private_data,
-				    &listen_id->compare_data))
-		return -EINVAL;
+	listen_id = cma_id_from_event(cm_id, ib_event);
+	if (IS_ERR(listen_id))
+		return PTR_ERR(listen_id);
 
 	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
 		return -EINVAL;
@@ -1648,27 +1753,15 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 	__be64 svc_id;
-	int ret;
 
-	id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
-	if (IS_ERR(id))
-		return PTR_ERR(id);
-
-	id_priv->cm_id.ib = id;
-
 	addr = cma_src_addr(id_priv);
 	svc_id = rdma_get_service_id(&id_priv->id, addr);
-	if (cma_any_addr(addr) && !id_priv->afonly)
-		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
-	else {
+	if (!cma_any_addr(addr) || id_priv->afonly)
 		cma_set_compare_data(id_priv->id.ps, addr,
 				     &id_priv->compare_data);
-		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0,
-				   &id_priv->compare_data);
-	}
-
-	if (ret) {
-		ib_destroy_cm_id(id_priv->cm_id.ib);
-		id_priv->cm_id.ib = NULL;
-	}
+	id = ib_cm_id_create_and_listen(id_priv->id.device, cma_req_handler,
+					svc_id, 0);
+	if (IS_ERR(id))
+		return PTR_ERR(id);
+	id_priv->cm_id.ib = id;
 
-	return ret;
+	return 0;
 }