From patchwork Thu Jul 30 14:50:22 2015
X-Patchwork-Submitter: Haggai Eran
X-Patchwork-Id: 6903401
From: Haggai Eran <haggaie@mellanox.com>
To: Doug Ledford
Cc: Liran Liss, Haggai Eran, linux-rdma@vger.kernel.org, Jason Gunthorpe
Subject: [PATCH v4 10/14] IB/cma: Add net_dev and private data checks to RDMA CM
Date: Thu, 30 Jul 2015 17:50:22 +0300
Message-Id: <1438267826-32155-11-git-send-email-haggaie@mellanox.com>
In-Reply-To: <1438267826-32155-1-git-send-email-haggaie@mellanox.com>
References: <1438267826-32155-1-git-send-email-haggaie@mellanox.com>

Instead of relying on the ib_cm module to check an incoming CM request's
private data header, add these checks to the RDMA CM module. This allows a
following patch to clean up the ib_cm interface and remove the code that
looks into the private headers. It will also allow supporting namespaces in
RDMA CM later on, by making these checks namespace aware.
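For reference, the bind-list lookup added below keys off the IB service ID,
which packs the RDMA CM port space and port number into its low 32 bits. A
minimal userspace sketch of that decoding: the kernel's
rdma_ps_from_service_id() (added by this patch) and the existing
cma_port_from_service_id() operate on a big-endian __be64 after
be64_to_cpu(); this stand-in uses a host-order value, and the example
service ID is illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Sketch of rdma_ps_from_service_id(): port space in bits 16-31. */
static uint16_t ps_from_service_id(uint64_t service_id)
{
        return (service_id >> 16) & 0xffff;
}

/* Sketch of cma_port_from_service_id(): port number in bits 0-15. */
static uint16_t port_from_service_id(uint64_t service_id)
{
        return service_id & 0xffff;
}

int main(void)
{
        /* Illustrative values: RDMA_PS_TCP (0x0106) with port 18515. */
        uint64_t service_id = ((uint64_t)0x0106 << 16) | 18515;

        printf("port space: 0x%04x\n", ps_from_service_id(service_id));
        printf("port:       %u\n", port_from_service_id(service_id));
        return 0;
}

Both helpers look only at the low 32 bits of the service ID, which is what
lets cma_ps_find() recover the listener's bind list from the request alone.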
Signed-off-by: Haggai Eran <haggaie@mellanox.com>
---
 drivers/infiniband/core/cma.c | 188 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 185 insertions(+), 3 deletions(-)

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f2d799209412..011aa7310dd3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -263,6 +263,15 @@ struct cma_hdr {
 
 #define CMA_VERSION 0x00
 
+struct cma_req_info {
+        struct ib_device *device;
+        int port;
+        union ib_gid local_gid;
+        __be64 service_id;
+        u16 pkey;
+        bool has_gid:1;
+};
+
 static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
 {
         unsigned long flags;
@@ -300,7 +309,7 @@ static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
         return old;
 }
 
-static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
+static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
 {
         return hdr->ip_version >> 4;
 }
@@ -1016,7 +1025,7 @@ static int cma_save_ip_info(struct sockaddr *src_addr,
                 cma_save_ip6_info(src_addr, dst_addr, hdr, port);
                 break;
         default:
-                return -EINVAL;
+                return -EAFNOSUPPORT;
         }
 
         return 0;
@@ -1040,6 +1049,176 @@ static int cma_save_net_info(struct sockaddr *src_addr,
         return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
 }
 
+static int cma_save_req_info(const struct ib_cm_event *ib_event,
+                             struct cma_req_info *req)
+{
+        const struct ib_cm_req_event_param *req_param =
+                &ib_event->param.req_rcvd;
+        const struct ib_cm_sidr_req_event_param *sidr_param =
+                &ib_event->param.sidr_req_rcvd;
+
+        switch (ib_event->event) {
+        case IB_CM_REQ_RECEIVED:
+                req->device = req_param->listen_id->device;
+                req->port = req_param->port;
+                memcpy(&req->local_gid, &req_param->primary_path->sgid,
+                       sizeof(req->local_gid));
+                req->has_gid = true;
+                req->service_id = req_param->primary_path->service_id;
+                req->pkey = req_param->bth_pkey;
+                break;
+        case IB_CM_SIDR_REQ_RECEIVED:
+                req->device = sidr_param->listen_id->device;
+                req->port = sidr_param->port;
+                req->has_gid = false;
+                req->service_id = sidr_param->service_id;
+                req->pkey = sidr_param->bth_pkey;
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
+                                          const struct cma_req_info *req)
+{
+        struct sockaddr_storage listen_addr_storage;
+        struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage;
+        struct net_device *net_dev;
+        const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
+        int err;
+
+        err = cma_save_ip_info(listen_addr, NULL, ib_event, req->service_id);
+        if (err)
+                return ERR_PTR(err);
+
+        net_dev = ib_get_net_dev_by_params(req->device, req->port, req->pkey,
+                                           gid, listen_addr);
+        if (!net_dev)
+                return ERR_PTR(-ENODEV);
+
+        return net_dev;
+}
+
+static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
+{
+        return (be64_to_cpu(service_id) >> 16) & 0xffff;
+}
+
+static bool cma_match_private_data(struct rdma_id_private *id_priv,
+                                   const struct cma_hdr *hdr)
+{
+        struct sockaddr *addr = cma_src_addr(id_priv);
+        __be32 ip4_addr;
+        struct in6_addr ip6_addr;
+
+        if (cma_any_addr(addr) && !id_priv->afonly)
+                return true;
+
+        switch (addr->sa_family) {
+        case AF_INET:
+                ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
+                if (cma_get_ip_ver(hdr) != 4)
+                        return false;
+                if (!cma_any_addr(addr) &&
+                    hdr->dst_addr.ip4.addr != ip4_addr)
+                        return false;
+                break;
+        case AF_INET6:
+                ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
+                if (cma_get_ip_ver(hdr) != 6)
+                        return false;
+                if (!cma_any_addr(addr) &&
+                    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
+                        return false;
+                break;
+        case AF_IB:
+                return true;
+        default:
+                return false;
+        }
+
+        return true;
+}
+
+static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
+                              const struct net_device *net_dev)
+{
+        const struct rdma_addr *addr = &id_priv->id.route.addr;
+
+        if (!net_dev)
+                /* This request is an AF_IB request */
+                return addr->src_addr.ss_family == AF_IB;
+
+        return !addr->dev_addr.bound_dev_if ||
+               (net_eq(dev_net(net_dev), &init_net) &&
+                addr->dev_addr.bound_dev_if == net_dev->ifindex);
+}
+
+static struct rdma_id_private *cma_find_listener(
+                const struct rdma_bind_list *bind_list,
+                const struct ib_cm_id *cm_id,
+                const struct ib_cm_event *ib_event,
+                const struct cma_req_info *req,
+                const struct net_device *net_dev)
+{
+        struct rdma_id_private *id_priv, *id_priv_dev;
+
+        if (!bind_list)
+                return ERR_PTR(-EINVAL);
+
+        hlist_for_each_entry(id_priv, &bind_list->owners, node) {
+                if (cma_match_private_data(id_priv, ib_event->private_data)) {
+                        if (id_priv->id.device == cm_id->device &&
+                            cma_match_net_dev(id_priv, net_dev))
+                                return id_priv;
+                        list_for_each_entry(id_priv_dev,
+                                            &id_priv->listen_list,
+                                            listen_list) {
+                                if (id_priv_dev->id.device == cm_id->device &&
+                                    cma_match_net_dev(id_priv_dev, net_dev))
+                                        return id_priv_dev;
+                        }
+                }
+        }
+
+        return ERR_PTR(-EINVAL);
+}
+
+static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
+                                                 struct ib_cm_event *ib_event)
+{
+        struct cma_req_info req;
+        struct rdma_bind_list *bind_list;
+        struct rdma_id_private *id_priv;
+        struct net_device *net_dev;
+        int err;
+
+        err = cma_save_req_info(ib_event, &req);
+        if (err)
+                return ERR_PTR(err);
+
+        net_dev = cma_get_net_dev(ib_event, &req);
+        if (IS_ERR(net_dev)) {
+                if (PTR_ERR(net_dev) == -EAFNOSUPPORT) {
+                        /* Assuming the protocol is AF_IB */
+                        net_dev = NULL;
+                } else {
+                        return ERR_PTR(PTR_ERR(net_dev));
+                }
+        }
+
+        bind_list = cma_ps_find(rdma_ps_from_service_id(req.service_id),
+                                cma_port_from_service_id(req.service_id));
+        id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, net_dev);
+
+        dev_put(net_dev);
+
+        return id_priv;
+}
+
 static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
 {
         return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
@@ -1399,7 +1578,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
         struct rdma_cm_event event;
         int offset, ret;
 
-        listen_id = cm_id->context;
+        listen_id = cma_id_from_event(cm_id, ib_event);
+        if (IS_ERR(listen_id))
+                return PTR_ERR(listen_id);
+
         if (!cma_check_req_qp_type(&listen_id->id, ib_event))
                 return -EINVAL;
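The private-data matching above parses the cma_hdr header that RDMA CM
places in the REQ's private data. A minimal userspace sketch of that layout
and of the version-nibble check behind cma_get_ip_ver() and
cma_match_private_data(); the field layout mirrors struct cma_hdr and
union cma_ip_addr in cma.c (an IPv4 address sits in the last four bytes of
the 16-byte address field), but the _sketch types and the address value are
illustrative stand-ins:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cma_ip4_sketch {
        uint32_t pad[3];
        uint32_t addr;                     /* __be32 in the kernel */
};

union cma_ip_addr_sketch {
        uint8_t ip6[16];                   /* struct in6_addr in the kernel */
        struct cma_ip4_sketch ip4;
};

struct cma_hdr_sketch {
        uint8_t cma_version;
        uint8_t ip_version;                /* IP version in bits 7:4 */
        uint16_t port;                     /* __be16 in the kernel */
        union cma_ip_addr_sketch src_addr;
        union cma_ip_addr_sketch dst_addr;
};

/* Mirrors cma_get_ip_ver(): the IP version is the high nibble. */
static uint8_t get_ip_ver(const struct cma_hdr_sketch *hdr)
{
        return hdr->ip_version >> 4;
}

int main(void)
{
        struct cma_hdr_sketch hdr;
        const uint8_t dst[4] = { 192, 0, 2, 1 };   /* 192.0.2.1, wire order */

        memset(&hdr, 0, sizeof(hdr));
        hdr.ip_version = 4 << 4;           /* mark the request as IPv4 */
        memcpy(&hdr.dst_addr.ip4.addr, dst, sizeof(dst));

        if (get_ip_ver(&hdr) == 4)
                printf("IPv4 CM request for %u.%u.%u.%u\n",
                       dst[0], dst[1], dst[2], dst[3]);
        return 0;
}

Keeping the IPv4 address in the tail of the 16-byte field puts it in the
same position as a v4-mapped IPv6 address, which is why the AF_INET branch
of cma_match_private_data() can get away with comparing a single 32-bit
word against the listener's bound address.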