From patchwork Wed Apr 9 23:54:11 2014
X-Patchwork-Submitter: Devesh Sharma
X-Patchwork-Id: 3958051
From: Devesh Sharma
To: 
Cc: , , "Devesh Sharma"
Subject: [PATCH V2] NFS-RDMA: fix qp pointer validation checks
Date: Thu, 10 Apr 2014 05:24:11 +0530
X-Mailer: git-send-email 1.7.1

If rdma_create_qp() fails to create a QP, for example because the device
firmware is in an invalid state, xprtrdma still tries to destroy the
non-existent QP and ends up with a NULL pointer dereference crash. Add
proper checks to validate the QP pointer so that this cannot happen.

V0: Used IS_ERR() to check the validity of the QP pointer.
V1: IS_ERR() cannot catch this case, because when the ib_create_qp verb
    fails, rdma_create_qp() leaves the cm_id's qp pointer NULL rather
    than setting it to an error value. Therefore, changed from IS_ERR()
    to a NULL pointer check.
V2: ib_post_send() must not be skipped once DECR_CQCOUNT() has run; the
    V1 NULL pointer check caused the functions to exit after the CQ
    count had already been decremented. Fixed in V2 by validating the
    QP pointer before DECR_CQCOUNT().
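For clarity, here is a small user-space sketch (illustration only, not part
of the patch) of the ordering rule that V2 enforces: the QP pointer is
checked before the completion-queue count is decremented, so a missing QP
can never leave CQCOUNT decremented without a posted work request. The
fake_id / fake_ep / fake_post_send names are made-up stand-ins for the
rdma_cm_id and rpcrdma_ep structures used in verbs.c, not real kernel
interfaces.

/*
 * Illustrative sketch only -- not part of the patch. The fake_* types and
 * functions below are simplified stand-ins for rdma_cm_id, rpcrdma_ep and
 * ib_post_send(); they model only the check ordering, not the real APIs.
 */
#include <stdio.h>
#include <errno.h>

struct fake_qp { int unused; };
struct fake_id { struct fake_qp *qp; };	/* stands in for rdma_cm_id::qp */
struct fake_ep { int rep_cqcount; };	/* stands in for the CQ bookkeeping */

static int fake_post_send(struct fake_qp *qp)
{
	(void)qp;		/* pretend the work request was posted */
	return 0;
}

/* V2 ordering: validate the QP pointer *before* touching the CQ count. */
static int post_send_guarded(struct fake_id *id, struct fake_ep *ep)
{
	if (!id->qp)
		return -EINVAL;	/* nothing decremented, nothing posted */

	ep->rep_cqcount--;	/* models DECR_CQCOUNT() */
	return fake_post_send(id->qp);
}

int main(void)
{
	struct fake_ep ep = { .rep_cqcount = 32 };
	struct fake_id id = { .qp = NULL };	/* as after a failed rdma_create_qp() */
	int rc = post_send_guarded(&id, &ep);

	/* prints "rc=-22 cqcount=32": the count is untouched on the error path */
	printf("rc=%d cqcount=%d\n", rc, ep.rep_cqcount);
	return 0;
}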
Signed-off-by: Devesh Sharma
---
 net/sunrpc/xprtrdma/verbs.c |   92 ++++++++++++++++++++++++++-----------------
 1 files changed, 56 insertions(+), 36 deletions(-)

diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 9372656..9e56baf 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -831,10 +831,12 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 	if (ep->rep_connected != 0) {
 		struct rpcrdma_xprt *xprt;
 retry:
-		rc = rpcrdma_ep_disconnect(ep, ia);
-		if (rc && rc != -ENOTCONN)
-			dprintk("RPC:       %s: rpcrdma_ep_disconnect"
+		if (ia->ri_id->qp) {
+			rc = rpcrdma_ep_disconnect(ep, ia);
+			if (rc && rc != -ENOTCONN)
+				dprintk("RPC:       %s: rpcrdma_ep_disconnect"
 				" status %i\n", __func__, rc);
+		}
 		rpcrdma_clean_cq(ep->rep_cq);
 
 		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
@@ -859,7 +861,8 @@ retry:
 			goto out;
 		}
 		/* END TEMP */
-		rdma_destroy_qp(ia->ri_id);
+		if (ia->ri_id->qp)
+			rdma_destroy_qp(ia->ri_id);
 		rdma_destroy_id(ia->ri_id);
 		ia->ri_id = id;
 	}
@@ -1555,22 +1558,30 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
 				  IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
 				  IB_ACCESS_REMOTE_READ);
 	frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
-	DECR_CQCOUNT(&r_xprt->rx_ep);
-	rc = ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
-	if (rc) {
-		dprintk("RPC:       %s: failed ib_post_send for register,"
-			" status %i\n", __func__, rc);
-		while (i--)
-			rpcrdma_unmap_one(ia, --seg);
+	if (ia->ri_id->qp) {
+		DECR_CQCOUNT(&r_xprt->rx_ep);
+		rc = ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
+		if (rc) {
+			dprintk("RPC:       %s: failed ib_post_send for register,"
+				" status %i\n", __func__, rc);
+			goto out;
+		} else {
+			seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
+			seg1->mr_base = seg1->mr_dma + pageoff;
+			seg1->mr_nsegs = i;
+			seg1->mr_len = len;
+		}
 	} else {
-		seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
-		seg1->mr_base = seg1->mr_dma + pageoff;
-		seg1->mr_nsegs = i;
-		seg1->mr_len = len;
+		rc = -EINVAL;
+		goto out;
 	}
+	*nsegs = i;
+	return rc;
+out:
+	while (i--)
+		rpcrdma_unmap_one(ia, --seg);
 	return rc;
 }
@@ -1590,12 +1601,16 @@ rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
 	invalidate_wr.opcode = IB_WR_LOCAL_INV;
 	invalidate_wr.send_flags = IB_SEND_SIGNALED;
 	invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
-	DECR_CQCOUNT(&r_xprt->rx_ep);
-	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
-	if (rc)
-		dprintk("RPC:       %s: failed ib_post_send for invalidate,"
-			" status %i\n", __func__, rc);
+	if (ia->ri_id->qp) {
+		DECR_CQCOUNT(&r_xprt->rx_ep);
+		rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
+		if (rc)
+			dprintk("RPC:       %s: failed ib_post_send for invalidate,"
+				" status %i\n", __func__, rc);
+	} else
+		rc = -EINVAL;
+
 	return rc;
 }
@@ -1916,17 +1931,19 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
 		req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
 		DMA_TO_DEVICE);
 
-	if (DECR_CQCOUNT(ep) > 0)
-		send_wr.send_flags = 0;
-	else { /* Provider must take a send completion every now and then */
-		INIT_CQCOUNT(ep);
-		send_wr.send_flags = IB_SEND_SIGNALED;
-	}
-
-	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
-	if (rc)
-		dprintk("RPC:       %s: ib_post_send returned %i\n", __func__,
-			rc);
+	if (ia->ri_id->qp) {
+		if (DECR_CQCOUNT(ep) > 0)
+			send_wr.send_flags = 0;
+		else { /* Provider must take a send completion every now and then */
+			INIT_CQCOUNT(ep);
+			send_wr.send_flags = IB_SEND_SIGNALED;
+		}
+		rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
+		if (rc)
+			dprintk("RPC:       %s: ib_post_send returned %i\n", __func__,
+				rc);
+	} else
+		rc = -EINVAL;
 out:
 	return rc;
 }
@@ -1950,11 +1967,14 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
 	ib_dma_sync_single_for_cpu(ia->ri_id->device,
 		rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL);
 
-	DECR_CQCOUNT(ep);
-	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
+	if (ia->ri_id->qp) {
+		DECR_CQCOUNT(ep);
+		rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
+		if (rc)
+			dprintk("RPC:       %s: ib_post_recv returned %i\n", __func__,
+				rc);
+	} else
+		rc = -EINVAL;
 
-	if (rc)
-		dprintk("RPC:       %s: ib_post_recv returned %i\n", __func__,
-			rc);
 	return rc;
 }