diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -16,8 +16,8 @@ static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe,
struct rxe_mc_grp *grp;
grp = __alloc(&rxe->mc_grp_pool);
- if (unlikely(!grp))
- return NULL;
+ if (IS_ERR(grp))
+ return grp;
INIT_LIST_HEAD(&grp->qp_list);
spin_lock_init(&grp->mcg_lock);
@@ -28,7 +28,7 @@ static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe,
if (unlikely(err)) {
drop_key(grp);
rxe_drop_ref(grp);
- return NULL;
+ return ERR_PTR(err);
}
return grp;
@@ -43,20 +43,30 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
struct rxe_pool *pool = &rxe->mc_grp_pool;
unsigned long flags;
- if (unlikely(rxe->attr.max_mcast_qp_attach == 0))
- return -EINVAL;
+ if (unlikely(rxe->attr.max_mcast_qp_attach == 0)) {
+ err = -EINVAL;
+ goto err;
+ }
write_lock_irqsave(&pool->pool_lock, flags);
grp = __get_key(pool, mgid);
+ if (IS_ERR(grp))
+ goto err_ptr;
if (grp)
goto done;
grp = create_grp(rxe, pool, mgid);
- if (unlikely(!grp))
- err = -ENOMEM;
+ if (IS_ERR(grp))
+ goto err_ptr;
done:
write_unlock_irqrestore(&pool->pool_lock, flags);
*grp_p = grp;
+ return 0;
+
+err_ptr:
+ write_unlock_irqrestore(&pool->pool_lock, flags);
+ err = PTR_ERR(grp);
+err:
return err;
}
@@ -82,8 +92,8 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
}
mce = rxe_alloc(&rxe->mc_elem_pool);
- if (!mce) {
- err = -ENOMEM;
+ if (IS_ERR(mce)) {
+ err = PTR_ERR(mce);
goto out;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -386,7 +386,7 @@ static struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 lkey)
struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
mr = rxe_get_key(&rxe->mr_pool, &lkey);
- if (!mr)
+ if (IS_ERR(mr) || !mr)
return NULL;
if (unlikely((mr->ibmr.lkey != lkey) || (mr->pd != pd) ||
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -217,6 +217,8 @@ static int do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
rkey = mw->ibmw.rkey;
new_rkey = (rkey & 0xffffff00) | (wqe->wr.wr.umw.rkey & 0x000000ff);
duplicate_mw = rxe_get_key(&rxe->mw_pool, &new_rkey);
+ if (IS_ERR(duplicate_mw))
+ return PTR_ERR(duplicate_mw);
if (duplicate_mw) {
pr_err_once("new MW key is a duplicate, try another\n");
rxe_drop_ref(duplicate_mw);
@@ -260,14 +262,23 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
if (qp->is_user) {
mw = rxe_get_index(&rxe->mw_pool,
wqe->wr.wr.umw.mw_index);
+ if (IS_ERR(mw)) {
+ ret = PTR_ERR(mw);
+ goto err1;
+ }
if (!mw) {
pr_err_once("mw with index = %d not found\n",
wqe->wr.wr.umw.mw_index);
ret = -EINVAL;
goto err1;
}
+
mr = rxe_get_index(&rxe->mr_pool,
wqe->wr.wr.umw.mr_index);
+ if (IS_ERR(mr)) {
+ ret = PTR_ERR(mr);
+ goto err2;
+ }
if (!mr && wqe->wr.wr.umw.length) {
pr_err_once("mr with index = %d not found\n",
wqe->wr.wr.umw.mr_index);
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -581,7 +581,7 @@ void *rxe_alloc(struct rxe_pool *pool)
(pool->flags & RXE_POOL_ATOMIC) ?
GFP_ATOMIC : GFP_KERNEL);
if (!obj)
- goto out;
+ return ERR_PTR(-ENOMEM);
elem = (struct rxe_pool_entry *)((u8 *)obj +
rxe_type_info[pool->type].elem_offset);
@@ -591,9 +591,9 @@ void *rxe_alloc(struct rxe_pool *pool)
write_unlock_irqrestore(&pool->pool_lock, flags);
if (err) {
kfree(obj);
- obj = NULL;
+ return ERR_PTR(err);
}
-out:
+
return obj;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -186,6 +186,8 @@ static int hdr_check(struct rxe_pkt_info *pkt)
index = (qpn == 1) ? port->qp_gsi_index : qpn;
qp = rxe_get_index(&rxe->qp_pool, index);
+ if (IS_ERR(qp))
+ goto err1;
if (unlikely(!qp)) {
pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
goto err1;
@@ -245,7 +247,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
/* lookup mcast group corresponding to mgid, takes a ref */
mcg = rxe_get_key(&rxe->mc_grp_pool, &dgid);
- if (!mcg)
+ if (IS_ERR(mcg) || !mcg)
goto err1; /* mcast group not registered */
spin_lock_bh(&mcg->mcg_lock);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -565,7 +565,7 @@ static int invalidate_key(struct rxe_qp *qp, u32 key)
if (key & IS_MW) {
mw = rxe_get_key(&rxe->mw_pool, &key);
- if (!mw) {
+ if (IS_ERR(mw) || !mw) {
pr_err("No mw for key %#x\n", key);
return -EINVAL;
}
@@ -573,7 +573,7 @@ static int invalidate_key(struct rxe_qp *qp, u32 key)
rxe_drop_ref(mw);
} else {
mr = rxe_get_key(&rxe->mr_pool, &key);
- if (!mr) {
+ if (IS_ERR(mr) || !mr) {
pr_err("No mr for key %#x\n", key);
return -EINVAL;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -442,7 +442,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
*/
if (rkey & IS_MW) {
mw = rxe_get_key(&rxe->mw_pool, &rkey);
- if (!mw) {
+ if (IS_ERR(mw) || !mw) {
pr_err_once("no MW found with rkey = 0x%08x\n", rkey);
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
@@ -466,7 +466,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
rxe_drop_ref(mw);
} else {
mr = rxe_get_key(&rxe->mr_pool, &rkey);
- if (!mr || (mr->rkey != rkey)) {
+ if (IS_ERR(mr) || !mr || (mr->rkey != rkey)) {
pr_err_once("no MR found with rkey = 0x%08x\n", rkey);
state = RESPST_ERR_RKEY_VIOLATION;
goto err;
@@ -794,7 +794,7 @@ static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
if (rkey & IS_MW) {
mw = rxe_get_key(&rxe->mw_pool, &rkey);
- if (!mw) {
+ if (IS_ERR(mw) || !mw) {
pr_err("No mw for rkey %#x\n", rkey);
goto err;
}
@@ -802,7 +802,7 @@ static int invalidate_rkey(struct rxe_qp *qp, u32 rkey)
rxe_drop_ref(mw);
} else {
mr = rxe_get_key(&rxe->mr_pool, &rkey);
- if (!mr || mr->ibmr.rkey != rkey) {
+ if (IS_ERR(mr) || !mr || mr->ibmr.rkey != rkey) {
pr_err("No mr for rkey %#x\n", rkey);
goto err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -397,8 +397,8 @@ static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
goto err1;
qp = rxe_alloc(&rxe->qp_pool);
- if (!qp) {
- err = -ENOMEM;
+ if (IS_ERR(qp)) {
+ err = PTR_ERR(qp);
goto err1;
}
@@ -872,9 +872,9 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_add_ref(pd);
mr = rxe_alloc(&rxe->mr_pool);
- if (!mr) {
+ if (IS_ERR(mr)) {
rxe_drop_ref(pd);
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(mr);
}
rxe_mr_init_dma(pd, access, mr);
@@ -905,8 +905,8 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
rxe_add_ref(pd);
mr = rxe_alloc(&rxe->mr_pool);
- if (!mr) {
- err = -ENOMEM;
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
goto err2;
}
@@ -956,8 +956,8 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
rxe_add_ref(pd);
mr = rxe_alloc(&rxe->mr_pool);
- if (!mr) {
- err = -ENOMEM;
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
goto err1;
}
v7:
 - rxe_pool.c has been extended to return errors from alloc, get_index
   and get_key using ERR_PTR. Add code to detect and handle these errors
   as needed.

Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
 drivers/infiniband/sw/rxe/rxe_mcast.c | 28 ++++++++++++++++++---------
 drivers/infiniband/sw/rxe/rxe_mr.c    |  2 +-
 drivers/infiniband/sw/rxe/rxe_mw.c    | 11 +++++++++++
 drivers/infiniband/sw/rxe/rxe_pool.c  |  6 +++---
 drivers/infiniband/sw/rxe/rxe_recv.c  |  4 +++-
 drivers/infiniband/sw/rxe/rxe_req.c   |  4 ++--
 drivers/infiniband/sw/rxe/rxe_resp.c  |  8 ++++----
 drivers/infiniband/sw/rxe/rxe_verbs.c | 16 +++++++--------
 8 files changed, 51 insertions(+), 28 deletions(-)
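
For reviewers less used to the ERR_PTR() convention, here is a minimal
caller-side sketch of the pattern the changelog asks callers to adopt.
It is illustration only, not part of the patch: example_mr_lookup() is a
made-up helper and the driver-local includes are omitted, while
rxe_get_key(), rxe_drop_ref(), struct rxe_dev and the mr_pool are the
same objects used in the hunks above. With the v7 pool API a lookup can
now yield a valid object, NULL ("no such key"), or an ERR_PTR() encoded
errno, and the three cases are handled separately:

	#include <linux/err.h>	/* IS_ERR(), PTR_ERR() */
	/* driver-local headers ("rxe.h" etc.) omitted for brevity */

	static int example_mr_lookup(struct rxe_dev *rxe, u32 lkey)
	{
		struct rxe_mr *mr;

		mr = rxe_get_key(&rxe->mr_pool, &lkey);
		if (IS_ERR(mr))		/* pool-level failure: propagate its errno */
			return PTR_ERR(mr);
		if (!mr)		/* no object with this key: pick an errno */
			return -EINVAL;

		/* ... use mr ... */

		rxe_drop_ref(mr);	/* the lookup took a reference */
		return 0;
	}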