@@ -689,6 +689,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return -EINVAL;
}

+ rxe_disable(mr);
mr->state = RXE_MR_STATE_INVALID;
rxe_drop_ref(mr_pd(mr));
rxe_drop_ref(mr);
@@ -25,6 +25,8 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
	RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);

+ rxe_enable(mw);
+
return 0;
}
@@ -56,6 +58,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
struct rxe_mw *mw = to_rmw(ibmw);
struct rxe_pd *pd = to_rpd(ibmw->pd);

+ rxe_disable(mw);
spin_lock_bh(&mw->lock);
rxe_do_dealloc_mw(mw);
spin_unlock_bh(&mw->lock);
@@ -203,7 +203,7 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
spin_lock_irqsave(&pool->xa.xa_lock, flags);
elem = xa_load(&pool->xa, index);
- if (elem && kref_get_unless_zero(&elem->ref_cnt))
+ if (elem && elem->enabled && kref_get_unless_zero(&elem->ref_cnt))
obj = elem->obj;
else
obj = NULL;
spin_unlock_irqrestore(&pool->xa.xa_lock, flags);
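
For reference, here is how the whole lookup routine reads once this hunk is
applied. This is a sketch reconstructed from the context lines above; the
declaration block and the final return are assumed to match the surrounding
rxe_pool.c and are not shown in the hunk:

void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
	struct rxe_pool_elem *elem;
	unsigned long flags;
	void *obj;

	spin_lock_irqsave(&pool->xa.xa_lock, flags);
	elem = xa_load(&pool->xa, index);

	/* Return the object only if it is present in the xarray, has
	 * been marked enabled (i.e. fully created), and its refcount
	 * has not already dropped to zero.
	 */
	if (elem && elem->enabled && kref_get_unless_zero(&elem->ref_cnt))
		obj = elem->obj;
	else
		obj = NULL;
	spin_unlock_irqrestore(&pool->xa.xa_lock, flags);

	return obj;
}
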
@@ -29,6 +29,7 @@ struct rxe_pool_elem {
struct kref ref_cnt;
struct list_head list;
u32 index;
+ bool enabled;
};

struct rxe_pool {
@@ -81,4 +82,8 @@ int __rxe_drop_ref(struct rxe_pool_elem *elem);
#define rxe_read_ref(obj) kref_read(&(obj)->elem.ref_cnt)
+#define rxe_enable(obj) ((obj)->elem.enabled = true)
+
+#define rxe_disable(obj) ((obj)->elem.enabled = false)
+
#endif /* RXE_POOL_H */
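
Taken together, the two new macros define a simple lifecycle contract for
every indexed object: rxe_enable() must be the last step of a successful
create path, and rxe_disable() the first step of destroy. A minimal sketch
of the pattern the rxe_verbs.c hunks below follow (rxe_foo, init_foo() and
cleanup_foo() are hypothetical stand-ins, not part of this patch):

static int create_foo(struct rxe_foo *foo)
{
	int err;

	err = init_foo(foo);		/* hypothetical init helper */
	if (err)
		return err;	/* never enabled, so never reachable by index */

	rxe_enable(foo);	/* last step: index lookups may now succeed */
	return 0;
}

static void destroy_foo(struct rxe_foo *foo)
{
	rxe_disable(foo);	/* first step: new index lookups now fail */
	cleanup_foo(foo);	/* hypothetical cleanup helper */
	rxe_drop_ref(foo);	/* holders of earlier references still pin it */
}
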
@@ -150,6 +150,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct rxe_pd *pd = to_rpd(ibpd);

rxe_drop_ref(pd);
+
return 0;
}
@@ -197,6 +198,9 @@ static int rxe_create_ah(struct ib_ah *ibah,
}
rxe_init_av(init_attr->ah_attr, &ah->av);
+
+ rxe_enable(ah);
+
return 0;
}
@@ -228,6 +232,8 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);

+ rxe_disable(ah);
+
rxe_drop_ref(ah);
return 0;
}
@@ -310,6 +316,8 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (err)
goto err2;
+ rxe_enable(srq);
+
return 0;
err2:
@@ -368,6 +376,8 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
struct rxe_srq *srq = to_rsrq(ibsrq);

+ rxe_disable(srq);
+
if (srq->rq.queue)
rxe_queue_cleanup(srq->rq.queue);
@@ -439,6 +449,8 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (err)
goto qp_init;
+ rxe_enable(qp);
+
return 0;
qp_init:
@@ -495,6 +507,7 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
if (ret)
return ret;
+ rxe_disable(qp);
rxe_qp_destroy(qp);
rxe_drop_ref(qp);
return 0;
@@ -807,9 +820,10 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct rxe_cq *cq = to_rcq(ibcq);

- rxe_cq_disable(cq);
+ rxe_disable(cq);

rxe_drop_ref(cq);
+
return 0;
}
@@ -905,6 +919,8 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_add_ref(pd);
rxe_mr_init_dma(pd, access, mr);
+ rxe_enable(mr);
+
return &mr->ibmr;
}
@@ -932,6 +948,8 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
if (err)
goto err3;
+ rxe_enable(mr);
+
return &mr->ibmr;
err3:
@@ -964,6 +982,8 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (err)
goto err2;
+ rxe_enable(mr);
+
return &mr->ibmr;
err2:
Currently the rdma_rxe driver has a security weakness: objects that are
only partially initialized are added to indices, which allows external
actors to gain access to them by sending packets that refer to their
index (e.g. qpn, rkey, etc.). This patch adds a member to the pool
element struct indicating whether the object may be looked up from its
index. The flag is set only after the object is completely created and
cleared as early as possible when the object is destroyed.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_mr.c    |  1 +
 drivers/infiniband/sw/rxe/rxe_mw.c    |  3 +++
 drivers/infiniband/sw/rxe/rxe_pool.c  |  2 +-
 drivers/infiniband/sw/rxe/rxe_pool.h  |  5 +++++
 drivers/infiniband/sw/rxe/rxe_verbs.c | 22 +++++++++++++++++++++-
 5 files changed, 31 insertions(+), 2 deletions(-)
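
To make the effect concrete, consider the responder side resolving a QPN
from an incoming packet. The wrapper below is a hypothetical illustration
only; rxe_pool_get_index() and the qp_pool member of struct rxe_dev are
real, but this exact call site is not part of the patch:

/* With this patch, a QP that is still mid-creation, or that
 * rxe_destroy_qp() has already disabled, is invisible here: the
 * lookup returns NULL and the packet can be dropped instead of
 * touching a partially initialized object.
 */
static struct rxe_qp *lookup_qp(struct rxe_dev *rxe, u32 qpn)
{
	return rxe_pool_get_index(&rxe->qp_pool, qpn);
}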