diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -687,6 +687,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
if (atomic_read(&mr->num_mw) > 0)
return -EINVAL;
+ rxe_disable_lookup(mr);
rxe_put(mr);
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -25,6 +25,8 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
+ rxe_finalize(mw);
+
return 0;
}
@@ -32,6 +34,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
{
struct rxe_mw *mw = to_rmw(ibmw);
+ rxe_disable_lookup(mw);
rxe_put(mw);
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -140,7 +140,10 @@ void *rxe_alloc(struct rxe_pool *pool)
elem->obj = obj;
kref_init(&elem->ref_cnt);
- err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+ /* allocate index in array but leave pointer as NULL so the
+ * object can't be looked up until rxe_finalize() is called
+ */
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
&pool->next, GFP_KERNEL);
if (err)
goto err_free;
@@ -168,7 +171,7 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
- err = xa_alloc_cyclic(&pool->xa, &elem->index, elem, pool->limit,
+ err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
&pool->next, GFP_KERNEL);
if (err)
goto err_cnt;
@@ -202,8 +205,15 @@ static void rxe_elem_release(struct kref *kref)
{
struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
struct rxe_pool *pool = elem->pool;
+ struct xarray *xa = &pool->xa;
+ unsigned long flags;
- xa_erase(&pool->xa, elem->index);
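+ /* AHs may be destroyed in atomic context, so use the irq-safe
+ * lock variant here, matching the new __xa_store() callers
+ */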
+ xa_lock_irqsave(xa, flags);
+ __xa_erase(xa, elem->index);
+ xa_unlock_irqrestore(xa, flags);
if (pool->cleanup)
pool->cleanup(elem);
@@ -223,3 +233,31 @@ int __rxe_put(struct rxe_pool_elem *elem)
{
return kref_put(&elem->ref_cnt, rxe_elem_release);
}
+
+void __rxe_finalize(struct rxe_pool_elem *elem)
+{
+ struct xarray *xa = &elem->pool->xa;
+ unsigned long flags;
+ void *ret;
+
+ xa_lock_irqsave(xa, flags);
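+ /* the slot already exists from xa_alloc_cyclic() so no allocation
+ * occurs; GFP_ATOMIC since the lock is held with irqs disabled
+ */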
+ ret = __xa_store(xa, elem->index, elem, GFP_ATOMIC);
+ xa_unlock_irqrestore(xa, flags);
+ WARN_ON(xa_err(ret));
+}
+
+void __rxe_disable_lookup(struct rxe_pool_elem *elem)
+{
+ struct xarray *xa = &elem->pool->xa;
+ unsigned long flags;
+ void *ret;
+
+ xa_lock_irqsave(xa, flags);
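+ /* clear the slot so rxe_pool_get_index() can no longer find the object */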
+ ret = __xa_store(xa, elem->index, NULL, GFP_ATOMIC);
+ xa_unlock_irqrestore(xa, flags);
+ WARN_ON(xa_err(ret));
+}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -63,20 +63,23 @@ void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
-
#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
/* lookup an indexed object from index. takes a reference on object */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index);
int __rxe_get(struct rxe_pool_elem *elem);
-
#define rxe_get(obj) __rxe_get(&(obj)->elem)
int __rxe_put(struct rxe_pool_elem *elem);
-
#define rxe_put(obj) __rxe_put(&(obj)->elem)
#define rxe_read(obj) kref_read(&(obj)->elem.ref_cnt)
+void __rxe_finalize(struct rxe_pool_elem *elem);
+#define rxe_finalize(obj) __rxe_finalize(&(obj)->elem)
+
+void __rxe_disable_lookup(struct rxe_pool_elem *elem);
+#define rxe_disable_lookup(obj) __rxe_disable_lookup(&(obj)->elem)
+
#endif /* RXE_POOL_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -197,6 +197,8 @@ static int rxe_create_ah(struct ib_ah *ibah,
}
rxe_init_av(init_attr->ah_attr, &ah->av);
+ rxe_finalize(ah);
+
return 0;
}
@@ -228,6 +230,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
+ rxe_disable_lookup(ah);
rxe_put(ah);
return 0;
}
@@ -435,6 +438,7 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (err)
goto qp_init;
+ rxe_finalize(qp);
return 0;
qp_init:
@@ -487,6 +491,7 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct rxe_qp *qp = to_rqp(ibqp);
int ret;
+ rxe_disable_lookup(qp);
ret = rxe_qp_chk_destroy(qp);
if (ret)
return ret;
@@ -905,6 +910,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_get(pd);
rxe_mr_init_dma(pd, access, mr);
+ rxe_finalize(mr);
return &mr->ibmr;
}
@@ -933,6 +939,8 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
if (err)
goto err3;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err3:
@@ -965,6 +973,8 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (err)
goto err2;
+ rxe_finalize(mr);
+
return &mr->ibmr;
err2:
Currently the rdma_rxe driver has a security weakness: it assigns an
index to an object while the object is still only partially
initialized, which allows external actors to gain access to the object
by sending packets that refer to its index (e.g. qpn, rkey, etc.),
causing unpredictable results.

This patch adds two new APIs, rxe_finalize(obj) and
rxe_disable_lookup(obj), which enable and disable lookup of pool
objects from their indices using rxe_pool_get_index(). By default,
lookup of a newly created object is disabled. These APIs are applied
to the object types that carry indices (AH, QP, MR and MW):
rxe_finalize() is called in the create verbs once the object is fully
initialized, and rxe_disable_lookup() is called as early as possible
in the destroy verbs.

Note that the sequence

	rxe_disable_lookup(obj);
	rxe_put(obj);

in the destroy verbs stops lookups of the object immediately by
clearing the pointer in the xarray with __xa_store(), ahead of a
possibly long wait until the last reference is dropped somewhere else
and the xarray entry is erased with __xa_erase().

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_mr.c    |  1 +
 drivers/infiniband/sw/rxe/rxe_mw.c    |  3 +++
 drivers/infiniband/sw/rxe/rxe_pool.c  | 44 +++++++++++++++++++++++++---
 drivers/infiniband/sw/rxe/rxe_pool.h  |  9 ++++---
 drivers/infiniband/sw/rxe/rxe_verbs.c | 10 ++++++++
 5 files changed, 61 insertions(+), 6 deletions(-)
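
For reviewers: a rough sketch of the current lookup path, paraphrased
from rxe_pool_get_index() in rxe_pool.c (not part of this patch), to
show why storing NULL in the xarray slot is enough to block lookups:

	void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
	{
		struct rxe_pool_elem *elem;
		unsigned long flags;
		void *obj = NULL;

		xa_lock_irqsave(&pool->xa, flags);
		/* xa_load() returns NULL both before rxe_finalize()
		 * stores the element pointer and after
		 * rxe_disable_lookup() clears it
		 */
		elem = xa_load(&pool->xa, index);
		if (elem && kref_get_unless_zero(&elem->ref_cnt))
			obj = elem->obj;
		xa_unlock_irqrestore(&pool->xa, flags);

		return obj;
	}

Since the packet paths reach indexed objects only through this
function, an index whose slot holds NULL behaves exactly like an
unused index and the packet is dropped cleanly.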