
[for-next,v10,05/11] RDMA/rxe: Stop lookup of partially built objects

Message ID 20220225195750.37802-6-rpearsonhpe@gmail.com (mailing list archive)
State Superseded
Delegated to: Jason Gunthorpe
Series Fix race conditions in rxe_pool

Commit Message

Bob Pearson Feb. 25, 2022, 7:57 p.m. UTC
Currently the rdma_rxe driver has a security weakness: objects that
are only partially initialized are added to the index tables, which
allows external actors to gain access to them by sending packets
that refer to their index (e.g. qpn, rkey, etc.).

This patch adds a member to the pool element struct indicating
whether the object may be looked up from its index. The flag is set
only after the object is completely created and is cleared as early
as possible when the object is destroyed.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_mr.c    |  1 +
 drivers/infiniband/sw/rxe/rxe_mw.c    |  3 +++
 drivers/infiniband/sw/rxe/rxe_pool.c  |  2 +-
 drivers/infiniband/sw/rxe/rxe_pool.h  |  5 +++++
 drivers/infiniband/sw/rxe/rxe_verbs.c | 22 +++++++++++++++++++++-
 5 files changed, 31 insertions(+), 2 deletions(-)

Comments

Jason Gunthorpe Feb. 28, 2022, 5:01 p.m. UTC | #1
On Fri, Feb 25, 2022 at 01:57:45PM -0600, Bob Pearson wrote:
> Currently the rdma_rxe driver has a security weakness: objects that
> are only partially initialized are added to the index tables, which
> allows external actors to gain access to them by sending packets
> that refer to their index (e.g. qpn, rkey, etc.).
> 
> This patch adds a member to the pool element struct indicating
> whether the object may be looked up from its index. The flag is set
> only after the object is completely created and is cleared as early
> as possible when the object is destroyed.

Why do we have to put incompletely initialized pointers into the
xarray?

Either:

 1) Do the xa_alloc after everything is setup properly, splitting
    allocation and ID assignment.

 2) Do xa_alloc(XA_ZERO_ENTRY) at the start to reserve the ID
    then xa_store to set the pointer (can't fail) or xa_erase()
    to abort it
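
For (2) the shape would be something like this - a sketch only,
untested, and assuming the pool->xa, pool->limit and elem->index
names from this series. In an allocating xarray a NULL entry is kept
internally as XA_ZERO_ENTRY, so the index stays reserved while
xa_load() still returns NULL to readers:

	/* reserve an index; the object is not yet visible */
	err = xa_alloc(&pool->xa, &elem->index, NULL, pool->limit,
		       GFP_KERNEL);
	if (err)
		return err;

	/* ... fully initialize the object ... */

	/* publish: the slot already exists so this cannot fail, and
	 * the store is a release (rcu_assign_pointer internally) that
	 * pairs with the RCU dereference in xa_load()
	 */
	xa_store(&pool->xa, elem->index, elem, GFP_KERNEL);

	/* or, on the error path, abort and free the index */
	xa_erase(&pool->xa, elem->index);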

> @@ -81,4 +82,8 @@ int __rxe_drop_ref(struct rxe_pool_elem *elem);
>  
>  #define rxe_read_ref(obj) kref_read(&(obj)->elem.ref_cnt)
>  
> +#define rxe_enable(obj) ((obj)->elem.enabled = true)
> +
> +#define rxe_disable(obj) ((obj)->elem.enabled = false)

None of this is locked properly. A release/acquire needs to happen to
ensure all the stores that initialized the memory are visible to the
reader. Both of the above will ensure that happens.
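
For illustration, if the flag were kept anyway it would at minimum
need a paired release/acquire - again only a sketch, reusing the
enabled field this patch adds:

	/* writer: publish only after the object is fully initialized */
	smp_store_release(&elem->enabled, true);

	/* reader, in rxe_pool_get_index(): the acquire guarantees the
	 * initializing stores are visible before the object is used
	 */
	if (elem && smp_load_acquire(&elem->enabled) &&
	    kref_get_unless_zero(&elem->ref_cnt))
		obj = elem->obj;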

Jason

Patch

diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 35628b8a00b4..157cfb710a7e 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -689,6 +689,7 @@  int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 		return -EINVAL;
 	}
 
+	rxe_disable(mr);
 	mr->state = RXE_MR_STATE_INVALID;
 	rxe_drop_ref(mr_pd(mr));
 	rxe_drop_ref(mr);
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 7df36c40eec2..e4d207f24eba 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -25,6 +25,8 @@  int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
 			RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
 	spin_lock_init(&mw->lock);
 
+	rxe_enable(mw);
+
 	return 0;
 }
 
@@ -56,6 +58,7 @@  int rxe_dealloc_mw(struct ib_mw *ibmw)
 	struct rxe_mw *mw = to_rmw(ibmw);
 	struct rxe_pd *pd = to_rpd(ibmw->pd);
 
+	rxe_disable(mw);
 	spin_lock_bh(&mw->lock);
 	rxe_do_dealloc_mw(mw);
 	spin_unlock_bh(&mw->lock);
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index c298467337b8..19d8fb77b166 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -203,7 +203,7 @@  void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
 
 	spin_lock_irqsave(&pool->xa.xa_lock, flags);
 	elem = xa_load(&pool->xa, index);
-	if (elem && kref_get_unless_zero(&elem->ref_cnt))
+	if (elem && elem->enabled && kref_get_unless_zero(&elem->ref_cnt))
 		obj = elem->obj;
 	else
 		obj = NULL;
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 422987c90cb9..b963c300d636 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -29,6 +29,7 @@  struct rxe_pool_elem {
 	struct kref		ref_cnt;
 	struct list_head	list;
 	u32			index;
+	bool			enabled;
 };
 
 struct rxe_pool {
@@ -81,4 +82,8 @@  int __rxe_drop_ref(struct rxe_pool_elem *elem);
 
 #define rxe_read_ref(obj) kref_read(&(obj)->elem.ref_cnt)
 
+#define rxe_enable(obj) ((obj)->elem.enabled = true)
+
+#define rxe_disable(obj) ((obj)->elem.enabled = false)
+
 #endif /* RXE_POOL_H */
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index f0c5715ac500..077be3eb9763 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -150,6 +150,7 @@  static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct rxe_pd *pd = to_rpd(ibpd);
 
 	rxe_drop_ref(pd);
+
 	return 0;
 }
 
@@ -197,6 +198,9 @@  static int rxe_create_ah(struct ib_ah *ibah,
 	}
 
 	rxe_init_av(init_attr->ah_attr, &ah->av);
+
+	rxe_enable(ah);
+
 	return 0;
 }
 
@@ -228,6 +232,8 @@  static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
 {
 	struct rxe_ah *ah = to_rah(ibah);
 
+	rxe_disable(ah);
+
 	rxe_drop_ref(ah);
 	return 0;
 }
@@ -310,6 +316,8 @@  static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
 	if (err)
 		goto err2;
 
+	rxe_enable(srq);
+
 	return 0;
 
 err2:
@@ -368,6 +376,8 @@  static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 {
 	struct rxe_srq *srq = to_rsrq(ibsrq);
 
+	rxe_disable(srq);
+
 	if (srq->rq.queue)
 		rxe_queue_cleanup(srq->rq.queue);
 
@@ -439,6 +449,8 @@  static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
 	if (err)
 		goto qp_init;
 
+	rxe_enable(qp);
+
 	return 0;
 
 qp_init:
@@ -495,6 +507,7 @@  static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 	if (ret)
 		return ret;
 
+	rxe_disable(qp);
 	rxe_qp_destroy(qp);
 	rxe_drop_ref(qp);
 	return 0;
@@ -807,9 +820,10 @@  static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
 
-	rxe_cq_disable(cq);
 
+	rxe_cq_disable(cq);
 	rxe_drop_ref(cq);
+
 	return 0;
 }
 
@@ -905,6 +919,8 @@  static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 	rxe_add_ref(pd);
 	rxe_mr_init_dma(pd, access, mr);
 
+	rxe_enable(mr);
+
 	return &mr->ibmr;
 }
 
@@ -932,6 +948,8 @@  static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 	if (err)
 		goto err3;
 
+	rxe_enable(mr);
+
 	return &mr->ibmr;
 
 err3:
@@ -964,6 +982,8 @@  static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 	if (err)
 		goto err2;
 
+	rxe_enable(mr);
+
 	return &mr->ibmr;
 
 err2: