--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -31,7 +31,6 @@ void rxe_dealloc(struct ib_device *ib_dev)
rxe_pool_cleanup(&rxe->mr_pool);
rxe_pool_cleanup(&rxe->mw_pool);
rxe_pool_cleanup(&rxe->mc_grp_pool);
- rxe_pool_cleanup(&rxe->mc_elem_pool);
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
@@ -128,8 +127,6 @@ static void rxe_init_pools(struct rxe_dev *rxe)
rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW, rxe->attr.max_mw);
rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
rxe->attr.max_mcast_grp);
- rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
- rxe->attr.max_total_mcast_qp_attach);
}
/* initialize rxe device state */
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -63,14 +63,15 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
goto out;
}
- elem = rxe_alloc(&rxe->mc_elem_pool);
+ elem = kzalloc(sizeof(*elem), GFP_KERNEL);
if (!elem) {
err = -ENOMEM;
goto out;
}
- /* each qp holds a ref on the grp */
+ /* each elem holds a ref on the grp and the qp */
rxe_add_ref(grp);
+ rxe_add_ref(qp);
grp->num_qp++;
elem->qp = qp;
@@ -91,6 +92,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
{
struct rxe_mc_grp *grp;
struct rxe_mc_elem *elem, *tmp;
+ int ret = -EINVAL;
grp = rxe_pool_get_key(&rxe->mc_grp_pool, mgid);
if (!grp)
@@ -107,18 +109,21 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_unlock_bh(&grp->mcg_lock);
spin_unlock_bh(&qp->grp_lock);
- rxe_drop_ref(elem);
- rxe_drop_ref(grp); /* ref held by QP */
- rxe_drop_ref(grp); /* ref from get_key */
- return 0;
+ kfree(elem);
+ rxe_drop_ref(qp); /* ref held by elem */
+ rxe_drop_ref(grp); /* ref held by elem */
+ ret = 0;
+ goto out_drop_ref;
}
}
spin_unlock_bh(&grp->mcg_lock);
spin_unlock_bh(&qp->grp_lock);
+
+out_drop_ref:
rxe_drop_ref(grp); /* ref from get_key */
err1:
- return -EINVAL;
+ return ret;
}
void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
@@ -142,8 +147,9 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
list_del(&elem->qp_list);
grp->num_qp--;
spin_unlock_bh(&grp->mcg_lock);
+ rxe_drop_ref(qp);
rxe_drop_ref(grp);
- rxe_drop_ref(elem);
+ kfree(elem);
}
}
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -89,12 +89,6 @@ static const struct rxe_type_info {
.key_offset = offsetof(struct rxe_mc_grp, mgid),
.key_size = sizeof(union ib_gid),
},
- [RXE_TYPE_MC_ELEM] = {
- .name = "rxe-mc_elem",
- .size = sizeof(struct rxe_mc_elem),
- .elem_offset = offsetof(struct rxe_mc_elem, elem),
- .flags = RXE_POOL_ALLOC,
- },
};
void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -23,7 +23,6 @@ enum rxe_elem_type {
RXE_TYPE_MR,
RXE_TYPE_MW,
RXE_TYPE_MC_GRP,
- RXE_TYPE_MC_ELEM,
RXE_NUM_TYPES, /* keep me last */
};
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -364,7 +364,6 @@ struct rxe_mc_grp {
};
struct rxe_mc_elem {
- struct rxe_pool_elem elem;
struct list_head qp_list;
struct list_head grp_list;
struct rxe_qp *qp;
@@ -402,7 +401,6 @@ struct rxe_dev {
struct rxe_pool mr_pool;
struct rxe_pool mw_pool;
struct rxe_pool mc_grp_pool;
- struct rxe_pool mc_elem_pool;
spinlock_t pending_lock; /* guard pending_mmaps */
struct list_head pending_mmaps;

Currently rxe_mc_elem structs are treated as rdma objects, which is unneeded. This patch replaces rxe_alloc and rxe_drop_ref with kzalloc and kfree for these structs, which hold the associations between multicast groups and QPs.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe.c       |  3 ---
 drivers/infiniband/sw/rxe/rxe_mcast.c | 22 ++++++++++++++--------
 drivers/infiniband/sw/rxe/rxe_pool.c  |  6 ------
 drivers/infiniband/sw/rxe/rxe_pool.h  |  1 -
 drivers/infiniband/sw/rxe/rxe_verbs.h |  2 --
 5 files changed, 14 insertions(+), 20 deletions(-)
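
For context, here is a minimal user-space sketch of the element lifetime the patch ends up with. The names obj, mc_elem, attach and detach are stand-ins invented for illustration, not rxe driver APIs, and all locking and list handling is omitted; the point is only that the attach element becomes a plain kzalloc/kfree-style allocation with no embedded pool element, and that the element itself pins one reference on the multicast group and one on the QP, dropped again when it is detached. The sketch compiles standalone with any C compiler.

#include <stdio.h>
#include <stdlib.h>

struct obj {			/* stand-in for a refcounted rdma object (grp or qp) */
	const char *name;
	int refcnt;
};

static void obj_get(struct obj *o) { o->refcnt++; }

static void obj_put(struct obj *o)
{
	if (--o->refcnt == 0)
		printf("%s: last ref dropped\n", o->name);
}

struct mc_elem {		/* no embedded pool element, no refcount of its own */
	struct obj *grp;
	struct obj *qp;
};

/* attach: a plain allocation; the elem takes one ref on grp and one on qp */
static struct mc_elem *attach(struct obj *grp, struct obj *qp)
{
	struct mc_elem *elem = calloc(1, sizeof(*elem));

	if (!elem)
		return NULL;
	obj_get(grp);
	obj_get(qp);
	elem->grp = grp;
	elem->qp = qp;
	return elem;
}

/* detach: drop the two refs the elem held, then free it outright */
static void detach(struct mc_elem *elem)
{
	obj_put(elem->qp);
	obj_put(elem->grp);
	free(elem);
}

int main(void)
{
	struct obj grp = { .name = "mc_grp", .refcnt = 1 };
	struct obj qp  = { .name = "qp",     .refcnt = 1 };
	struct mc_elem *elem = attach(&grp, &qp);

	if (elem)
		detach(elem);
	obj_put(&qp);
	obj_put(&grp);
	return 0;
}

Before the patch the element also carried its own pool reference (rxe_alloc/rxe_drop_ref); removing that layer leaves kzalloc/kfree plus the two explicit references visible in the rxe_mcast.c hunks above.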