@@ -28,7 +28,6 @@ void rxe_dealloc(struct ib_device *ib_dev)
rxe_pool_cleanup(&rxe->cq_pool);
rxe_pool_cleanup(&rxe->mr_pool);
rxe_pool_cleanup(&rxe->mw_pool);
- rxe_pool_cleanup(&rxe->mc_grp_pool);
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
@@ -157,15 +156,8 @@ static int rxe_init_pools(struct rxe_dev *rxe)
if (err)
goto err8;
- err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
- rxe->attr.max_mcast_grp);
- if (err)
- goto err9;
-
return 0;
-err9:
- rxe_pool_cleanup(&rxe->mw_pool);
err8:
rxe_pool_cleanup(&rxe->mr_pool);
err7:
@@ -43,6 +43,7 @@ void rxe_cq_cleanup(struct rxe_pool_elem *arg);
struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid);
int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
+void rxe_cleanup_mcg(struct kref *kref);
/* rxe_mmap.c */
struct rxe_mmap_info {
@@ -98,7 +98,7 @@ static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
}
if (node) {
- rxe_add_ref(mcg);
+ kref_get(&mcg->ref_cnt);
return mcg;
}
@@ -141,11 +141,13 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
if (unlikely(err))
return err;
+ kref_init(&mcg->ref_cnt);
memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
INIT_LIST_HEAD(&mcg->qp_list);
mcg->rxe = rxe;
+ mcg->index = rxe->mcg_next++;
- rxe_add_ref(mcg);
+ kref_get(&mcg->ref_cnt);
__rxe_insert_mcg(mcg);
@@ -163,7 +165,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
static int rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
struct rxe_mcg **mcgp)
{
- struct rxe_pool *pool = &rxe->mc_grp_pool;
struct rxe_mcg *mcg, *tmp;
int err;
@@ -178,7 +179,7 @@ static int rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
}
/* speculative alloc of mcg */
- mcg = rxe_alloc(pool);
+ mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
if (!mcg)
return -ENOMEM;
@@ -186,7 +187,7 @@ static int rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
/* re-check to see if someone else just added it */
tmp = __rxe_lookup_mcg(rxe, mgid);
if (tmp) {
- rxe_drop_ref(mcg);
+ kfree(mcg);
mcg = tmp;
goto out;
}
@@ -206,10 +207,53 @@ static int rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
err_dec:
atomic_dec(&rxe->mcg_num);
spin_unlock_bh(&rxe->mcg_lock);
- rxe_drop_ref(mcg);
+ kfree(mcg);
return err;
}
+/**
+ * rxe_cleanup_mcg - cleanup mcg for kref_put
+ * @kref: kref embedded in struct rxe_mcg
+ *
+ * Context: caller may or may not hold rxe->mcg_lock
+ */
+void rxe_cleanup_mcg(struct kref *kref)
+{
+ struct rxe_mcg *mcg = container_of(kref, typeof(*mcg), ref_cnt);
+
+ kfree(mcg);
+}
+
+/**
+ * __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock
+ * @mcg: the mcg object
+ *
+ * Context: caller is holding rxe->mcg_lock, no qp's are attached to mcg
+ */
+void __rxe_destroy_mcg(struct rxe_mcg *mcg)
+{
+ struct rxe_dev *rxe = mcg->rxe;
+
+ __rxe_remove_mcg(mcg);
+ rxe_mcast_delete(rxe, &mcg->mgid);
+ atomic_dec(&rxe->mcg_num);
+
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
+}
+
+/**
+ * rxe_destroy_mcg - destroy mcg object
+ * @mcg: the mcg object
+ *
+ * Context: no qp's are attached to mcg
+ */
+static void rxe_destroy_mcg(struct rxe_mcg *mcg)
+{
+ spin_lock_bh(&mcg->rxe->mcg_lock);
+ __rxe_destroy_mcg(mcg);
+ spin_unlock_bh(&mcg->rxe->mcg_lock);
+}
+
static int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
struct rxe_mcg *mcg)
{
@@ -259,35 +303,6 @@ static int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
}
-/**
- * __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock
- * @mcg: the mcg object
- *
- * Context: caller is holding rxe->mcg_lock, all refs to mcg are dropped
- * no qp's are attached to mcg
- */
-void __rxe_destroy_mcg(struct rxe_mcg *mcg)
-{
- __rxe_remove_mcg(mcg);
-
- rxe_drop_ref(mcg);
-
- rxe_mcast_delete(mcg->rxe, &mcg->mgid);
-}
-
-/**
- * rxe_destroy_mcg - destroy mcg object
- * @mcg: the mcg object
- *
- * Context: all refs to mcg are dropped, no qp's are attached to mcg
- */
-static void rxe_destroy_mcg(struct rxe_mcg *mcg)
-{
- spin_lock_bh(&mcg->rxe->mcg_lock);
- __rxe_destroy_mcg(mcg);
- spin_unlock_bh(&mcg->rxe->mcg_lock);
-}
-
static int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
union ib_gid *mgid)
{
@@ -308,14 +323,14 @@ static int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
atomic_dec(&qp->mcg_num);
spin_unlock_bh(&rxe->mcg_lock);
- rxe_drop_ref(mcg);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
kfree(mca);
return 0;
}
}
spin_unlock_bh(&rxe->mcg_lock);
- rxe_drop_ref(mcg);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
err1:
return -EINVAL;
}
@@ -336,7 +351,7 @@ int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
if (atomic_read(&mcg->qp_num) == 0)
rxe_destroy_mcg(mcg);
- rxe_drop_ref(mcg);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
return err;
}
@@ -79,11 +79,6 @@ static const struct rxe_type_info {
.min_index = RXE_MIN_MW_INDEX,
.max_index = RXE_MAX_MW_INDEX,
},
- [RXE_TYPE_MC_GRP] = {
- .name = "rxe-mcg",
- .size = sizeof(struct rxe_mcg),
- .elem_offset = offsetof(struct rxe_mcg, elem),
- },
};
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
@@ -21,7 +21,6 @@ enum rxe_elem_type {
RXE_TYPE_CQ,
RXE_TYPE_MR,
RXE_TYPE_MW,
- RXE_TYPE_MC_GRP,
RXE_NUM_TYPES, /* keep me last */
};
@@ -275,6 +275,8 @@ static void rxe_rcv_mcast_pkt(struct sk_buff *skb)
break;
}
spin_unlock_bh(&rxe->mcg_lock);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
+
nmax = n;
/* this is unreliable datagram service so we let
@@ -320,8 +322,6 @@ static void rxe_rcv_mcast_pkt(struct sk_buff *skb)
kfree(qp_array);
- rxe_drop_ref(mcg);
-
if (likely(!skb))
return;
@@ -352,12 +352,13 @@ struct rxe_mw {
};
struct rxe_mcg {
- struct rxe_pool_elem elem;
struct rb_node node;
+ struct kref ref_cnt;
struct rxe_dev *rxe;
struct list_head qp_list;
atomic_t qp_num;
union ib_gid mgid;
+ unsigned int index;
u32 qkey;
u16 pkey;
};
@@ -400,6 +401,7 @@ struct rxe_dev {
spinlock_t mcg_lock; /* guard multicast groups */
struct rb_root mcg_tree;
atomic_t mcg_num;
+ unsigned int mcg_next;
spinlock_t pending_lock; /* guard pending_mmaps */
struct list_head pending_mmaps;
Finish removing mcg from rxe pools. Replace rxe pool reference
counting with krefs. Replace rxe_alloc() with kzalloc().

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe.c       |  8 ---
 drivers/infiniband/sw/rxe/rxe_loc.h   |  1 +
 drivers/infiniband/sw/rxe/rxe_mcast.c | 91 ++++++++++++++++-----------
 drivers/infiniband/sw/rxe/rxe_pool.c  |  5 --
 drivers/infiniband/sw/rxe/rxe_pool.h  |  1 -
 drivers/infiniband/sw/rxe/rxe_recv.c  |  4 +-
 drivers/infiniband/sw/rxe/rxe_verbs.h |  4 +-
 7 files changed, 59 insertions(+), 55 deletions(-)
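
Note for reviewers (not part of the patch): the sketch below shows the bare
kref lifecycle that rxe_mcast.c switches to here, written against a made-up
struct foo and invented helper names rather than the rxe types. In the patch
itself, kref_init() runs in __rxe_init_mcg(), __rxe_lookup_mcg() takes the
extra reference handed back to callers, and rxe_cleanup_mcg() is the release
callback passed to kref_put(); lookups and tree membership are additionally
serialized by rxe->mcg_lock.

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref_cnt;
	/* ... payload ... */
};

/* release callback: invoked by kref_put() when the count drops to zero */
static void foo_release(struct kref *kref)
{
	struct foo *foo = container_of(kref, struct foo, ref_cnt);

	kfree(foo);
}

/* kzalloc() takes the place of rxe_alloc(); kref_init() starts the count at 1 */
static struct foo *foo_alloc(void)
{
	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	kref_init(&foo->ref_cnt);
	return foo;
}

/* every lookup that hands out a pointer takes its own reference ... */
static struct foo *foo_get(struct foo *foo)
{
	kref_get(&foo->ref_cnt);
	return foo;
}

/* ... and every user drops it when done; the final put frees the object */
static void foo_put(struct foo *foo)
{
	kref_put(&foo->ref_cnt, foo_release);
}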