@@ -7,44 +7,56 @@
#include "rxe.h"
#include "rxe_loc.h"
-int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
- struct rxe_mc_grp **grp_p)
+/* caller should hold mc_grp_pool->pool_lock */
+static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe,
+ struct rxe_pool *pool,
+ union ib_gid *mgid)
{
int err;
struct rxe_mc_grp *grp;
- if (rxe->attr.max_mcast_qp_attach == 0) {
- err = -EINVAL;
- goto err1;
- }
-
- grp = rxe_get_key(&rxe->mc_grp_pool, mgid);
- if (grp)
- goto done;
-
- grp = rxe_alloc(&rxe->mc_grp_pool);
- if (!grp) {
- err = -ENOMEM;
- goto err1;
- }
+ grp = __alloc(&rxe->mc_grp_pool);
+ if (unlikely(!grp))
+ return NULL;
INIT_LIST_HEAD(&grp->qp_list);
spin_lock_init(&grp->mcg_lock);
grp->rxe = rxe;
-
- rxe_add_key(grp, mgid);
+ add_key(grp, mgid);
err = rxe_mcast_add(rxe, mgid);
- if (err)
- goto err2;
+ if (unlikely(err)) {
+ drop_key(grp);
+ rxe_drop_ref(grp);
+ return NULL;
+ }
+ return grp;
+}
+
+/* atomically look up or create a multicast group; takes pool_lock */
+int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
+ struct rxe_mc_grp **grp_p)
+{
+ int err = 0;
+ struct rxe_mc_grp *grp;
+ struct rxe_pool *pool = &rxe->mc_grp_pool;
+ unsigned long flags;
+
+ if (unlikely(rxe->attr.max_mcast_qp_attach == 0))
+ return -EINVAL;
+
+ write_lock_irqsave(&pool->pool_lock, flags);
+ grp = __get_key(pool, mgid);
+ if (grp)
+ goto done;
+
+ grp = create_grp(rxe, pool, mgid);
+ if (unlikely(!grp))
+ err = -ENOMEM;
done:
+ write_unlock_irqrestore(&pool->pool_lock, flags);
*grp_p = grp;
- return 0;
-
-err2:
- rxe_drop_ref(grp);
-err1:
return err;
}
@@ -52,13 +64,13 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
struct rxe_mc_grp *grp)
{
int err;
- struct rxe_mc_elem *elem;
+ struct rxe_mc_elem *mce;
/* check to see of the qp is already a member of the group */
spin_lock_bh(&qp->grp_lock);
spin_lock_bh(&grp->mcg_lock);
- list_for_each_entry(elem, &grp->qp_list, qp_list) {
- if (elem->qp == qp) {
+ list_for_each_entry(mce, &grp->qp_list, qp_list) {
+ if (mce->qp == qp) {
err = 0;
goto out;
}
@@ -69,8 +81,8 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
goto out;
}
- elem = rxe_alloc(&rxe->mc_elem_pool);
- if (!elem) {
+ mce = rxe_alloc(&rxe->mc_elem_pool);
+ if (!mce) {
err = -ENOMEM;
goto out;
}
@@ -79,11 +91,11 @@ int rxe_mcast_add_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
rxe_add_ref(grp);
grp->num_qp++;
- elem->qp = qp;
- elem->grp = grp;
+ mce->qp = qp;
+ mce->grp = grp;
- list_add(&elem->qp_list, &grp->qp_list);
- list_add(&elem->grp_list, &qp->grp_list);
+ list_add(&mce->qp_list, &grp->qp_list);
+ list_add(&mce->grp_list, &qp->grp_list);
err = 0;
out:
@@ -96,7 +108,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
union ib_gid *mgid)
{
struct rxe_mc_grp *grp;
- struct rxe_mc_elem *elem, *tmp;
+ struct rxe_mc_elem *mce, *tmp;
grp = rxe_get_key(&rxe->mc_grp_pool, mgid);
if (!grp)
@@ -105,15 +117,15 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_lock_bh(&qp->grp_lock);
spin_lock_bh(&grp->mcg_lock);
- list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) {
- if (elem->qp == qp) {
- list_del(&elem->qp_list);
- list_del(&elem->grp_list);
+ list_for_each_entry_safe(mce, tmp, &grp->qp_list, qp_list) {
+ if (mce->qp == qp) {
+ list_del(&mce->qp_list);
+ list_del(&mce->grp_list);
grp->num_qp--;
spin_unlock_bh(&grp->mcg_lock);
spin_unlock_bh(&qp->grp_lock);
- rxe_drop_ref(elem);
+ rxe_drop_ref(mce);
rxe_drop_ref(grp); /* ref held by QP */
rxe_drop_ref(grp); /* ref from get_key */
return 0;
@@ -130,7 +142,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
{
struct rxe_mc_grp *grp;
- struct rxe_mc_elem *elem;
+ struct rxe_mc_elem *mce;
while (1) {
spin_lock_bh(&qp->grp_lock);
@@ -138,24 +150,24 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
spin_unlock_bh(&qp->grp_lock);
break;
}
- elem = list_first_entry(&qp->grp_list, struct rxe_mc_elem,
+ mce = list_first_entry(&qp->grp_list, struct rxe_mc_elem,
grp_list);
- list_del(&elem->grp_list);
+ list_del(&mce->grp_list);
spin_unlock_bh(&qp->grp_lock);
- grp = elem->grp;
+ grp = mce->grp;
spin_lock_bh(&grp->mcg_lock);
- list_del(&elem->qp_list);
+ list_del(&mce->qp_list);
grp->num_qp--;
spin_unlock_bh(&grp->mcg_lock);
rxe_drop_ref(grp);
- rxe_drop_ref(elem);
+ rxe_drop_ref(mce);
}
}
-void rxe_mc_cleanup(struct rxe_pool_entry *arg)
+void rxe_mc_cleanup(struct rxe_pool_entry *pelem)
{
- struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
+ struct rxe_mc_grp *grp = container_of(pelem, struct rxe_mc_grp, pelem);
struct rxe_dev *rxe = grp->rxe;
rxe_drop_key(grp);
This patch does the following: - Cleans up multicast group create to use an atomic sequence of lookup followed by allocate. This fixes an error that occurred when two QPs were attempting to attach to the same mcast address at the same time. - Fixes a bug in rxe_mcast_get_grp not initializing err = 0. If the group is found in get_key the routine can return an uninitialized value to caller. - Changes the variable elem to mce (for mcast elem). This is less likely to confuse readers because of the ambiguity with elem as pool element which is also used. Signed-off-by: Bob Pearson <rpearson@hpe.com> --- drivers/infiniband/sw/rxe/rxe_mcast.c | 108 ++++++++++++++------------ 1 file changed, 60 insertions(+), 48 deletions(-)