@@ -15,18 +15,16 @@ static struct rxe_mc_grp *create_grp(struct rxe_dev *rxe,
int err;
struct rxe_mc_grp *grp;
- grp = rxe_alloc_locked(&rxe->mc_grp_pool);
+ grp = rxe_alloc_with_key_locked(&rxe->mc_grp_pool, mgid);
if (!grp)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&grp->qp_list);
spin_lock_init(&grp->mcg_lock);
grp->rxe = rxe;
- rxe_add_key_locked(grp, mgid);
err = rxe_mcast_add(rxe, mgid);
if (unlikely(err)) {
- rxe_drop_key_locked(grp);
rxe_drop_ref(grp);
return ERR_PTR(err);
}
@@ -174,6 +172,5 @@ void rxe_mc_cleanup(struct rxe_pool_entry *arg)
struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
struct rxe_dev *rxe = grp->rxe;
- rxe_drop_key(grp);
rxe_mcast_delete(rxe, &grp->mgid);
}
@@ -245,47 +245,6 @@ static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
return 0;
}
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
-{
- struct rxe_pool *pool = elem->pool;
- int err;
-
- memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
- err = rxe_insert_key(pool, elem);
-
- return err;
-}
-
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
-{
- struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- int err;
-
- write_lock_irqsave(&pool->pool_lock, flags);
- err = __rxe_add_key_locked(elem, key);
- write_unlock_irqrestore(&pool->pool_lock, flags);
-
- return err;
-}
-
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
-{
- struct rxe_pool *pool = elem->pool;
-
- rb_erase(&elem->key_node, &pool->key.tree);
-}
-
-void __rxe_drop_key(struct rxe_pool_entry *elem)
-{
- struct rxe_pool *pool = elem->pool;
- unsigned long flags;
-
- write_lock_irqsave(&pool->pool_lock, flags);
- __rxe_drop_key_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
-}
-
static int rxe_add_index(struct rxe_pool_entry *elem)
{
struct rxe_pool *pool = elem->pool;
@@ -342,6 +301,31 @@ void *rxe_alloc_locked(struct rxe_pool *pool)
return NULL;
}
+void *rxe_alloc_with_key_locked(struct rxe_pool *pool, void *key)
+{
+ struct rxe_pool_entry *elem;
+ u8 *obj;
+ int err;
+
+ obj = rxe_alloc_locked(pool);
+ if (!obj)
+ return NULL;
+
+ elem = (struct rxe_pool_entry *)(obj + pool->elem_offset);
+ memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
+ err = rxe_insert_key(pool, elem);
+ if (err) {
+ kfree(obj);
+ goto out_cnt;
+ }
+
+ return obj;
+
+out_cnt:
+ atomic_dec(&pool->num_elem);
+ return NULL;
+}
+
void *rxe_alloc(struct rxe_pool *pool)
{
struct rxe_pool_entry *elem;
@@ -383,6 +367,18 @@ void *rxe_alloc(struct rxe_pool *pool)
return NULL;
}
+void *rxe_alloc_with_key(struct rxe_pool *pool, void *key)
+{
+ unsigned long flags;
+ void *obj;
+
+ write_lock_irqsave(&pool->pool_lock, flags);
+ obj = rxe_alloc_with_key_locked(pool, key);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
+
+ return obj;
+}
+
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
{
unsigned long flags;
@@ -424,6 +420,9 @@ void rxe_elem_release(struct kref *kref)
if (pool->flags & RXE_POOL_INDEX)
rxe_drop_index(elem);
+ if (pool->flags & RXE_POOL_KEY)
+ rb_erase(&elem->key_node, &pool->key.tree);
+
if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
obj = elem->obj;
kfree(obj);
@@ -104,31 +104,15 @@ void *rxe_alloc_locked(struct rxe_pool *pool);
void *rxe_alloc(struct rxe_pool *pool);
+void *rxe_alloc_with_key_locked(struct rxe_pool *pool, void *key);
+
+void *rxe_alloc_with_key(struct rxe_pool *pool, void *key);
+
/* connect already allocated object to pool */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
-/* assign a key to a keyed object and insert object into
- * pool's rb tree holding and not holding pool_lock
- */
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
-
-#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
-
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key);
-
-#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
-
-/* remove elem from rb tree holding and not holding the pool_lock */
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
-
-#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
-
-void __rxe_drop_key(struct rxe_pool_entry *elem);
-
-#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
-
/* lookup an indexed object from index holding and not holding the pool_lock.
* takes a reference on object
*/
Currently adding and dropping a key from an rxe object requires API calls separate from allocating and freeing the object, but these are always performed together. This patch combines them into single APIs. This requires adding new rxe_alloc_with_key(_locked) APIs. By combining object allocation with key insertion inside a single locked sequence, and combining key removal with object release, the possibility of a race condition in which the object state and the key metadata state are inconsistent is removed.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_mcast.c |  5 +-
 drivers/infiniband/sw/rxe/rxe_pool.c  | 81 +++++++++++++--------------
 drivers/infiniband/sw/rxe/rxe_pool.h  | 24 ++------
 3 files changed, 45 insertions(+), 65 deletions(-)