@@ -133,8 +133,6 @@ int rxe_pool_init(
atomic_set(&pool->num_elem, 0);
- rwlock_init(&pool->pool_lock);
-
if (info->flags & RXE_POOL_XARRAY) {
xa_init_flags(&pool->xarray.xa, XA_FLAGS_ALLOC);
pool->xarray.limit.max = info->max_index;
@@ -292,9 +290,9 @@ static void *__rxe_alloc_locked(struct rxe_pool *pool)
elem->obj = obj;
if (pool->flags & RXE_POOL_XARRAY) {
- err = xa_alloc_cyclic_bh(&pool->xarray.xa, &elem->index, elem,
- pool->xarray.limit,
- &pool->xarray.next, GFP_KERNEL);
+ err = __xa_alloc_cyclic(&pool->xarray.xa, &elem->index, elem,
+ pool->xarray.limit,
+ &pool->xarray.next, GFP_ATOMIC);
if (err)
goto err;
}
@@ -359,9 +357,9 @@ void *rxe_alloc(struct rxe_pool *pool)
{
void *obj;
- write_lock_bh(&pool->pool_lock);
+ xa_lock_bh(&pool->xarray.xa);
obj = rxe_alloc_locked(pool);
- write_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
return obj;
}
@@ -370,9 +368,9 @@ void *rxe_alloc_with_key(struct rxe_pool *pool, void *key)
{
void *obj;
- write_lock_bh(&pool->pool_lock);
+ xa_lock_bh(&pool->xarray.xa);
obj = rxe_alloc_with_key_locked(pool, key);
- write_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
return obj;
}
@@ -381,7 +379,7 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
int err;
- write_lock_bh(&pool->pool_lock);
+ xa_lock_bh(&pool->xarray.xa);
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto err;
@@ -389,9 +387,9 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
elem->obj = (u8 *)elem - pool->elem_offset;
if (pool->flags & RXE_POOL_XARRAY) {
- err = xa_alloc_cyclic_bh(&pool->xarray.xa, &elem->index, elem,
- pool->xarray.limit,
- &pool->xarray.next, GFP_KERNEL);
+ err = __xa_alloc_cyclic(&pool->xarray.xa, &elem->index, elem,
+ pool->xarray.limit,
+ &pool->xarray.next, GFP_ATOMIC);
if (err)
goto err;
}
@@ -403,13 +401,13 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
}
refcount_set(&elem->refcnt, 1);
- write_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
return 0;
err:
atomic_dec(&pool->num_elem);
- write_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
return -EINVAL;
}
@@ -442,9 +440,9 @@ static void *__rxe_get_index(struct rxe_pool *pool, u32 index)
{
void *obj;
- read_lock_bh(&pool->pool_lock);
+ xa_lock_bh(&pool->xarray.xa);
obj = __rxe_get_index_locked(pool, index);
- read_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
return obj;
}
@@ -465,9 +463,9 @@ static void *__rxe_get_xarray(struct rxe_pool *pool, u32 index)
{
void *obj;
- read_lock_bh(&pool->pool_lock);
+ xa_lock_bh(&pool->xarray.xa);
obj = __rxe_get_xarray_locked(pool, index);
- read_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
return obj;
}
@@ -523,9 +521,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
void *obj;
- read_lock_bh(&pool->pool_lock);
+ xa_lock_bh(&pool->xarray.xa);
obj = rxe_pool_get_key_locked(pool, key);
- read_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
return obj;
}
@@ -546,9 +544,9 @@ int __rxe_add_ref(struct rxe_pool_elem *elem)
struct rxe_pool *pool = elem->pool;
int ret;
- read_lock_bh(&pool->pool_lock);
+ xa_lock_bh(&pool->xarray.xa);
ret = __rxe_add_ref_locked(elem);
- read_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
return ret;
}
@@ -569,9 +567,9 @@ int __rxe_drop_ref(struct rxe_pool_elem *elem)
struct rxe_pool *pool = elem->pool;
int ret;
- read_lock_bh(&pool->pool_lock);
+ xa_lock_bh(&pool->xarray.xa);
ret = __rxe_drop_ref_locked(elem);
- read_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
return ret;
}
@@ -584,7 +582,7 @@ static int __rxe_fini(struct rxe_pool_elem *elem)
done = refcount_dec_if_one(&elem->refcnt);
if (done) {
if (pool->flags & RXE_POOL_XARRAY)
- xa_erase(&pool->xarray.xa, elem->index);
+ __xa_erase(&pool->xarray.xa, elem->index);
if (pool->flags & RXE_POOL_INDEX)
rxe_drop_index(elem);
if (pool->flags & RXE_POOL_KEY)
@@ -621,9 +619,9 @@ int __rxe_fini_ref(struct rxe_pool_elem *elem)
struct rxe_pool *pool = elem->pool;
int ret;
- read_lock_bh(&pool->pool_lock);
+ xa_lock_bh(&pool->xarray.xa);
ret = __rxe_fini(elem);
- read_unlock_bh(&pool->pool_lock);
+ xa_unlock_bh(&pool->xarray.xa);
if (!ret) {
if (pool->cleanup)
In rxe_pool.c the xa_alloc_bh() and xa_erase() families of calls
already take xa_lock internally; xa_alloc_bh(), for example, expands
to roughly

	spin_lock_bh()
	__xa_alloc()
	spin_unlock_bh()

so calling them while holding pool_lock double locks every operation.
Replace pool_lock by xa_lock: take xa_lock in all the places that were
previously protected by pool_lock and switch to the pre-locked
__xa_alloc_cyclic() and __xa_erase() so each path takes the lock only
once. This is a performance improvement. Since the cyclic allocation
now runs with xa_lock held and bottom halves disabled, pass GFP_ATOMIC
so the allocator cannot sleep.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_pool.c | 54 ++++++++++++++--------------
 1 file changed, 26 insertions(+), 28 deletions(-)
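
For reviewers, a minimal before/after sketch of the locking change.
It is illustrative only, not part of the patch; it reuses this file's
field names, and the surrounding function context is assumed:

	/*
	 * Before: pool_lock is taken here, then xa_alloc_cyclic_bh()
	 * takes xa_lock_bh() again internally -- two spinlock round
	 * trips per allocation.
	 */
	write_lock_bh(&pool->pool_lock);
	err = xa_alloc_cyclic_bh(&pool->xarray.xa, &elem->index, elem,
				 pool->xarray.limit, &pool->xarray.next,
				 GFP_KERNEL);
	write_unlock_bh(&pool->pool_lock);

	/*
	 * After: take xa_lock_bh() once and call the pre-locked
	 * __xa_alloc_cyclic(), which expects the caller to hold
	 * xa_lock.  GFP_ATOMIC because the allocation now runs with
	 * bottom halves disabled.
	 */
	xa_lock_bh(&pool->xarray.xa);
	err = __xa_alloc_cyclic(&pool->xarray.xa, &elem->index, elem,
				pool->xarray.limit, &pool->xarray.next,
				GFP_ATOMIC);
	xa_unlock_bh(&pool->xarray.xa);

The same rule applies on teardown: with xa_lock_bh already held,
__xa_erase() must be used rather than xa_erase(), which would spin on
xa_lock a second time and deadlock.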