@@ -8,7 +8,7 @@
#define RXE_POOL_TIMEOUT (200)
#define RXE_POOL_MAX_TIMEOUTS (3)
-#define RXE_POOL_ALIGN (16)
+#define RXE_POOL_ALIGN (64)
static const struct rxe_type_info {
const char *name;
@@ -120,24 +120,35 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
WARN_ON(!xa_empty(&pool->xa));
}
+/**
+ * rxe_alloc - allocate a new pool object
+ * @pool: object pool
+ *
+ * Context: Process context (in_task()); may sleep.
+ * Returns: object on success else an ERR_PTR
+ */
void *rxe_alloc(struct rxe_pool *pool)
{
struct rxe_pool_elem *elem;
void *obj;
- int err;
+ int err = -EINVAL;
if (WARN_ON(!(pool->flags & RXE_POOL_ALLOC)))
- return NULL;
+ goto err_out;
+
+ if (WARN_ON(!in_task()))
+ goto err_out;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto err_cnt;
+ goto err_dec;
obj = kzalloc(pool->elem_size, GFP_KERNEL);
- if (!obj)
- goto err_cnt;
+ if (!obj) {
+ err = -ENOMEM;
+ goto err_dec;
+ }
elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
-
elem->pool = pool;
elem->obj = obj;
kref_init(&elem->ref_cnt);
@@ -154,20 +165,32 @@ void *rxe_alloc(struct rxe_pool *pool)
err_free:
kfree(obj);
-err_cnt:
+err_dec:
atomic_dec(&pool->num_elem);
- return NULL;
+err_out:
+ return ERR_PTR(err);
}
+/**
+ * __rxe_add_to_pool - add rdma-core allocated object to rxe object pool
+ * @pool: object pool
+ * @elem: rxe_pool_elem embedded in object
+ *
+ * Context: Process context (in_task()); may sleep.
+ * Returns: 0 on success else an error
+ */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
- int err;
+ int err = -EINVAL;
if (WARN_ON(pool->flags & RXE_POOL_ALLOC))
- return -EINVAL;
+ goto err_out;
+
+ if (WARN_ON(!in_task()))
+ goto err_out;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto err_cnt;
+ goto err_dec;
elem->pool = pool;
elem->obj = (u8 *)elem - pool->elem_offset;
@@ -177,15 +200,23 @@ int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
&pool->next, GFP_KERNEL);
if (err)
- goto err_cnt;
+ goto err_dec;
return 0;
-err_cnt:
+err_dec:
atomic_dec(&pool->num_elem);
- return -EINVAL;
+err_out:
+ return err;
}
+/**
+ * rxe_pool_get_index - find object in pool with given index
+ * @pool: object pool
+ * @index: index
+ *
+ * Returns: object on success else NULL
+ */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
struct rxe_pool_elem *elem;
@@ -248,16 +279,32 @@ int __rxe_cleanup(struct rxe_pool_elem *elem)
return err;
}
+/**
+ * __rxe_get - takes a ref on the object unless ref count is zero
+ * @elem: rxe_pool_elem embedded in object
+ *
+ * Returns: 1 if a reference was taken, else 0
+ */
int __rxe_get(struct rxe_pool_elem *elem)
{
return kref_get_unless_zero(&elem->ref_cnt);
}
+/**
+ * __rxe_put - puts a ref on the object
+ * @elem: rxe_pool_elem embedded in object
+ *
+ * Returns: 1 if the ref count reached zero and the object was released, else 0
+ */
int __rxe_put(struct rxe_pool_elem *elem)
{
return kref_put(&elem->ref_cnt, rxe_elem_release);
}
+/**
+ * __rxe_finalize - enable looking up object from index
+ * @elem: rxe_pool_elem embedded in object
+ */
void __rxe_finalize(struct rxe_pool_elem *elem)
{
struct xarray *xa = &elem->pool->xa;
@@ -270,6 +317,10 @@ void __rxe_finalize(struct rxe_pool_elem *elem)
WARN_ON(xa_err(ret));
}
+/**
+ * __rxe_disable_lookup - disable looking up object from index
+ * @elem: rxe_pool_elem embedded in object
+ */
void __rxe_disable_lookup(struct rxe_pool_elem *elem)
{
struct xarray *xa = &elem->pool->xa;
Minor cleanup of rxe_pool.c. Add document comment headers for the
subroutines. Increase alignment for pool elements. Convert some
printk's to WARN_ON's.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_pool.c | 81 ++++++++++++++++++++++------
 1 file changed, 66 insertions(+), 15 deletions(-)
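
Review note, not part of the patch: since rxe_alloc() now returns an
ERR_PTR() on failure instead of NULL, callers would switch from NULL
checks to IS_ERR()/PTR_ERR() and can propagate the encoded errno. A
minimal caller-side sketch follows; the function name
example_create_mr(), the mr_pool member, and the struct rxe_mr layout
(an embedded struct rxe_pool_elem named "elem") are assumptions for
illustration only and are not touched by this patch.

/*
 * Illustrative sketch only -- not part of this patch. Assumes the
 * usual <linux/err.h> helpers and that struct rxe_mr embeds a
 * struct rxe_pool_elem named "elem"; adjust names to the real caller.
 */
static struct rxe_mr *example_create_mr(struct rxe_dev *rxe)
{
	struct rxe_mr *mr;

	/* rxe_alloc() now WARNs unless called from task context */
	mr = rxe_alloc(&rxe->mr_pool);
	if (IS_ERR(mr))
		return mr;	/* propagate the encoded errno, not a bare -ENOMEM */

	/* ... driver-specific initialization of mr ... */

	/* make the new object visible to rxe_pool_get_index() lookups */
	__rxe_finalize(&mr->elem);

	return mr;
}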