diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -1,14 +1,14 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
+ * Copyright (c) 2022 Hewlett Packard Enterprise, Inc. All rights reserved.
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/
#include "rxe.h"
-#define RXE_POOL_TIMEOUT (200)
-#define RXE_POOL_MAX_TIMEOUTS (3)
-#define RXE_POOL_ALIGN (16)
+#define RXE_POOL_TIMEOUT (200) /* jiffies */
+#define RXE_POOL_ALIGN (64)
static const struct rxe_type_info {
const char *name;
@@ -90,6 +90,14 @@ static const struct rxe_type_info {
},
};
+/**
+ * rxe_pool_init - initialize a rxe object pool
+ * @rxe: rxe device the pool belongs to
+ * @pool: object pool
+ * @type: pool type
+ *
+ * Called from rxe_init()
+ */
void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
enum rxe_elem_type type)
{
@@ -113,6 +121,12 @@ void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
pool->limit.max = info->max_index;
}
+/**
+ * rxe_pool_cleanup - free any remaining pool resources
+ * @pool: object pool
+ *
+ * Called from rxe_dealloc()
+ */
void rxe_pool_cleanup(struct rxe_pool *pool)
{
struct rxe_pool_elem *elem;
@@ -136,24 +150,37 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
if (WARN_ON(elem_count || obj_count))
pr_debug("Freed %d indices and %d objects from pool %s\n",
- elem_count, obj_count, pool->name);
+ elem_count, obj_count, pool->name);
}
+/**
+ * rxe_alloc - allocate a new pool object
+ * @pool: object pool
+ *
+ * Context: process context (in_task()); may sleep.
+ * Returns: object on success else an ERR_PTR
+ */
void *rxe_alloc(struct rxe_pool *pool)
{
+ struct xarray *xa = &pool->xa;
struct rxe_pool_elem *elem;
void *obj;
- int err;
+ int err = -EINVAL;
if (WARN_ON(!(pool->flags & RXE_POOL_ALLOC)))
- return NULL;
+ goto err_out;
+
+ if (WARN_ON(!in_task()))
+ goto err_out;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto err_cnt;
+ goto err_dec;
obj = kzalloc(pool->elem_size, GFP_KERNEL);
- if (!obj)
- goto err_cnt;
+ if (!obj) {
+ err = -ENOMEM;
+ goto err_dec;
+ }
elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
@@ -162,7 +189,7 @@ void *rxe_alloc(struct rxe_pool *pool)
kref_init(&elem->ref_cnt);
init_completion(&elem->complete);
- err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
+ err = xa_alloc_cyclic(xa, &elem->index, NULL, pool->limit,
&pool->next, GFP_KERNEL);
if (err)
goto err_free;
@@ -171,38 +198,59 @@ void *rxe_alloc(struct rxe_pool *pool)
err_free:
kfree(obj);
-err_cnt:
+err_dec:
atomic_dec(&pool->num_elem);
- return NULL;
+err_out:
+ return ERR_PTR(err);
}
+/**
+ * __rxe_add_to_pool - add rdma-core allocated object to rxe object pool
+ * @pool: object pool
+ * @elem: rxe_pool_elem embedded in object
+ *
+ * Context: process context (in_task()); may sleep.
+ * Returns: 0 on success else an error
+ */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
- int err;
+ struct xarray *xa = &pool->xa;
+ int err = -EINVAL;
if (WARN_ON(pool->flags & RXE_POOL_ALLOC))
- return -EINVAL;
+ goto err_out;
+
+ if (WARN_ON(!in_task()))
+ goto err_out;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto err_cnt;
+ goto err_dec;
elem->pool = pool;
elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
init_completion(&elem->complete);
- err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
+ err = xa_alloc_cyclic(xa, &elem->index, NULL, pool->limit,
&pool->next, GFP_KERNEL);
if (err)
- goto err_cnt;
+ goto err_dec;
return 0;
-err_cnt:
+err_dec:
atomic_dec(&pool->num_elem);
- return -EINVAL;
+err_out:
+ return err;
}
+/**
+ * rxe_pool_get_index - find object in pool with given index
+ * @pool: object pool
+ * @index: index
+ *
+ * Returns: object on success else NULL
+ */
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
struct rxe_pool_elem *elem;
@@ -220,6 +268,12 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
return obj;
}
+/**
+ * rxe_elem_release - remove the object's index and signal completion
+ * @kref: kref embedded in pool element
+ *
+ * Context: ref count of pool object has reached zero.
+ */
static void rxe_elem_release(struct kref *kref)
{
struct rxe_pool_elem *elem = container_of(kref, typeof(*elem), ref_cnt);
@@ -234,6 +288,12 @@ static void rxe_elem_release(struct kref *kref)
complete(&elem->complete);
}
+/**
+ * __rxe_wait - put a ref on the object and wait for completion
+ * @elem: rxe_pool_elem embedded in object
+ *
+ * Returns: 0 if the object completed before the timeout, else an error
+ */
int __rxe_wait(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
@@ -244,12 +304,9 @@ int __rxe_wait(struct rxe_pool_elem *elem)
if (timeout) {
ret = wait_for_completion_timeout(&elem->complete, timeout);
- if (!ret) {
- pr_warn("Timed out waiting for %s#%d to complete\n",
+ if (WARN_ON(!ret)) {
+ pr_debug("Timed out waiting for %s#%d to complete\n",
pool->name, elem->index);
- if (++pool->timeouts >= RXE_POOL_MAX_TIMEOUTS)
- timeout = 0;
-
err = -EINVAL;
}
}
@@ -265,16 +322,34 @@ int __rxe_wait(struct rxe_pool_elem *elem)
return err;
}
+/**
+ * __rxe_get - take a ref on the object unless the ref count is zero
+ * @elem: rxe_pool_elem embedded in object
+ *
+ * Returns: 1 if a reference was added, else 0
+ */
int __rxe_get(struct rxe_pool_elem *elem)
{
return kref_get_unless_zero(&elem->ref_cnt);
}
+/**
+ * __rxe_put - put a ref on the object
+ * @elem: rxe_pool_elem embedded in object
+ *
+ * Returns: 1 if the ref count reached zero and the object was released, else 0
+ */
int __rxe_put(struct rxe_pool_elem *elem)
{
return kref_put(&elem->ref_cnt, rxe_elem_release);
}
+/**
+ * __rxe_show - enable looking up object from index
+ * @elem: rxe_pool_elem embedded in object
+ *
+ * Returns: 0 on success, else an error
+ */
int __rxe_show(struct rxe_pool_elem *elem)
{
struct xarray *xa = &elem->pool->xa;
@@ -290,6 +365,12 @@ int __rxe_show(struct rxe_pool_elem *elem)
return 0;
}
+/**
+ * __rxe_hide - disable looking up object from index
+ * @elem: rxe_pool_elem embedded in object
+ *
+ * Returns: 0 on success, else an error
+ */
int __rxe_hide(struct rxe_pool_elem *elem)
{
struct xarray *xa = &elem->pool->xa;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -906,8 +906,8 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
- if (!mr)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
rxe_get(pd);
rxe_mr_init_dma(pd, access, mr);
@@ -928,26 +928,22 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
struct rxe_mr *mr;
mr = rxe_alloc(&rxe->mr_pool);
- if (!mr) {
- err = -ENOMEM;
- goto err2;
- }
-
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
rxe_get(pd);
err = rxe_mr_init_user(pd, start, length, iova, access, mr);
if (err)
- goto err3;
+ goto err;
rxe_show(mr);
return &mr->ibmr;
-err3:
+err:
rxe_put(pd);
rxe_wait(mr);
-err2:
return ERR_PTR(err);
}
@@ -963,25 +959,22 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
return ERR_PTR(-EINVAL);
mr = rxe_alloc(&rxe->mr_pool);
- if (!mr) {
- err = -ENOMEM;
- goto err1;
- }
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
rxe_get(pd);
err = rxe_mr_init_fast(pd, max_num_sg, mr);
if (err)
- goto err2;
+ goto err;
rxe_show(mr);
return &mr->ibmr;
-err2:
+err:
rxe_put(pd);
rxe_wait(mr);
-err1:
return ERR_PTR(err);
}

Minor cleanup of rxe_pool.c. Add kernel-doc comment headers for the
subroutines, increase the alignment of pool elements from 16 to 64
bytes, and convert some printks to WARN_ON()s.

Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
---
 drivers/infiniband/sw/rxe/rxe_pool.c  | 129 +++++++++++++++++++++-----
 drivers/infiniband/sw/rxe/rxe_verbs.c |  27 ++----
 2 files changed, 115 insertions(+), 41 deletions(-)
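
Note: since rxe_alloc() now returns ERR_PTR(err) rather than NULL, callers
follow the usual kernel error-pointer idiom. A minimal sketch of that idiom
for reference; my_get_mr() is a hypothetical caller, not part of this patch:

#include <linux/err.h>

/* Hypothetical caller showing the ERR_PTR/IS_ERR/ERR_CAST pattern the
 * rxe_verbs.c hunks above rely on: the errno is encoded in the pointer
 * itself, so test with IS_ERR() instead of comparing against NULL, and
 * use ERR_CAST() to re-type the error pointer for the caller.
 */
static struct ib_mr *my_get_mr(struct rxe_dev *rxe)
{
	struct rxe_mr *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (IS_ERR(mr))
		return ERR_CAST(mr);	/* propagate the encoded errno */

	return &mr->ibmr;
}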
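
The __rxe_wait()/rxe_elem_release() pair is an instance of the common
kref-plus-completion teardown idiom. A generic sketch of that idiom under
hypothetical foo_* names (nothing rxe-specific is assumed):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>

struct foo {
	struct kref ref;
	struct completion released;
};

static void foo_init(struct foo *f)
{
	kref_init(&f->ref);		/* ref count starts at 1 */
	init_completion(&f->released);
}

/* kref release callback: runs when the last reference is dropped
 * and wakes the thread sleeping in foo_destroy().
 */
static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, ref);

	complete(&f->released);
}

/* Drop our own reference, then sleep until every other holder has
 * dropped theirs. wait_for_completion_timeout() returns 0 on timeout.
 */
static int foo_destroy(struct foo *f)
{
	kref_put(&f->ref, foo_release);
	if (!wait_for_completion_timeout(&f->released,
					 msecs_to_jiffies(200)))
		return -EINVAL;
	return 0;
}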
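
Index allocation in rxe_alloc() and __rxe_add_to_pool() uses
xa_alloc_cyclic(). A standalone sketch of that API; my_xa, my_next, and the
[16, 127] range are arbitrary illustrative values. One subtlety worth
remembering: xa_alloc_cyclic() returns 1 (not 0) when the allocation
succeeds after the cursor wraps, so only a negative return is a failure:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(my_xa);	/* xarray that allocates ids */
static u32 my_next;			/* cursor for cyclic allocation */

/* Store @entry at the next free index in [16, 127], wrapping around
 * when the top of the range is reached; the id comes back in @index.
 */
static int my_store(void *entry, u32 *index)
{
	int err;

	err = xa_alloc_cyclic(&my_xa, index, entry,
			      XA_LIMIT(16, 127), &my_next, GFP_KERNEL);
	return err < 0 ? err : 0;	/* 1 only means the cursor wrapped */
}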