diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -177,18 +177,18 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
goto out;
}
- pool->max_index = max;
- pool->min_index = min;
+ pool->index.max_index = max;
+ pool->index.min_index = min;
size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
- pool->table = kmalloc(size, GFP_KERNEL);
- if (!pool->table) {
+ pool->index.table = kmalloc(size, GFP_KERNEL);
+ if (!pool->index.table) {
err = -ENOMEM;
goto out;
}
- pool->table_size = size;
- bitmap_zero(pool->table, max - min + 1);
+ pool->index.table_size = size;
+ bitmap_zero(pool->index.table, max - min + 1);
out:
return err;
@@ -210,7 +210,8 @@ int rxe_pool_init(
pool->max_elem = max_elem;
pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
pool->flags = rxe_type_info[type].flags;
- pool->tree = RB_ROOT;
+ pool->index.tree = RB_ROOT;
+ pool->key.tree = RB_ROOT;
pool->cleanup = rxe_type_info[type].cleanup;
atomic_set(&pool->num_elem, 0);
@@ -228,8 +229,8 @@ int rxe_pool_init(
}
if (rxe_type_info[type].flags & RXE_POOL_KEY) {
- pool->key_offset = rxe_type_info[type].key_offset;
- pool->key_size = rxe_type_info[type].key_size;
+ pool->key.key_offset = rxe_type_info[type].key_offset;
+ pool->key.key_size = rxe_type_info[type].key_size;
}
pool->state = RXE_POOL_STATE_VALID;
@@ -243,7 +244,7 @@ static void rxe_pool_release(struct kref *kref)
struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
pool->state = RXE_POOL_STATE_INVALID;
- kfree(pool->table);
+ kfree(pool->index.table);
}
static void rxe_pool_put(struct rxe_pool *pool)
@@ -268,27 +269,27 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
static u32 alloc_index(struct rxe_pool *pool)
{
u32 index;
- u32 range = pool->max_index - pool->min_index + 1;
+ u32 range = pool->index.max_index - pool->index.min_index + 1;
- index = find_next_zero_bit(pool->table, range, pool->last);
+ index = find_next_zero_bit(pool->index.table, range, pool->index.last);
if (index >= range)
- index = find_first_zero_bit(pool->table, range);
+ index = find_first_zero_bit(pool->index.table, range);
WARN_ON_ONCE(index >= range);
- set_bit(index, pool->table);
- pool->last = index;
- return index + pool->min_index;
+ set_bit(index, pool->index.table);
+ pool->index.last = index;
+ return index + pool->index.min_index;
}
static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
- struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node **link = &pool->index.tree.rb_node;
struct rb_node *parent = NULL;
struct rxe_pool_entry *elem;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, node);
+ elem = rb_entry(parent, struct rxe_pool_entry, index_node);
if (elem->index == new->index) {
pr_warn("element already exists!\n");
@@ -301,25 +302,25 @@ static void insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
link = &(*link)->rb_right;
}
- rb_link_node(&new->node, parent, link);
- rb_insert_color(&new->node, &pool->tree);
+ rb_link_node(&new->index_node, parent, link);
+ rb_insert_color(&new->index_node, &pool->index.tree);
out:
return;
}
static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
{
- struct rb_node **link = &pool->tree.rb_node;
+ struct rb_node **link = &pool->key.tree.rb_node;
struct rb_node *parent = NULL;
struct rxe_pool_entry *elem;
int cmp;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, node);
+ elem = rb_entry(parent, struct rxe_pool_entry, key_node);
- cmp = memcmp((u8 *)elem + pool->key_offset,
- (u8 *)new + pool->key_offset, pool->key_size);
+ cmp = memcmp((u8 *)elem + pool->key.key_offset,
+ (u8 *)new + pool->key.key_offset, pool->key.key_size);
if (cmp == 0) {
pr_warn("key already exists!\n");
@@ -332,8 +333,8 @@ static void insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
link = &(*link)->rb_right;
}
- rb_link_node(&new->node, parent, link);
- rb_insert_color(&new->node, &pool->tree);
+ rb_link_node(&new->key_node, parent, link);
+ rb_insert_color(&new->key_node, &pool->key.tree);
out:
return;
}
@@ -345,7 +346,7 @@ void rxe_add_key(void *arg, void *key)
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
+ memcpy((u8 *)elem + pool->key.key_offset, key, pool->key.key_size);
insert_key(pool, elem);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
@@ -357,7 +358,7 @@ void rxe_drop_key(void *arg)
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- rb_erase(&elem->node, &pool->tree);
+ rb_erase(&elem->key_node, &pool->key.tree);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
@@ -380,8 +381,8 @@ void rxe_drop_index(void *arg)
unsigned long flags;
write_lock_irqsave(&pool->pool_lock, flags);
- clear_bit(elem->index - pool->min_index, pool->table);
- rb_erase(&elem->node, &pool->tree);
+ clear_bit(elem->index - pool->index.min_index, pool->index.table);
+ rb_erase(&elem->index_node, &pool->index.tree);
write_unlock_irqrestore(&pool->pool_lock, flags);
}
@@ -485,10 +486,10 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
if (pool->state != RXE_POOL_STATE_VALID)
goto out;
- node = pool->tree.rb_node;
+ node = pool->index.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, node);
+ elem = rb_entry(node, struct rxe_pool_entry, index_node);
if (elem->index > index)
node = node->rb_left;
@@ -517,13 +518,13 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
if (pool->state != RXE_POOL_STATE_VALID)
goto out;
- node = pool->tree.rb_node;
+ node = pool->key.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, node);
+ elem = rb_entry(node, struct rxe_pool_entry, key_node);
- cmp = memcmp((u8 *)elem + pool->key_offset,
- key, pool->key_size);
+ cmp = memcmp((u8 *)elem + pool->key.key_offset,
+ key, pool->key.key_size);
if (cmp > 0)
node = node->rb_left;
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -84,8 +84,11 @@ struct rxe_pool_entry {
struct kref ref_cnt;
struct list_head list;
- /* only used if indexed or keyed */
- struct rb_node node;
+ /* only used if keyed */
+ struct rb_node key_node;
+
+ /* only used if indexed */
+ struct rb_node index_node;
u32 index;
};
@@ -102,15 +105,22 @@ struct rxe_pool {
unsigned int max_elem;
atomic_t num_elem;
- /* only used if indexed or keyed */
- struct rb_root tree;
- unsigned long *table;
- size_t table_size;
- u32 max_index;
- u32 min_index;
- u32 last;
- size_t key_offset;
- size_t key_size;
+ /* only used if indexed */
+ struct {
+ struct rb_root tree;
+ unsigned long *table;
+ size_t table_size;
+ u32 last;
+ u32 max_index;
+ u32 min_index;
+ } index;
+
+ /* only used if keyed */
+ struct {
+ struct rb_root tree;
+ size_t key_offset;
+ size_t key_size;
+ } key;
};
/* initialize slab caches for managed objects */
Allow both an index and a key to exist at the same time for objects in
the pools.  Previously an object was limited to one or the other.  This
will support allowing the keys on MWs to change.

Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
 drivers/infiniband/sw/rxe/rxe_pool.c | 73 ++++++++++++++--------------
 drivers/infiniband/sw/rxe/rxe_pool.h | 32 +++++++-----
 2 files changed, 58 insertions(+), 47 deletions(-)
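
For illustration only, not part of the patch: a minimal sketch of how a pool
object such as an MW could now sit in both rb-trees at once, using the
existing rxe_add_index()/rxe_add_key()/rxe_drop_key() helpers touched above.
The names prefixed with "example_" are hypothetical and exist only for this
sketch.

/*
 * Illustrative sketch, not part of this patch.  With separate index_node
 * and key_node members in struct rxe_pool_entry, one element can be
 * linked into pool->index.tree and pool->key.tree at the same time.
 * "example_mw", "example_make_mw_visible" and "example_change_mw_key"
 * are made-up names; only the rxe_* helpers come from the driver.
 */
struct example_mw {
	struct rxe_pool_entry	pelem;	/* first member, so the void * helpers
					 * see the pool entry; embeds both
					 * index_node and key_node */
	/* ... memory window state ... */
};

static void example_make_mw_visible(struct example_mw *mw, void *key)
{
	rxe_add_index(mw);	/* linked into pool->index.tree */
	rxe_add_key(mw, key);	/* linked, independently, into pool->key.tree */
}

static void example_change_mw_key(struct example_mw *mw, void *new_key)
{
	/* only the key entry is dropped and re-added; the index stays valid */
	rxe_drop_key(mw);
	rxe_add_key(mw, new_key);
}

Splitting the single rb_node/rb_root pair into per-purpose index and key
members is what allows the key to be dropped and re-added without
invalidating the object's index.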