@@ -42,6 +42,9 @@
#define BITS_PER_XA_VALUE (BITS_PER_LONG - 1)
+/* Called from init/main.c */
+void xarray_slabcache_init(void);
+
/**
* xa_mk_value() - Create an XArray entry from an integer.
* @v: Value to store in XArray.
@@ -106,6 +106,7 @@ static int kernel_init(void *);
extern void init_IRQ(void);
extern void radix_tree_init(void);
+extern void xarray_slabcache_init(void);
/*
* Debug helper: via this flag we know that we are in 'early bootup code'
@@ -621,6 +622,7 @@ asmlinkage __visible void __init start_kernel(void)
"Interrupts were enabled *very* early, fixing it\n"))
local_irq_disable();
radix_tree_init();
+ xarray_slabcache_init();
/*
* Set up housekeeping before setting up workqueues to allow the unbound
@@ -44,7 +44,7 @@
/*
* Radix tree node cache.
*/
-struct kmem_cache *radix_tree_node_cachep;
+static struct kmem_cache *radix_tree_node_cachep;
/*
* The radix tree is variable-height, so an insert operation not only has
@@ -27,6 +27,8 @@
* @entry refers to something stored in a slot in the xarray
*/
+static struct kmem_cache *xa_node_cachep;
+
static inline unsigned int xa_lock_type(const struct xarray *xa)
{
return (__force unsigned int)xa->xa_flags & 3;
@@ -244,9 +246,21 @@ void *xas_load(struct xa_state *xas)
}
EXPORT_SYMBOL_GPL(xas_load);
-/* Move the radix tree node cache here */
-extern struct kmem_cache *radix_tree_node_cachep;
-extern void radix_tree_node_rcu_free(struct rcu_head *head);
+void xa_node_rcu_free(struct rcu_head *head)
+{
+ struct xa_node *node = container_of(head, struct xa_node, rcu_head);
+
+ /*
+ * Must only free zeroed nodes into the slab. We can be left with
+ * non-NULL entries by radix_tree_free_nodes, so clear the entries
+ * and tags here.
+ */
+ memset(node->slots, 0, sizeof(node->slots));
+ memset(node->tags, 0, sizeof(node->tags));
+ INIT_LIST_HEAD(&node->private_list);
+
+ kmem_cache_free(xa_node_cachep, node);
+}
#define XA_RCU_FREE ((struct xarray *)1)
@@ -254,7 +268,7 @@ static void xa_node_free(struct xa_node *node)
{
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
node->array = XA_RCU_FREE;
- call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+ call_rcu(&node->rcu_head, xa_node_rcu_free);
}
/*
@@ -270,7 +284,7 @@ static void xas_destroy(struct xa_state *xas)
if (!node)
return;
XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
- kmem_cache_free(radix_tree_node_cachep, node);
+ kmem_cache_free(xa_node_cachep, node);
xas->xa_alloc = NULL;
}
@@ -298,7 +312,7 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
xas_destroy(xas);
return false;
}
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc(xa_node_cachep, gfp);
if (!xas->xa_alloc)
return false;
XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
@@ -327,10 +341,10 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
}
if (gfpflags_allow_blocking(gfp)) {
xas_unlock_type(xas, lock_type);
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc(xa_node_cachep, gfp);
xas_lock_type(xas, lock_type);
} else {
- xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+ xas->xa_alloc = kmem_cache_alloc(xa_node_cachep, gfp);
}
if (!xas->xa_alloc)
return false;
@@ -358,7 +372,7 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
if (node) {
xas->xa_alloc = NULL;
} else {
- node = kmem_cache_alloc(radix_tree_node_cachep,
+ node = kmem_cache_alloc(xa_node_cachep,
GFP_NOWAIT | __GFP_NOWARN);
if (!node) {
xas_set_err(xas, -ENOMEM);
@@ -1971,6 +1985,22 @@ void xa_destroy(struct xarray *xa)
}
EXPORT_SYMBOL(xa_destroy);
+static void xa_node_ctor(void *arg)
+{
+ struct xa_node *node = arg;
+
+ memset(node, 0, sizeof(*node));
+ INIT_LIST_HEAD(&node->private_list);
+}
+
+void __init xarray_slabcache_init(void)
+{
+ xa_node_cachep = kmem_cache_create("xarray_node",
+ sizeof(struct xa_node), 0,
+ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+ xa_node_ctor);
+}
+
#ifdef XA_DEBUG
void xa_dump_node(const struct xa_node *node)
{
Earlier, Slab Movable Objects (SMO) was implemented.  The XArray is now
able to take advantage of SMO in order to make xarray nodes movable
(when using the SLUB allocator).

Currently the radix tree uses the same slab cache as the XArray.  Only
XArray nodes are movable, _not_ radix tree nodes.  We can give the radix
tree its own slab cache to overcome this.

In preparation for implementing XArray object migration (xa_node
objects) via Slab Movable Objects, add a slab cache solely for XArray
nodes and make the XArray use this slab cache instead of the
radix_tree_node slab cache.

Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Tobin C. Harding <tobin@kernel.org>
---
 include/linux/xarray.h |  3 +++
 init/main.c            |  2 ++
 lib/radix-tree.c       |  2 +-
 lib/xarray.c           | 48 ++++++++++++++++++++++++++++++++++--------
 4 files changed, 45 insertions(+), 10 deletions(-)
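Note on the constructor-based cache pattern used above: because SLAB/SLUB
only run the constructor when a slab page is populated (not on every
allocation), objects must be returned to the cache in constructor state,
which is why xa_node_rcu_free() re-zeroes the slots and tags before calling
kmem_cache_free().  Below is a minimal, self-contained sketch of that same
pattern as a standalone test module; the names (demo_node, demo_cachep,
demo_ctor) are purely illustrative and are not part of this patch.

	/* Illustrative only: tiny module showing a kmem_cache with a ctor. */
	#include <linux/module.h>
	#include <linux/slab.h>

	struct demo_node {
		unsigned long slots[8];	/* stand-in for a node's payload */
	};

	static struct kmem_cache *demo_cachep;

	/* Runs when a new slab page is populated, not on every allocation. */
	static void demo_ctor(void *arg)
	{
		memset(arg, 0, sizeof(struct demo_node));
	}

	static int __init demo_init(void)
	{
		struct demo_node *node;

		demo_cachep = kmem_cache_create("demo_node",
						sizeof(struct demo_node), 0,
						SLAB_RECLAIM_ACCOUNT, demo_ctor);
		if (!demo_cachep)
			return -ENOMEM;

		node = kmem_cache_alloc(demo_cachep, GFP_KERNEL);
		if (node) {
			node->slots[0] = 1;
			/*
			 * Restore ctor state before freeing: the ctor will not
			 * run again when this object is next handed out.
			 */
			memset(node->slots, 0, sizeof(node->slots));
			kmem_cache_free(demo_cachep, node);
		}
		return 0;
	}

	static void __exit demo_exit(void)
	{
		kmem_cache_destroy(demo_cachep);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");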