@@ -13,7 +13,6 @@
 
 #include "i915_drv.h"
 #include "i915_active.h"
-#include "i915_globals.h"
 
 /*
  * Active refs memory management
@@ -22,10 +21,7 @@
  * they idle (when we know the active requests are inactive) and allocate the
  * nodes from a local slab cache to hopefully reduce the fragmentation.
  */
-static struct i915_global_active {
-	struct i915_global base;
-	struct kmem_cache *slab_cache;
-} global;
+static struct kmem_cache *slab_cache;
 
 struct active_node {
 	struct rb_node node;
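The comment above is the rationale for the dedicated slab: active_node objects are allocated and freed at a high rate, and a type-specific kmem_cache keeps them packed in their own slabs instead of fragmenting the general-purpose pools. A minimal sketch of the same alloc/free pattern, with hypothetical demo_* names standing in for the driver's types:

    #include <linux/rbtree.h>
    #include <linux/slab.h>

    struct demo_node {
    	struct rb_node node;
    	u64 timeline;
    };

    /* File-scoped cache handle, as with 'slab_cache' above. */
    static struct kmem_cache *demo_cache;

    static struct demo_node *demo_node_alloc(gfp_t gfp)
    {
    	/* Zeroed object, sized and aligned for struct demo_node. */
    	return kmem_cache_zalloc(demo_cache, gfp);
    }

    static void demo_node_free(struct demo_node *dn)
    {
    	/* Return the object to the type-specific pool. */
    	kmem_cache_free(demo_cache, dn);
    }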
@@ -174,7 +170,7 @@ __active_retire(struct i915_active *ref)
 	/* Finally free the discarded timeline tree */
 	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
 		GEM_BUG_ON(i915_active_fence_isset(&it->base));
-		kmem_cache_free(global.slab_cache, it);
+		kmem_cache_free(slab_cache, it);
 	}
 }
 
@@ -322,7 +318,7 @@ active_instance(struct i915_active *ref, u64 idx)
 	 * XXX: We should preallocate this before i915_active_ref() is ever
 	 *  called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
 	 */
-	node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
+	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
 	if (!node)
 		goto out;
 
@@ -788,7 +784,7 @@ void i915_active_fini(struct i915_active *ref)
 	mutex_destroy(&ref->mutex);
 
 	if (ref->cache)
-		kmem_cache_free(global.slab_cache, ref->cache);
+		kmem_cache_free(slab_cache, ref->cache);
 }
 
 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
@@ -908,7 +904,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 		node = reuse_idle_barrier(ref, idx);
 		rcu_read_unlock();
 		if (!node) {
-			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+			node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
 			if (!node)
 				goto unwind;
 
@@ -956,7 +952,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 		atomic_dec(&ref->count);
 		intel_engine_pm_put(barrier_to_engine(node));
 
-		kmem_cache_free(global.slab_cache, node);
+		kmem_cache_free(slab_cache, node);
 	}
 	return -ENOMEM;
 }
@@ -1176,21 +1172,16 @@ struct i915_active *i915_active_create(void)
 #include "selftests/i915_active.c"
 #endif
 
-static void i915_global_active_exit(void)
+void i915_active_module_exit(void)
 {
-	kmem_cache_destroy(global.slab_cache);
+	kmem_cache_destroy(slab_cache);
 }
 
-static struct i915_global_active global = { {
-	.exit = i915_global_active_exit,
-} };
-
-int __init i915_global_active_init(void)
+int __init i915_active_module_init(void)
 {
-	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
-	if (!global.slab_cache)
+	slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
+	if (!slab_cache)
 		return -ENOMEM;
 
-	i915_global_register(&global.base);
 	return 0;
 }
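Two slab-API details the new pair leans on, for readers without <linux/slab.h> to hand: KMEM_CACHE() derives the cache's name, object size, and alignment from the struct itself, and kmem_cache_destroy() is a no-op on NULL, so i915_active_module_exit() is safe to call even if init failed before the cache existed. The KMEM_CACHE() line above expands to roughly:

    slab_cache = kmem_cache_create("active_node",
    			       sizeof(struct active_node),
    			       __alignof__(struct active_node),
    			       SLAB_HWCACHE_ALIGN, NULL);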
@@ -247,4 +247,7 @@ static inline int __i915_request_await_exclusive(struct i915_request *rq,
 	return err;
 }
 
+void i915_active_module_exit(void);
+int i915_active_module_init(void);
+
 #endif /* _I915_ACTIVE_H_ */
@@ -7,7 +7,6 @@
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 
-#include "i915_active.h"
 #include "i915_buddy.h"
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_object.h"
@@ -34,7 +33,6 @@ static void __i915_globals_cleanup(void)
 }
 
 static __initconst int (* const initfn[])(void) = {
-	i915_global_active_init,
 	i915_global_buddy_init,
 	i915_global_context_init,
 	i915_global_gem_context_init,
@@ -23,7 +23,6 @@ int i915_globals_init(void);
 void i915_globals_exit(void);
 
 /* constructors */
-int i915_global_active_init(void);
 int i915_global_context_init(void);
 int i915_global_gem_context_init(void);
 int i915_global_objects_init(void);
@@ -30,6 +30,7 @@
 
 #include "display/intel_fbdev.h"
 
+#include "i915_active.h"
 #include "i915_drv.h"
 #include "i915_perf.h"
 #include "i915_globals.h"
@@ -1294,6 +1295,7 @@ static const struct {
 	void (*exit)(void);
 } init_funcs[] = {
 	{ i915_check_nomodeset, NULL },
+	{ i915_active_module_init, i915_active_module_exit },
 	{ i915_globals_init, i915_globals_exit },
 	{ i915_mock_selftests, NULL },
 	{ i915_pmu_init, i915_pmu_exit },
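The new entry relies on the driver's table-driven module load: init hooks run in table order, and on failure the entries already initialised are torn down in reverse, skipping NULL exit hooks. A hedged sketch of such a walk (demo_init and the simplified error handling are illustrative, not copied from i915; any non-zero return is treated as failure here):

    static int __init demo_init(void)
    {
    	int err, i;

    	for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
    		err = init_funcs[i].init();
    		if (err)
    			goto unwind;
    	}
    	return 0;

    unwind:
    	/* Tear down, most-recently-initialised first. */
    	while (i--)
    		if (init_funcs[i].exit)
    			init_funcs[i].exit();
    	return err;
    }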