Diffstat (limited to 'drivers/gpu/drm/i915/i915_globals.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_globals.c   82
1 file changed, 38 insertions(+), 44 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index cfd0bc462f58..1cf4e8bc8ec6 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -15,62 +15,61 @@
#include "i915_scheduler.h"
#include "i915_vma.h"
-int __init i915_globals_init(void)
+static LIST_HEAD(globals);
+
+void __init i915_global_register(struct i915_global *global)
{
- int err;
+ GEM_BUG_ON(!global->shrink);
+ GEM_BUG_ON(!global->exit);
- err = i915_global_active_init();
- if (err)
- return err;
+ list_add_tail(&global->link, &globals);
+}
- err = i915_global_context_init();
- if (err)
- goto err_active;
+static void __i915_globals_cleanup(void)
+{
+ struct i915_global *global, *next;
- err = i915_global_objects_init();
- if (err)
- goto err_context;
+ list_for_each_entry_safe_reverse(global, next, &globals, link)
+ global->exit();
+}
- err = i915_global_request_init();
- if (err)
- goto err_objects;
+static __initconst int (* const initfn[])(void) = {
+ i915_global_active_init,
+ i915_global_context_init,
+ i915_global_objects_init,
+ i915_global_request_init,
+ i915_global_scheduler_init,
+ i915_global_vma_init,
+};
- err = i915_global_scheduler_init();
- if (err)
- goto err_request;
+int __init i915_globals_init(void)
+{
+ int i;
- err = i915_global_vma_init();
- if (err)
- goto err_scheduler;
+ for (i = 0; i < ARRAY_SIZE(initfn); i++) {
+ int err;
- return 0;
+ err = initfn[i]();
+ if (err) {
+ __i915_globals_cleanup();
+ return err;
+ }
+ }
-err_scheduler:
- i915_global_scheduler_exit();
-err_request:
- i915_global_request_exit();
-err_objects:
- i915_global_objects_exit();
-err_context:
- i915_global_context_exit();
-err_active:
- i915_global_active_exit();
- return err;
+ return 0;
}
static void i915_globals_shrink(void)
{
+ struct i915_global *global;
+
/*
* kmem_cache_shrink() discards empty slabs and reorders partially
* filled slabs to prioritise allocating from the mostly full slabs,
* with the aim of reducing fragmentation.
*/
- i915_global_active_shrink();
- i915_global_context_shrink();
- i915_global_objects_shrink();
- i915_global_request_shrink();
- i915_global_scheduler_shrink();
- i915_global_vma_shrink();
+ list_for_each_entry(global, &globals, link)
+ global->shrink();
}
static atomic_t active;
@@ -128,12 +127,7 @@ void __exit i915_globals_exit(void)
rcu_barrier();
flush_scheduled_work();
- i915_global_vma_exit();
- i915_global_scheduler_exit();
- i915_global_request_exit();
- i915_global_objects_exit();
- i915_global_context_exit();
- i915_global_active_exit();
+ __i915_globals_cleanup();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
rcu_barrier();
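
For context on the pattern this patch introduces: each subsystem now describes itself with a struct i915_global (a list link plus mandatory shrink/exit callbacks, per the GEM_BUG_ON checks above) and hands it to i915_global_register() from its own init function, so the table-driven i915_globals_init() only has to call the init functions in order. Below is a minimal sketch of what a registering subsystem would look like under that scheme. The struct layout is inferred from the usage in this diff; the "foo" subsystem, its slab cache, and the i915_global_foo_* names are purely illustrative and are not code from the i915 tree.

#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Layout implied by this diff: a list link plus two mandatory callbacks. */
struct i915_global {
	struct list_head link;
	void (*shrink)(void);	/* invoked by i915_globals_shrink() */
	void (*exit)(void);	/* invoked by __i915_globals_cleanup() */
};

void i915_global_register(struct i915_global *global);	/* from i915_globals.h */

struct foo {	/* hypothetical object backed by a global slab cache */
	struct list_head link;
};

static struct kmem_cache *slab_foo;

static void i915_global_foo_shrink(void)
{
	/* Discard empty slabs, as described in i915_globals_shrink() above. */
	kmem_cache_shrink(slab_foo);
}

static void i915_global_foo_exit(void)
{
	kmem_cache_destroy(slab_foo);
}

static struct i915_global global = {
	.shrink = i915_global_foo_shrink,
	.exit = i915_global_foo_exit,
};

int __init i915_global_foo_init(void)
{
	slab_foo = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
	if (!slab_foo)
		return -ENOMEM;

	/* Appended in init order; torn down in reverse order. */
	i915_global_register(&global);
	return 0;
}

Note the design consequence visible in __i915_globals_cleanup(): registration happens in init order and the cleanup walks the list with list_for_each_entry_safe_reverse(), so exit callbacks run in the reverse of init order, which is exactly the ordering the old hand-rolled err_* unwind ladder guaranteed.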