Diffstat (limited to 'drivers/gpu/drm/msm/msm_gem_shrinker.c')
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c  123
1 file changed, 58 insertions(+), 65 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 482576d7a39a..9d5248be746f 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -6,58 +6,28 @@
#include "msm_drv.h"
#include "msm_gem.h"
+#include "msm_gpu.h"
#include "msm_gpu_trace.h"
-static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-{
- /* NOTE: we are *closer* to being able to get rid of
- * mutex_trylock_recursive().. the msm_gem code itself does
- * not need struct_mutex, although codepaths that can trigger
- * shrinker are still called in code-paths that hold the
- * struct_mutex.
- *
- * Also, msm_obj->madv is protected by struct_mutex.
- *
- * The next step is probably to split out a separate lock for
- * protecting inactive_list, so that shrinker does not need
- * struct_mutex.
- */
- switch (mutex_trylock_recursive(&dev->struct_mutex)) {
- case MUTEX_TRYLOCK_FAILED:
- return false;
-
- case MUTEX_TRYLOCK_SUCCESS:
- *unlock = true;
- return true;
-
- case MUTEX_TRYLOCK_RECURSIVE:
- *unlock = false;
- return true;
- }
-
- BUG();
-}
-
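Aside (not part of the patch): the helper removed above existed because the shrinker can be entered from direct reclaim on a thread that already holds struct_mutex, where a blocking lock would self-deadlock. The kernel's mutex_trylock_recursive() additionally reported "already held by this thread" so the caller could proceed without unlocking; the minimal userspace sketch below only models the simpler trylock-and-bail half of that, with pthread mutexes standing in for kernel mutexes and all names purely illustrative.

/* Sketch: why a shrinker-like path must not block on a lock the
 * calling thread may already hold.  NOT kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for shrinker entry: reclaim may run on a thread that
 * already holds big_lock, so a blocking lock would self-deadlock. */
static void shrink(void)
{
	if (pthread_mutex_trylock(&big_lock) != 0) {
		/* Contended (possibly by ourselves): skip this pass;
		 * reclaim will simply try again later. */
		printf("shrink: lock busy, bailing\n");
		return;
	}
	printf("shrink: reclaiming under lock\n");
	pthread_mutex_unlock(&big_lock);
}

int main(void)
{
	pthread_mutex_lock(&big_lock);
	shrink();		/* bails instead of deadlocking */
	pthread_mutex_unlock(&big_lock);
	shrink();		/* uncontended: reclaim proceeds */
	return 0;
}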
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- struct drm_device *dev = priv->dev;
struct msm_gem_object *msm_obj;
unsigned long count = 0;
- bool unlock;
- if (!msm_gem_shrinker_lock(dev, &unlock))
- return 0;
+ mutex_lock(&priv->mm_lock);
- list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
+ if (!msm_gem_trylock(&msm_obj->base))
+ continue;
if (is_purgeable(msm_obj))
count += msm_obj->base.size >> PAGE_SHIFT;
+ msm_gem_unlock(&msm_obj->base);
}
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&priv->mm_lock);
return count;
}
@@ -67,25 +37,24 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct msm_drm_private *priv =
container_of(shrinker, struct msm_drm_private, shrinker);
- struct drm_device *dev = priv->dev;
struct msm_gem_object *msm_obj;
unsigned long freed = 0;
- bool unlock;
- if (!msm_gem_shrinker_lock(dev, &unlock))
- return SHRINK_STOP;
+ mutex_lock(&priv->mm_lock);
- list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ list_for_each_entry(msm_obj, &priv->inactive_dontneed, mm_list) {
if (freed >= sc->nr_to_scan)
break;
+ if (!msm_gem_trylock(&msm_obj->base))
+ continue;
if (is_purgeable(msm_obj)) {
- msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
+ msm_gem_purge(&msm_obj->base);
freed += msm_obj->base.size >> PAGE_SHIFT;
}
+ msm_gem_unlock(&msm_obj->base);
}
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&priv->mm_lock);
if (freed > 0)
trace_msm_gem_purge(freed << PAGE_SHIFT);
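Both count() and scan() now follow the same pattern: hold the new fine-grained mm_lock across the list walk, but only trylock each object, skipping busy objects rather than stalling reclaim on them. A compact userspace sketch of that pattern follows; pthreads and a plain array stand in for the kernel list and GEM locks, 4 KiB pages are assumed, and all names are illustrative.

/* Sketch of the trylock-and-skip shrinker walk.  NOT kernel code. */
#include <pthread.h>
#include <stddef.h>

struct obj {
	pthread_mutex_t lock;
	size_t size;		/* bytes */
	int purgeable;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static size_t count_purgeable(struct obj *objs, size_t n)
{
	size_t pages = 0;

	pthread_mutex_lock(&list_lock);
	for (size_t i = 0; i < n; i++) {
		/* Busy elsewhere: skip it, never block reclaim. */
		if (pthread_mutex_trylock(&objs[i].lock) != 0)
			continue;
		if (objs[i].purgeable)
			pages += objs[i].size >> 12;	/* bytes -> 4K pages */
		pthread_mutex_unlock(&objs[i].lock);
	}
	pthread_mutex_unlock(&list_lock);

	return pages;
}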
@@ -93,33 +62,57 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return freed;
}
+/* since we don't know any better, let's bail after a few
+ * and if necessary the shrinker will be invoked again.
+ * Seems better than unmapping *everything*
+ */
+static const int vmap_shrink_limit = 15;
+
+static unsigned
+vmap_shrink(struct list_head *mm_list)
+{
+ struct msm_gem_object *msm_obj;
+ unsigned unmapped = 0;
+
+ list_for_each_entry(msm_obj, mm_list, mm_list) {
+ if (!msm_gem_trylock(&msm_obj->base))
+ continue;
+ if (is_vunmapable(msm_obj)) {
+ msm_gem_vunmap(&msm_obj->base);
+ unmapped++;
+ }
+ msm_gem_unlock(&msm_obj->base);
+
+ if (unmapped >= vmap_shrink_limit)
+ break;
+ }
+
+ return unmapped;
+}
+
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
struct msm_drm_private *priv =
container_of(nb, struct msm_drm_private, vmap_notifier);
- struct drm_device *dev = priv->dev;
- struct msm_gem_object *msm_obj;
- unsigned unmapped = 0;
- bool unlock;
+ struct list_head *mm_lists[] = {
+ &priv->inactive_dontneed,
+ &priv->inactive_willneed,
+ priv->gpu ? &priv->gpu->active_list : NULL,
+ NULL,
+ };
+ unsigned idx, unmapped = 0;
- if (!msm_gem_shrinker_lock(dev, &unlock))
- return NOTIFY_DONE;
+ mutex_lock(&priv->mm_lock);
- list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
- if (is_vunmapable(msm_obj)) {
- msm_gem_vunmap(&msm_obj->base, OBJ_LOCK_SHRINKER);
- /* since we don't know any better, lets bail after a few
- * and if necessary the shrinker will be invoked again.
- * Seems better than unmapping *everything*
- */
- if (++unmapped >= 15)
- break;
- }
+ for (idx = 0; mm_lists[idx]; idx++) {
+ unmapped += vmap_shrink(mm_lists[idx]);
+
+ if (unmapped >= vmap_shrink_limit)
+ break;
}
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&priv->mm_lock);
*(unsigned long *)ptr += unmapped;
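For context (hedged, not part of this patch): msm_gem_shrinker_vmap() is invoked from the kernel's vmalloc purge notifier chain, and *ptr accumulates how many mappings the callee released so the core can judge whether purging helped. A sketch of how such a notifier is wired up, using the real register_vmap_purge_notifier() API this driver already relies on; the callback and variable names here are illustrative stubs, not the msm ones.

/* Sketch: wiring a vmap purge notifier (kernel context assumed). */
#include <linux/notifier.h>
#include <linux/vmalloc.h>

static int example_vmap_notify(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	/* Report how many vmaps were released; zero in this stub. */
	*(unsigned long *)ptr += 0;
	return NOTIFY_DONE;
}

static struct notifier_block example_vmap_nb = {
	.notifier_call = example_vmap_notify,
};

/* At init:     register_vmap_purge_notifier(&example_vmap_nb);
 * At teardown: unregister_vmap_purge_notifier(&example_vmap_nb); */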
@@ -131,7 +124,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
/**
* msm_gem_shrinker_init - Initialize msm shrinker
- * @dev_priv: msm device
+ * @dev: drm device
*
* This function registers and sets up the msm shrinker.
*/
@@ -149,7 +142,7 @@ void msm_gem_shrinker_init(struct drm_device *dev)
/**
* msm_gem_shrinker_cleanup - Clean up msm shrinker
- * @dev_priv: msm device
+ * @dev: drm device
*
* This function unregisters the msm shrinker.
*/
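The bodies of msm_gem_shrinker_init() and msm_gem_shrinker_cleanup() fall outside these hunks; only their kernel-doc is touched. As a hedged sketch of how such hooks are typically wired in this era of the driver, assuming the single-argument register_shrinker() of contemporaneous kernels:

/* Sketch only -- the actual bodies are not shown in this diff. */
#include <linux/shrinker.h>
#include <linux/vmalloc.h>

void example_shrinker_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* Pair the count/scan callbacks defined above. */
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	WARN_ON(register_shrinker(&priv->shrinker));

	/* Hook the vmap purge notifier handled above. */
	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}

void example_shrinker_cleanup(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/* Unregister in reverse order of init. */
	WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
	unregister_shrinker(&priv->shrinker);
}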