path: root/drivers/gpu/drm/i915/i915_gem_shrinker.c
author     Chris Wilson <chris@chris-wilson.co.uk>	2017-10-13 21:26:19 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>	2017-10-16 20:44:19 +0100
commit     c5418a8b38a4ff9284e8e59c06a24f443197131c (patch)
tree       66a1841b60db39e4b12ade89df84bd7c0924bdb6 /drivers/gpu/drm/i915/i915_gem_shrinker.c
parent     drm/i915: Wire up shrinkctl->nr_scanned (diff)
download   linux-dev-c5418a8b38a4ff9284e8e59c06a24f443197131c.tar.xz
           linux-dev-c5418a8b38a4ff9284e8e59c06a24f443197131c.zip
drm/i915: Set our shrinker->batch to 4096 (~16MiB)
Prefer to defer activating our GEM shrinker until we have a few
megabytes to free; or we have accumulated sufficient mempressure by
deferring the reclaim to force a shrink. The intent is that because our
objects may typically be large, we are too effective at shrinking and
are not rewarded for freeing more pages than the batch. It will also
defer the initial shrinking to hopefully put it at a lower priority
than say the buffer cache (although it will balance out over a number
of reclaims, with GEM being more bursty).

v2: Give it a feedback system to try and tune the batch size towards
    an effective size for the available objects.
v3: Start keeping track of shrinker stats in debugfs
v4: Protect against finding no shrinkable objects (div-by-zero)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171013202621.7276-7-chris@chris-wilson.co.uk
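To make the v2 feedback loop concrete, here is a minimal userspace sketch of the calculation the count callback performs in the patch below. next_batch() and the workload numbers are illustrative assumptions, not part of the patch: each pass averages the previous batch with twice the mean shrinkable object size (in pages), flooring the result at vmscan's default batch of 128 pages, so the batch drifts towards roughly two objects' worth of pages.

#include <stdio.h>

#define SHRINK_BATCH_DEFAULT 128ul	/* vmscan's default batch, in pages */

/* One feedback step: average the previous batch with twice the mean
 * shrinkable object size (in pages), never dropping below the default.
 */
static unsigned long next_batch(unsigned long prev_batch,
				unsigned long count,       /* shrinkable pages */
				unsigned long num_objects) /* shrinkable objects */
{
	unsigned long avg, batch;

	if (!num_objects)	/* no shrinkable objects: keep the old batch (the v4 guard) */
		return prev_batch;

	avg = 2 * count / num_objects;
	batch = (prev_batch + avg) >> 1;
	return batch > SHRINK_BATCH_DEFAULT ? batch : SHRINK_BATCH_DEFAULT;
}

int main(void)
{
	/* Hypothetical workload: 64 shrinkable objects totalling 65536 pages
	 * (256 MiB with 4 KiB pages), so twice the mean object size is 2048
	 * pages; start from the initial batch of 4096 pages (~16 MiB).
	 */
	unsigned long batch = 4096;

	for (int pass = 0; pass < 4; pass++) {
		batch = next_batch(batch, 65536, 64);
		printf("pass %d: batch = %lu pages\n", pass, batch);
	}
	return 0;
}

With 4 KiB pages the initial batch of 4096 pages is the ~16MiB in the subject line; for the hypothetical 4 MiB objects above, the batch settles towards 2048 pages (~8 MiB).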
Diffstat (limited to '')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_shrinker.c	34
1 file changed, 27 insertions, 7 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 175765111f4e..eb31f8aa5c21 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -295,20 +295,39 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
-	struct drm_i915_private *dev_priv =
+	struct drm_i915_private *i915 =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_i915_gem_object *obj;
+	unsigned long num_objects = 0;
 	unsigned long count = 0;
 
-	spin_lock(&dev_priv->mm.obj_lock);
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link)
-		if (can_release_pages(obj))
+	spin_lock(&i915->mm.obj_lock);
+	list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+		if (can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
+			num_objects++;
+		}
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link)
-		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
+	list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+		if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
 			count += obj->base.size >> PAGE_SHIFT;
-	spin_unlock(&dev_priv->mm.obj_lock);
+			num_objects++;
+		}
+	spin_unlock(&i915->mm.obj_lock);
+
+	/* Update our preferred vmscan batch size for the next pass.
+	 * Our rough guess for an effective batch size is roughly 2
+	 * available GEM objects worth of pages. That is we don't want
+	 * the shrinker to fire, until it is worth the cost of freeing an
+	 * entire GEM object.
+	 */
+	if (num_objects) {
+		unsigned long avg = 2 * count / num_objects;
+
+		i915->mm.shrinker.batch =
+			max((i915->mm.shrinker.batch + avg) >> 1,
+			    128ul /* default SHRINK_BATCH */);
+	}
 
 	return count;
 }
@@ -473,6 +492,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
 	dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
 	dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
 	dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
+	dev_priv->mm.shrinker.batch = 4096;
 	WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 
 	dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
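
For context on why shrinker->batch changes behaviour at all: the sketch below paraphrases, from memory and not verbatim, how the core reclaim path (do_shrink_slab() in mm/vmscan.c around this kernel version) consumes the batch value; shrink_slab_sketch() is an illustrative name only. Reclaim accumulates deferred pressure and does not call ->scan_objects() until at least one batch of work is available, then scans in batch-sized chunks, which is the deferral the commit message relies on.

#include <linux/kernel.h>
#include <linux/shrinker.h>

/* Paraphrased sketch (not verbatim mm/vmscan.c) of how do_shrink_slab()
 * consumed shrinker->batch around this era: deferred pressure accumulates
 * in total_scan, and ->scan_objects() is only invoked once a full batch of
 * work is available, in chunks of at most batch_size pages.
 */
static unsigned long shrink_slab_sketch(struct shrinker *shrinker,
					struct shrink_control *sc,
					unsigned long total_scan,
					unsigned long freeable)
{
	unsigned long batch_size = shrinker->batch ? : 128 /* SHRINK_BATCH */;
	unsigned long freed = 0;

	while (total_scan >= batch_size || total_scan >= freeable) {
		unsigned long ret;

		sc->nr_to_scan = min(batch_size, total_scan);
		sc->nr_scanned = sc->nr_to_scan;

		ret = shrinker->scan_objects(shrinker, sc);
		if (ret == SHRINK_STOP)
			break;
		freed += ret;

		total_scan -= sc->nr_scanned;
	}

	return freed;
}

Raising ->batch from the 128-page default to 4096 pages therefore postpones the first GEM scan until roughly 16 MiB of reclaim pressure has built up (with 4 KiB pages) instead of 512 KiB, and the count callback above then keeps tuning that threshold towards twice the mean shrinkable object size.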