author     Chris Wilson <chris@chris-wilson.co.uk>  2019-08-02 22:21:37 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2019-08-02 23:39:48 +0100
commit     515b8b7e935ef3f59c4efda04a3b05353ed6fbb7 (patch)
tree       5e857d58e9b11b258f7f7079ef7f4273468be7eb /drivers/gpu/drm/i915/gem
parent     drm/i915: Hide unshrinkable context objects from the shrinker (diff)
download   linux-dev-515b8b7e935ef3f59c4efda04a3b05353ed6fbb7.tar.xz
           linux-dev-515b8b7e935ef3f59c4efda04a3b05353ed6fbb7.zip
drm/i915: Flush the freed object list on file close
As we increase the number of RCU objects, it becomes easier for us to
have several hundred thousand objects in the deferred RCU free queues.
An example is gem_ctx_create/files which continually creates active
contexts, which are not immediately freed upon close as they are kept
alive by outstanding requests. This lack of backpressure allows the
context objects to persist until they overwhelm and starve the system.

We can increase our backpressure by flushing the freed object queue
upon closing the device fd which should then not impact other clients.

Testcase: igt/gem_ctx_create/*files
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190802212137.22207-2-chris@chris-wilson.co.uk
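The diffstat below is limited to gem/, so the hunk that actually invokes
the flush on file close is not shown here. A minimal sketch of such a
hook, assuming it lands in the driver's DRM postclose callback (the
function body and call ordering are illustrative assumptions, not lines
from this patch):

/*
 * Sketch only: flush the deferred free queue when a client closes its
 * fd, so long-lived clients cannot accumulate unbounded freed objects.
 * The placement in i915_driver_postclose() and the surrounding calls
 * are assumptions for illustration.
 */
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);
	i915_gem_release(dev, file);
	kfree(file_priv);

	/* Apply backpressure: reap this client's deferred frees now. */
	i915_gem_flush_free_objects(to_i915(dev));
}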
Diffstat (limited to 'drivers/gpu/drm/i915/gem')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c  38
1 file changed, 4 insertions, 34 deletions
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 4ea97fca9c35..19d55115747c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -211,48 +211,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
-	struct llist_node *freed;
-
-	/* Free the oldest, most stale object to keep the free_list short */
-	freed = NULL;
-	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
-		/* Only one consumer of llist_del_first() allowed */
-		spin_lock(&i915->mm.free_lock);
-		freed = llist_del_first(&i915->mm.free_list);
-		spin_unlock(&i915->mm.free_lock);
-	}
-	if (unlikely(freed)) {
-		freed->next = NULL;
+	struct llist_node *freed = llist_del_all(&i915->mm.free_list);
+
+	if (unlikely(freed))
 		__i915_gem_free_objects(i915, freed);
-	}
 }
 
 static void __i915_gem_free_work(struct work_struct *work)
 {
 	struct drm_i915_private *i915 =
 		container_of(work, struct drm_i915_private, mm.free_work);
-	struct llist_node *freed;
-
-	/*
-	 * All file-owned VMA should have been released by this point through
-	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
-	 * However, the object may also be bound into the global GTT (e.g.
-	 * older GPUs without per-process support, or for direct access through
-	 * the GTT either for the user or for scanout). Those VMA still need to
-	 * be unbound now.
-	 */
-
-	spin_lock(&i915->mm.free_lock);
-	while ((freed = llist_del_all(&i915->mm.free_list))) {
-		spin_unlock(&i915->mm.free_lock);
-		__i915_gem_free_objects(i915, freed);
-		if (need_resched())
-			return;
-
-		spin_lock(&i915->mm.free_lock);
-	}
-	spin_unlock(&i915->mm.free_lock);
+	i915_gem_flush_free_objects(i915);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
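The spinlock removed above existed because llist_del_first() allows only
one consumer at a time, whereas llist_del_all() atomically detaches the
whole chain and is safe for any number of concurrent consumers. A
self-contained sketch of the lock-free pattern the new code relies on
(the struct and function names are illustrative, not from this patch):

#include <linux/llist.h>
#include <linux/slab.h>

struct foo {
	struct llist_node node;
};

static LLIST_HEAD(free_list);

/* Producers may run concurrently with each other and with consumers. */
static void defer_free(struct foo *f)
{
	llist_add(&f->node, &free_list);
}

/*
 * llist_del_all() atomically detaches the entire chain, so multiple
 * consumers may call this without a lock; each walks a private list.
 */
static void reap(void)
{
	struct llist_node *batch = llist_del_all(&free_list);
	struct foo *f, *n;

	llist_for_each_entry_safe(f, n, batch, node)
		kfree(f);
}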