From d8462d0ad350ccd399899de051060ee562a5e537 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Tue, 20 Jun 2017 13:43:19 +0100
Subject: drm/i915: Retire the VMA's fence tracker before unbinding

Since we may track unfenced access (GPU access to the vma that
explicitly requires no fence), vma->last_fence may be set without any
attached fence (vma->fence) and so will not be flushed when we call
i915_vma_put_fence(). Since we stopped doing a full retire of the
activity trackers for unbind, we need to explicitly retire each tracker.

Fixes: b0decaf75bd9 ("drm/i915: Track active vma requests")
Signed-off-by: Chris Wilson
Cc: Joonas Lahtinen
Cc: Tvrtko Ursulin
Link: http://patchwork.freedesktop.org/patch/msgid/20170620124321.1108-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin
(cherry picked from commit 760a898d8069111704e1bd43f00ebf369ae46e57)
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_vma.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 1aba47024656..f066e2d785f5 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)
 			break;
 	}
 
+	if (!ret) {
+		ret = i915_gem_active_retire(&vma->last_fence,
+					     &vma->vm->i915->drm.struct_mutex);
+	}
+
 	__i915_vma_unpin(vma);
 	if (ret)
 		return ret;
--
cgit v1.2.3-59-g8ed1b


From 2c7367626733e27d6f6d9906db7a31ada587566b Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Sat, 17 Jun 2017 12:57:44 +0100
Subject: drm/i915: Hold struct_mutex for per-file stats in debugfs/i915_gem_object

As we walk the obj->vma_list in per_file_stats(), we need to hold
struct_mutex to prevent alteration of that list.

Fixes: 1d2ac403ae3b ("drm: Protect dev->filelist with its own mutex")
Fixes: c84455b4bacc ("drm/i915: Move debug only per-request pid tracking from request to ctx")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=101460
Signed-off-by: Chris Wilson
Cc: Daniel Vetter
Cc: Joonas Lahtinen
Link: http://patchwork.freedesktop.org/patch/msgid/20170617115744.4452-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin
(cherry picked from commit 0caf81b5c53d9bd332a95dbcb44db8de0b397a7c)
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_debugfs.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d689e511744e..4bd1467c17b1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
 	struct file_stats *stats = data;
 	struct i915_vma *vma;
 
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	stats->count++;
 	stats->total += obj->base.size;
 	if (!obj->bind_count)
@@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		struct drm_i915_gem_request *request;
 		struct task_struct *task;
 
+		mutex_lock(&dev->struct_mutex);
+
 		memset(&stats, 0, sizeof(stats));
 		stats.file_priv = file->driver_priv;
 		spin_lock(&file->table_lock);
@@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		 * still alive (e.g. get_pid(current) => fork() => exit()).
 		 * Therefore, we need to protect this ->comm access using RCU.
 		 */
-		mutex_lock(&dev->struct_mutex);
 		request = list_first_entry_or_null(&file_priv->mm.request_list,
 						   struct drm_i915_gem_request,
 						   client_link);
@@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 						     PIDTYPE_PID);
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
+		mutex_unlock(&dev->struct_mutex);
 	}
 	mutex_unlock(&dev->filelist_mutex);
--
cgit v1.2.3-59-g8ed1b


From 611cdf3695a307fdca3ff3779a1e6cca935e2d31 Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Fri, 16 Jun 2017 15:05:18 +0100
Subject: drm/i915: Disable EXEC_OBJECT_ASYNC when doing relocations

If we write a relocation into the buffer, we require our own implicit
synchronisation added after the start of the execbuf, outside of the
user's control. As we may end up clflushing, or doing the patch itself
on the GPU asynchronously, we need to look at the implicit serialisation
on obj->resv and hence need to disable EXEC_OBJECT_ASYNC for this
object.

If the user does trigger a stall for relocations, we make sure the
stall is complete enough so that the batch is not submitted before we
complete those relocations.

Fixes: 77ae9957897d ("drm/i915: Enable userspace to opt-out of implicit fencing")
Signed-off-by: Chris Wilson
Cc: Joonas Lahtinen
Cc: Jason Ekstrand
Reviewed-by: Joonas Lahtinen
(cherry picked from commit 071750e550af46b5d3a84ad56c2a108c3e136284)
[danvet: Resolve conflicts, resolution reviewed by Tvrtko on irc.]
Signed-off-by: Daniel Vetter
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a3e59c8ef27b..9ad13eeed904 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -546,11 +546,12 @@ repeat:
 }
 
 static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
 				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct reloc_cache *cache)
 {
+	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
@@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return -EINVAL;
 	}
 
+	/*
+	 * If we write into the object, we need to force the synchronisation
+	 * barrier, either with an asynchronous clflush or if we executed the
+	 * patching using the GPU (though that should be serialised by the
+	 * timeline). To be completely sure, and since we are required to
+	 * do relocations we are already stalling, disable the user's opt
+	 * out of our synchronisation.
+	 */
+	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
+
 	ret = relocate_entry(obj, reloc, cache, target_offset);
 	if (ret)
 		return ret;
@@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 		do {
 			u64 offset = r->presumed_offset;
 
-			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
+			ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
 			if (ret)
 				goto out;
 
@@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 
 	reloc_cache_init(&cache, eb->i915);
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
+		ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
 		if (ret)
 			break;
 	}
--
cgit v1.2.3-59-g8ed1b
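
For context on the EXEC_OBJECT_ASYNC patch above, the following is a minimal, hypothetical
userspace sketch (not part of the patches) of how the flag is requested through the
execbuffer2 uAPI. The function name, file descriptor, buffer handle and batch length are
placeholders, error handling and the capability check (I915_PARAM_HAS_EXEC_ASYNC) are
omitted, and the include path for i915_drm.h may differ between kernel and libdrm headers.
The relevant behaviour from the patch: if the same object also carries relocations, the
kernel clears the flag internally so the relocation write stays covered by implicit
synchronisation.

/* Hypothetical sketch: submit a batch while opting out of implicit fencing. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_async_batch(int drm_fd, uint32_t batch_handle, uint32_t batch_len)
{
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;
	/*
	 * Ask the kernel to skip implicit synchronisation for this object.
	 * If relocs_ptr/relocation_count were also set here, the patch above
	 * makes the kernel drop this flag while it applies the relocations.
	 */
	obj.flags = EXEC_OBJECT_ASYNC;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;	/* single-entry object array */
	execbuf.buffer_count = 1;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_RENDER;	/* submit to the render ring */

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}

When the flag is honoured, the kernel does not wait on (or install) fences in obj->resv for
this object; the fix above simply forces that wait back on for objects the kernel itself
must write to while patching relocations.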