author     Chris Wilson <chris@chris-wilson.co.uk>  2019-11-25 10:58:57 +0000
committer  Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2019-11-25 16:39:07 +0200
commit     a09c2860ae4fcf4d7e25202661f3eb868547cddb (patch)
tree       c50b577d05040a93fa9002dc8c5a310222491f27 /drivers/gpu/drm/i915/gt
parent     drm/i915/execlists: Fixup cancel_port_requests() (diff)
drm/i915/gt: Adapt engine_park synchronisation rules for engine_retire
In the next patch, we will introduce a new asynchronous retirement
worker, fed by execlists CS events. Here we may queue a retirement as
soon as a request is submitted to HW (and completes instantly), and we
also want to process that retirement as early as possible and cannot
afford to postpone (as there may not be another opportunity to retire
it for a few seconds). To allow the new async retirer to run in
parallel with our submission, pull the __i915_request_queue (that
passes the request to HW) inside the timelines spinlock so that the
retirement cannot release the timeline before we have completed the
submission.

v2: Actually, to play nicely with engine_retire, we have to raise the
timeline.active_lock before releasing the HW. intel_gt_retire_requests()
is still serialised by the outer lock so it cannot see this
intermediate state, and engine_retire is serialised by HW submission.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191125105858.1718307-2-chris@chris-wilson.co.uk
(cherry picked from commit 88a4655e75ac8f55eea5e3f38b176cba9cf653b5)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
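The locking order the patch establishes can be illustrated with a
minimal, self-contained user-space C sketch. This is an analogy, not
the i915 code: timelines_lock, queue_to_hw() and engine_wakeref are
hypothetical stand-ins for the driver's timelines->lock,
__i915_request_queue() and engine->wakeref.counter.

/*
 * Sketch of the rule: publish the work and release the "engine"
 * reference under the same lock that retirement must take, so a
 * retirer can never observe a queued request before the accounting
 * (active_count, wakeref) is consistent.
 */
#include <pthread.h>
#include <stdatomic.h>

struct timeline {
	atomic_int active_count;	/* decremented by retirement; must not underflow */
};

static pthread_mutex_t timelines_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int engine_wakeref = 1;	/* stand-in for engine->wakeref.counter */

static void queue_to_hw(int rq)
{
	/* Once this returns, a retire worker may pick up rq at any time. */
	(void)rq;
}

static void queue_and_release(int rq, struct timeline *tl)
{
	pthread_mutex_lock(&timelines_lock);

	/* 1. Mark the timeline active so retirement accounting balances. */
	atomic_fetch_add(&tl->active_count, 1);

	/* 2. Publish to "HW" while the lock still excludes the retirer. */
	queue_to_hw(rq);

	/* 3. Only now let new submissions (and parking) proceed. */
	atomic_fetch_sub(&engine_wakeref, 1);

	pthread_mutex_unlock(&timelines_lock);
}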
Diffstat (limited to 'drivers/gpu/drm/i915/gt')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 373a4b9f159c..0e1ad4a4bd97 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -74,16 +74,33 @@ static inline void __timeline_mark_unlock(struct intel_context *ce,
 #endif /* !IS_ENABLED(CONFIG_LOCKDEP) */
 
 static void
-__intel_timeline_enter_and_release_pm(struct intel_timeline *tl,
-				      struct intel_engine_cs *engine)
+__queue_and_release_pm(struct i915_request *rq,
+		       struct intel_timeline *tl,
+		       struct intel_engine_cs *engine)
 {
 	struct intel_gt_timelines *timelines = &engine->gt->timelines;
 
+	GEM_TRACE("%s\n", engine->name);
+
+	/*
+	 * We have to serialise all potential retirement paths with our
+	 * submission, as we don't want to underflow either the
+	 * engine->wakeref.counter or our timeline->active_count.
+	 *
+	 * Equally, we cannot allow a new submission to start until
+	 * after we finish queueing, nor could we allow that submitter
+	 * to retire us before we are ready!
+	 */
 	spin_lock(&timelines->lock);
 
+	/* Let intel_gt_retire_requests() retire us (acquired under lock) */
 	if (!atomic_fetch_inc(&tl->active_count))
 		list_add_tail(&tl->link, &timelines->active_list);
 
+	/* Hand the request over to HW and so engine_retire() */
+	__i915_request_queue(rq, NULL);
+
+	/* Let new submissions commence (and maybe retire this timeline) */
 	__intel_wakeref_defer_park(&engine->wakeref);
 
 	spin_unlock(&timelines->lock);
@@ -148,10 +165,8 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 	/* Install ourselves as a preemption barrier */
 	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
 	__i915_request_commit(rq);
-	__i915_request_queue(rq, NULL);
-
-	/* Expose ourselves to intel_gt_retire_requests() and new submission */
-	__intel_timeline_enter_and_release_pm(ce->timeline, engine);
+	/* Expose ourselves to the world */
+	__queue_and_release_pm(rq, ce->timeline, engine);
 
 	result = false;
 out_unlock:
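For context, a hypothetical interleaving (reconstructed for
illustration, not taken from the commit) of the hazard that moving the
queue under the lock closes:

/*
 * Old ordering (queue outside timelines->lock), hypothetical race:
 *
 *   submitter				async engine_retire()
 *   ---------				---------------------
 *   __i915_request_queue(rq)
 *					rq completes immediately
 *					retire: timeline->active_count--
 *						(not yet incremented: underflow)
 *   spin_lock(&timelines->lock)
 *   active_count++			(too late)
 *
 * New ordering: the active_count increment and the queue both happen
 * under timelines->lock, so the retirer only ever runs against
 * consistent accounting.
 */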