about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915/gt/intel_lrc.c
diff options
context:
space:
mode:
author	Chris Wilson <chris@chris-wilson.co.uk>	2019-05-07 13:29:54 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-05-07 17:40:20 +0100
commit	5a6ac10b17ff285440d6abea97b6942b13f82772 (patch)
tree	34da76129e1753da611733900677a5c9a173f7e8 /drivers/gpu/drm/i915/gt/intel_lrc.c
parent	drm/i915: Only reschedule the submission tasklet if preemption is possible (diff)
downloadlinux-dev-5a6ac10b17ff285440d6abea97b6942b13f82772.tar.xz
linux-dev-5a6ac10b17ff285440d6abea97b6942b13f82772.zip
drm/i915/execlists: Don't apply priority boost for resets
Do not treat reset as a normal preemption event and avoid giving the guilty request a priority boost for simply being active at the time of reset.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190507122954.6299-1-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_lrc.c')
-rw-r--r--	drivers/gpu/drm/i915/gt/intel_lrc.c	16
1 file changed, 9 insertions, 7 deletions
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 636df21983dd..d1a54d2c3d5d 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -371,11 +371,11 @@ static void unwind_wa_tail(struct i915_request *rq)
}
static struct i915_request *
-__unwind_incomplete_requests(struct intel_engine_cs *engine)
+__unwind_incomplete_requests(struct intel_engine_cs *engine, int boost)
{
struct i915_request *rq, *rn, *active = NULL;
struct list_head *uninitialized_var(pl);
- int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
+ int prio = I915_PRIORITY_INVALID | boost;
lockdep_assert_held(&engine->timeline.lock);
@@ -419,8 +419,9 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* in the priority queue, but they will not gain immediate access to
* the GPU.
*/
- if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) {
- prio |= ACTIVE_PRIORITY;
+ if (~prio & boost && __i915_request_has_started(active)) {
+ prio |= boost;
+ GEM_BUG_ON(active->sched.attr.priority >= prio);
active->sched.attr.priority = prio;
list_move_tail(&active->sched.link,
i915_sched_lookup_priolist(engine, prio));
@@ -435,7 +436,7 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
- return __unwind_incomplete_requests(engine);
+ return __unwind_incomplete_requests(engine, 0);
}
static inline void
@@ -656,7 +657,8 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists)
execlists_cancel_port_requests(execlists);
__unwind_incomplete_requests(container_of(execlists,
struct intel_engine_cs,
- execlists));
+ execlists),
+ ACTIVE_PRIORITY);
}
static void execlists_dequeue(struct intel_engine_cs *engine)
@@ -1909,7 +1911,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
execlists_cancel_port_requests(execlists);
/* Push back any incomplete requests for replay after the reset. */
- rq = __unwind_incomplete_requests(engine);
+ rq = __unwind_incomplete_requests(engine, 0);
if (!rq)
goto out_replay;