path: root/drivers/gpu/drm/i915/gt/intel_engine_cs.c
author    Matthew Brost <matthew.brost@intel.com>    2021-06-17 18:06:34 -0700
committer Matt Roper <matthew.d.roper@intel.com>     2021-06-18 15:13:33 -0700
commit    349a2bc5aae45f54bce1c6fd54d8d3ac2ae26611 (patch)
tree      a93ad9e0bc0d8f5b2065a834569fabfd3a7743fd /drivers/gpu/drm/i915/gt/intel_engine_cs.c
parent    drm/i915: Reset sched_engine.no_priolist immediately after dequeue (diff)
drm/i915: Move active tracking to i915_sched_engine
Move active request tracking and its lock to i915_sched_engine. This
lock is also the submission lock, so i915_sched_engine is the right
place for it.

v3:
 (Jason Ekstrand)
  Add kernel doc
v6:
 Rebase

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210618010638.98941-5-matthew.brost@intel.com
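For orientation before reading the diff: the three engine->active members
touched here (the request list, the hold list, and their lock) move onto the
shared scheduling struct. The sketch below is inferred only from the accesses
visible in this diff (engine->sched_engine->lock, ->requests, ->hold); the
real struct i915_sched_engine in i915_scheduler_types.h carries additional
scheduler state and its own kernel doc, and the comments here are
illustrative, not the kernel's.

        /* Sketch only, not the verbatim kernel definition. */
        struct i915_sched_engine {
                /*
                 * Protects the two lists below. This is also the submission
                 * lock, which is why it belongs on the sched_engine.
                 */
                spinlock_t lock;

                /* Requests in flight on the backend, in submission order. */
                struct list_head requests;

                /* Ready requests temporarily suspended ("on hold"). */
                struct list_head hold;

                /* ... priority queue and other scheduler state elided ... */
        };

The list-head and spinlock initialization deleted from
intel_engine_init_active() below now happens on the i915_sched_engine
creation path in i915_scheduler.c, which is outside this file's diff.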
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_engine_cs.c')
-rw-r--r--    drivers/gpu/drm/i915/gt/intel_engine_cs.c    43
1 file changed, 11 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 506b8f702a7f..530bd3baf9d7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -721,7 +721,6 @@ static int engine_setup_common(struct intel_engine_cs *engine)
         if (err)
                 goto err_cmd_parser;
 
-        intel_engine_init_active(engine, ENGINE_PHYSICAL);
         intel_engine_init_execlists(engine);
         intel_engine_init__pm(engine);
         intel_engine_init_retire(engine);
@@ -780,11 +779,11 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
         frame->rq.ring = &frame->ring;
 
         mutex_lock(&ce->timeline->mutex);
-        spin_lock_irq(&engine->active.lock);
+        spin_lock_irq(&engine->sched_engine->lock);
 
         dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
 
-        spin_unlock_irq(&engine->active.lock);
+        spin_unlock_irq(&engine->sched_engine->lock);
         mutex_unlock(&ce->timeline->mutex);
 
         GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
@@ -793,28 +792,6 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
         return dw;
 }
 
-void
-intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
-{
-        INIT_LIST_HEAD(&engine->active.requests);
-        INIT_LIST_HEAD(&engine->active.hold);
-
-        spin_lock_init(&engine->active.lock);
-        lockdep_set_subclass(&engine->active.lock, subclass);
-
-        /*
-         * Due to an interesting quirk in lockdep's internal debug tracking,
-         * after setting a subclass we must ensure the lock is used. Otherwise,
-         * nr_unused_locks is incremented once too often.
-         */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-        local_irq_disable();
-        lock_map_acquire(&engine->active.lock.dep_map);
-        lock_map_release(&engine->active.lock.dep_map);
-        local_irq_enable();
-#endif
-}
-
 struct intel_context *
 intel_engine_create_pinned_context(struct intel_engine_cs *engine,
                                    struct i915_address_space *vm,
@@ -969,7 +946,7 @@ int intel_engines_init(struct intel_gt *gt)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
-        GEM_BUG_ON(!list_empty(&engine->active.requests));
+        GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
 
         tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
         i915_sched_engine_put(engine->sched_engine);
@@ -1325,7 +1302,7 @@ static struct intel_timeline *get_timeline(struct i915_request *rq)
         struct intel_timeline *tl;
 
         /*
-         * Even though we are holding the engine->active.lock here, there
+         * Even though we are holding the engine->sched_engine->lock here, there
          * is no control over the submission queue per-se and we are
         * inspecting the active state at a random point in time, with an
         * unknown queue. Play safe and make sure the timeline remains valid.
@@ -1672,7 +1649,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
         drm_printf(m, "\tRequests:\n");
 
-        spin_lock_irqsave(&engine->active.lock, flags);
+        spin_lock_irqsave(&engine->sched_engine->lock, flags);
         rq = intel_engine_find_active_request(engine);
         if (rq) {
                 struct intel_timeline *tl = get_timeline(rq);
@@ -1703,8 +1680,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                         hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
                 }
         }
-        drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
-        spin_unlock_irqrestore(&engine->active.lock, flags);
+        drm_printf(m, "\tOn hold?: %lu\n",
+                   list_count(&engine->sched_engine->hold));
+        spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 
         drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
         wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
@@ -1784,7 +1762,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
          * At all other times, we must assume the GPU is still running, but
          * we only care about the snapshot of this moment.
          */
-        lockdep_assert_held(&engine->active.lock);
+        lockdep_assert_held(&engine->sched_engine->lock);
 
         rcu_read_lock();
         request = execlists_active(&engine->execlists);
@@ -1802,7 +1780,8 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
         if (active)
                 return active;
 
-        list_for_each_entry(request, &engine->active.requests, sched.link) {
+        list_for_each_entry(request, &engine->sched_engine->requests,
+                            sched.link) {
                 if (__i915_request_is_complete(request))
                         continue;