author     Chris Wilson <chris@chris-wilson.co.uk>  2017-01-24 15:18:05 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>  2017-01-24 15:55:36 +0000
commit     538b257dae83268cc3536fb4c4ab4f57901d449d
tree       7b0e0db60b711a16c65d9b139a2aa3517619cd86 /drivers/gpu
parent     drm/i915: Only disable execlist preemption for the duration of the request
drm/i915: Move breadcrumbs irq_posted up a level to engine
In the next patch, we will use the irq_posted technique for another
engine interrupt; rather than use two members for the atomic updates,
we can use two bits of one instead. First, we need to update the
breadcrumbs to use the new common engine->irq_posted.

v2: Use set_bit() rather than __set_bit() to ensure atomicity with
respect to other bits in the mask

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170124151805.26146-1-chris@chris-wilson.co.uk
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
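To make the bit-packing concrete: rather than one dedicated bool per
interrupt source, a single unsigned long carries one flag per source,
and each flag is set and consumed with atomic read-modify-write bit
operations, so concurrent posts to different bits cannot clobber one
another. A minimal userspace sketch of the pattern, using GCC atomic
builtins in place of the kernel's set_bit()/test_and_clear_bit(); the
second bit name is illustrative only, not part of this patch:

#include <stdio.h>

#define ENGINE_IRQ_BREADCRUMB	0
#define ENGINE_IRQ_OTHER	1	/* hypothetical second source */

static unsigned long irq_posted;

/* Atomically set one bit; other bits in the word are unaffected. */
static void post_irq(int bit)
{
	__atomic_fetch_or(&irq_posted, 1UL << bit, __ATOMIC_RELAXED);
}

/* Atomically test-and-clear one bit; returns its previous value. */
static int consume_irq(int bit)
{
	unsigned long mask = 1UL << bit;

	return !!(__atomic_fetch_and(&irq_posted, ~mask, __ATOMIC_RELAXED) & mask);
}

int main(void)
{
	post_irq(ENGINE_IRQ_BREADCRUMB);
	post_irq(ENGINE_IRQ_OTHER);

	/* Consuming one bit leaves the other still posted. */
	printf("%d\n", consume_irq(ENGINE_IRQ_BREADCRUMB));	/* 1 */
	printf("%d\n", consume_irq(ENGINE_IRQ_BREADCRUMB));	/* 0 */
	printf("%d\n", consume_irq(ENGINE_IRQ_OTHER));		/* 1 */
	return 0;
}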
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          | 2 +-
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          | 2 +-
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 9 ++++-----
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  | 4 +++-
4 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a53f6b30b695..6eff81fb939c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3963,7 +3963,7 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
 	 */
 	if (engine->irq_seqno_barrier &&
 	    rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
-	    cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
+	    test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
 		struct task_struct *tsk;
 
 		/* The ordering of irq_posted versus applying the barrier
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6fefc34ef602..7e087c344265 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1033,7 +1033,7 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
 
 static void notify_ring(struct intel_engine_cs *engine)
 {
-	smp_store_mb(engine->breadcrumbs.irq_posted, true);
+	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
 	if (intel_engine_wakeup(engine))
 		trace_i915_gem_request_notify(engine);
 }
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index c6fa77177615..6b24f2544b6b 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -81,7 +81,7 @@ static void irq_enable(struct intel_engine_cs *engine)
 	 * we still need to force the barrier before reading the seqno,
 	 * just in case.
 	 */
-	engine->breadcrumbs.irq_posted = true;
+	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
 
 	/* Caller disables interrupts */
 	spin_lock(&engine->i915->irq_lock);
@@ -95,8 +95,6 @@ static void irq_disable(struct intel_engine_cs *engine)
 	spin_lock(&engine->i915->irq_lock);
 	engine->irq_disable(engine);
 	spin_unlock(&engine->i915->irq_lock);
-
-	engine->breadcrumbs.irq_posted = false;
 }
 
 static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
@@ -257,7 +255,8 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
 		 * in case the seqno passed.
 		 */
 		__intel_breadcrumbs_enable_irq(b);
-		if (READ_ONCE(b->irq_posted))
+		if (test_bit(ENGINE_IRQ_BREADCRUMB,
+			     &engine->irq_posted))
 			wake_up_process(to_wait(next)->tsk);
 	}
 
@@ -610,7 +609,7 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
 	if (intel_engine_has_waiter(engine)) {
 		b->timeout = wait_timeout();
 		__intel_breadcrumbs_enable_irq(b);
-		if (READ_ONCE(b->irq_posted))
+		if (test_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted))
 			wake_up_process(b->first_wait->tsk);
 	} else {
 		/* sanitize the IMR and unmask any auxiliary interrupts */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index dbd32585f27a..a9ea84ea3155 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -211,6 +211,9 @@ struct intel_engine_cs {
 
 	struct intel_render_state *render_state;
 
+	unsigned long irq_posted;
+#define ENGINE_IRQ_BREADCRUMB 0
+
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
 	 * heavyweight seqno dance, we delegate the task (of being the
@@ -229,7 +232,6 @@ struct intel_engine_cs {
 	 */
 	struct intel_breadcrumbs {
 		struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
-		bool irq_posted;
 
 		spinlock_t lock; /* protects the lists of requests; irqsafe */
 		struct rb_root waiters; /* sorted by retirement, priority */
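
A note on the v2 change: __set_bit() is a plain read-modify-write,
which is only safe while a flag has a word to itself. Once two
interrupt sources share engine->irq_posted, a non-atomic update of one
bit can lose a concurrent update of the other, which is why the patch
uses set_bit(). A small sketch of the hazard, with a GCC builtin as a
stand-in for the kernel's atomic set_bit(); this is illustrative, not
code from the patch:

#include <stdio.h>

#define BIT(n) (1UL << (n))

static unsigned long posted;

/* __set_bit()-style update: a plain load/or/store. Two CPUs doing this
 * concurrently to different bits of the same word can interleave as:
 *
 *   CPU A: load posted (0)        CPU B: load posted (0)
 *   CPU A: or  BIT(0) -> 1        CPU B: or  BIT(1) -> 2
 *   CPU A: store 1                CPU B: store 2   <- bit 0 lost
 */
static void set_bit_plain(int nr, unsigned long *addr)
{
	*addr |= BIT(nr);
}

/* set_bit()-style update: the whole read-modify-write is one atomic
 * operation, so a concurrent update to another bit cannot be lost. */
static void set_bit_atomic(int nr, unsigned long *addr)
{
	__atomic_fetch_or(addr, BIT(nr), __ATOMIC_RELAXED);
}

int main(void)
{
	set_bit_plain(0, &posted);	/* fine single-threaded */
	set_bit_atomic(1, &posted);	/* safe even under concurrency */
	printf("mask: %#lx\n", posted);	/* 0x3 */
	return 0;
}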