Diffstat (limited to 'drivers/gpu/drm/i915/intel_breadcrumbs.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_breadcrumbs.c	911
1 file changed, 183 insertions(+), 728 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 447c5256f63a..cacaa1d04d17 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -29,180 +29,146 @@
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
-static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
+static void irq_enable(struct intel_engine_cs *engine)
{
- struct intel_wait *wait;
- unsigned int result = 0;
-
- lockdep_assert_held(&b->irq_lock);
-
- wait = b->irq_wait;
- if (wait) {
- /*
- * N.B. Since task_asleep() and ttwu are not atomic, the
- * waiter may actually go to sleep after the check, causing
- * us to suppress a valid wakeup. We prefer to reduce the
- * number of false positive missed_breadcrumb() warnings
- * at the expense of a few false negatives, as it is easy
- * to trigger a false positive under heavy load. Enough
- * signal should remain from genuine missed_breadcrumb()
- * for us to detect in CI.
- */
- bool was_asleep = task_asleep(wait->tsk);
-
- result = ENGINE_WAKEUP_WAITER;
- if (wake_up_process(wait->tsk) && was_asleep)
- result |= ENGINE_WAKEUP_ASLEEP;
- }
+ if (!engine->irq_enable)
+ return;
- return result;
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_enable(engine);
+ spin_unlock(&engine->i915->irq_lock);
}
-unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
+static void irq_disable(struct intel_engine_cs *engine)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- unsigned long flags;
- unsigned int result;
-
- spin_lock_irqsave(&b->irq_lock, flags);
- result = __intel_breadcrumbs_wakeup(b);
- spin_unlock_irqrestore(&b->irq_lock, flags);
-
- return result;
-}
+ if (!engine->irq_disable)
+ return;
-static unsigned long wait_timeout(void)
-{
- return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
+ /* Caller disables interrupts */
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_disable(engine);
+ spin_unlock(&engine->i915->irq_lock);
}
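
Both helpers take the device-wide engine->i915->irq_lock with a plain spin_lock(), relying on the "Caller disables interrupts" contract noted in each: every caller either holds b->irq_lock via spin_lock_irq() or runs in hardirq context. A minimal sketch of that nesting convention, with illustrative lock names rather than the i915 structures:

/* Sketch: nesting a plain spin_lock() under an irq-disabling lock. */
static DEFINE_SPINLOCK(outer_lock);	/* taken with spin_lock_irq() */
static DEFINE_SPINLOCK(inner_lock);	/* only taken with irqs already off */

static void outer_path(void)
{
	spin_lock_irq(&outer_lock);	/* disables local interrupts */
	spin_lock(&inner_lock);		/* safe: irqs are already off */
	/* ... hardware access ... */
	spin_unlock(&inner_lock);
	spin_unlock_irq(&outer_lock);
}
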
-static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
+static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
- if (GEM_SHOW_DEBUG()) {
- struct drm_printer p = drm_debug_printer(__func__);
+ lockdep_assert_held(&b->irq_lock);
- intel_engine_dump(engine, &p,
- "%s missed breadcrumb at %pS\n",
- engine->name, __builtin_return_address(0));
- }
+ GEM_BUG_ON(!b->irq_enabled);
+ if (!--b->irq_enabled)
+ irq_disable(container_of(b,
+ struct intel_engine_cs,
+ breadcrumbs));
- set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+ b->irq_armed = false;
}
-static void intel_breadcrumbs_hangcheck(struct timer_list *t)
+void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine =
- from_timer(engine, t, breadcrumbs.hangcheck);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- unsigned int irq_count;
if (!b->irq_armed)
return;
- irq_count = READ_ONCE(b->irq_count);
- if (b->hangcheck_interrupts != irq_count) {
- b->hangcheck_interrupts = irq_count;
- mod_timer(&b->hangcheck, wait_timeout());
- return;
- }
+ spin_lock_irq(&b->irq_lock);
+ if (b->irq_armed)
+ __intel_breadcrumbs_disarm_irq(b);
+ spin_unlock_irq(&b->irq_lock);
+}
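
intel_engine_disarm_breadcrumbs() above uses the familiar unlocked-check/locked-recheck idiom: b->irq_armed is read once without the lock to skip the common idle case cheaply, then re-tested under b->irq_lock before acting. The shape of the pattern, with hypothetical names:

/* Sketch: double-checked disarm. The unlocked read is only an
 * optimisation; the locked re-test makes the actual decision.
 */
static void disarm_if_armed(struct foo *f)
{
	if (!READ_ONCE(f->armed))	/* cheap early-out, may race */
		return;

	spin_lock_irq(&f->lock);
	if (f->armed)			/* authoritative check */
		__disarm_locked(f);
	spin_unlock_irq(&f->lock);
}
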
- /* We keep the hangcheck timer alive until we disarm the irq, even
- * if there are no waiters at present.
- *
- * If the waiter was currently running, assume it hasn't had a chance
- * to process the pending interrupt (e.g, low priority task on a loaded
- * system) and wait until it sleeps before declaring a missed interrupt.
- *
- * If the waiter was asleep (and not even pending a wakeup), then we
- * must have missed an interrupt as the GPU has stopped advancing
- * but we still have a waiter. Assuming all batches complete within
- * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
- */
- if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
- missed_breadcrumb(engine);
- mod_timer(&b->fake_irq, jiffies + 1);
- } else {
- mod_timer(&b->hangcheck, wait_timeout());
- }
+static inline bool __request_completed(const struct i915_request *rq)
+{
+ return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
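
__request_completed() compares the breadcrumb written to the hardware status page against the request's own fence.seqno. i915_seqno_passed() is, at the time of this patch, a signed-difference comparison so that it stays correct across u32 wraparound; roughly:

/* Sketch of the wraparound-safe comparison i915_seqno_passed()
 * performs: seq1 has passed seq2 iff the signed distance is >= 0.
 */
static inline bool seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}

/* e.g. seqno_passed(2, 0xfffffffe) is true: 2 is four steps past
 * 0xfffffffe once the counter wraps.
 */
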
-static void intel_breadcrumbs_fake_irq(struct timer_list *t)
+bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine =
- from_timer(engine, t, breadcrumbs.fake_irq);
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_context *ce, *cn;
+ struct list_head *pos, *next;
+ LIST_HEAD(signal);
- /*
- * The timer persists in case we cannot enable interrupts,
- * or if we have previously seen seqno/interrupt incoherency
- * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
- * Here the worker will wake up every jiffie in order to kick the
- * oldest waiter to do the coherent seqno check.
- */
+ spin_lock(&b->irq_lock);
- spin_lock_irq(&b->irq_lock);
- if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
- __intel_engine_disarm_breadcrumbs(engine);
- spin_unlock_irq(&b->irq_lock);
- if (!b->irq_armed)
- return;
+ if (b->irq_armed && list_empty(&b->signalers))
+ __intel_breadcrumbs_disarm_irq(b);
- /* If the user has disabled the fake-irq, restore the hangchecking */
- if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings)) {
- mod_timer(&b->hangcheck, wait_timeout());
- return;
- }
+ list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
+ GEM_BUG_ON(list_empty(&ce->signals));
- mod_timer(&b->fake_irq, jiffies + 1);
-}
+ list_for_each_safe(pos, next, &ce->signals) {
+ struct i915_request *rq =
+ list_entry(pos, typeof(*rq), signal_link);
-static void irq_enable(struct intel_engine_cs *engine)
-{
- /*
- * FIXME: Ideally we want this on the API boundary, but for the
- * sake of testing with mock breadcrumbs (no HW so unable to
- * enable irqs) we place it deep within the bowels, at the point
- * of no return.
- */
- GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
+ if (!__request_completed(rq))
+ break;
- /* Enabling the IRQ may miss the generation of the interrupt, but
- * we still need to force the barrier before reading the seqno,
- * just in case.
- */
- set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
+ &rq->fence.flags));
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
- /* Caller disables interrupts */
- if (engine->irq_enable) {
- spin_lock(&engine->i915->irq_lock);
- engine->irq_enable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ /*
+ * We may race with direct invocation of
+ * dma_fence_signal(), e.g. i915_request_retire(),
+ * in which case we can skip processing it ourselves.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &rq->fence.flags))
+ continue;
+
+ /*
+ * Queue for execution after dropping the signaling
+ * spinlock as the callback chain may end up adding
+ * more signalers to the same context or engine.
+ */
+ i915_request_get(rq);
+ list_add_tail(&rq->signal_link, &signal);
+ }
+
+ /*
+ * We process the list deletion in bulk, only using a list_add
+ * (not list_move) above but keeping the status of
+ * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
+ */
+ if (!list_is_first(pos, &ce->signals)) {
+ /* Advance the list to the first incomplete request */
+ __list_del_many(&ce->signals, pos);
+ if (&ce->signals == pos) /* now empty */
+ list_del_init(&ce->signal_link);
+ }
}
-}
-static void irq_disable(struct intel_engine_cs *engine)
-{
- /* Caller disables interrupts */
- if (engine->irq_disable) {
- spin_lock(&engine->i915->irq_lock);
- engine->irq_disable(engine);
- spin_unlock(&engine->i915->irq_lock);
+ spin_unlock(&b->irq_lock);
+
+ list_for_each_safe(pos, next, &signal) {
+ struct i915_request *rq =
+ list_entry(pos, typeof(*rq), signal_link);
+
+ dma_fence_signal(&rq->fence);
+ i915_request_put(rq);
}
+
+ return !list_empty(&signal);
}
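
intel_engine_breadcrumbs_irq() is a two-phase signal: completed requests are collected onto a local list while b->irq_lock is held (the real code uses list_add() per request plus one __list_del_many() per context to splice out the completed span in bulk), and dma_fence_signal() only runs after the lock is dropped, since fence callbacks may re-enter the breadcrumbs and add new signalers. A stripped-down sketch of that shape, with hypothetical types and a per-entry list_move for brevity:

/* Sketch: gather work under the spinlock, run callbacks outside it. */
struct req {
	struct list_head link;
};

struct bc {
	spinlock_t lock;
	struct list_head pending;	/* kept in retirement order */
};

static void collect_then_signal(struct bc *b)
{
	struct req *rq, *rn;
	LIST_HEAD(done);

	spin_lock(&b->lock);
	list_for_each_entry_safe(rq, rn, &b->pending, link) {
		if (!completed(rq))	/* hypothetical helper */
			break;		/* everything after is younger */
		list_move_tail(&rq->link, &done);
	}
	spin_unlock(&b->lock);

	list_for_each_entry_safe(rq, rn, &done, link)
		signal(rq);	/* may take other locks, re-arm, etc. */
}
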
-void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
+bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ bool result;
- lockdep_assert_held(&b->irq_lock);
- GEM_BUG_ON(b->irq_wait);
- GEM_BUG_ON(!b->irq_armed);
+ local_irq_disable();
+ result = intel_engine_breadcrumbs_irq(engine);
+ local_irq_enable();
- GEM_BUG_ON(!b->irq_enabled);
- if (!--b->irq_enabled)
- irq_disable(engine);
+ return result;
+}
- b->irq_armed = false;
+static void signal_irq_work(struct irq_work *work)
+{
+ struct intel_engine_cs *engine =
+ container_of(work, typeof(*engine), breadcrumbs.irq_work);
+
+ intel_engine_breadcrumbs_irq(engine);
}
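
signal_irq_work() is the irq_work callback installed by intel_engine_init_breadcrumbs() below; irq_work lets a caller in a constrained context defer the signaling into one where taking b->irq_lock and walking the fence callbacks is safe. Assuming a hardirq-context caller (the exact call site is outside this file), kicking it might look like:

/* Sketch: queueing the deferred signaler. irq_work_queue() is safe
 * from hardirq/NMI context; the callback runs shortly afterwards in
 * interrupt context, typically via a self-IPI.
 */
static void user_interrupt(struct intel_engine_cs *engine)
{
	irq_work_queue(&engine->breadcrumbs.irq_work);
}
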
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
@@ -227,666 +193,155 @@ void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
spin_unlock_irq(&b->irq_lock);
}
-void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct intel_wait *wait, *n;
-
- if (!b->irq_armed)
- return;
-
- /*
- * We only disarm the irq when we are idle (all requests completed),
- * so if the bottom-half remains asleep, it missed the request
- * completion.
- */
- if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
- missed_breadcrumb(engine);
-
- spin_lock_irq(&b->rb_lock);
-
- spin_lock(&b->irq_lock);
- b->irq_wait = NULL;
- if (b->irq_armed)
- __intel_engine_disarm_breadcrumbs(engine);
- spin_unlock(&b->irq_lock);
-
- rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
- GEM_BUG_ON(!intel_engine_signaled(engine, wait->seqno));
- RB_CLEAR_NODE(&wait->node);
- wake_up_process(wait->tsk);
- }
- b->waiters = RB_ROOT;
-
- spin_unlock_irq(&b->rb_lock);
-}
-
-static bool use_fake_irq(const struct intel_breadcrumbs *b)
-{
- const struct intel_engine_cs *engine =
- container_of(b, struct intel_engine_cs, breadcrumbs);
-
- if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
- return false;
-
- /*
- * Only start with the heavy weight fake irq timer if we have not
- * seen any interrupts since enabling it the first time. If the
- * interrupts are still arriving, it means we made a mistake in our
- * engine->seqno_barrier(), a timing error that should be transient
- * and unlikely to reoccur.
- */
- return READ_ONCE(b->irq_count) == b->hangcheck_interrupts;
-}
-
-static void enable_fake_irq(struct intel_breadcrumbs *b)
-{
- /* Ensure we never sleep indefinitely */
- if (!b->irq_enabled || use_fake_irq(b))
- mod_timer(&b->fake_irq, jiffies + 1);
- else
- mod_timer(&b->hangcheck, wait_timeout());
-}
-
-static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
+static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
- struct drm_i915_private *i915 = engine->i915;
- bool enabled;
lockdep_assert_held(&b->irq_lock);
if (b->irq_armed)
- return false;
+ return;
- /* The breadcrumb irq will be disarmed on the interrupt after the
+ /*
+ * The breadcrumb irq will be disarmed on the interrupt after the
* waiters are signaled. This gives us a single interrupt window in
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
b->irq_armed = true;
- if (I915_SELFTEST_ONLY(b->mock)) {
- /* For our mock objects we want to avoid interaction
- * with the real hardware (which is not set up). So
- * we simply pretend we have enabled the powerwell
- * and the irq, and leave it up to the mock
- * implementation to call intel_engine_wakeup()
- * itself when it wants to simulate a user interrupt,
- */
- return true;
- }
-
- /* Since we are waiting on a request, the GPU should be busy
+ /*
+ * Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference. This is tracked
* by i915->gt.awake, we can forgo holding our own wakref
* for the interrupt as before i915->gt.awake is released (when
* the driver is idle) we disarm the breadcrumbs.
*/
- /* No interrupts? Kick the waiter every jiffie! */
- enabled = false;
- if (!b->irq_enabled++ &&
- !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
+ if (!b->irq_enabled++)
irq_enable(engine);
- enabled = true;
- }
-
- enable_fake_irq(b);
- return enabled;
-}
-
-static inline struct intel_wait *to_wait(struct rb_node *node)
-{
- return rb_entry(node, struct intel_wait, node);
-}
-
-static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
- struct intel_wait *wait)
-{
- lockdep_assert_held(&b->rb_lock);
- GEM_BUG_ON(b->irq_wait == wait);
-
- /*
- * This request is completed, so remove it from the tree, mark it as
- * complete, and *then* wake up the associated task. N.B. when the
- * task wakes up, it will find the empty rb_node, discern that it
- * has already been removed from the tree and skip the serialisation
- * of the b->rb_lock and b->irq_lock. This means that the destruction
- * of the intel_wait is not serialised with the interrupt handler
- * by the waiter - it must instead be serialised by the caller.
- */
- rb_erase(&wait->node, &b->waiters);
- RB_CLEAR_NODE(&wait->node);
-
- if (wait->tsk->state != TASK_RUNNING)
- wake_up_process(wait->tsk); /* implicit smp_wmb() */
-}
-
-static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
- struct rb_node *next)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- spin_lock(&b->irq_lock);
- GEM_BUG_ON(!b->irq_armed);
- GEM_BUG_ON(!b->irq_wait);
- b->irq_wait = to_wait(next);
- spin_unlock(&b->irq_lock);
-
- /* We always wake up the next waiter that takes over as the bottom-half
- * as we may delegate not only the irq-seqno barrier to the next waiter
- * but also the task of waking up concurrent waiters.
- */
- if (next)
- wake_up_process(to_wait(next)->tsk);
}
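
Arming couples a boolean (irq_armed, the per-engine "we expect an interrupt" state) with a counted enable (irq_enabled, shared with intel_engine_pin_breadcrumbs_irq()), so the hardware interrupt is only touched on the 0<->1 transitions. The counted-enable idiom in miniature, with hypothetical names:

/* Sketch: reference-counted hardware enable. Only the first user
 * enables and only the last disables; 'count' is protected by the
 * same lock in both paths.
 */
static void get_hw_irq(struct hw *hw)
{
	lockdep_assert_held(&hw->lock);
	if (!hw->count++)
		hw_irq_enable(hw);	/* 0 -> 1: touch hardware */
}

static void put_hw_irq(struct hw *hw)
{
	lockdep_assert_held(&hw->lock);
	BUG_ON(!hw->count);
	if (!--hw->count)
		hw_irq_disable(hw);	/* 1 -> 0: touch hardware */
}
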
-static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait)
+void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct rb_node **p, *parent, *completed;
- bool first, armed;
- u32 seqno;
-
- GEM_BUG_ON(!wait->seqno);
-
- /* Insert the request into the retirement ordered list
- * of waiters by walking the rbtree. If we are the oldest
- * seqno in the tree (the first to be retired), then
- * set ourselves as the bottom-half.
- *
- * As we descend the tree, prune completed branches since we hold the
- * spinlock we know that the first_waiter must be delayed and can
- * reduce some of the sequential wake up latency if we take action
- * ourselves and wake up the completed tasks in parallel. Also, by
- * removing stale elements in the tree, we may be able to reduce the
- * ping-pong between the old bottom-half and ourselves as first-waiter.
- */
- armed = false;
- first = true;
- parent = NULL;
- completed = NULL;
- seqno = intel_engine_get_seqno(engine);
-
- /* If the request completed before we managed to grab the spinlock,
- * return now before adding ourselves to the rbtree. We let the
- * current bottom-half handle any pending wakeups and instead
- * try and get out of the way quickly.
- */
- if (i915_seqno_passed(seqno, wait->seqno)) {
- RB_CLEAR_NODE(&wait->node);
- return first;
- }
-
- p = &b->waiters.rb_node;
- while (*p) {
- parent = *p;
- if (wait->seqno == to_wait(parent)->seqno) {
- /* We have multiple waiters on the same seqno, select
- * the highest priority task (that with the smallest
- * task->prio) to serve as the bottom-half for this
- * group.
- */
- if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
- p = &parent->rb_right;
- first = false;
- } else {
- p = &parent->rb_left;
- }
- } else if (i915_seqno_passed(wait->seqno,
- to_wait(parent)->seqno)) {
- p = &parent->rb_right;
- if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
- completed = parent;
- else
- first = false;
- } else {
- p = &parent->rb_left;
- }
- }
- rb_link_node(&wait->node, parent, p);
- rb_insert_color(&wait->node, &b->waiters);
-
- if (first) {
- spin_lock(&b->irq_lock);
- b->irq_wait = wait;
- /* After assigning ourselves as the new bottom-half, we must
- * perform a cursory check to prevent a missed interrupt.
- * Either we miss the interrupt whilst programming the hardware,
- * or if there was a previous waiter (for a later seqno) they
- * may be woken instead of us (due to the inherent race
- * in the unlocked read of b->irq_seqno_bh in the irq handler)
- * and so we miss the wake up.
- */
- armed = __intel_breadcrumbs_enable_irq(b);
- spin_unlock(&b->irq_lock);
- }
- if (completed) {
- /* Advance the bottom-half (b->irq_wait) before we wake up
- * the waiters who may scribble over their intel_wait
- * just as the interrupt handler is dereferencing it via
- * b->irq_wait.
- */
- if (!first) {
- struct rb_node *next = rb_next(completed);
- GEM_BUG_ON(next == &wait->node);
- __intel_breadcrumbs_next(engine, next);
- }
-
- do {
- struct intel_wait *crumb = to_wait(completed);
- completed = rb_prev(completed);
- __intel_breadcrumbs_finish(b, crumb);
- } while (completed);
- }
-
- GEM_BUG_ON(!b->irq_wait);
- GEM_BUG_ON(!b->irq_armed);
- GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
+ spin_lock_init(&b->irq_lock);
+ INIT_LIST_HEAD(&b->signalers);
- return armed;
+ init_irq_work(&b->irq_work, signal_irq_work);
}
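
For orientation, these are the intel_breadcrumbs fields this file now relies on, reconstructed from the code in this diff (the authoritative definition lives in the engine header, not here):

/* Sketch of the per-engine breadcrumbs state after this patch: */
struct intel_breadcrumbs_sketch {
	spinlock_t irq_lock;		/* guards everything below */
	struct list_head signalers;	/* contexts with pending signals */
	struct irq_work irq_work;	/* runs signal_irq_work() */
	unsigned int irq_enabled;	/* counted hardware irq enables */
	bool irq_armed;			/* waiting on a user interrupt */
};
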
-bool intel_engine_add_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait)
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- bool armed;
-
- spin_lock_irq(&b->rb_lock);
- armed = __intel_engine_add_wait(engine, wait);
- spin_unlock_irq(&b->rb_lock);
- if (armed)
- return armed;
-
- /* Make the caller recheck if its request has already started. */
- return intel_engine_has_started(engine, wait->seqno);
-}
+ unsigned long flags;
-static inline bool chain_wakeup(struct rb_node *rb, int priority)
-{
- return rb && to_wait(rb)->tsk->prio <= priority;
-}
+ spin_lock_irqsave(&b->irq_lock, flags);
-static inline int wakeup_priority(struct intel_breadcrumbs *b,
- struct task_struct *tsk)
-{
- if (tsk == b->signaler)
- return INT_MIN;
+ if (b->irq_enabled)
+ irq_enable(engine);
else
- return tsk->prio;
-}
-
-static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- lockdep_assert_held(&b->rb_lock);
-
- if (RB_EMPTY_NODE(&wait->node))
- goto out;
-
- if (b->irq_wait == wait) {
- const int priority = wakeup_priority(b, wait->tsk);
- struct rb_node *next;
-
- /* We are the current bottom-half. Find the next candidate,
- * the first waiter in the queue on the remaining oldest
- * request. As multiple seqnos may complete in the time it
- * takes us to wake up and find the next waiter, we have to
- * wake up that waiter for it to perform its own coherent
- * completion check.
- */
- next = rb_next(&wait->node);
- if (chain_wakeup(next, priority)) {
- /* If the next waiter is already complete,
- * wake it up and continue onto the next waiter. So
- * if we have a small herd, they will wake up in parallel
- * rather than sequentially, which should reduce
- * the overall latency in waking all the completed
- * clients.
- *
- * However, waking up a chain adds extra latency to
- * the first_waiter. This is undesirable if that
- * waiter is a high priority task.
- */
- u32 seqno = intel_engine_get_seqno(engine);
-
- while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
- struct rb_node *n = rb_next(next);
-
- __intel_breadcrumbs_finish(b, to_wait(next));
- next = n;
- if (!chain_wakeup(next, priority))
- break;
- }
- }
-
- __intel_breadcrumbs_next(engine, next);
- } else {
- GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
- }
-
- GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
- rb_erase(&wait->node, &b->waiters);
- RB_CLEAR_NODE(&wait->node);
+ irq_disable(engine);
-out:
- GEM_BUG_ON(b->irq_wait == wait);
- GEM_BUG_ON(rb_first(&b->waiters) !=
- (b->irq_wait ? &b->irq_wait->node : NULL));
+ spin_unlock_irqrestore(&b->irq_lock, flags);
}
-void intel_engine_remove_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait)
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- /* Quick check to see if this waiter was already decoupled from
- * the tree by the bottom-half to avoid contention on the spinlock
- * by the herd.
- */
- if (RB_EMPTY_NODE(&wait->node)) {
- GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
- return;
- }
-
- spin_lock_irq(&b->rb_lock);
- __intel_engine_remove_wait(engine, wait);
- spin_unlock_irq(&b->rb_lock);
}
-static void signaler_set_rtpriority(void)
+bool i915_request_enable_breadcrumb(struct i915_request *rq)
{
- struct sched_param param = { .sched_priority = 1 };
-
- sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
-}
+ struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
-static int intel_breadcrumbs_signaler(void *arg)
-{
- struct intel_engine_cs *engine = arg;
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct i915_request *rq, *n;
+ GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
- /* Install ourselves with high priority to reduce signalling latency */
- signaler_set_rtpriority();
+ if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags))
+ return true;
- do {
- bool do_schedule = true;
- LIST_HEAD(list);
- u32 seqno;
+ spin_lock(&b->irq_lock);
+ if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags) &&
+ !__request_completed(rq)) {
+ struct intel_context *ce = rq->hw_context;
+ struct list_head *pos;
- set_current_state(TASK_INTERRUPTIBLE);
- if (list_empty(&b->signals))
- goto sleep;
+ __intel_breadcrumbs_arm_irq(b);
/*
- * We are either woken up by the interrupt bottom-half,
- * or by a client adding a new signaller. In both cases,
- * the GPU seqno may have advanced beyond our oldest signal.
- * If it has, propagate the signal, remove the waiter and
- * check again with the next oldest signal. Otherwise we
- * need to wait for a new interrupt from the GPU or for
- * a new client.
+ * We keep the seqno in retirement order, so we can break
+ * inside intel_engine_breadcrumbs_irq as soon as we've passed
+ * the last completed request (or seen a request that hasn't
+ * even started). We could iterate the timeline->requests list,
+ * but keeping a separate signalers_list has the advantage of
+ * hopefully being much smaller than the full list and so
+ * provides faster iteration and detection when there are no
+ * more interrupts required for this context.
+ *
+ * We typically expect to add new signalers in order, so we
+ * start looking for our insertion point from the tail of
+ * the list.
*/
- seqno = intel_engine_get_seqno(engine);
-
- spin_lock_irq(&b->rb_lock);
- list_for_each_entry_safe(rq, n, &b->signals, signaling.link) {
- u32 this = rq->signaling.wait.seqno;
-
- GEM_BUG_ON(!rq->signaling.wait.seqno);
-
- if (!i915_seqno_passed(seqno, this))
- break;
-
- if (likely(this == i915_request_global_seqno(rq))) {
- __intel_engine_remove_wait(engine,
- &rq->signaling.wait);
+ list_for_each_prev(pos, &ce->signals) {
+ struct i915_request *it =
+ list_entry(pos, typeof(*it), signal_link);
- rq->signaling.wait.seqno = 0;
- __list_del_entry(&rq->signaling.link);
-
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags)) {
- list_add_tail(&rq->signaling.link,
- &list);
- i915_request_get(rq);
- }
- }
- }
- spin_unlock_irq(&b->rb_lock);
-
- if (!list_empty(&list)) {
- local_bh_disable();
- list_for_each_entry_safe(rq, n, &list, signaling.link) {
- dma_fence_signal(&rq->fence);
- GEM_BUG_ON(!i915_request_completed(rq));
- i915_request_put(rq);
- }
- local_bh_enable(); /* kick start the tasklets */
-
- /*
- * If the engine is saturated we may be continually
- * processing completed requests. This angers the
- * NMI watchdog if we never let anything else
- * have access to the CPU. Let's pretend to be nice
- * and relinquish the CPU if we burn through the
- * entire RT timeslice!
- */
- do_schedule = need_resched();
- }
-
- if (unlikely(do_schedule)) {
- /* Before we sleep, check for a missed seqno */
- if (current->state & TASK_NORMAL &&
- !list_empty(&b->signals) &&
- engine->irq_seqno_barrier &&
- test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)) {
- engine->irq_seqno_barrier(engine);
- intel_engine_wakeup(engine);
- }
-
-sleep:
- if (kthread_should_park())
- kthread_parkme();
-
- if (unlikely(kthread_should_stop()))
+ if (i915_seqno_passed(rq->fence.seqno, it->fence.seqno))
break;
-
- schedule();
}
- } while (1);
- __set_current_state(TASK_RUNNING);
+ list_add(&rq->signal_link, pos);
+ if (pos == &ce->signals) /* catch transitions from empty list */
+ list_move_tail(&ce->signal_link, &b->signalers);
- return 0;
-}
-
-static void insert_signal(struct intel_breadcrumbs *b,
- struct i915_request *request,
- const u32 seqno)
-{
- struct i915_request *iter;
-
- lockdep_assert_held(&b->rb_lock);
-
- /*
- * A reasonable assumption is that we are called to add signals
- * in sequence, as the requests are submitted for execution and
- * assigned a global_seqno. This will be the case for the majority
- * of internally generated signals (inter-engine signaling).
- *
- * Out of order waiters triggering random signaling enabling will
- * be more problematic, but hopefully rare enough and the list
- * small enough that the O(N) insertion sort is not an issue.
- */
-
- list_for_each_entry_reverse(iter, &b->signals, signaling.link)
- if (i915_seqno_passed(seqno, iter->signaling.wait.seqno))
- break;
-
- list_add(&request->signaling.link, &iter->signaling.link);
-}
-
-bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup)
-{
- struct intel_engine_cs *engine = request->engine;
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct intel_wait *wait = &request->signaling.wait;
- u32 seqno;
-
- /*
- * Note that we may be called from an interrupt handler on another
- * device (e.g. nouveau signaling a fence completion causing us
- * to submit a request, and so enable signaling). As such,
- * we need to make sure that all other users of b->rb_lock protect
- * against interrupts, i.e. use spin_lock_irqsave.
- */
-
- /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
- GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&request->lock);
-
- seqno = i915_request_global_seqno(request);
- if (!seqno) /* will be enabled later upon execution */
- return true;
-
- GEM_BUG_ON(wait->seqno);
- wait->tsk = b->signaler;
- wait->request = request;
- wait->seqno = seqno;
-
- /*
- * Add ourselves into the list of waiters, but registering our
- * bottom-half as the signaller thread. As per usual, only the oldest
- * waiter (not just signaller) is tasked as the bottom-half waking
- * up all completed waiters after the user interrupt.
- *
- * If we are the oldest waiter, enable the irq (after which we
- * must double check that the seqno did not complete).
- */
- spin_lock(&b->rb_lock);
- insert_signal(b, request, seqno);
- wakeup &= __intel_engine_add_wait(engine, wait);
- spin_unlock(&b->rb_lock);
-
- if (wakeup) {
- wake_up_process(b->signaler);
- return !intel_wait_complete(wait);
+ set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
}
+ spin_unlock(&b->irq_lock);
- return true;
+ return !__request_completed(rq);
}
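
i915_request_enable_breadcrumb() keeps ce->signals sorted by fence.seqno: since requests are normally added in submission order, walking backwards from the tail with list_for_each_prev() usually terminates after one comparison, giving O(1) amortised insertion while still tolerating the occasional out-of-order signaler. A minimal sorted-insert sketch along those lines, with hypothetical types:

/* Sketch: insertion sort from the tail of a list kept in ascending
 * seqno order. In the common in-order case the loop body runs once.
 */
struct req {
	struct list_head link;
	u32 seqno;
};

static void insert_sorted(struct list_head *head, struct req *rq)
{
	struct list_head *pos;

	list_for_each_prev(pos, head) {
		struct req *it = list_entry(pos, struct req, link);

		if ((s32)(rq->seqno - it->seqno) >= 0)
			break;		/* rq belongs after 'it' */
	}

	/* list_add() inserts immediately after pos; if nothing older
	 * was found, pos == head and rq becomes the first entry.
	 */
	list_add(&rq->link, pos);
}
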
-void intel_engine_cancel_signaling(struct i915_request *request)
+void i915_request_cancel_breadcrumb(struct i915_request *rq)
{
- struct intel_engine_cs *engine = request->engine;
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_breadcrumbs *b = &rq->engine->breadcrumbs;
- GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&request->lock);
-
- if (!READ_ONCE(request->signaling.wait.seqno))
+ if (!test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
return;
- spin_lock(&b->rb_lock);
- __intel_engine_remove_wait(engine, &request->signaling.wait);
- if (fetch_and_zero(&request->signaling.wait.seqno))
- __list_del_entry(&request->signaling.link);
- spin_unlock(&b->rb_lock);
-}
-
-int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- struct task_struct *tsk;
-
- spin_lock_init(&b->rb_lock);
- spin_lock_init(&b->irq_lock);
-
- timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
- timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);
-
- INIT_LIST_HEAD(&b->signals);
-
- /* Spawn a thread to provide a common bottom-half for all signals.
- * As this is an asynchronous interface we cannot steal the current
- * task for handling the bottom-half to the user interrupt, therefore
- * we create a thread to do the coherent seqno dance after the
- * interrupt and then signal the waitqueue (via the dma-buf/fence).
- */
- tsk = kthread_run(intel_breadcrumbs_signaler, engine,
- "i915/signal:%d", engine->id);
- if (IS_ERR(tsk))
- return PTR_ERR(tsk);
-
- b->signaler = tsk;
-
- return 0;
-}
-
-static void cancel_fake_irq(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- del_timer_sync(&b->fake_irq); /* may queue b->hangcheck */
- del_timer_sync(&b->hangcheck);
- clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
-}
-
-void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
-{
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
- unsigned long flags;
-
- spin_lock_irqsave(&b->irq_lock, flags);
-
- /*
- * Leave the fake_irq timer enabled (if it is running), but clear the
- * bit so that it turns itself off on its next wake up and goes back
- * to the long hangcheck interval if still required.
- */
- clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
-
- if (b->irq_enabled)
- irq_enable(engine);
- else
- irq_disable(engine);
+ spin_lock(&b->irq_lock);
+ if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+ struct intel_context *ce = rq->hw_context;
- /*
- * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
- * GPU is active and may have already executed the MI_USER_INTERRUPT
- * before the CPU is ready to receive. However, the engine is currently
- * idle (we haven't started it yet), there is no possibility for a
- * missed interrupt as we enabled the irq and so we can clear the
- * immediate wakeup (until a real interrupt arrives for the waiter).
- */
- clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ list_del(&rq->signal_link);
+ if (list_empty(&ce->signals))
+ list_del_init(&ce->signal_link);
- spin_unlock_irqrestore(&b->irq_lock, flags);
+ clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+ }
+ spin_unlock(&b->irq_lock);
}
-void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
+void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
+ struct drm_printer *p)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_context *ce;
+ struct i915_request *rq;
- /* The engines should be idle and all requests accounted for! */
- WARN_ON(READ_ONCE(b->irq_wait));
- WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
- WARN_ON(!list_empty(&b->signals));
+ if (list_empty(&b->signalers))
+ return;
- if (!IS_ERR_OR_NULL(b->signaler))
- kthread_stop(b->signaler);
+ drm_printf(p, "Signals:\n");
- cancel_fake_irq(engine);
+ spin_lock_irq(&b->irq_lock);
+ list_for_each_entry(ce, &b->signalers, signal_link) {
+ list_for_each_entry(rq, &ce->signals, signal_link) {
+ drm_printf(p, "\t[%llx:%llx%s] @ %dms\n",
+ rq->fence.context, rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+ }
+ }
+ spin_unlock_irq(&b->irq_lock);
}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/intel_breadcrumbs.c"
-#endif