Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_request.c | 248
1 files changed, 139 insertions, 109 deletions
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 0b2fe55e6194..0e813819b041 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -31,6 +31,7 @@
 #include <linux/sched/signal.h>
 
 #include "gem/i915_gem_context.h"
+#include "gt/intel_breadcrumbs.h"
 #include "gt/intel_context.h"
 #include "gt/intel_ring.h"
 #include "gt/intel_rps.h"
@@ -186,48 +187,34 @@ static void irq_execute_cb_hook(struct irq_work *wrk)
 	irq_execute_cb(wrk);
 }
 
-static void __notify_execute_cb(struct i915_request *rq)
+static __always_inline void
+__notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
 {
 	struct execute_cb *cb, *cn;
 
-	lockdep_assert_held(&rq->lock);
-
-	GEM_BUG_ON(!i915_request_is_active(rq));
 	if (llist_empty(&rq->execute_cb))
 		return;
 
-	llist_for_each_entry_safe(cb, cn, rq->execute_cb.first, work.llnode)
-		irq_work_queue(&cb->work);
-
-	/*
-	 * XXX Rollback on __i915_request_unsubmit()
-	 *
-	 * In the future, perhaps when we have an active time-slicing scheduler,
-	 * it will be interesting to unsubmit parallel execution and remove
-	 * busywaits from the GPU until their master is restarted. This is
-	 * quite hairy, we have to carefully rollback the fence and do a
-	 * preempt-to-idle cycle on the target engine, all the while the
-	 * master execute_cb may refire.
-	 */
-	init_llist_head(&rq->execute_cb);
+	llist_for_each_entry_safe(cb, cn,
+				  llist_del_all(&rq->execute_cb),
+				  work.llnode)
+		fn(&cb->work);
 }
 
-static inline void
-remove_from_client(struct i915_request *request)
+static void __notify_execute_cb_irq(struct i915_request *rq)
 {
-	struct drm_i915_file_private *file_priv;
+	__notify_execute_cb(rq, irq_work_queue);
+}
 
-	if (!READ_ONCE(request->file_priv))
-		return;
+static bool irq_work_imm(struct irq_work *wrk)
+{
+	wrk->func(wrk);
+	return false;
+}
 
-	rcu_read_lock();
-	file_priv = xchg(&request->file_priv, NULL);
-	if (file_priv) {
-		spin_lock(&file_priv->mm.lock);
-		list_del(&request->client_link);
-		spin_unlock(&file_priv->mm.lock);
-	}
-	rcu_read_unlock();
+static void __notify_execute_cb_imm(struct i915_request *rq)
+{
+	__notify_execute_cb(rq, irq_work_imm);
 }
 
 static void free_capture_list(struct i915_request *request)
@@ -274,9 +261,16 @@ static void remove_from_engine(struct i915_request *rq)
 		locked = engine;
 	}
 	list_del_init(&rq->sched.link);
+
 	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+
+	/* Prevent further __await_execution() registering a cb, then flush */
+	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+
 	spin_unlock_irq(&locked->active.lock);
+
+	__notify_execute_cb_imm(rq);
 }
 
 bool i915_request_retire(struct i915_request *rq)
@@ -288,6 +282,7 @@ bool i915_request_retire(struct i915_request *rq)
 
 	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
 	trace_i915_request_retire(rq);
+	i915_request_mark_complete(rq);
 
 	/*
 	 * We know the GPU must have read the request to have
@@ -305,32 +300,30 @@ bool i915_request_retire(struct i915_request *rq)
 		__i915_request_fill(rq, POISON_FREE);
 	rq->ring->head = rq->postfix;
 
+	if (!i915_request_signaled(rq)) {
+		spin_lock_irq(&rq->lock);
+		dma_fence_signal_locked(&rq->fence);
+		spin_unlock_irq(&rq->lock);
+	}
+
+	if (i915_request_has_waitboost(rq)) {
+		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+		atomic_dec(&rq->engine->gt->rps.num_waiters);
+	}
+
 	/*
 	 * We only loosely track inflight requests across preemption,
 	 * and so we may find ourselves attempting to retire a _completed_
 	 * request that we have removed from the HW and put back on a run
 	 * queue.
+	 *
+	 * As we set I915_FENCE_FLAG_ACTIVE on the request, this should be
+	 * after removing the breadcrumb and signaling it, so that we do not
+	 * inadvertently attach the breadcrumb to a completed request.
 	 */
 	remove_from_engine(rq);
-
-	spin_lock_irq(&rq->lock);
-	i915_request_mark_complete(rq);
-	if (!i915_request_signaled(rq))
-		dma_fence_signal_locked(&rq->fence);
-	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
-		i915_request_cancel_breadcrumb(rq);
-	if (i915_request_has_waitboost(rq)) {
-		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
-		atomic_dec(&rq->engine->gt->rps.num_waiters);
-	}
-	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
-		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
-		__notify_execute_cb(rq);
-	}
 	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
-	spin_unlock_irq(&rq->lock);
-	remove_from_client(rq);
 
 	__list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
 
 	intel_context_exit(rq->context);
@@ -357,12 +350,6 @@ void i915_request_retire_upto(struct i915_request *rq)
 	} while (i915_request_retire(tmp) && tmp != rq);
 }
 
-static void __llist_add(struct llist_node *node, struct llist_head *head)
-{
-	node->next = head->first;
-	head->first = node;
-}
-
 static struct i915_request * const *
 __engine_active(struct intel_engine_cs *engine)
 {
@@ -388,17 +375,38 @@ static bool __request_in_flight(const struct i915_request *signal)
 	 * As we know that there are always preemption points between
 	 * requests, we know that only the currently executing request
 	 * may be still active even though we have cleared the flag.
-	 * However, we can't rely on our tracking of ELSP[0] to known
+	 * However, we can't rely on our tracking of ELSP[0] to know
 	 * which request is currently active and so maybe stuck, as
 	 * the tracking maybe an event behind. Instead assume that
 	 * if the context is still inflight, then it is still active
 	 * even if the active flag has been cleared.
+	 *
+	 * To further complicate matters, if there a pending promotion, the HW
+	 * may either perform a context switch to the second inflight execlists,
+	 * or it may switch to the pending set of execlists. In the case of the
+	 * latter, it may send the ACK and we process the event copying the
+	 * pending[] over top of inflight[], _overwriting_ our *active. Since
+	 * this implies the HW is arbitrating and not struck in *active, we do
+	 * not worry about complete accuracy, but we do require no read/write
+	 * tearing of the pointer [the read of the pointer must be valid, even
+	 * as the array is being overwritten, for which we require the writes
+	 * to avoid tearing.]
+	 *
+	 * Note that the read of *execlists->active may race with the promotion
+	 * of execlists->pending[] to execlists->inflight[], overwritting
+	 * the value at *execlists->active. This is fine. The promotion implies
+	 * that we received an ACK from the HW, and so the context is not
+	 * stuck -- if we do not see ourselves in *active, the inflight status
+	 * is valid. If instead we see ourselves being copied into *active,
+	 * we are inflight and may signal the callback.
 	 */
 	if (!intel_context_inflight(signal->context))
 		return false;
 
 	rcu_read_lock();
-	for (port = __engine_active(signal->engine); (rq = *port); port++) {
+	for (port = __engine_active(signal->engine);
+	     (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
+	     port++) {
 		if (rq->context == signal->context) {
 			inflight = i915_seqno_passed(rq->fence.seqno,
 						     signal->fence.seqno);
@@ -439,18 +447,24 @@ __await_execution(struct i915_request *rq,
 		cb->work.func = irq_execute_cb_hook;
 	}
 
-	spin_lock_irq(&signal->lock);
-	if (i915_request_is_active(signal) || __request_in_flight(signal)) {
-		if (hook) {
-			hook(rq, &signal->fence);
-			i915_request_put(signal);
-		}
-		i915_sw_fence_complete(cb->fence);
-		kmem_cache_free(global.slab_execute_cbs, cb);
-	} else {
-		__llist_add(&cb->work.llnode, &signal->execute_cb);
+	/*
+	 * Register the callback first, then see if the signaler is already
+	 * active. This ensures that if we race with the
	 * __notify_execute_cb from i915_request_submit() and we are not
+	 * included in that list, we get a second bite of the cherry and
+	 * execute it ourselves. After this point, a future
+	 * i915_request_submit() will notify us.
+	 *
+	 * In i915_request_retire() we set the ACTIVE bit on a completed
+	 * request (then flush the execute_cb). So by registering the
+	 * callback first, then checking the ACTIVE bit, we serialise with
+	 * the completed/retired request.
+	 */
+	if (llist_add(&cb->work.llnode, &signal->execute_cb)) {
+		if (i915_request_is_active(signal) ||
+		    __request_in_flight(signal))
+			__notify_execute_cb_imm(signal);
 	}
-	spin_unlock_irq(&signal->lock);
 
 	return 0;
 }
@@ -528,8 +542,13 @@ bool __i915_request_submit(struct i915_request *request)
 	if (i915_request_completed(request))
 		goto xfer;
 
+	if (unlikely(intel_context_is_closed(request->context) &&
+		     !intel_engine_has_heartbeat(engine)))
+		intel_context_set_banned(request->context);
+
 	if (unlikely(intel_context_is_banned(request->context)))
 		i915_request_set_error_once(request, -EIO);
+
 	if (unlikely(fatal_error(request->fence.error)))
 		__i915_request_skip(request);
 
@@ -566,19 +585,21 @@ xfer:
 		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
 	}
 
-	/* We may be recursing from the signal callback of another i915 fence */
-	if (!i915_request_signaled(request)) {
-		spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-
-		__notify_execute_cb(request);
-		if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-			     &request->fence.flags) &&
-		    !i915_request_enable_breadcrumb(request))
-			intel_engine_signal_breadcrumbs(engine);
+	/*
+	 * XXX Rollback bonded-execution on __i915_request_unsubmit()?
+	 *
+	 * In the future, perhaps when we have an active time-slicing scheduler,
+	 * it will be interesting to unsubmit parallel execution and remove
+	 * busywaits from the GPU until their master is restarted. This is
+	 * quite hairy, we have to carefully rollback the fence and do a
+	 * preempt-to-idle cycle on the target engine, all the while the
+	 * master execute_cb may refire.
+	 */
+	__notify_execute_cb_irq(request);
 
-		spin_unlock(&request->lock);
-		GEM_BUG_ON(!llist_empty(&request->execute_cb));
-	}
+	/* We may be recursing from the signal callback of another i915 fence */
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+		i915_request_enable_breadcrumb(request);
 
 	return result;
 }
@@ -600,27 +621,27 @@ void __i915_request_unsubmit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 
+	/*
+	 * Only unwind in reverse order, required so that the per-context list
+	 * is kept in seqno/ring order.
+	 */
 	RQ_TRACE(request, "\n");
 
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->active.lock);
 
 	/*
-	 * Only unwind in reverse order, required so that the per-context list
-	 * is kept in seqno/ring order.
+	 * Before we remove this breadcrumb from the signal list, we have
+	 * to ensure that a concurrent dma_fence_enable_signaling() does not
+	 * attach itself. We first mark the request as no longer active and
+	 * make sure that is visible to other cores, and then remove the
+	 * breadcrumb if attached.
 	 */
-
-	/* We may be recursing from the signal callback of another i915 fence */
-	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
-
+	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
+	clear_bit_unlock(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
 		i915_request_cancel_breadcrumb(request);
-	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
-	clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
-
-	spin_unlock(&request->lock);
-
 	/* We've already spun, don't charge on resubmitting. */
 	if (request->sched.semaphores &&
 	    i915_request_started(request))
 		request->sched.semaphores = 0;
@@ -757,7 +778,6 @@ static void __i915_request_ctor(void *arg)
 
 	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
 
-	rq->file_priv = NULL;
 	rq->capture_list = NULL;
 
 	init_llist_head(&rq->execute_cb);
@@ -847,7 +867,6 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 
 	/* No zalloc, everything must be cleared after use */
 	rq->batch = NULL;
-	GEM_BUG_ON(rq->file_priv);
 	GEM_BUG_ON(rq->capture_list);
 	GEM_BUG_ON(!llist_empty(&rq->execute_cb));
 
@@ -1640,7 +1659,7 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
 	return this_cpu != cpu;
 }
 
-static bool __i915_spin_request(const struct i915_request * const rq, int state)
+static bool __i915_spin_request(struct i915_request * const rq, int state)
 {
 	unsigned long timeout_ns;
 	unsigned int cpu;
@@ -1673,7 +1692,7 @@ static bool __i915_spin_request(const struct i915_request * const rq, int state)
 	timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
 	timeout_ns += local_clock_ns(&cpu);
 	do {
-		if (i915_request_completed(rq))
+		if (dma_fence_is_signaled(&rq->fence))
 			return true;
 
 		if (signal_pending_state(state, current))
@@ -1697,7 +1716,7 @@ static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	struct request_wait *wait = container_of(cb, typeof(*wait), cb);
 
-	wake_up_process(wait->tsk);
+	wake_up_process(fetch_and_zero(&wait->tsk));
 }
 
 /**
@@ -1766,10 +1785,8 @@ long i915_request_wait(struct i915_request *rq,
 	 * duration, which we currently lack.
 	 */
 	if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
-	    __i915_spin_request(rq, state)) {
-		dma_fence_signal(&rq->fence);
+	    __i915_spin_request(rq, state))
 		goto out;
-	}
 
 	/*
 	 * This client is about to stall waiting for the GPU. In many cases
@@ -1783,25 +1800,36 @@ long i915_request_wait(struct i915_request *rq,
 	 * but at a cost of spending more power processing the workload
 	 * (bad for battery).
 	 */
-	if (flags & I915_WAIT_PRIORITY) {
-		if (!i915_request_started(rq) &&
-		    INTEL_GEN(rq->engine->i915) >= 6)
-			intel_rps_boost(rq);
-	}
+	if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
+		intel_rps_boost(rq);
 
 	wait.tsk = current;
 	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
 		goto out;
 
+	/*
+	 * Flush the submission tasklet, but only if it may help this request.
+	 *
+	 * We sometimes experience some latency between the HW interrupts and
+	 * tasklet execution (mostly due to ksoftirqd latency, but it can also
+	 * be due to lazy CS events), so lets run the tasklet manually if there
+	 * is a chance it may submit this request. If the request is not ready
+	 * to run, as it is waiting for other fences to be signaled, flushing
+	 * the tasklet is busy work without any advantage for this client.
+	 *
+	 * If the HW is being lazy, this is the last chance before we go to
+	 * sleep to catch any pending events. We will check periodically in
+	 * the heartbeat to flush the submission tasklets as a last resort
+	 * for unhappy HW.
+	 */
+	if (i915_request_is_ready(rq))
+		intel_engine_flush_submission(rq->engine);
+
 	for (;;) {
 		set_current_state(state);
 
-		if (i915_request_completed(rq)) {
-			dma_fence_signal(&rq->fence);
+		if (dma_fence_is_signaled(&rq->fence))
 			break;
-		}
-
-		intel_engine_flush_submission(rq->engine);
 
 		if (signal_pending_state(state, current)) {
 			timeout = -ERESTARTSYS;
@@ -1817,7 +1845,9 @@
 	}
 	__set_current_state(TASK_RUNNING);
 
-	dma_fence_remove_callback(&rq->fence, &wait.cb);
+	if (READ_ONCE(wait.tsk))
+		dma_fence_remove_callback(&rq->fence, &wait.cb);
+	GEM_BUG_ON(!list_empty(&wait.cb.node));
 
 out:
 	mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
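
The lock-free callback handling introduced by this patch rests on one ordering rule: __await_execution() publishes its execute_cb node with llist_add() and only afterwards checks whether the signaler is already active, while the submit/retire paths set I915_FENCE_FLAG_ACTIVE before draining the list with llist_del_all(). Whichever side loses the race still observes the other's update, so no callback is lost and each node is claimed exactly once. The following is a minimal, self-contained userspace sketch of that ordering using C11 seq_cst atomics in place of the kernel's llist and atomic bitops. It is an illustration only, not i915 or kernel code; every name in it (fake_request, cb_list_add, cb_list_flush, and so on) is invented for the example, and it simplifies one detail: the patch performs the immediate check only when llist_add() reports the list was previously empty, whereas the sketch checks unconditionally.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* One pending execution callback; stands in for struct execute_cb. */
struct cb_node {
	struct cb_node *next;
	void (*func)(struct cb_node *cb);
};

/* Toy request: a lock-free callback list plus an "active" flag. */
struct fake_request {
	_Atomic(struct cb_node *) execute_cb;
	atomic_bool active;
};

/* Publish a node; like llist_add(), returns true if the list was empty. */
static bool cb_list_add(struct fake_request *rq, struct cb_node *cb)
{
	struct cb_node *first = atomic_load(&rq->execute_cb);

	do {
		cb->next = first;
	} while (!atomic_compare_exchange_weak(&rq->execute_cb, &first, cb));

	return first == NULL;
}

/* Claim every published node in one shot; like llist_del_all() + iterate. */
static void cb_list_flush(struct fake_request *rq)
{
	struct cb_node *cb = atomic_exchange(&rq->execute_cb, NULL);

	while (cb) {
		struct cb_node *next = cb->next;

		cb->func(cb);
		cb = next;
	}
}

/* Waiter side: publish first, then check whether we raced with submission. */
static void register_execute_cb(struct fake_request *rq, struct cb_node *cb)
{
	cb_list_add(rq, cb);
	if (atomic_load(&rq->active))
		cb_list_flush(rq); /* signaler already ran: execute it ourselves */
}

/* Submit/retire side: mark active first, then notify everything published. */
static void mark_active_and_notify(struct fake_request *rq)
{
	atomic_store(&rq->active, true);
	cb_list_flush(rq);
}

static void say_done(struct cb_node *cb)
{
	(void)cb;
	printf("execute_cb ran\n");
}

int main(void)
{
	struct fake_request rq = { NULL, false };
	struct cb_node cb = { NULL, say_done };

	register_execute_cb(&rq, &cb);	/* not yet active: node stays queued */
	mark_active_and_notify(&rq);	/* "submission" runs the pending callback */
	return 0;
}

Because the drain is an atomic exchange, concurrent flushes are harmless: each node is handed to exactly one caller, which is why i915_request_retire() can safely set the ACTIVE bit and flush even for an already-completed request.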