author     Chris Wilson <chris@chris-wilson.co.uk>  2020-02-07 11:02:13 +0000
committer  Chris Wilson <chris@chris-wilson.co.uk>  2020-02-07 13:07:28 +0000
commit     793c226173675ea7f8591d22803a1432e2b724e0 (patch)
tree       1e7773a8568888c759ae83b3976f5a220a109ebf /drivers/gpu/drm/i915/gt/intel_lrc.c
parent     Revert "drm/i915: Don't use VBT for detecting DPCD backlight controls" (diff)
drm/i915/gt: Protect execlists_hold/unhold from new waiters
As we may add new waiters to a request as it is being run, we need to
mark the list iteration as being safe for concurrent addition.

v2: Mika spotted that we used the same trick for signalers_list, so
warn the compiler about the lockless walk there as well.

Fixes: 32ff621fd744 ("drm/i915/gt: Allow temporary suspension of inflight requests")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200207110213.2734386-1-chris@chris-wilson.co.uk
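For background, this is the pattern the patch adopts, shown as a minimal
sketch rather than the i915 code itself. list_for_each_entry_lockless()
(from include/linux/rculist.h) loads each ->next pointer with READ_ONCE(),
so the compiler may not re-load or fuse the pointer reads while another
CPU appends entries with list_add_tail(). The struct waiter type and the
count_waiters() helper below are hypothetical, for illustration only:

#include <linux/list.h>
#include <linux/rculist.h>

struct waiter {
	struct list_head wait_link;
};

/*
 * Count the entries on a list that a concurrent writer may be
 * extending. The lockless walk tolerates new entries appearing
 * during the iteration; it only requires that each ->next pointer
 * is published atomically (as list_add_tail() does).
 */
static int count_waiters(struct list_head *waiters)
{
	struct waiter *w;
	int n = 0;

	list_for_each_entry_lockless(w, waiters, wait_link)
		n++;

	return n;
}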
Diffstat (limited to 'drivers/gpu/drm/i915/gt/intel_lrc.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c  11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 9808244cb8fe..77bd8d8c027e 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1620,6 +1620,11 @@ last_active(const struct intel_engine_execlists *execlists)
 				     &(rq__)->sched.waiters_list, \
 				     wait_link)
 
+#define for_each_signaler(p__, rq__) \
+	list_for_each_entry_lockless(p__, \
+				     &(rq__)->sched.signalers_list, \
+				     signal_link)
+
 static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
 	LIST_HEAD(list);
@@ -2378,7 +2383,7 @@ static void __execlists_hold(struct i915_request *rq)
 		list_move_tail(&rq->sched.link, &rq->engine->active.hold);
 		i915_request_set_hold(rq);
 
-		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+		for_each_waiter(p, rq) {
 			struct i915_request *w =
 				container_of(p->waiter, typeof(*w), sched);
 
@@ -2464,7 +2469,7 @@ static bool hold_request(const struct i915_request *rq)
 	 * If one of our ancestors is on hold, we must also be on hold,
 	 * otherwise we will bypass it and execute before it.
 	 */
-	list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+	for_each_signaler(p, rq) {
 		const struct i915_request *s =
 			container_of(p->signaler, typeof(*s), sched);
 
@@ -2496,7 +2501,7 @@ static void __execlists_unhold(struct i915_request *rq)
 	RQ_TRACE(rq, "hold release\n");
 
 	/* Also release any children on this engine that are ready */
-	list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+	for_each_waiter(p, rq) {
 		struct i915_request *w =
 			container_of(p->waiter, typeof(*w), sched);
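
For reference, list_for_each_entry_lockless() comes from
include/linux/rculist.h and, in kernels of this era, expands approximately
to the sketch below; the READ_ONCE() on every ->next load is what "warns
the compiler about the lockless walk" in the commit message:

/* Approximate definitions, not a verbatim quote of rculist.h. */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))READ_ONCE(ptr), type, member)

#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))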