author     Chris Wilson <chris@chris-wilson.co.uk>  2016-10-28 13:58:57 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>  2016-10-28 20:53:56 +0100
commit     f2d13290e3275df34c0cd625fbc665965af08c67 (patch)
tree       3d96a9a4415d92cfc0ac56fc7cae8f51f0e87506 /drivers
parent     drm/i915: Reserve space in the global seqno during request allocation (diff)
drm/i915: Defer setting of global seqno on request to submission
Defer the assignment of the global seqno on a request to its submission. In the next patch, we will only allocate the global seqno at that time; here we are just enabling the wait-for-submission before wait-for-seqno paths.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161028125858.23563-34-chris@chris-wilson.co.uk
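The consequence for waiters is a two-stage wait: a request carries no global seqno until submit_notify() runs, so a waiter must first wait for the submit fence to signal and only then wait on the hardware seqno. Below is a minimal sketch of that ordering, using a struct completion as a stand-in for the submit fence; sketch_request, hw_seqno_wait() and this layout are illustrative assumptions, not the i915 API.

#include <linux/completion.h>
#include <linux/types.h>

/* Hypothetical helper: returns once the GPU has passed @seqno. */
extern void hw_seqno_wait(u32 seqno);

struct sketch_request {
	u32 global_seqno;		/* 0 until the request is submitted */
	struct completion submitted;	/* stands in for the submit fence */
};

static void sketch_wait(struct sketch_request *rq)
{
	/* Stage 1: no global seqno exists before submission, so wait
	 * for the submit notification that assigns one.
	 */
	if (!rq->global_seqno)
		wait_for_completion(&rq->submitted);

	/* Stage 2: the seqno is now stable; wait for the GPU to reach it. */
	hw_seqno_wait(rq->global_seqno);
}

On the producer side, the GEM_BUG_ON(!seqno) and GEM_BUG_ON(!request->global_seqno) assertions in the first hunk below guard the same invariant: by the time the breadcrumb is emitted, the seqno has been assigned.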
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 30
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 9b22f66464f0..7499e3b205c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -324,14 +324,32 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	struct drm_i915_gem_request *request =
 		container_of(fence, typeof(*request), submit);
 	struct intel_engine_cs *engine = request->engine;
+	struct intel_timeline *timeline;
+	u32 seqno;

 	if (state != FENCE_COMPLETE)
 		return NOTIFY_DONE;

 	/* Will be called from irq-context when using foreign DMA fences */
-	engine->timeline->last_submitted_seqno = request->fence.seqno;
+	timeline = request->timeline;
+	seqno = request->fence.seqno;
+	GEM_BUG_ON(!seqno);
+	GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
+
+	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
+	request->previous_seqno = timeline->last_submitted_seqno;
+	timeline->last_submitted_seqno = seqno;
+
+	/* We may be recursing from the signal callback of another i915 fence */
+	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+	request->global_seqno = seqno;
+	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+		intel_engine_enable_signaling(request);
+	spin_unlock(&request->lock);
+
+	GEM_BUG_ON(!request->global_seqno);
 	engine->emit_breadcrumb(request,
 				request->ring->vaddr + request->postfix);
 	engine->submit_request(request);
@@ -427,10 +445,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	INIT_LIST_HEAD(&req->active_list);
 	req->i915 = dev_priv;
 	req->engine = engine;
-	req->global_seqno = req->fence.seqno;
 	req->ctx = i915_gem_context_get(ctx);

 	/* No zalloc, must clear what we need by hand */
+	req->global_seqno = 0;
 	req->previous_context = NULL;
 	req->file_priv = NULL;
 	req->batch = NULL;
@@ -704,15 +722,13 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);

-	GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
-				     request->fence.seqno));
+	list_add_tail(&request->link, &timeline->requests);

-	request->emitted_jiffies = jiffies;
-	request->previous_seqno = timeline->last_pending_seqno;
 	timeline->last_pending_seqno = request->fence.seqno;
 	i915_gem_active_set(&timeline->last_request, request);
-	list_add_tail(&request->link, &timeline->requests);
+
 	list_add_tail(&request->ring_link, &ring->request_list);
+	request->emitted_jiffies = jiffies;

 	i915_gem_mark_busy(engine);
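Two details in the submit_notify() hunk above may be worth spelling out. First, the request lock is taken with spin_lock_nested(..., SINGLE_DEPTH_NESTING): as the in-code comment says, the callback may be recursing from the signal callback of another i915 fence, so a second lock of the same lock class can already be held, and the subclass annotation tells lockdep that exactly one extra level of same-class nesting is intentional. A generic sketch of that pattern, with made-up structures rather than driver code:

#include <linux/spinlock.h>

struct node {
	spinlock_t lock;
	struct node *parent;
};

/* Both locks belong to the same lock class (the same spinlock_t field in
 * the same struct), so acquiring the second would normally look like a
 * self-deadlock to lockdep.  SINGLE_DEPTH_NESTING declares that exactly
 * one extra level of same-class nesting is expected here.
 */
static void touch_parent_and_child(struct node *child)
{
	spin_lock(&child->parent->lock);
	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

	/* ... update both nodes ... */

	spin_unlock(&child->lock);
	spin_unlock(&child->parent->lock);
}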
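Second, the GEM_BUG_ON() ordering checks go through i915_seqno_passed() rather than a plain comparison because seqnos are 32-bit values that wrap around. A sketch of the usual wraparound-safe idiom (written here as an assumption about the standard form, not copied from the driver):

#include <linux/types.h>

/* Wraparound-safe "has seq1 reached or passed seq2?" for u32 seqnos:
 * the signed difference stays correct as long as the two values are
 * within 2^31 of each other.
 */
static inline bool seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}

/* Example: seqno_passed(5, 3) is true, and after a wrap
 * seqno_passed(2, 0xfffffffe) is also true, even though 2 < 0xfffffffe
 * as unsigned values.
 */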