Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_lrc.c | 115
1 file changed, 56 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9b6d781b22ec..d8bca8ba2efc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -267,7 +267,7 @@ find_priolist:
 	return ptr_pack_bits(p, first, 1);
 }
 
-static void unwind_wa_tail(struct drm_i915_gem_request *rq)
+static void unwind_wa_tail(struct i915_request *rq)
 {
 	rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
 	assert_ring_tail_valid(rq->ring, rq->tail);
@@ -275,7 +275,7 @@ static void unwind_wa_tail(struct drm_i915_gem_request *rq)
 
 static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
-	struct drm_i915_gem_request *rq, *rn;
+	struct i915_request *rq, *rn;
 	struct i915_priolist *uninitialized_var(p);
 	int last_prio = I915_PRIORITY_INVALID;
 
@@ -284,10 +284,10 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	list_for_each_entry_safe_reverse(rq, rn,
 					 &engine->timeline->requests,
 					 link) {
-		if (i915_gem_request_completed(rq))
+		if (i915_request_completed(rq))
 			return;
 
-		__i915_gem_request_unsubmit(rq);
+		__i915_request_unsubmit(rq);
 		unwind_wa_tail(rq);
 
 		GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
@@ -316,8 +316,7 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
 }
 
 static inline void
-execlists_context_status_change(struct drm_i915_gem_request *rq,
-				unsigned long status)
+execlists_context_status_change(struct i915_request *rq, unsigned long status)
 {
 	/*
 	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
@@ -331,14 +330,14 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
 }
 
 static inline void
-execlists_context_schedule_in(struct drm_i915_gem_request *rq)
+execlists_context_schedule_in(struct i915_request *rq)
 {
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
 	intel_engine_context_in(rq->engine);
 }
 
 static inline void
-execlists_context_schedule_out(struct drm_i915_gem_request *rq)
+execlists_context_schedule_out(struct i915_request *rq)
 {
 	intel_engine_context_out(rq->engine);
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
@@ -353,7 +352,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 	ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
 }
 
-static u64 execlists_update_context(struct drm_i915_gem_request *rq)
+static u64 execlists_update_context(struct i915_request *rq)
 {
 	struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
 	struct i915_hw_ppgtt *ppgtt =
@@ -385,7 +384,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 	unsigned int n;
 
 	for (n = execlists_num_ports(&engine->execlists); n--; ) {
-		struct drm_i915_gem_request *rq;
+		struct i915_request *rq;
 		unsigned int count;
 		u64 desc;
 
@@ -430,15 +429,14 @@ static bool can_merge_ctx(const struct i915_gem_context *prev,
 	return true;
 }
 
-static void port_assign(struct execlist_port *port,
-			struct drm_i915_gem_request *rq)
+static void port_assign(struct execlist_port *port, struct i915_request *rq)
 {
 	GEM_BUG_ON(rq == port_request(port));
 
 	if (port_isset(port))
-		i915_gem_request_put(port_request(port));
+		i915_request_put(port_request(port));
 
-	port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
+	port_set(port, port_pack(i915_request_get(rq), port_count(port)));
 }
 
 static void inject_preempt_context(struct intel_engine_cs *engine)
@@ -476,7 +474,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct execlist_port *port = execlists->port;
 	const struct execlist_port * const last_port =
 		&execlists->port[execlists->port_mask];
-	struct drm_i915_gem_request *last = port_request(port);
+	struct i915_request *last = port_request(port);
 	struct rb_node *rb;
 	bool submit = false;
 
@@ -565,7 +563,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 		/* WaIdleLiteRestore:bdw,skl
 		 * Apply the wa NOOPs to prevent
-		 * ring:HEAD == req:TAIL as we resubmit the
+		 * ring:HEAD == rq:TAIL as we resubmit the
 		 * request. See gen8_emit_breadcrumb() for
 		 * where we prepare the padding after the
 		 * end of the request.
@@ -576,7 +574,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 	do {
 		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-		struct drm_i915_gem_request *rq, *rn;
+		struct i915_request *rq, *rn;
 
 		list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
 			/*
@@ -626,8 +624,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			}
 
 			INIT_LIST_HEAD(&rq->priotree.link);
-			__i915_gem_request_submit(rq);
-			trace_i915_gem_request_in(rq, port_index(port, execlists));
+			__i915_request_submit(rq);
+			trace_i915_request_in(rq, port_index(port, execlists));
 			last = rq;
 			submit = true;
 		}
@@ -665,12 +663,12 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 	unsigned int num_ports = execlists_num_ports(execlists);
 
 	while (num_ports-- && port_isset(port)) {
-		struct drm_i915_gem_request *rq = port_request(port);
+		struct i915_request *rq = port_request(port);
 
 		GEM_BUG_ON(!execlists->active);
 		intel_engine_context_out(rq->engine);
 		execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
-		i915_gem_request_put(rq);
+		i915_request_put(rq);
 
 		memset(port, 0, sizeof(*port));
 		port++;
@@ -680,7 +678,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct drm_i915_gem_request *rq, *rn;
+	struct i915_request *rq, *rn;
 	struct rb_node *rb;
 	unsigned long flags;
 
@@ -692,7 +690,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline->requests, link) {
 		GEM_BUG_ON(!rq->global_seqno);
-		if (!i915_gem_request_completed(rq))
+		if (!i915_request_completed(rq))
 			dma_fence_set_error(&rq->fence, -EIO);
 	}
 
@@ -705,7 +703,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 			INIT_LIST_HEAD(&rq->priotree.link);
 
 			dma_fence_set_error(&rq->fence, -EIO);
-			__i915_gem_request_submit(rq);
+			__i915_request_submit(rq);
 		}
 
 		rb = rb_next(rb);
@@ -806,7 +804,7 @@ static void execlists_submission_tasklet(unsigned long data)
 			  tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
 
 		while (head != tail) {
-			struct drm_i915_gem_request *rq;
+			struct i915_request *rq;
 			unsigned int status;
 			unsigned int count;
 
@@ -885,10 +883,10 @@ static void execlists_submission_tasklet(unsigned long data)
 				GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
 				GEM_BUG_ON(port_isset(&port[1]) &&
 					   !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
-				GEM_BUG_ON(!i915_gem_request_completed(rq));
+				GEM_BUG_ON(!i915_request_completed(rq));
 				execlists_context_schedule_out(rq);
-				trace_i915_gem_request_out(rq);
-				i915_gem_request_put(rq);
+				trace_i915_request_out(rq);
+				i915_request_put(rq);
 
 				execlists_port_complete(execlists, port);
 			} else {
@@ -928,7 +926,7 @@ static void insert_request(struct intel_engine_cs *engine,
 		tasklet_hi_schedule(&engine->execlists.tasklet);
 }
 
-static void execlists_submit_request(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	unsigned long flags;
@@ -944,9 +942,9 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 	spin_unlock_irqrestore(&engine->timeline->lock, flags);
 }
 
-static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
+static struct i915_request *pt_to_request(struct i915_priotree *pt)
 {
-	return container_of(pt, struct drm_i915_gem_request, priotree);
+	return container_of(pt, struct i915_request, priotree);
 }
 
 static struct intel_engine_cs *
@@ -964,7 +962,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
 	return engine;
 }
 
-static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
+static void execlists_schedule(struct i915_request *request, int prio)
 {
 	struct intel_engine_cs *engine;
 	struct i915_dependency *dep, *p;
@@ -973,7 +971,7 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
 
 	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
-	if (i915_gem_request_completed(request))
+	if (i915_request_completed(request))
 		return;
 
 	if (prio <= READ_ONCE(request->priotree.priority))
@@ -1158,7 +1156,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
 	i915_gem_context_put(ctx);
 }
 
-static int execlists_request_alloc(struct drm_i915_gem_request *request)
+static int execlists_request_alloc(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
@@ -1590,7 +1588,7 @@ static void reset_irq(struct intel_engine_cs *engine)
 }
 
 static void reset_common_ring(struct intel_engine_cs *engine,
-			      struct drm_i915_gem_request *request)
+			      struct i915_request *request)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct intel_context *ce;
@@ -1658,15 +1656,15 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	unwind_wa_tail(request);
 }
 
-static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
+static int intel_logical_ring_emit_pdps(struct i915_request *rq)
 {
-	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_engine_cs *engine = req->engine;
+	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct intel_engine_cs *engine = rq->engine;
 	const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
 	u32 *cs;
 	int i;
 
-	cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+	cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
@@ -1681,12 +1679,12 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	}
 
 	*cs++ = MI_NOOP;
-	intel_ring_advance(req, cs);
+	intel_ring_advance(rq, cs);
 
 	return 0;
 }
 
-static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
+static int gen8_emit_bb_start(struct i915_request *rq,
 			      u64 offset, u32 len,
 			      const unsigned int flags)
 {
@@ -1699,18 +1697,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	 * it is unsafe in case of lite-restore (because the ctx is
 	 * not idle). PML4 is allocated during ppgtt init so this is
 	 * not needed in 48-bit.*/
-	if (req->ctx->ppgtt &&
-	    (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings) &&
-	    !i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
-	    !intel_vgpu_active(req->i915)) {
-		ret = intel_logical_ring_emit_pdps(req);
+	if (rq->ctx->ppgtt &&
+	    (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
+	    !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+	    !intel_vgpu_active(rq->i915)) {
+		ret = intel_logical_ring_emit_pdps(rq);
 		if (ret)
 			return ret;
 
-		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
+		rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
 	}
 
-	cs = intel_ring_begin(req, 4);
+	cs = intel_ring_begin(rq, 4);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
@@ -1739,7 +1737,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		(flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
 	*cs++ = lower_32_bits(offset);
 	*cs++ = upper_32_bits(offset);
-	intel_ring_advance(req, cs);
+	intel_ring_advance(rq, cs);
 
 	return 0;
 }
@@ -1758,7 +1756,7 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
-static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
+static int gen8_emit_flush(struct i915_request *request, u32 mode)
 {
 	u32 cmd, *cs;
 
@@ -1790,7 +1788,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 	return 0;
 }
 
-static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
+static int gen8_emit_flush_render(struct i915_request *request,
 				  u32 mode)
 {
 	struct intel_engine_cs *engine = request->engine;
@@ -1865,7 +1863,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
  * used as a workaround for not being allowed to do lite
  * restore with HEAD==TAIL (WaIdleLiteRestore).
  */
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
 {
 	/* Ensure there's always at least one preemption point per-request. */
 	*cs++ = MI_ARB_CHECK;
@@ -1873,7 +1871,7 @@ static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
 	request->wa_tail = intel_ring_offset(request, cs);
 }
 
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
+static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
 {
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
@@ -1889,8 +1887,7 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
 }
 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
 
-static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
-				     u32 *cs)
+static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 {
 	/* We're using qword write, seqno should be aligned to 8 bytes. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1906,15 +1903,15 @@ static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
 }
 static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
 
-static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
+static int gen8_init_rcs_context(struct i915_request *rq)
 {
 	int ret;
 
-	ret = intel_ring_workarounds_emit(req);
+	ret = intel_ring_workarounds_emit(rq);
 	if (ret)
 		return ret;
 
-	ret = intel_rcs_context_init_mocs(req);
+	ret = intel_rcs_context_init_mocs(rq);
 	/*
 	 * Failing to program the MOCS is non-fatal.The system will not
	 * run at peak performance. So generate an error and carry on.
@@ -1922,7 +1919,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 	if (ret)
 		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
-	return i915_gem_render_state_emit(req);
+	return i915_gem_render_state_emit(rq);
 }
 
 /**