Diffstat (limited to 'drivers/gpu/drm/i915/intel_engine_cs.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c  734
1 file changed, 695 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab1be5c80ea5..854e8e0c836b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -28,53 +28,53 @@
static const struct engine_info {
const char *name;
- unsigned exec_id;
- enum intel_engine_hw_id hw_id;
+ unsigned int exec_id;
+ unsigned int hw_id;
u32 mmio_base;
unsigned irq_shift;
int (*init_legacy)(struct intel_engine_cs *engine);
int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
[RCS] = {
- .name = "render ring",
- .exec_id = I915_EXEC_RENDER,
+ .name = "rcs",
.hw_id = RCS_HW,
+ .exec_id = I915_EXEC_RENDER,
.mmio_base = RENDER_RING_BASE,
.irq_shift = GEN8_RCS_IRQ_SHIFT,
.init_execlists = logical_render_ring_init,
.init_legacy = intel_init_render_ring_buffer,
},
[BCS] = {
- .name = "blitter ring",
- .exec_id = I915_EXEC_BLT,
+ .name = "bcs",
.hw_id = BCS_HW,
+ .exec_id = I915_EXEC_BLT,
.mmio_base = BLT_RING_BASE,
.irq_shift = GEN8_BCS_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_blt_ring_buffer,
},
[VCS] = {
- .name = "bsd ring",
- .exec_id = I915_EXEC_BSD,
+ .name = "vcs",
.hw_id = VCS_HW,
+ .exec_id = I915_EXEC_BSD,
.mmio_base = GEN6_BSD_RING_BASE,
.irq_shift = GEN8_VCS1_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd_ring_buffer,
},
[VCS2] = {
- .name = "bsd2 ring",
- .exec_id = I915_EXEC_BSD,
+ .name = "vcs2",
.hw_id = VCS2_HW,
+ .exec_id = I915_EXEC_BSD,
.mmio_base = GEN8_BSD2_RING_BASE,
.irq_shift = GEN8_VCS2_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd2_ring_buffer,
},
[VECS] = {
- .name = "video enhancement ring",
- .exec_id = I915_EXEC_VEBOX,
+ .name = "vecs",
.hw_id = VECS_HW,
+ .exec_id = I915_EXEC_VEBOX,
.mmio_base = VEBOX_RING_BASE,
.irq_shift = GEN8_VECS_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
@@ -112,21 +112,20 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
}
/**
- * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * intel_engines_init_early() - allocate the Engine Command Streamers
* @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init(struct drm_i915_private *dev_priv)
+int intel_engines_init_early(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
- int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int i;
- int ret;
+ int err;
WARN_ON(ring_mask == 0);
WARN_ON(ring_mask &
@@ -136,20 +135,8 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
if (!HAS_ENGINE(dev_priv, i))
continue;
- if (i915.enable_execlists)
- init = intel_engines[i].init_execlists;
- else
- init = intel_engines[i].init_legacy;
-
- if (!init)
- continue;
-
- ret = intel_engine_setup(dev_priv, i);
- if (ret)
- goto cleanup;
-
- ret = init(dev_priv->engine[i]);
- if (ret)
+ err = intel_engine_setup(dev_priv, i);
+ if (err)
goto cleanup;
mask |= ENGINE_MASK(i);
@@ -168,14 +155,68 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
return 0;
cleanup:
+ for_each_engine(engine, dev_priv, id)
+ kfree(engine);
+ return err;
+}
+
+/**
+ * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * @dev_priv: i915 device private
+ *
+ * Return: non-zero if the initialization failed.
+ */
+int intel_engines_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id, err_id;
+ unsigned int mask = 0;
+ int err = 0;
+
for_each_engine(engine, dev_priv, id) {
+ int (*init)(struct intel_engine_cs *engine);
+
if (i915.enable_execlists)
- intel_logical_ring_cleanup(engine);
+ init = intel_engines[id].init_execlists;
else
- intel_engine_cleanup(engine);
+ init = intel_engines[id].init_legacy;
+ if (!init) {
+ kfree(engine);
+ dev_priv->engine[id] = NULL;
+ continue;
+ }
+
+ err = init(engine);
+ if (err) {
+ err_id = id;
+ goto cleanup;
+ }
+
+ GEM_BUG_ON(!engine->submit_request);
+ mask |= ENGINE_MASK(id);
}
- return ret;
+ /*
+	 * Catch failures to update the intel_engines table when new engines
+	 * are added to the driver: warn and disable the forgotten engines.
+ */
+ if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+ device_info->ring_mask = mask;
+
+ device_info->num_rings = hweight32(mask);
+
+ return 0;
+
+cleanup:
+ for_each_engine(engine, dev_priv, id) {
+ if (id >= err_id)
+ kfree(engine);
+ else
+ dev_priv->gt.cleanup_engine(engine);
+ }
+ return err;
}
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
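[Illustration only, not part of the patch: a minimal sketch of the two-phase engine bring-up this change splits out, assuming a hypothetical caller in the driver load path. Phase one (intel_engines_init_early) only allocates and sets up the intel_engine_cs structs from the intel_engines[] table; phase two (intel_engines_init) later picks the execlists or legacy backend per engine and initializes it, freeing any engine that has no backend.]

	/* Hypothetical caller, sketch only */
	static int example_engine_bringup(struct drm_i915_private *i915)
	{
		int err;

		err = intel_engines_init_early(i915);	/* allocate engine structs */
		if (err)
			return err;

		/* ... MMIO and GEM setup would happen between the two phases ... */

		return intel_engines_init(i915);	/* per-engine backend init */
	}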
@@ -201,21 +242,18 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
void *semaphores;
/* Semaphores are in noncoherent memory, flush to be safe */
- semaphores = kmap(page);
+ semaphores = kmap_atomic(page);
memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
I915_NUM_ENGINES * gen8_semaphore_seqno_size);
- kunmap(page);
+ kunmap_atomic(semaphores);
}
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
+ clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
- engine->timeline->last_submitted_seqno = seqno;
-
engine->hangcheck.seqno = seqno;
/* After manually advancing the seqno, fake the interrupt in case
@@ -306,6 +344,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
{
int ret;
+ engine->set_default_submission(engine);
+
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -484,3 +524,619 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
break;
}
}
+
+static int wa_add(struct drm_i915_private *dev_priv,
+ i915_reg_t addr,
+ const u32 mask, const u32 val)
+{
+ const u32 idx = dev_priv->workarounds.count;
+
+ if (WARN_ON(idx >= I915_MAX_WA_REGS))
+ return -ENOSPC;
+
+ dev_priv->workarounds.reg[idx].addr = addr;
+ dev_priv->workarounds.reg[idx].value = val;
+ dev_priv->workarounds.reg[idx].mask = mask;
+
+ dev_priv->workarounds.count++;
+
+ return 0;
+}
+
+#define WA_REG(addr, mask, val) do { \
+ const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+ if (r) \
+ return r; \
+ } while (0)
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+ WA_REG(addr, mask, _MASKED_FIELD(mask, value))
+
+#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
+
+#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
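[Illustration only, not part of the patch: what the masked-register helpers above record, assuming i915's usual convention that _MASKED_FIELD(mask, value) packs the mask into the upper 16 bits as a per-bit write enable.]

	/* Worked example, hypothetical use on bit 5 of a masked register: */
	WA_SET_BIT_MASKED(CACHE_MODE_1, BIT(5));
	/* records 0x00200020: bit 21 enables the write, bit 5 is set */
	WA_CLR_BIT_MASKED(CACHE_MODE_1, BIT(5));
	/* records 0x00200000: bit 21 enables the write, bit 5 is cleared */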
+
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+ i915_reg_t reg)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct i915_workarounds *wa = &dev_priv->workarounds;
+ const uint32_t index = wa->hw_whitelist_count[engine->id];
+
+ if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
+ return -EINVAL;
+
+ WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
+ i915_mmio_reg_offset(reg));
+ wa->hw_whitelist_count[engine->id]++;
+
+ return 0;
+}
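[Illustration only, not part of the patch: a worked instance of the whitelisting above. A hypothetical wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4) on the render engine, while hw_whitelist_count[RCS] is 0, queues a write of i915_mmio_reg_offset(GEN8_L3SQCREG4) into RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), exposing that one register to non-privileged LRI writes from userspace batches, and bumps the count to 1.]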
+
+static int gen8_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisableAsyncFlipPerfMode:bdw,chv */
+ WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
+ /* WaDisablePartialInstShootdown:bdw,chv */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+	 * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
+ */
+ /* WaForceEnableNonCoherent:bdw,chv */
+ /* WaHdcDisableFetchWhenMasked:bdw,chv */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ HDC_FORCE_NON_COHERENT);
+
+ /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+ * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+ * polygons in the same 8x4 pixel/sample area to be processed without
+ * stalling waiting for the earlier ones to write to Hierarchical Z
+ * buffer."
+ *
+ * This optimization is off by default for BDW and CHV; turn it on.
+ */
+ WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* Wa4x4STCOptimizationDisable:bdw,chv */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
+
+ return 0;
+}
+
+static int bdw_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen8_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+ /* WaDisableDopClockGating:bdw
+ *
+ * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+ * to disable EUTC clock gating.
+ */
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
+
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+ (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+
+ return 0;
+}
+
+static int chv_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen8_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaDisableThreadStallDopClockGating:chv */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+ /* Improve HiZ throughput on CHV. */
+ WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+
+ return 0;
+}
+
+static int gen9_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */
+ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaDisableKillLogic:bxt,skl,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ ECOCHK_DIS_TLB);
+
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_DG_MIRROR_FIX_ENABLE);
+
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN9_RHWO_OPTIMIZATION_DISABLE);
+ /*
+ * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
+	 * but we do that in the per-ctx batchbuffer, as there is an issue
+ * with this register not getting restored on ctx restore
+ */
+ }
+
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_GPGPU_PREEMPTION);
+
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+ GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
+
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+ /* WaDisableMaskBasedCammingInRCC:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
+ PIXEL_MASK_CAMMING_DISABLE);
+
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+ /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+ * both tied to WaForceContextSaveRestoreNonCoherent
+ * in some hsds for skl. We keep the tie for all gen9. The
+ * documentation is a bit hazy and so we want to get common behaviour,
+ * even though there is no clear evidence we would need both on kbl/bxt.
+ * This area has been source of system hangs so we play it safe
+ * and mimic the skl regardless of what bspec says.
+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+
+ /* WaForceEnableNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl */
+ I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_FLUSH_COHERENT_LINES));
+
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */
+ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+ if (ret)
+ return ret;
+
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ if (ret)
+ return ret;
+
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */
+ ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u8 vals[3] = { 0, 0, 0 };
+ unsigned int i;
+
+ for (i = 0; i < 3; i++) {
+ u8 ss;
+
+ /*
+ * Only consider slices where one, and only one, subslice has 7
+ * EUs
+ */
+ if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+ continue;
+
+ /*
+ * subslice_7eu[i] != 0 (because of the check above) and
+ * ss_max == 4 (maximum number of subslices possible per slice)
+ *
+ * -> 0 <= ss <= 3;
+ */
+ ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+ vals[i] = 3 - ss;
+ }
+
+ if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+ return 0;
+
+ /* Tune IZ hashing. See intel_device_info_runtime_init() */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN9_IZ_HASHING_MASK(2) |
+ GEN9_IZ_HASHING_MASK(1) |
+ GEN9_IZ_HASHING_MASK(0),
+ GEN9_IZ_HASHING(2, vals[2]) |
+ GEN9_IZ_HASHING(1, vals[1]) |
+ GEN9_IZ_HASHING(0, vals[0]));
+
+ return 0;
+}
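[Illustration only, not part of the patch: a worked pass through the loop above with a hypothetical fuse readout.]

	/*
	 * subslice_7eu[0] = 0x4: one bit set, ffs() = 3, ss = 2, vals[0] = 3 - 2 = 1
	 * subslice_7eu[1] = 0x3: two bits set, not a power of two, vals[1] stays 0
	 * subslice_7eu[2] = 0x0: no 7-EU subslice, vals[2] stays 0
	 *
	 * Only slice 0 then contributes a non-zero GEN9_IZ_HASHING(0, 1) field
	 * to the GEN7_GT_MODE write.
	 */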
+
+static int skl_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /*
+ * Actual WA is to disable percontext preemption granularity control
+	 * until D0, which is the default case, so this is equivalent to
+ * !WaDisablePerCtxtPreemptionGranularityControl:skl
+ */
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+
+ /* WaEnableGapsTsvCreditFix:skl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableGafsUnitClkGating:skl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaInPlaceDecompressionHang:skl */
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ /* WaDisableLSQCROPERFforOCL:skl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
+ return skl_tune_iz_hashing(engine);
+}
+
+static int bxt_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaStoreMultiplePTEenable:bxt */
+ /* This is a requirement according to Hardware specification */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
+
+ /* WaSetClckGatingDisableMedia:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+ ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
+ }
+
+ /* WaDisableThreadStallDopClockGating:bxt */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
+
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+ WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ /* WaDisableSbeCacheDispatchPortSharing:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ }
+
+ /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
+ /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
+ /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
+ /* WaDisableLSQCROPERFforOCL:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
+ if (ret)
+ return ret;
+
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+ }
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+ I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaToEnableHwFixForPushConstHWBug:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaInPlaceDecompressionHang:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaEnableGapsTsvCreditFix:kbl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT(GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE);
+
+ /* WaToEnableHwFixForPushConstHWBug:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ /* WaInPlaceDecompressionHang:kbl */
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int glk_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaToEnableHwFixForPushConstHWBug:glk */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ return 0;
+}
+
+int init_workarounds_ring(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int err;
+
+ WARN_ON(engine->id != RCS);
+
+ dev_priv->workarounds.count = 0;
+ dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
+
+ if (IS_BROADWELL(dev_priv))
+ err = bdw_init_workarounds(engine);
+ else if (IS_CHERRYVIEW(dev_priv))
+ err = chv_init_workarounds(engine);
+ else if (IS_SKYLAKE(dev_priv))
+ err = skl_init_workarounds(engine);
+ else if (IS_BROXTON(dev_priv))
+ err = bxt_init_workarounds(engine);
+ else if (IS_KABYLAKE(dev_priv))
+ err = kbl_init_workarounds(engine);
+ else if (IS_GEMINILAKE(dev_priv))
+ err = glk_init_workarounds(engine);
+ else
+ err = 0;
+ if (err)
+ return err;
+
+ DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
+ engine->name, dev_priv->workarounds.count);
+ return 0;
+}
+
+int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
+{
+ struct i915_workarounds *w = &req->i915->workarounds;
+ u32 *cs;
+ int ret, i;
+
+ if (w->count == 0)
+ return 0;
+
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ cs = intel_ring_begin(req, (w->count * 2 + 2));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(w->count);
+ for (i = 0; i < w->count; i++) {
+ *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+ *cs++ = w->reg[i].value;
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(req, cs);
+
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ return 0;
+}
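[Illustration only, not part of the patch: the dword budget behind intel_ring_begin(req, w->count * 2 + 2) above. With, say, three recorded workarounds the emitted stream is eight dwords, the trailing MI_NOOP padding the otherwise odd-length stream to an even number of dwords.]

	/*
	 * MI_LOAD_REGISTER_IMM(3)    1 dword
	 * 3 x (offset, value) pairs  6 dwords
	 * MI_NOOP                    1 dword
	 *                            --------
	 *                            8 dwords == 3 * 2 + 2
	 */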
+
+/**
+ * intel_engine_is_idle() - Report if the engine has finished processing all work
+ * @engine: the intel_engine_cs
+ *
+ * Return true if there are no requests pending, nothing left to be submitted
+ * to hardware, and the engine is idle.
+ */
+bool intel_engine_is_idle(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ /* Any inflight/incomplete requests? */
+ if (!i915_seqno_passed(intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine)))
+ return false;
+
+ /* Interrupt/tasklet pending? */
+ if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
+ return false;
+
+ /* Both ports drained, no more ELSP submission? */
+ if (engine->execlist_port[0].request)
+ return false;
+
+ /* Ring stopped? */
+ if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+ return false;
+
+ return true;
+}
+
+bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ return false;
+
+ /* If the driver is wedged, HW state may be very inconsistent and
+ * report that it is still busy, even though we have stopped using it.
+ */
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return true;
+
+ for_each_engine(engine, dev_priv, id) {
+ if (!intel_engine_is_idle(engine))
+ return false;
+ }
+
+ return true;
+}
+
+void intel_engines_reset_default_submission(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ engine->set_default_submission(engine);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_engine.c"
+#endif