Diffstat (limited to 'drivers/gpu/drm/i915/intel_uncore.c')
 drivers/gpu/drm/i915/intel_uncore.c | 175
 1 file changed, 151 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4df7c2ef8576..448293eb638d 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -62,6 +62,11 @@ static inline void
fw_domain_reset(struct drm_i915_private *i915,
const struct intel_uncore_forcewake_domain *d)
{
+ /*
+ * We don't really know whether the powerwell for the forcewake domain we
+ * are trying to reset here exists at this point (engines could be fused
+ * off in ICL+), so don't wait for acks.
+ */
__raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}
@@ -134,7 +139,9 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
* in the hope that the original ack will be delivered along with
* the fallback ack.
*
- * This workaround is described in HSDES #1604254524
+ * This workaround is described in HSDES #1604254524 and it's known as:
+ * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
+ * although the name is a bit misleading.
*/
pass = 1;
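The rest of the fallback path is outside this hunk. As a rough sketch of the retry pattern the comment describes (the helpers set_fallback_bit()/original_ack_matches() and the pass limit are illustrative assumptions, not the driver's exact code):

	static bool wait_ack_with_fallback_sketch(struct intel_uncore_forcewake_domain *d,
						  bool want_set)
	{
		unsigned int pass = 1;
		bool ack_seen;

		do {
			/* kick the GT power state machine with the fallback wake bit */
			set_fallback_bit(d, true);
			/* give the hardware progressively longer to settle on each retry */
			udelay(10 * pass);
			/* hope the original ack was delivered along with the fallback ack */
			ack_seen = original_ack_matches(d, want_set);
			set_fallback_bit(d, false);
		} while (!ack_seen && pass++ < 10);

		return ack_seen;
	}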
@@ -1353,6 +1360,23 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
fw_domain_reset(dev_priv, d);
}
+static void fw_domain_fini(struct drm_i915_private *dev_priv,
+ enum forcewake_domain_id domain_id)
+{
+ struct intel_uncore_forcewake_domain *d;
+
+ if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
+ return;
+
+ d = &dev_priv->uncore.fw_domain[domain_id];
+
+ WARN_ON(d->wake_count);
+ WARN_ON(hrtimer_cancel(&d->timer));
+ memset(d, 0, sizeof(*d));
+
+ dev_priv->uncore.fw_domains &= ~BIT(domain_id);
+}
+
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
@@ -1372,7 +1396,8 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 11) {
int i;
- dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
+ dev_priv->uncore.funcs.force_wake_get =
+ fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
@@ -1565,6 +1590,40 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
&dev_priv->uncore.pmic_bus_access_nb);
}
+/*
+ * We might detect that some engines are fused off only after we have
+ * initialized the forcewake domains. Prune the forcewake domains so that
+ * they only reference engines that actually exist.
+ */
+void intel_uncore_prune(struct drm_i915_private *dev_priv)
+{
+ if (INTEL_GEN(dev_priv) >= 11) {
+ enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
+ enum forcewake_domain_id domain_id;
+ int i;
+
+ for (i = 0; i < I915_MAX_VCS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
+
+ if (HAS_ENGINE(dev_priv, _VCS(i)))
+ continue;
+
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(dev_priv, domain_id);
+ }
+
+ for (i = 0; i < I915_MAX_VECS; i++) {
+ domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
+
+ if (HAS_ENGINE(dev_priv, _VECS(i)))
+ continue;
+
+ if (fw_domains & BIT(domain_id))
+ fw_domain_fini(dev_priv, domain_id);
+ }
+ }
+}
+
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
/* Paranoia: make sure we have disabled everything before we exit. */
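intel_uncore_prune() has no caller in this file; the intent is that it runs once the engine fuse information is known, i.e. some time after intel_uncore_init(). A plausible ordering during early driver MMIO setup (the fuse-reading step is shown under a hypothetical name) would be:

	/* forcewake domains are set up optimistically for every possible engine */
	intel_uncore_init(dev_priv);

	/* read the fuse registers to learn which VDBOX/VEBOX engines really
	 * exist (hypothetical helper name, for illustration only) */
	read_media_engine_fuses(dev_priv);

	/* drop the forcewake domains that map to fused-off engines */
	intel_uncore_prune(dev_priv);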
@@ -1646,11 +1705,10 @@ static void gen3_stop_engine(struct intel_engine_cs *engine)
const i915_reg_t mode = RING_MI_MODE(base);
I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register_fw(dev_priv,
- mode,
- MODE_IDLE,
- MODE_IDLE,
- 500))
+ if (__intel_wait_for_register_fw(dev_priv,
+ mode, MODE_IDLE, MODE_IDLE,
+ 500, 0,
+ NULL))
DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n",
engine->name);
@@ -1804,9 +1862,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
__raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
/* Wait for the device to ack the reset requests */
- err = intel_wait_for_register_fw(dev_priv,
- GEN6_GDRST, hw_domain_mask, 0,
- 500);
+ err = __intel_wait_for_register_fw(dev_priv,
+ GEN6_GDRST, hw_domain_mask, 0,
+ 500, 0,
+ NULL);
if (err)
DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
hw_domain_mask);
@@ -1854,6 +1913,50 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
}
/**
+ * gen11_reset_engines - reset individual engines
+ * @dev_priv: i915 device
+ * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
+ *
+ * This function will reset the individual engines that are set in engine_mask.
+ * If ALL_ENGINES is passed as the mask, a full global domain reset is issued
+ * instead.
+ *
+ * Note: It is the responsibility of the caller to handle the difference
+ * between asking for a full domain reset and a reset of all available
+ * individual engines.
+ *
+ * Returns 0 on success, nonzero on error.
+ */
+static int gen11_reset_engines(struct drm_i915_private *dev_priv,
+ unsigned engine_mask)
+{
+ struct intel_engine_cs *engine;
+ const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+ [RCS] = GEN11_GRDOM_RENDER,
+ [BCS] = GEN11_GRDOM_BLT,
+ [VCS] = GEN11_GRDOM_MEDIA,
+ [VCS2] = GEN11_GRDOM_MEDIA2,
+ [VCS3] = GEN11_GRDOM_MEDIA3,
+ [VCS4] = GEN11_GRDOM_MEDIA4,
+ [VECS] = GEN11_GRDOM_VECS,
+ [VECS2] = GEN11_GRDOM_VECS2,
+ };
+ u32 hw_mask;
+
+ BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
+
+ if (engine_mask == ALL_ENGINES) {
+ hw_mask = GEN11_GRDOM_FULL;
+ } else {
+ unsigned int tmp;
+
+ hw_mask = 0;
+ for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
+ hw_mask |= hw_engine_mask[engine->id];
+ }
+
+ return gen6_hw_domain_reset(dev_priv, hw_mask);
+}
+
+/**
* __intel_wait_for_register_fw - wait until register matches expected state
* @dev_priv: the i915 device
* @reg: the register to read
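To make the engine_mask to hw_mask translation in gen11_reset_engines() above concrete: a request to reset only the first video decode and video enhancement engines would, roughly, come out as:

	/* engine_mask = BIT(VCS) | BIT(VECS), i.e. not ALL_ENGINES */
	u32 hw_mask = hw_engine_mask[VCS] | hw_engine_mask[VECS];
	/* == GEN11_GRDOM_MEDIA | GEN11_GRDOM_VECS, handed to gen6_hw_domain_reset() */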
@@ -1940,7 +2043,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
u32 reg_value;
int ret;
- might_sleep();
+ might_sleep_if(slow_timeout_ms);
spin_lock_irq(&dev_priv->uncore.lock);
intel_uncore_forcewake_get__locked(dev_priv, fw);
@@ -1952,7 +2055,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
intel_uncore_forcewake_put__locked(dev_priv, fw);
spin_unlock_irq(&dev_priv->uncore.lock);
- if (ret)
+ if (ret && slow_timeout_ms)
ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
(reg_value & mask) == value,
slow_timeout_ms * 1000, 10, 1000);
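With this change the sleeping phase of __intel_wait_for_register() is only entered when a non-zero slow_timeout_ms is passed, so the helper can also be used where sleeping is not allowed. A hedged usage sketch, using the signatures as they appear in this kernel:

	u32 val;
	int err;

	/* atomic context: spin for up to 500us and never sleep (slow_timeout_ms = 0);
	 * the caller must already hold the relevant forcewake for the _fw variant */
	err = __intel_wait_for_register_fw(dev_priv, GEN6_GDRST,
					   hw_domain_mask, 0,
					   500, 0, NULL);

	/* process context: spin briefly, then sleep-wait for up to 50ms */
	err = __intel_wait_for_register(dev_priv, RING_MI_MODE(base),
					MODE_IDLE, MODE_IDLE,
					500, 50, &val);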
@@ -1971,11 +2074,12 @@ static int gen8_reset_engine_start(struct intel_engine_cs *engine)
I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
- ret = intel_wait_for_register_fw(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700);
+ ret = __intel_wait_for_register_fw(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700, 0,
+ NULL);
if (ret)
DRM_ERROR("%s: reset request timeout\n", engine->name);
@@ -2000,7 +2104,10 @@ static int gen8_reset_engines(struct drm_i915_private *dev_priv,
if (gen8_reset_engine_start(engine))
goto not_ready;
- return gen6_reset_engines(dev_priv, engine_mask);
+ if (INTEL_GEN(dev_priv) >= 11)
+ return gen11_reset_engines(dev_priv, engine_mask);
+ else
+ return gen6_reset_engines(dev_priv, engine_mask);
not_ready:
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
@@ -2038,15 +2145,31 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
int retry;
int ret;
- might_sleep();
+ /*
+ * We want to perform per-engine reset from atomic context (e.g.
+ * softirq), which imposes the constraint that we cannot sleep.
+ * However, experience suggests that spending a bit of time waiting
+ * for a reset helps in various cases, so for a full-device reset
+ * we apply the opposite rule and wait if we want to. As we should
+ * always follow up a failed per-engine reset with a full device reset,
+ * being a little faster, stricter and more error prone for the
+ * atomic case seems an acceptable compromise.
+ *
+ * Unfortunately this leads to a bimodal routine, when the goal was
+ * to have a single reset function that worked for resetting any
+ * number of engines simultaneously.
+ */
+ might_sleep_if(engine_mask == ALL_ENGINES);
- /* If the power well sleeps during the reset, the reset
+ /*
+ * If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
for (retry = 0; retry < 3; retry++) {
- /* We stop engines, otherwise we might get failed reset and a
+ /*
+ * We stop engines, otherwise we might get failed reset and a
* dead gpu (on elk). Also as modern gpu as kbl can suffer
* from system hang if batchbuffer is progressing when
* the reset is issued, regardless of READY_TO_RESET ack.
@@ -2060,9 +2183,11 @@ int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
i915_stop_engines(dev_priv, engine_mask);
ret = -ENODEV;
- if (reset)
+ if (reset) {
+ GEM_TRACE("engine_mask=%x\n", engine_mask);
ret = reset(dev_priv, engine_mask);
- if (ret != -ETIMEDOUT)
+ }
+ if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
break;
cond_resched();
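In caller terms, the contract described in the comment above looks roughly like this (a sketch, not code from this patch): try an atomic single-engine reset first and fall back to a full, sleeping device reset if it fails.

	/* e.g. from the hang-recovery path: reset one hung engine without sleeping */
	err = intel_gpu_reset(dev_priv, intel_engine_flag(engine));

	/* escalate to a full device reset from process context; only this path
	 * is allowed to sleep and retries on -ETIMEDOUT */
	if (err)
		err = intel_gpu_reset(dev_priv, ALL_ENGINES);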
@@ -2085,12 +2210,14 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
int intel_reset_guc(struct drm_i915_private *dev_priv)
{
+ u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC :
+ GEN9_GRDOM_GUC;
int ret;
GEM_BUG_ON(!HAS_GUC(dev_priv));
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC);
+ ret = gen6_hw_domain_reset(dev_priv, guc_domain);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return ret;