author	Chris Wilson <chris@chris-wilson.co.uk>	2019-07-12 20:29:53 +0100
committer	Chris Wilson <chris@chris-wilson.co.uk>	2019-07-12 21:06:56 +0100
commit	cb823ed9915b0d4064f3f51e936fbe13c089948a (patch)
tree	1583517aa39faef0047518a302de5a55ae54f50d /drivers/gpu/drm/i915/gt/selftest_reset.c
parent	drm/i915/tgl: add modular FIA to device info (diff)
drm/i915/gt: Use intel_gt as the primary object for handling resets
Having taken the first step in encapsulating the functionality by moving
the related files under gt/, the next step is to start encapsulating by
passing around the relevant structs rather than the global
drm_i915_private. In this step, we pass intel_gt to intel_reset.c

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190712192953.9187-1-chris@chris-wilson.co.uk
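For orientation, the shape of the conversion is roughly the sketch below. It is illustrative only and not part of the patch: the function name example_global_reset is hypothetical, the helpers are the ones appearing in the diff that follows, and device-level facilities such as runtime PM are still reached through gt->i915.

	/* Illustrative sketch of the drm_i915_private -> intel_gt conversion. */
	static int example_global_reset(void *arg)
	{
		struct intel_gt *gt = arg;	/* was: struct drm_i915_private *i915 = arg; */
		intel_wakeref_t wakeref;
		int err = 0;

		/* Reset helpers now operate on the GT rather than the whole device. */
		igt_global_reset_lock(gt);
		wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);

		intel_gt_reset(gt, ALL_ENGINES, NULL);	/* was: i915_reset(i915, ...) */

		intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
		igt_global_reset_unlock(gt);

		/* Wedged state is likewise queried per-GT. */
		if (intel_gt_is_wedged(gt))	/* was: i915_reset_failed(i915) */
			err = -EIO;

		return err;
	}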
Diffstat (limited to 'drivers/gpu/drm/i915/gt/selftest_reset.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 97
1 file changed, 48 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 672e32e1ef95..00a4f60cdfd5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -9,26 +9,29 @@
static int igt_global_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
unsigned int reset_count;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that we can issue a global GPU reset */
- igt_global_reset_lock(i915);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- reset_count = i915_reset_count(&i915->gpu_error);
+ reset_count = i915_reset_count(&gt->i915->gpu_error);
- i915_reset(i915, ALL_ENGINES, NULL);
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
- if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ if (i915_reset_count(&gt->i915->gpu_error) == reset_count) {
pr_err("No GPU reset recorded!\n");
err = -EINVAL;
}
- igt_global_reset_unlock(i915);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ igt_global_reset_unlock(gt);
- if (i915_reset_failed(i915))
+ if (intel_gt_is_wedged(gt))
err = -EIO;
return err;
@@ -36,72 +39,72 @@ static int igt_global_reset(void *arg)
static int igt_wedged_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
intel_wakeref_t wakeref;
/* Check that we can recover a wedged device with a GPU reset */
- igt_global_reset_lock(i915);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ igt_global_reset_lock(gt);
+ wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(gt);
- GEM_BUG_ON(!i915_reset_failed(i915));
- i915_reset(i915, ALL_ENGINES, NULL);
+ GEM_BUG_ON(!intel_gt_is_wedged(gt));
+ intel_gt_reset(gt, ALL_ENGINES, NULL);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- igt_global_reset_unlock(i915);
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+ igt_global_reset_unlock(gt);
- return i915_reset_failed(i915) ? -EIO : 0;
+ return intel_gt_is_wedged(gt) ? -EIO : 0;
}
static int igt_atomic_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
int err = 0;
/* Check that the resets are usable from atomic context */
- intel_gt_pm_get(&i915->gt);
- igt_global_reset_lock(i915);
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
- if (!igt_force_reset(i915))
+ if (!igt_force_reset(gt))
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
intel_engine_mask_t awake;
- GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+ GEM_TRACE("__intel_gt_reset under %s\n", p->name);
- awake = reset_prepare(i915);
+ awake = reset_prepare(gt);
p->critical_section_begin();
- err = intel_gpu_reset(i915, ALL_ENGINES);
+ err = __intel_gt_reset(gt, ALL_ENGINES);
p->critical_section_end();
- reset_finish(i915, awake);
+ reset_finish(gt, awake);
if (err) {
- pr_err("intel_gpu_reset failed under %s\n", p->name);
+ pr_err("__intel_gt_reset failed under %s\n", p->name);
break;
}
}
/* As we poke around the guts, do a full reset before continuing. */
- igt_force_reset(i915);
+ igt_force_reset(gt);
unlock:
- igt_global_reset_unlock(i915);
- intel_gt_pm_put(&i915->gt);
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
return err;
}
static int igt_atomic_engine_reset(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -109,33 +112,33 @@ static int igt_atomic_engine_reset(void *arg)
/* Check that the resets are usable from atomic context */
- if (!intel_has_reset_engine(i915))
+ if (!intel_has_reset_engine(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(i915))
+ if (USES_GUC_SUBMISSION(gt->i915))
return 0;
- intel_gt_pm_get(&i915->gt);
- igt_global_reset_lock(i915);
+ intel_gt_pm_get(gt);
+ igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
- if (!igt_force_reset(i915))
+ if (!igt_force_reset(gt))
goto out_unlock;
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
tasklet_disable_nosync(&engine->execlists.tasklet);
intel_engine_pm_get(engine);
for (p = igt_atomic_phases; p->name; p++) {
- GEM_TRACE("i915_reset_engine(%s) under %s\n",
+ GEM_TRACE("intel_engine_reset(%s) under %s\n",
engine->name, p->name);
p->critical_section_begin();
- err = i915_reset_engine(engine, NULL);
+ err = intel_engine_reset(engine, NULL);
p->critical_section_end();
if (err) {
- pr_err("i915_reset_engine(%s) failed under %s\n",
+ pr_err("intel_engine_reset(%s) failed under %s\n",
engine->name, p->name);
break;
}
@@ -148,11 +151,11 @@ static int igt_atomic_engine_reset(void *arg)
}
/* As we poke around the guts, do a full reset before continuing. */
- igt_force_reset(i915);
+ igt_force_reset(gt);
out_unlock:
- igt_global_reset_unlock(i915);
- intel_gt_pm_put(&i915->gt);
+ igt_global_reset_unlock(gt);
+ intel_gt_pm_put(gt);
return err;
}
@@ -165,17 +168,13 @@ int intel_reset_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_atomic_reset),
SUBTEST(igt_atomic_engine_reset),
};
- intel_wakeref_t wakeref;
- int err = 0;
+ struct intel_gt *gt = &i915->gt;
- if (!intel_has_gpu_reset(i915))
+ if (!intel_has_gpu_reset(gt->i915))
return 0;
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(gt))
return -EIO; /* we're long past hope of a successful reset */
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- err = i915_subtests(tests, i915);
-
- return err;
+ return intel_gt_live_subtests(tests, gt);
}