Diffstat (limited to 'drivers/gpu/drm/i915/gt/selftest_lrc.c')
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 868
 1 file changed, 705 insertions(+), 163 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index eb71ac2f992c..b292f8cbd0bf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -50,14 +50,31 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
return vma;
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
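
These two helpers bracket any selftest that needs exclusive control of an engine: parking the heartbeat stops the background pulse from preempting or retiring the test's requests while it probes submission. A minimal usage sketch, using only the calls added above (the engine pointer comes from the caller):

	unsigned long saved;

	engine_heartbeat_disable(engine, &saved);  /* pins engine, parks heartbeat */
	/* ... submit and inspect requests without heartbeat interference ... */
	engine_heartbeat_enable(engine, saved);    /* unpins engine, restores interval */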
+
static int live_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
- struct i915_gem_engines_iter it;
- struct i915_gem_context *ctx;
- struct intel_context *ce;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
struct igt_spinner spin;
- int err = -ENOMEM;
+ int err = 0;
if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915))
return 0;
@@ -65,17 +82,20 @@ static int live_sanitycheck(void *arg)
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
- ctx = kernel_context(gt->i915);
- if (!ctx)
- goto err_spin;
-
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
struct i915_request *rq;
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_ctx;
+ goto out_ctx;
}
i915_request_add(rq);
@@ -84,21 +104,21 @@ static int live_sanitycheck(void *arg)
GEM_TRACE_DUMP();
intel_gt_set_wedged(gt);
err = -EIO;
- goto err_ctx;
+ goto out_ctx;
}
igt_spinner_end(&spin);
if (igt_flush_test(gt->i915)) {
err = -EIO;
- goto err_ctx;
+ goto out_ctx;
}
+
+out_ctx:
+ intel_context_put(ce);
+ if (err)
+ break;
}
- err = 0;
-err_ctx:
- i915_gem_context_unlock_engines(ctx);
- kernel_context_close(ctx);
-err_spin:
igt_spinner_fini(&spin);
return err;
}
@@ -106,7 +126,6 @@ err_spin:
static int live_unlite_restore(struct intel_gt *gt, int prio)
{
struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
enum intel_engine_id id;
struct igt_spinner spin;
int err = -ENOMEM;
@@ -119,15 +138,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
if (igt_spinner_init(&spin, gt))
return err;
- ctx = kernel_context(gt->i915);
- if (!ctx)
- goto err_spin;
-
err = 0;
for_each_engine(engine, gt, id) {
struct intel_context *ce[2] = {};
struct i915_request *rq[2];
struct igt_live_test t;
+ unsigned long saved;
int n;
if (prio && !intel_engine_has_preemption(engine))
@@ -140,11 +156,12 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
err = -EIO;
break;
}
+ engine_heartbeat_disable(engine, &saved);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
struct intel_context *tmp;
- tmp = intel_context_create(ctx, engine);
+ tmp = intel_context_create(engine);
if (IS_ERR(tmp)) {
err = PTR_ERR(tmp);
goto err_ce;
@@ -169,7 +186,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
}
GEM_BUG_ON(!ce[1]->ring->size);
intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
- __execlists_update_reg_state(ce[1], engine);
+ __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
if (IS_ERR(rq[0])) {
@@ -247,14 +264,13 @@ err_ce:
intel_context_put(ce[n]);
}
+ engine_heartbeat_enable(engine, saved);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
break;
}
- kernel_context_close(ctx);
-err_spin:
igt_spinner_fini(&spin);
return err;
}
@@ -269,6 +285,107 @@ static int live_unlite_preempt(void *arg)
return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
}
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ unsigned long heartbeat;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing, now remove it and reset */
+
+ if (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ engine_heartbeat_enable(engine, heartbeat);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
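
Stripped of the reset-flag and tasklet bookkeeping the test also performs, the hold/unhold flow being exercised above reduces to the following sketch; the driver's real offline capture would run where indicated:

	execlists_hold(engine, rq);        /* park rq and its waiters off the HW */
	intel_engine_reset(engine, NULL);  /* reset the engine around the held request */
	/* ... offline error capture can inspect rq safely here ... */
	execlists_unhold(engine, rq);      /* requeue rq for execution */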
+
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -309,17 +426,17 @@ emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
static struct i915_request *
semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
{
- struct i915_gem_context *ctx;
+ struct intel_context *ce;
struct i915_request *rq;
int err;
- ctx = kernel_context(engine->i915);
- if (!ctx)
- return ERR_PTR(-ENOMEM);
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ERR_CAST(ce);
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
- goto out_ctx;
+ goto out_ce;
err = 0;
if (rq->engine->emit_init_breadcrumb)
@@ -332,8 +449,8 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx)
if (err)
rq = ERR_PTR(err);
-out_ctx:
- kernel_context_close(ctx);
+out_ce:
+ intel_context_put(ce);
return rq;
}
@@ -348,7 +465,7 @@ release_queue(struct intel_engine_cs *engine,
struct i915_request *rq;
u32 *cs;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -468,12 +585,16 @@ static int live_timeslice_preempt(void *arg)
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
+ unsigned long saved;
+
if (!intel_engine_has_preemption(engine))
continue;
memset(vaddr, 0, PAGE_SIZE);
+ engine_heartbeat_disable(engine, &saved);
err = slice_semaphore_queue(engine, vma, count);
+ engine_heartbeat_enable(engine, saved);
if (err)
goto err_pin;
@@ -497,7 +618,7 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
{
struct i915_request *rq;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return rq;
@@ -507,13 +628,19 @@ static struct i915_request *nop_request(struct intel_engine_cs *engine)
return rq;
}
-static void wait_for_submit(struct intel_engine_cs *engine,
- struct i915_request *rq)
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
{
+ timeout += jiffies;
do {
cond_resched();
intel_engine_flush_submission(engine);
- } while (!i915_request_is_active(rq));
+ if (i915_request_is_active(rq))
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIME;
}
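
Callers of the reworked helper now pick a timeout and report the failure instead of spinning forever; the pattern used later in this patch looks like (the error label is illustrative):

	err = wait_for_submit(engine, rq, HZ / 2);
	if (err) {
		pr_err("%s: Timed out waiting for submission\n", engine->name);
		goto err_rq;
	}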
static long timeslice_threshold(const struct intel_engine_cs *engine)
@@ -566,40 +693,49 @@ static int live_timeslice_queue(void *arg)
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
struct i915_request *rq, *nop;
+ unsigned long saved;
if (!intel_engine_has_preemption(engine))
continue;
+ engine_heartbeat_disable(engine, &saved);
memset(vaddr, 0, PAGE_SIZE);
/* ELSP[0]: semaphore wait */
rq = semaphore_queue(engine, vma, 0);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
- goto err_pin;
+ goto err_heartbeat;
}
engine->schedule(rq, &attr);
- wait_for_submit(engine, rq);
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err) {
+ pr_err("%s: Timed out trying to submit semaphores\n",
+ engine->name);
+ goto err_rq;
+ }
/* ELSP[1]: nop request */
nop = nop_request(engine);
if (IS_ERR(nop)) {
err = PTR_ERR(nop);
- i915_request_put(rq);
- goto err_pin;
+ goto err_rq;
}
- wait_for_submit(engine, nop);
+ err = wait_for_submit(engine, nop, HZ / 2);
i915_request_put(nop);
+ if (err) {
+ pr_err("%s: Timed out trying to submit nop\n",
+ engine->name);
+ goto err_rq;
+ }
GEM_BUG_ON(i915_request_completed(rq));
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Queue: semaphore signal, matching priority as semaphore */
err = release_queue(engine, vma, 1, effective_prio(rq));
- if (err) {
- i915_request_put(rq);
- goto err_pin;
- }
+ if (err)
+ goto err_rq;
intel_engine_flush_submission(engine);
if (!READ_ONCE(engine->execlists.timer.expires) &&
@@ -630,12 +766,14 @@ static int live_timeslice_queue(void *arg)
memset(vaddr, 0xff, PAGE_SIZE);
err = -EIO;
}
+err_rq:
i915_request_put(rq);
+err_heartbeat:
+ engine_heartbeat_enable(engine, saved);
if (err)
break;
}
-err_pin:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
@@ -748,15 +886,19 @@ static int live_busywait_preempt(void *arg)
*cs++ = 0;
intel_ring_advance(lo, cs);
+
+ i915_request_get(lo);
i915_request_add(lo);
if (wait_for(READ_ONCE(*map), 10)) {
+ i915_request_put(lo);
err = -ETIMEDOUT;
goto err_vma;
}
/* Low priority request should be busywaiting now */
if (i915_request_wait(lo, 0, 1) != -ETIME) {
+ i915_request_put(lo);
pr_err("%s: Busywaiting request did not!\n",
engine->name);
err = -EIO;
@@ -766,6 +908,7 @@ static int live_busywait_preempt(void *arg)
hi = igt_request_alloc(ctx_hi, engine);
if (IS_ERR(hi)) {
err = PTR_ERR(hi);
+ i915_request_put(lo);
goto err_vma;
}
@@ -773,6 +916,7 @@ static int live_busywait_preempt(void *arg)
if (IS_ERR(cs)) {
err = PTR_ERR(cs);
i915_request_add(hi);
+ i915_request_put(lo);
goto err_vma;
}
@@ -793,11 +937,13 @@ static int live_busywait_preempt(void *arg)
intel_engine_dump(engine, &p, "%s\n", engine->name);
GEM_TRACE_DUMP();
+ i915_request_put(lo);
intel_gt_set_wedged(gt);
err = -EIO;
goto err_vma;
}
GEM_BUG_ON(READ_ONCE(*map));
+ i915_request_put(lo);
if (igt_live_test_end(&t)) {
err = -EIO;
@@ -1108,7 +1254,7 @@ static int live_nopreempt(void *arg)
}
/* Low priority client, but unpreemptable! */
- rq_a->flags |= I915_REQUEST_NOPREEMPT;
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags);
i915_request_add(rq_a);
if (!igt_wait_for_spinner(&a.spin, rq_a)) {
@@ -1187,13 +1333,13 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
__func__, arg->engine->name))
return -EIO;
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq))
return PTR_ERR(rq);
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1201,7 +1347,7 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
goto out;
}
- i915_gem_context_set_banned(arg->a.ctx);
+ intel_context_set_banned(rq->context);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -1236,13 +1382,13 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
__func__, arg->engine->name))
return -EIO;
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq[0] = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_NOOP); /* no preemption */
if (IS_ERR(rq[0]))
return PTR_ERR(rq[0]);
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
i915_request_get(rq[0]);
i915_request_add(rq[0]);
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1250,7 +1396,6 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
}
- clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
rq[1] = spinner_create_request(&arg->b.spin,
arg->b.ctx, arg->engine,
MI_ARB_CHECK);
@@ -1259,13 +1404,14 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
}
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
i915_request_get(rq[1]);
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
i915_request_add(rq[1]);
if (err)
goto out;
- i915_gem_context_set_banned(arg->b.ctx);
+ intel_context_set_banned(rq[1]->context);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -1308,13 +1454,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
__func__, arg->engine->name))
return -EIO;
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq[0] = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_ARB_CHECK);
if (IS_ERR(rq[0]))
return PTR_ERR(rq[0]);
+ clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
i915_request_get(rq[0]);
i915_request_add(rq[0]);
if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
@@ -1322,13 +1468,13 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
goto out;
}
- clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
if (IS_ERR(rq[1])) {
err = PTR_ERR(rq[1]);
goto out;
}
+ clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
i915_request_get(rq[1]);
err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
i915_request_add(rq[1]);
@@ -1349,7 +1495,7 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- i915_gem_context_set_banned(arg->a.ctx);
+ intel_context_set_banned(rq[2]->context);
err = intel_engine_pulse(arg->engine);
if (err)
goto out;
@@ -1396,13 +1542,13 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
return 0;
GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
- clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
rq = spinner_create_request(&arg->a.spin,
arg->a.ctx, arg->engine,
MI_NOOP); /* preemption disabled */
if (IS_ERR(rq))
return PTR_ERR(rq);
+ clear_bit(CONTEXT_BANNED, &rq->context->flags);
i915_request_get(rq);
i915_request_add(rq);
if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
@@ -1410,7 +1556,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
goto out;
}
- i915_gem_context_set_banned(arg->a.ctx);
+ intel_context_set_banned(rq->context);
err = intel_engine_pulse(arg->engine); /* force reset */
if (err)
goto out;
@@ -1665,6 +1811,7 @@ static int live_suppress_wait_preempt(void *arg)
{
struct intel_gt *gt = arg;
struct preempt_client client[4];
+ struct i915_request *rq[ARRAY_SIZE(client)] = {};
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -1698,7 +1845,6 @@ static int live_suppress_wait_preempt(void *arg)
continue;
for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
- struct i915_request *rq[ARRAY_SIZE(client)];
struct i915_request *dummy;
engine->execlists.preempt_hang.count = 0;
@@ -1708,18 +1854,22 @@ static int live_suppress_wait_preempt(void *arg)
goto err_client_3;
for (i = 0; i < ARRAY_SIZE(client); i++) {
- rq[i] = spinner_create_request(&client[i].spin,
- client[i].ctx, engine,
- MI_NOOP);
- if (IS_ERR(rq[i])) {
- err = PTR_ERR(rq[i]);
+ struct i915_request *this;
+
+ this = spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
goto err_wedged;
}
/* Disable NEWCLIENT promotion */
- __i915_active_fence_set(&i915_request_timeline(rq[i])->last_request,
+ __i915_active_fence_set(&i915_request_timeline(this)->last_request,
&dummy->fence);
- i915_request_add(rq[i]);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
}
dummy_request_free(dummy);
@@ -1740,8 +1890,11 @@ static int live_suppress_wait_preempt(void *arg)
goto err_wedged;
}
- for (i = 0; i < ARRAY_SIZE(client); i++)
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
igt_spinner_end(&client[i].spin);
+ i915_request_put(rq[i]);
+ rq[i] = NULL;
+ }
if (igt_flush_test(gt->i915))
goto err_wedged;
@@ -1769,8 +1922,10 @@ err_client_0:
return err;
err_wedged:
- for (i = 0; i < ARRAY_SIZE(client); i++)
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
igt_spinner_end(&client[i].spin);
+ i915_request_put(rq[i]);
+ }
intel_gt_set_wedged(gt);
err = -EIO;
goto err_client_3;
@@ -1815,6 +1970,8 @@ static int live_chain_preempt(void *arg)
MI_ARB_CHECK);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
ring_size = rq->wa_tail - rq->head;
@@ -1827,8 +1984,10 @@ static int live_chain_preempt(void *arg)
igt_spinner_end(&lo.spin);
if (i915_request_wait(rq, 0, HZ / 2) < 0) {
pr_err("Timed out waiting to flush %s\n", engine->name);
+ i915_request_put(rq);
goto err_wedged;
}
+ i915_request_put(rq);
if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
err = -EIO;
@@ -1862,6 +2021,8 @@ static int live_chain_preempt(void *arg)
rq = igt_request_alloc(hi.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
engine->schedule(rq, &attr);
@@ -1874,14 +2035,19 @@ static int live_chain_preempt(void *arg)
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
+ i915_request_put(rq);
goto err_wedged;
}
igt_spinner_end(&lo.spin);
+ i915_request_put(rq);
rq = igt_request_alloc(lo.ctx, engine);
if (IS_ERR(rq))
goto err_wedged;
+
+ i915_request_get(rq);
i915_request_add(rq);
+
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
struct drm_printer p =
drm_info_printer(gt->i915->drm.dev);
@@ -1890,8 +2056,11 @@ static int live_chain_preempt(void *arg)
count);
intel_engine_dump(engine, &p,
"%s\n", engine->name);
+
+ i915_request_put(rq);
goto err_wedged;
}
+ i915_request_put(rq);
}
if (igt_live_test_end(&t)) {
@@ -1915,6 +2084,201 @@ err_wedged:
goto err_client_lo;
}
+static int create_gang(struct intel_engine_cs *engine,
+ struct i915_request **prev)
+{
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ce;
+ }
+
+ vma = i915_vma_instance(obj, ce->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_obj;
+ }
+
+ /* Semaphore target: spin until zero */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = lower_32_bits(vma->node.start);
+ *cs++ = upper_32_bits(vma->node.start);
+
+ if (*prev) {
+ u64 offset = (*prev)->batch->node.start;
+
+ /* Terminate the spinner in the next lower priority batch. */
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = 0;
+ }
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_obj;
+ }
+
+ rq->batch = vma;
+ i915_request_get(rq);
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, false);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (!err)
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ i915_vma_unlock(vma);
+ i915_request_add(rq);
+ if (err)
+ goto err_rq;
+
+ i915_gem_object_put(obj);
+ intel_context_put(ce);
+
+ rq->client_link.next = &(*prev)->client_link;
+ *prev = rq;
+ return 0;
+
+err_rq:
+ i915_request_put(rq);
+err_obj:
+ i915_gem_object_put(obj);
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
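
Each batch built above busy-waits until dword 0 of its own buffer reads zero, and once released it zeroes dword 0 of the previously queued, lower-priority batch. Unwinding the whole gang therefore only needs the CPU to release the newest request, which is what the caller does below; a sketch of that release, assuming rq is the last request returned through *prev:

	u32 *cs;

	cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
	if (!IS_ERR(cs)) {
		*cs = 0;	/* release the highest-priority spinner */
		i915_gem_object_unpin_map(rq->batch->obj);
	}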
+
+static int live_preempt_gang(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
+ return 0;
+
+ /*
+ * Build as long a chain of preempters as we can, with each
+ * request higher priority than the last. Once we are ready, we release
+ * the last batch which then percolates down the chain, each releasing
+ * the next oldest in turn. The intent is to simply push as hard as we
+ * can with the number of preemptions, trying to exceed narrow HW
+ * limits. At a minimum, we insist that we can sort all the user
+ * high priority levels into execution order.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq = NULL;
+ struct igt_live_test t;
+ IGT_TIMEOUT(end_time);
+ int prio = 0;
+ int err = 0;
+ u32 *cs;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name))
+ return -EIO;
+
+ do {
+ struct i915_sched_attr attr = {
+ .priority = I915_USER_PRIORITY(prio++),
+ };
+
+ err = create_gang(engine, &rq);
+ if (err)
+ break;
+
+ /* Submit each spinner at increasing priority */
+ engine->schedule(rq, &attr);
+
+ if (prio <= I915_PRIORITY_MAX)
+ continue;
+
+ if (prio > (INT_MAX >> I915_USER_PRIORITY_SHIFT))
+ break;
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ } while (1);
+ pr_debug("%s: Preempt chain of %d requests\n",
+ engine->name, prio);
+
+ /*
+ * The last spinner submitted is the highest priority and
+ * should execute first. When that spinner completes,
+ * it will terminate the next lowest spinner until there
+ * are no more spinners and the gang is complete.
+ */
+ cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC);
+ if (!IS_ERR(cs)) {
+ *cs = 0;
+ i915_gem_object_unpin_map(rq->batch->obj);
+ } else {
+ err = PTR_ERR(cs);
+ intel_gt_set_wedged(gt);
+ }
+
+ while (rq) { /* wait for each rq from highest to lowest prio */
+ struct i915_request *n =
+ list_next_entry(rq, client_link);
+
+ if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(engine->i915->drm.dev);
+
+ pr_err("Failed to flush chain of %d requests, at %d\n",
+ prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -ETIME;
+ }
+
+ i915_request_put(rq);
+ rq = n;
+ }
+
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int live_preempt_hang(void *arg)
{
struct intel_gt *gt = arg;
@@ -2391,28 +2755,18 @@ static int nop_virtual_engine(struct intel_gt *gt,
#define CHAIN BIT(0)
{
IGT_TIMEOUT(end_time);
- struct i915_request *request[16];
- struct i915_gem_context *ctx[16];
+ struct i915_request *request[16] = {};
struct intel_context *ve[16];
unsigned long n, prime, nc;
struct igt_live_test t;
ktime_t times[2] = {};
int err;
- GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx));
+ GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
for (n = 0; n < nctx; n++) {
- ctx[n] = kernel_context(gt->i915);
- if (!ctx[n]) {
- err = -ENOMEM;
- nctx = n;
- goto out;
- }
-
- ve[n] = intel_execlists_create_virtual(ctx[n],
- siblings, nsibling);
+ ve[n] = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve[n])) {
- kernel_context_close(ctx[n]);
err = PTR_ERR(ve[n]);
nctx = n;
goto out;
@@ -2421,7 +2775,6 @@ static int nop_virtual_engine(struct intel_gt *gt,
err = intel_context_pin(ve[n]);
if (err) {
intel_context_put(ve[n]);
- kernel_context_close(ctx[n]);
nctx = n;
goto out;
}
@@ -2437,27 +2790,35 @@ static int nop_virtual_engine(struct intel_gt *gt,
if (flags & CHAIN) {
for (nc = 0; nc < nctx; nc++) {
for (n = 0; n < prime; n++) {
- request[nc] =
- i915_request_create(ve[nc]);
- if (IS_ERR(request[nc])) {
- err = PTR_ERR(request[nc]);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto out;
}
- i915_request_add(request[nc]);
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
}
}
} else {
for (n = 0; n < prime; n++) {
for (nc = 0; nc < nctx; nc++) {
- request[nc] =
- i915_request_create(ve[nc]);
- if (IS_ERR(request[nc])) {
- err = PTR_ERR(request[nc]);
+ struct i915_request *rq;
+
+ rq = i915_request_create(ve[nc]);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto out;
}
- i915_request_add(request[nc]);
+ if (request[nc])
+ i915_request_put(request[nc]);
+ request[nc] = i915_request_get(rq);
+ i915_request_add(rq);
}
}
}
@@ -2483,6 +2844,11 @@ static int nop_virtual_engine(struct intel_gt *gt,
if (prime == 1)
times[0] = times[1];
+ for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
+ request[nc] = NULL;
+ }
+
if (__igt_timeout(end_time, NULL))
break;
}
@@ -2500,9 +2866,9 @@ out:
err = -EIO;
for (nc = 0; nc < nctx; nc++) {
+ i915_request_put(request[nc]);
intel_context_unpin(ve[nc]);
intel_context_put(ve[nc]);
- kernel_context_close(ctx[nc]);
}
return err;
}
@@ -2561,7 +2927,6 @@ static int mask_virtual_engine(struct intel_gt *gt,
unsigned int nsibling)
{
struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
- struct i915_gem_context *ctx;
struct intel_context *ve;
struct igt_live_test t;
unsigned int n;
@@ -2572,11 +2937,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
* restrict it to our desired engine within the virtual engine.
*/
- ctx = kernel_context(gt->i915);
- if (!ctx)
- return -ENOMEM;
-
- ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_close;
@@ -2644,7 +3005,6 @@ out_unpin:
out_put:
intel_context_put(ve);
out_close:
- kernel_context_close(ctx);
return err;
}
@@ -2684,7 +3044,6 @@ static int preserved_virtual_engine(struct intel_gt *gt,
unsigned int nsibling)
{
struct i915_request *last = NULL;
- struct i915_gem_context *ctx;
struct intel_context *ve;
struct i915_vma *scratch;
struct igt_live_test t;
@@ -2692,17 +3051,11 @@ static int preserved_virtual_engine(struct intel_gt *gt,
int err = 0;
u32 *cs;
- ctx = kernel_context(gt->i915);
- if (!ctx)
- return -ENOMEM;
-
scratch = create_scratch(siblings[0]->gt);
- if (IS_ERR(scratch)) {
- err = PTR_ERR(scratch);
- goto out_close;
- }
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
- ve = intel_execlists_create_virtual(ctx, siblings, nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_scratch;
@@ -2785,8 +3138,6 @@ out_put:
intel_context_put(ve);
out_scratch:
i915_vma_unpin_and_release(&scratch, 0);
-out_close:
- kernel_context_close(ctx);
return err;
}
@@ -2838,16 +3189,54 @@ static int bond_virtual_engine(struct intel_gt *gt,
#define BOND_SCHEDULE BIT(0)
{
struct intel_engine_cs *master;
- struct i915_gem_context *ctx;
struct i915_request *rq[16];
enum intel_engine_id id;
+ struct igt_spinner spin;
unsigned long n;
int err;
+ /*
+ * A set of bonded requests is intended to be run concurrently
+ * across a number of engines. We use one request per-engine
+ * and a magic fence to schedule each of the bonded requests
+ * at the same time. A consequence of our current scheduler is that
+ * we only move requests to the HW ready queue when the request
+ * becomes ready, that is when all of its prerequisite fences have
+ * been signaled. As one of those fences is the master submit fence,
+ * there is a delay on all secondary fences as the HW may be
+ * currently busy. Equally, as all the requests are independent,
+ * they may have other fences that delay individual request
+ * submission to HW. Ergo, we do not guarantee that all requests are
+ * immediately submitted to HW at the same time, just that if the
+ * rules are abided by, they are ready at the same time as the
+ * first is submitted. Userspace can embed semaphores in its batch
+ * to ensure parallel execution of its phases as it requires.
+ * Naturally, it has been suggested that the scheduler should
+ * take care of parallel execution, even across preemption events on
+ * different HW. (The proper answer is of course "lalalala".)
+ *
+ * With the submit-fence, we have identified three possible phases
+ * of synchronisation depending on the master fence: queued (not
+ * ready), executing, and signaled. The first two are quite simple
+ * and checked below. However, the signaled master fence handling is
+ * contentious. Currently we do not distinguish between a signaled
+ * fence and an expired fence, as once signaled it does not convey
+ * any information about the previous execution. It may even be freed
+ * and hence checking later it may not exist at all. Ergo we currently
+ * do not apply the bonding constraint for an already signaled fence,
+ * as our expectation is that it should not constrain the secondaries
+ * and is outside of the scope of the bonded request API (i.e. all
+ * userspace requests are meant to be running in parallel). As
+ * it imposes no constraint, and is effectively a no-op, we do not
+ * check below as normal execution flows are checked extensively above.
+ *
+ * XXX Is the degenerate handling of signaled submit fences the
+ * expected behaviour for userspace?
+ */
+
GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
- ctx = kernel_context(gt->i915);
- if (!ctx)
+ if (igt_spinner_init(&spin, gt))
return -ENOMEM;
err = 0;
@@ -2860,7 +3249,9 @@ static int bond_virtual_engine(struct intel_gt *gt,
memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
- rq[0] = igt_request_alloc(ctx, master);
+ rq[0] = igt_spinner_create_request(&spin,
+ master->kernel_context,
+ MI_NOOP);
if (IS_ERR(rq[0])) {
err = PTR_ERR(rq[0]);
goto out;
@@ -2873,16 +3264,21 @@ static int bond_virtual_engine(struct intel_gt *gt,
&fence,
GFP_KERNEL);
}
+
i915_request_add(rq[0]);
if (err < 0)
goto out;
+ if (!(flags & BOND_SCHEDULE) &&
+ !igt_wait_for_spinner(&spin, rq[0])) {
+ err = -EIO;
+ goto out;
+ }
+
for (n = 0; n < nsibling; n++) {
struct intel_context *ve;
- ve = intel_execlists_create_virtual(ctx,
- siblings,
- nsibling);
+ ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
onstack_fence_fini(&fence);
@@ -2924,6 +3320,8 @@ static int bond_virtual_engine(struct intel_gt *gt,
}
}
onstack_fence_fini(&fence);
+ intel_engine_flush_submission(master);
+ igt_spinner_end(&spin);
if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
pr_err("Master request did not execute (on %s)!\n",
@@ -2960,7 +3358,7 @@ out:
if (igt_flush_test(gt->i915))
err = -EIO;
- kernel_context_close(ctx);
+ igt_spinner_fini(&spin);
return err;
}
@@ -3012,12 +3410,168 @@ static int live_virtual_bond(void *arg)
return 0;
}
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ unsigned long *heartbeat;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ heartbeat = kmalloc_array(nsibling, sizeof(*heartbeat), GFP_KERNEL);
+ if (!heartbeat)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin, gt)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_disable(siblings[n], &heartbeat[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out_heartbeat;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; failed of course */
+ spin_lock_irq(&engine->active.lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->active.lock);
+ GEM_BUG_ON(rq->engine != ve->engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_enable(siblings[n], heartbeat[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+out_free:
+ kfree(heartbeat);
+ return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure it is not
+ * forgotten.
+ */
+
+ if (USES_GUC_SUBMISSION(gt->i915))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = gt->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
+ SUBTEST(live_hold_reset),
SUBTEST(live_timeslice_preempt),
SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
@@ -3028,6 +3582,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_suppress_self_preempt),
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
+ SUBTEST(live_preempt_gang),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
@@ -3035,6 +3590,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_virtual_mask),
SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_bond),
+ SUBTEST(live_virtual_reset),
};
if (!HAS_EXECLISTS(i915))
@@ -3080,7 +3636,7 @@ static int live_lrc_layout(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- u32 *mem;
+ u32 *lrc;
int err;
/*
@@ -3088,13 +3644,13 @@ static int live_lrc_layout(void *arg)
* match the layout saved by HW.
*/
- mem = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!mem)
+ lrc = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!lrc)
return -ENOMEM;
err = 0;
for_each_engine(engine, gt, id) {
- u32 *hw, *lrc;
+ u32 *hw;
int dw;
if (!engine->default_state)
@@ -3108,8 +3664,7 @@ static int live_lrc_layout(void *arg)
}
hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
- lrc = memset(mem, 0, PAGE_SIZE);
- execlists_init_reg_state(lrc,
+ execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE),
engine->kernel_context,
engine,
engine->kernel_context->ring,
@@ -3124,6 +3679,13 @@ static int live_lrc_layout(void *arg)
continue;
}
+ if (lrc[dw] == 0) {
+ pr_debug("%s: skipped instruction %x at dword %d\n",
+ engine->name, lri, dw);
+ dw++;
+ continue;
+ }
+
if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
pr_err("%s: Expected LRI command at dword %d, found %08x\n",
engine->name, dw, lri);
@@ -3172,7 +3734,7 @@ static int live_lrc_layout(void *arg)
break;
}
- kfree(mem);
+ kfree(lrc);
return err;
}
@@ -3207,12 +3769,12 @@ static int live_lrc_fixed(void *arg)
} tbl[] = {
{
i915_mmio_reg_offset(RING_START(engine->mmio_base)),
- CTX_RING_BUFFER_START - 1,
+ CTX_RING_START - 1,
"RING_START"
},
{
i915_mmio_reg_offset(RING_CTL(engine->mmio_base)),
- CTX_RING_BUFFER_CONTROL - 1,
+ CTX_RING_CTL - 1,
"RING_CTL"
},
{
@@ -3231,7 +3793,7 @@ static int live_lrc_fixed(void *arg)
"RING_MI_MODE"
},
{
- engine->mmio_base + 0x110,
+ i915_mmio_reg_offset(RING_BBSTATE(engine->mmio_base)),
CTX_BB_STATE - 1,
"BB_STATE"
},
@@ -3270,8 +3832,7 @@ static int live_lrc_fixed(void *arg)
return err;
}
-static int __live_lrc_state(struct i915_gem_context *fixme,
- struct intel_engine_cs *engine,
+static int __live_lrc_state(struct intel_engine_cs *engine,
struct i915_vma *scratch)
{
struct intel_context *ce;
@@ -3286,7 +3847,7 @@ static int __live_lrc_state(struct i915_gem_context *fixme,
int err;
int n;
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -3360,7 +3921,6 @@ static int live_lrc_state(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
struct i915_vma *scratch;
enum intel_engine_id id;
int err = 0;
@@ -3370,18 +3930,12 @@ static int live_lrc_state(void *arg)
* intel_context.
*/
- fixme = kernel_context(gt->i915);
- if (!fixme)
- return -ENOMEM;
-
scratch = create_scratch(gt);
- if (IS_ERR(scratch)) {
- err = PTR_ERR(scratch);
- goto out_close;
- }
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- err = __live_lrc_state(fixme, engine, scratch);
+ err = __live_lrc_state(engine, scratch);
if (err)
break;
}
@@ -3390,8 +3944,6 @@ static int live_lrc_state(void *arg)
err = -EIO;
i915_vma_unpin_and_release(&scratch, 0);
-out_close:
- kernel_context_close(fixme);
return err;
}
@@ -3401,7 +3953,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
u32 *cs;
int n;
- rq = i915_request_create(engine->kernel_context);
+ rq = intel_engine_create_kernel_request(engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -3424,8 +3976,7 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
return 0;
}
-static int __live_gpr_clear(struct i915_gem_context *fixme,
- struct intel_engine_cs *engine,
+static int __live_gpr_clear(struct intel_engine_cs *engine,
struct i915_vma *scratch)
{
struct intel_context *ce;
@@ -3441,7 +3992,7 @@ static int __live_gpr_clear(struct i915_gem_context *fixme,
if (err)
return err;
- ce = intel_context_create(fixme, engine);
+ ce = intel_context_create(engine);
if (IS_ERR(ce))
return PTR_ERR(ce);
@@ -3503,7 +4054,6 @@ static int live_gpr_clear(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
- struct i915_gem_context *fixme;
struct i915_vma *scratch;
enum intel_engine_id id;
int err = 0;
@@ -3513,18 +4063,12 @@ static int live_gpr_clear(void *arg)
* to avoid leaking any information from previous contexts.
*/
- fixme = kernel_context(gt->i915);
- if (!fixme)
- return -ENOMEM;
-
scratch = create_scratch(gt);
- if (IS_ERR(scratch)) {
- err = PTR_ERR(scratch);
- goto out_close;
- }
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- err = __live_gpr_clear(fixme, engine, scratch);
+ err = __live_gpr_clear(engine, scratch);
if (err)
break;
}
@@ -3533,8 +4077,6 @@ static int live_gpr_clear(void *arg)
err = -EIO;
i915_vma_unpin_and_release(&scratch, 0);
-out_close:
- kernel_context_close(fixme);
return err;
}