Diffstat (limited to 'drivers/gpu/drm/i915/selftests/intel_lrc.c')
-rw-r--r-- | drivers/gpu/drm/i915/selftests/intel_lrc.c | 566
1 file changed, 314 insertions, 252 deletions
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1aea7a8f2224..ca461e3a5f27 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,215 +6,18 @@
 
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
+#include "igt_spinner.h"
+#include "i915_random.h"
 
 #include "mock_context.h"
 
-struct spinner {
-	struct drm_i915_private *i915;
-	struct drm_i915_gem_object *hws;
-	struct drm_i915_gem_object *obj;
-	u32 *batch;
-	void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
-	unsigned int mode;
-	void *vaddr;
-	int err;
-
-	GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
-	memset(spin, 0, sizeof(*spin));
-	spin->i915 = i915;
-
-	spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->hws)) {
-		err = PTR_ERR(spin->hws);
-		goto err;
-	}
-
-	spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
-	if (IS_ERR(spin->obj)) {
-		err = PTR_ERR(spin->obj);
-		goto err_hws;
-	}
-
-	i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
-	vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-	spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
-	mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
-	vaddr = i915_gem_object_pin_map(spin->obj, mode);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_unpin_hws;
-	}
-	spin->batch = vaddr;
-
-	return 0;
-
-err_unpin_hws:
-	i915_gem_object_unpin_map(spin->hws);
-err_obj:
-	i915_gem_object_put(spin->obj);
-err_hws:
-	i915_gem_object_put(spin->hws);
-err:
-	return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
-	return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
-		       const struct i915_request *rq)
-{
-	return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
-			      struct i915_request *rq,
-			      u32 arbitration_command)
-{
-	struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
-	struct i915_vma *hws, *vma;
-	u32 *batch;
-	int err;
-
-	vma = i915_vma_instance(spin->obj, vm, NULL);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
-	hws = i915_vma_instance(spin->hws, vm, NULL);
-	if (IS_ERR(hws))
-		return PTR_ERR(hws);
-
-	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err)
-		return err;
-
-	err = i915_vma_pin(hws, 0, 0, PIN_USER);
-	if (err)
-		goto unpin_vma;
-
-	err = i915_vma_move_to_active(vma, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(vma->obj)) {
-		i915_gem_object_get(vma->obj);
-		i915_gem_object_set_active_reference(vma->obj);
-	}
-
-	err = i915_vma_move_to_active(hws, rq, 0);
-	if (err)
-		goto unpin_hws;
-
-	if (!i915_gem_object_has_active_reference(hws->obj)) {
-		i915_gem_object_get(hws->obj);
-		i915_gem_object_set_active_reference(hws->obj);
-	}
-
-	batch = spin->batch;
-
-	*batch++ = MI_STORE_DWORD_IMM_GEN4;
-	*batch++ = lower_32_bits(hws_address(hws, rq));
-	*batch++ = upper_32_bits(hws_address(hws, rq));
-	*batch++ = rq->fence.seqno;
-
-	*batch++ = arbitration_command;
-
-	*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-	*batch++ = lower_32_bits(vma->node.start);
-	*batch++ = upper_32_bits(vma->node.start);
-	*batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
-	i915_gem_chipset_flush(spin->i915);
-
-	err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
-	i915_vma_unpin(hws);
-unpin_vma:
-	i915_vma_unpin(vma);
-	return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
-		       struct i915_gem_context *ctx,
-		       struct intel_engine_cs *engine,
-		       u32 arbitration_command)
-{
-	struct i915_request *rq;
-	int err;
-
-	rq = i915_request_alloc(engine, ctx);
-	if (IS_ERR(rq))
-		return rq;
-
-	err = emit_recurse_batch(spin, rq, arbitration_command);
-	if (err) {
-		i915_request_add(rq);
-		return ERR_PTR(err);
-	}
-
-	return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
-	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
-	return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
-	*spin->batch = MI_BATCH_BUFFER_END;
-	i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
-	spinner_end(spin);
-
-	i915_gem_object_unpin_map(spin->obj);
-	i915_gem_object_put(spin->obj);
-
-	i915_gem_object_unpin_map(spin->hws);
-	i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
-	if (!wait_event_timeout(rq->execute,
-				READ_ONCE(rq->global_seqno),
-				msecs_to_jiffies(10)))
-		return false;
-
-	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
-					       rq->fence.seqno),
-			     10) &&
-		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
-					    rq->fence.seqno),
-			  1000));
-}
-
 static int live_sanitycheck(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	struct spinner spin;
+	struct igt_spinner spin;
 	int err = -ENOMEM;
 
 	if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -223,7 +26,7 @@ static int live_sanitycheck(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin, i915))
+	if (igt_spinner_init(&spin, i915))
 		goto err_unlock;
 
 	ctx = kernel_context(i915);
@@ -233,14 +36,14 @@ static int live_sanitycheck(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin, rq)) {
+		if (!igt_wait_for_spinner(&spin, rq)) {
 			GEM_TRACE("spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -248,7 +51,7 @@ static int live_sanitycheck(void *arg)
 			goto err_ctx;
 		}
 
-		spinner_end(&spin);
+		igt_spinner_end(&spin);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx;
@@ -259,7 +62,7 @@ static int live_sanitycheck(void *arg)
 err_ctx:
 	kernel_context_close(ctx);
 err_spin:
-	spinner_fini(&spin);
+	igt_spinner_fini(&spin);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -271,7 +74,7 @@ static int live_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -282,34 +85,36 @@ static int live_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
 
 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
 
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -317,16 +122,16 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -334,8 +139,8 @@ static int live_preempt(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -348,9 +153,9 @@ err_ctx_lo:
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -362,7 +167,7 @@ static int live_late_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	struct i915_sched_attr attr = {};
 	enum intel_engine_id id;
@@ -374,10 +179,10 @@ static int live_late_preempt(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -391,43 +196,44 @@ static int live_late_preempt(void *arg)
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			pr_err("First context failed to start\n");
 			goto err_wedged;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_NOOP);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (wait_for_spinner(&spin_hi, rq)) {
+		if (igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("Second context overtook first?\n");
 			goto err_wedged;
 		}
 
-		attr.priority = I915_PRIORITY_MAX;
+		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
 		engine->schedule(rq, &attr);
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");
 			GEM_TRACE_DUMP();
 			goto err_wedged;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -440,9 +246,9 @@ err_ctx_lo:
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -450,8 +256,8 @@ err_unlock:
 	return err;
 
 err_wedged:
-	spinner_end(&spin_hi);
-	spinner_end(&spin_lo);
+	igt_spinner_end(&spin_hi);
+	igt_spinner_end(&spin_lo);
 	i915_gem_set_wedged(i915);
 	err = -EIO;
 	goto err_ctx_lo;
@@ -461,7 +267,7 @@ static int live_preempt_hang(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_gem_context *ctx_hi, *ctx_lo;
-	struct spinner spin_hi, spin_lo;
+	struct igt_spinner spin_hi, spin_lo;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int err = -ENOMEM;
@@ -475,10 +281,10 @@ static int live_preempt_hang(void *arg)
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_runtime_pm_get(i915);
 
-	if (spinner_init(&spin_hi, i915))
+	if (igt_spinner_init(&spin_hi, i915))
 		goto err_unlock;
 
-	if (spinner_init(&spin_lo, i915))
+	if (igt_spinner_init(&spin_lo, i915))
 		goto err_spin_hi;
 
 	ctx_hi = kernel_context(i915);
@@ -497,15 +303,15 @@ static int live_preempt_hang(void *arg)
 		if (!intel_engine_has_preemption(engine))
 			continue;
 
-		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
 
 		i915_request_add(rq);
-		if (!wait_for_spinner(&spin_lo, rq)) {
+		if (!igt_wait_for_spinner(&spin_lo, rq)) {
 			GEM_TRACE("lo spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -513,10 +319,10 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
-					    MI_ARB_CHECK);
+		rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+						MI_ARB_CHECK);
 		if (IS_ERR(rq)) {
-			spinner_end(&spin_lo);
+			igt_spinner_end(&spin_lo);
 			err = PTR_ERR(rq);
 			goto err_ctx_lo;
 		}
@@ -541,7 +347,7 @@ static int live_preempt_hang(void *arg)
 
 		engine->execlists.preempt_hang.inject_hang = false;
 
-		if (!wait_for_spinner(&spin_hi, rq)) {
+		if (!igt_wait_for_spinner(&spin_hi, rq)) {
 			GEM_TRACE("hi spinner failed to start\n");
 			GEM_TRACE_DUMP();
 			i915_gem_set_wedged(i915);
@@ -549,8 +355,8 @@ static int live_preempt_hang(void *arg)
 			goto err_ctx_lo;
 		}
 
-		spinner_end(&spin_hi);
-		spinner_end(&spin_lo);
+		igt_spinner_end(&spin_hi);
+		igt_spinner_end(&spin_lo);
 		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
 			err = -EIO;
 			goto err_ctx_lo;
@@ -563,9 +369,9 @@ err_ctx_lo:
 err_ctx_hi:
 	kernel_context_close(ctx_hi);
 err_spin_lo:
-	spinner_fini(&spin_lo);
+	igt_spinner_fini(&spin_lo);
 err_spin_hi:
-	spinner_fini(&spin_hi);
+	igt_spinner_fini(&spin_hi);
 err_unlock:
 	igt_flush_test(i915, I915_WAIT_LOCKED);
 	intel_runtime_pm_put(i915);
@@ -573,6 +379,261 @@ err_unlock:
 	return err;
 }
 
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+	return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+	return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
+
+struct preempt_smoke {
+	struct drm_i915_private *i915;
+	struct i915_gem_context **contexts;
+	struct intel_engine_cs *engine;
+	struct drm_i915_gem_object *batch;
+	unsigned int ncontext;
+	struct rnd_state prng;
+	unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+	return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+							  &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+			struct i915_gem_context *ctx, int prio,
+			struct drm_i915_gem_object *batch)
+{
+	struct i915_request *rq;
+	struct i915_vma *vma = NULL;
+	int err = 0;
+
+	if (batch) {
+		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		if (err)
+			return err;
+	}
+
+	ctx->sched.priority = prio;
+
+	rq = i915_request_alloc(smoke->engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto unpin;
+	}
+
+	if (vma) {
+		err = rq->engine->emit_bb_start(rq,
+						vma->node.start,
+						PAGE_SIZE, 0);
+		if (!err)
+			err = i915_vma_move_to_active(vma, rq, 0);
+	}
+
+	i915_request_add(rq);
+
+unpin:
+	if (vma)
+		i915_vma_unpin(vma);
+
+	return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+	struct preempt_smoke *smoke = arg;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_gem_context *ctx = smoke_context(smoke);
+		int err;
+
+		mutex_lock(&smoke->i915->drm.struct_mutex);
+		err = smoke_submit(smoke,
+				   ctx, count % I915_PRIORITY_MAX,
+				   smoke->batch);
+		mutex_unlock(&smoke->i915->drm.struct_mutex);
+		if (err)
+			return err;
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	smoke->count = count;
+	return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+	struct task_struct *tsk[I915_NUM_ENGINES] = {};
+	struct preempt_smoke arg[I915_NUM_ENGINES];
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned long count;
+	int err = 0;
+
+	mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+	for_each_engine(engine, smoke->i915, id) {
+		arg[id] = *smoke;
+		arg[id].engine = engine;
+		if (!(flags & BATCH))
+			arg[id].batch = NULL;
+		arg[id].count = 0;
+
+		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+				      "igt/smoke:%d", id);
+		if (IS_ERR(tsk[id])) {
+			err = PTR_ERR(tsk[id]);
+			break;
+		}
+		get_task_struct(tsk[id]);
+	}
+
+	count = 0;
+	for_each_engine(engine, smoke->i915, id) {
+		int status;
+
+		if (IS_ERR_OR_NULL(tsk[id]))
+			continue;
+
+		status = kthread_stop(tsk[id]);
+		if (status && !err)
+			err = status;
+
+		count += arg[id].count;
+
+		put_task_struct(tsk[id]);
+	}
+
+	mutex_lock(&smoke->i915->drm.struct_mutex);
+
+	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+		count, flags,
+		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+	return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+	enum intel_engine_id id;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		for_each_engine(smoke->engine, smoke->i915, id) {
+			struct i915_gem_context *ctx = smoke_context(smoke);
+			int err;
+
+			err = smoke_submit(smoke,
+					   ctx, random_priority(&smoke->prng),
+					   flags & BATCH ? smoke->batch : NULL);
+			if (err)
+				return err;
+
+			count++;
+		}
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+		count, flags,
+		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+	return 0;
+}
+
+static int live_preempt_smoke(void *arg)
+{
+	struct preempt_smoke smoke = {
+		.i915 = arg,
+		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+		.ncontext = 1024,
+	};
+	const unsigned int phase[] = { 0, BATCH };
+	int err = -ENOMEM;
+	u32 *cs;
+	int n;
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+		return 0;
+
+	smoke.contexts = kmalloc_array(smoke.ncontext,
+				       sizeof(*smoke.contexts),
+				       GFP_KERNEL);
+	if (!smoke.contexts)
+		return -ENOMEM;
+
+	mutex_lock(&smoke.i915->drm.struct_mutex);
+	intel_runtime_pm_get(smoke.i915);
+
+	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+	if (IS_ERR(smoke.batch)) {
+		err = PTR_ERR(smoke.batch);
+		goto err_unlock;
+	}
+
+	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_batch;
+	}
+	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+		cs[n] = MI_ARB_CHECK;
+	cs[n] = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(smoke.batch);
+
+	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
+	if (err)
+		goto err_batch;
+
+	for (n = 0; n < smoke.ncontext; n++) {
+		smoke.contexts[n] = kernel_context(smoke.i915);
+		if (!smoke.contexts[n])
+			goto err_ctx;
+	}
+
+	for (n = 0; n < ARRAY_SIZE(phase); n++) {
+		err = smoke_crescendo(&smoke, phase[n]);
+		if (err)
+			goto err_ctx;
+
+		err = smoke_random(&smoke, phase[n]);
+		if (err)
+			goto err_ctx;
+	}
+
+err_ctx:
+	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+		err = -EIO;
+
+	for (n = 0; n < smoke.ncontext; n++) {
+		if (!smoke.contexts[n])
+			break;
+		kernel_context_close(smoke.contexts[n]);
+	}
+
+err_batch:
+	i915_gem_object_put(smoke.batch);
+err_unlock:
+	intel_runtime_pm_put(smoke.i915);
+	mutex_unlock(&smoke.i915->drm.struct_mutex);
+	kfree(smoke.contexts);
+
+	return err;
+}
+
 int intel_execlists_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
@@ -580,6 +641,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_preempt),
 		SUBTEST(live_late_preempt),
 		SUBTEST(live_preempt_hang),
+		SUBTEST(live_preempt_smoke),
 	};
 
 	if (!HAS_EXECLISTS(i915))