Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c          | 127
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_buddy.c           | 720
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c             |  11
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c       |  22
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c         |   4
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h  |   6
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h  |   3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c         |  89
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c        |  67
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_timeline.c        | 845
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c             |  10
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c       |   5
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.c            |  38
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_reset.h            |  10
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c          |  34
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.h          |   9
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_wedge_me.h         |  58
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c            | 359
-rw-r--r--  drivers/gpu/drm/i915/selftests/lib_sw_fence.c         |   1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c      |  19
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c             |   3
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.c         |   6
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.h         |   4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.c        |  29
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.h        |  15
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_uncore.c          |   4
26 files changed, 1037 insertions(+), 1461 deletions(-)
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index c0b3537a5fa6..77d844ac8b71 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -4,7 +4,10 @@
* Copyright © 2018 Intel Corporation
*/
+#include <linux/kref.h>
+
#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -13,37 +16,86 @@
struct live_active {
struct i915_active base;
+ struct kref ref;
bool retired;
};
-static void __live_active_retire(struct i915_active *base)
+static void __live_get(struct live_active *active)
+{
+ kref_get(&active->ref);
+}
+
+static void __live_free(struct live_active *active)
+{
+ i915_active_fini(&active->base);
+ kfree(active);
+}
+
+static void __live_release(struct kref *ref)
+{
+ struct live_active *active = container_of(ref, typeof(*active), ref);
+
+ __live_free(active);
+}
+
+static void __live_put(struct live_active *active)
+{
+ kref_put(&active->ref, __live_release);
+}
+
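+/*
+ * Lifetime is tracked with a kref: kref_init() in __live_alloc() provides
+ * the test's own reference, while __live_active() takes a reference each
+ * time the tracker becomes active, dropped again by __live_retire() when
+ * it idles, so the struct survives until both the test and the retire
+ * callback are done with it.
+ */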
+static int __live_active(struct i915_active *base)
+{
+ struct live_active *active = container_of(base, typeof(*active), base);
+
+ __live_get(active);
+ return 0;
+}
+
+static void __live_retire(struct i915_active *base)
{
struct live_active *active = container_of(base, typeof(*active), base);
active->retired = true;
+ __live_put(active);
+}
+
+static struct live_active *__live_alloc(struct drm_i915_private *i915)
+{
+ struct live_active *active;
+
+ active = kzalloc(sizeof(*active), GFP_KERNEL);
+ if (!active)
+ return NULL;
+
+ kref_init(&active->ref);
+ i915_active_init(i915, &active->base, __live_active, __live_retire);
+
+ return active;
}
-static int __live_active_setup(struct drm_i915_private *i915,
- struct live_active *active)
+static struct live_active *
+__live_active_setup(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
struct i915_sw_fence *submit;
+ struct live_active *active;
enum intel_engine_id id;
unsigned int count = 0;
int err = 0;
- submit = heap_fence_create(GFP_KERNEL);
- if (!submit)
- return -ENOMEM;
+ active = __live_alloc(i915);
+ if (!active)
+ return ERR_PTR(-ENOMEM);
- i915_active_init(i915, &active->base, __live_active_retire);
- active->retired = false;
+ submit = heap_fence_create(GFP_KERNEL);
+ if (!submit) {
+ kfree(active);
+ return ERR_PTR(-ENOMEM);
+ }
- if (!i915_active_acquire(&active->base)) {
- pr_err("First i915_active_acquire should report being idle\n");
- err = -EINVAL;
+ err = i915_active_acquire(&active->base);
+ if (err)
goto out;
- }
for_each_engine(engine, i915, id) {
struct i915_request *rq;
@@ -58,8 +110,7 @@ static int __live_active_setup(struct drm_i915_private *i915,
submit,
GFP_KERNEL);
if (err >= 0)
- err = i915_active_ref(&active->base,
- rq->fence.context, rq);
+ err = i915_active_ref(&active->base, rq->timeline, rq);
i915_request_add(rq);
if (err) {
pr_err("Failed to track active ref!\n");
@@ -74,74 +125,92 @@ static int __live_active_setup(struct drm_i915_private *i915,
pr_err("i915_active retired before submission!\n");
err = -EINVAL;
}
- if (active->base.count != count) {
+ if (atomic_read(&active->base.count) != count) {
pr_err("i915_active not tracking all requests, found %d, expected %d\n",
- active->base.count, count);
+ atomic_read(&active->base.count), count);
err = -EINVAL;
}
out:
i915_sw_fence_commit(submit);
heap_fence_put(submit);
+ if (err) {
+ __live_put(active);
+ active = ERR_PTR(err);
+ }
- return err;
+ return active;
}
static int live_active_wait(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct live_active active;
+ struct live_active *active;
intel_wakeref_t wakeref;
- int err;
+ int err = 0;
/* Check that we get a callback when requests retire upon waiting */
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- err = __live_active_setup(i915, &active);
+ active = __live_active_setup(i915);
+ if (IS_ERR(active)) {
+ err = PTR_ERR(active);
+ goto err;
+ }
- i915_active_wait(&active.base);
- if (!active.retired) {
+ i915_active_wait(&active->base);
+ if (!active->retired) {
pr_err("i915_active not retired after waiting!\n");
err = -EINVAL;
}
- i915_active_fini(&active.base);
+ __live_put(active);
+
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
+err:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
+
return err;
}
static int live_active_retire(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct live_active active;
+ struct live_active *active;
intel_wakeref_t wakeref;
- int err;
+ int err = 0;
/* Check that we get a callback when requests are indirectly retired */
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- err = __live_active_setup(i915, &active);
+ active = __live_active_setup(i915);
+ if (IS_ERR(active)) {
+ err = PTR_ERR(active);
+ goto err;
+ }
/* waits for & retires all requests */
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- if (!active.retired) {
+ if (!active->retired) {
pr_err("i915_active not retired after flushing!\n");
err = -EINVAL;
}
- i915_active_fini(&active.base);
+ __live_put(active);
+
+err:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
+
return err;
}
@@ -152,7 +221,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_retire),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
new file mode 100644
index 000000000000..23f784eae1e7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -0,0 +1,720 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#define SZ_8G (1ULL << 33)
+
+static void __igt_dump_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block,
+ bool buddy)
+{
+ pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
+ block->header,
+ i915_buddy_block_state(block),
+ i915_buddy_block_order(block),
+ i915_buddy_block_offset(block),
+ i915_buddy_block_size(mm, block),
+ yesno(!block->parent),
+ yesno(buddy));
+}
+
+static void igt_dump_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *buddy;
+
+ __igt_dump_block(mm, block, false);
+
+ buddy = get_buddy(block);
+ if (buddy)
+ __igt_dump_block(mm, buddy, true);
+}
+
+static int igt_check_block(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block)
+{
+ struct i915_buddy_block *buddy;
+ unsigned int block_state;
+ u64 block_size;
+ u64 offset;
+ int err = 0;
+
+ block_state = i915_buddy_block_state(block);
+
+ if (block_state != I915_BUDDY_ALLOCATED &&
+ block_state != I915_BUDDY_FREE &&
+ block_state != I915_BUDDY_SPLIT) {
+ pr_err("block state mismatch\n");
+ err = -EINVAL;
+ }
+
+ block_size = i915_buddy_block_size(mm, block);
+ offset = i915_buddy_block_offset(block);
+
+ if (block_size < mm->chunk_size) {
+ pr_err("block size smaller than min size\n");
+ err = -EINVAL;
+ }
+
+ if (!is_power_of_2(block_size)) {
+ pr_err("block size not power of two\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(block_size, mm->chunk_size)) {
+ pr_err("block size not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, mm->chunk_size)) {
+ pr_err("block offset not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, block_size)) {
+ pr_err("block offset not aligned to block size\n");
+ err = -EINVAL;
+ }
+
+ buddy = get_buddy(block);
+
+ if (!buddy && block->parent) {
+ pr_err("buddy has gone fishing\n");
+ err = -EINVAL;
+ }
+
+ if (buddy) {
+ if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
+ pr_err("buddy has wrong offset\n");
+ err = -EINVAL;
+ }
+
+ if (i915_buddy_block_size(mm, buddy) != block_size) {
+ pr_err("buddy size mismatch\n");
+ err = -EINVAL;
+ }
+
+ if (i915_buddy_block_state(buddy) == block_state &&
+ block_state == I915_BUDDY_FREE) {
+ pr_err("block and its buddy are free\n");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static int igt_check_blocks(struct i915_buddy_mm *mm,
+ struct list_head *blocks,
+ u64 expected_size,
+ bool is_contiguous)
+{
+ struct i915_buddy_block *block;
+ struct i915_buddy_block *prev;
+ u64 total;
+ int err = 0;
+
+ block = NULL;
+ prev = NULL;
+ total = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ err = igt_check_block(mm, block);
+
+ if (!i915_buddy_block_is_allocated(block)) {
+ pr_err("block not allocated\n");
+ err = -EINVAL;
+ }
+
+ if (is_contiguous && prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = i915_buddy_block_offset(prev);
+ prev_block_size = i915_buddy_block_size(mm, prev);
+ offset = i915_buddy_block_offset(block);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("block offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ break;
+
+ total += i915_buddy_block_size(mm, block);
+ prev = block;
+ }
+
+ if (!err) {
+ if (total != expected_size) {
+ pr_err("size mismatch, expected=%llx, found=%llx\n",
+ expected_size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev block, dump:\n");
+ igt_dump_block(mm, prev);
+ }
+
+ if (block) {
+ pr_err("bad block, dump:\n");
+ igt_dump_block(mm, block);
+ }
+
+ return err;
+}
+
+static int igt_check_mm(struct i915_buddy_mm *mm)
+{
+ struct i915_buddy_block *root;
+ struct i915_buddy_block *prev;
+ unsigned int i;
+ u64 total;
+ int err = 0;
+
+ if (!mm->n_roots) {
+ pr_err("n_roots is zero\n");
+ return -EINVAL;
+ }
+
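+ /* one root block is expected per set bit in the total mm size */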
+ if (mm->n_roots != hweight64(mm->size)) {
+ pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
+ mm->n_roots, hweight64(mm->size));
+ return -EINVAL;
+ }
+
+ root = NULL;
+ prev = NULL;
+ total = 0;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ struct i915_buddy_block *block;
+ unsigned int order;
+
+ root = mm->roots[i];
+ if (!root) {
+ pr_err("root(%u) is NULL\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ err = igt_check_block(mm, root);
+
+ if (!i915_buddy_block_is_free(root)) {
+ pr_err("root not free\n");
+ err = -EINVAL;
+ }
+
+ order = i915_buddy_block_order(root);
+
+ if (!i) {
+ if (order != mm->max_order) {
+ pr_err("max order root missing\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = i915_buddy_block_offset(prev);
+ prev_block_size = i915_buddy_block_size(mm, prev);
+ offset = i915_buddy_block_offset(root);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ pr_err("root offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ block = list_first_entry_or_null(&mm->free_list[order],
+ struct i915_buddy_block,
+ link);
+ if (block != root) {
+ pr_err("root mismatch at order=%u\n", order);
+ err = -EINVAL;
+ }
+
+ if (err)
+ break;
+
+ prev = root;
+ total += i915_buddy_block_size(mm, root);
+ }
+
+ if (!err) {
+ if (total != mm->size) {
+ pr_err("expected mm size=%llx, found=%llx\n", mm->size,
+ total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ pr_err("prev root(%u), dump:\n", i - 1);
+ igt_dump_block(mm, prev);
+ }
+
+ if (root) {
+ pr_err("bad root(%u), dump:\n", i);
+ igt_dump_block(mm, root);
+ }
+
+ return err;
+}
+
+static void igt_mm_config(u64 *size, u64 *chunk_size)
+{
+ I915_RND_STATE(prng);
+ u64 s, ms;
+
+ /* Nothing fancy, just try to get an interesting bit pattern */
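+ /*
+ * chunk_size: a random power of two, at least 4K; size: a random
+ * value below 8G, rounded down to a multiple of the chunk (and at
+ * least one chunk).
+ */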
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+
+ s = i915_prandom_u64_state(&prng) & (SZ_8G - 1);
+ ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12)));
+ s = max(s & -ms, ms);
+
+ *chunk_size = ms;
+ *size = s;
+}
+
+static int igt_buddy_alloc_smoke(void *arg)
+{
+ struct i915_buddy_mm mm;
+ int max_order;
+ u64 chunk_size;
+ u64 mm_size;
+ int err;
+
+ igt_mm_config(&mm_size, &chunk_size);
+
+ pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
+
+ err = i915_buddy_init(&mm, mm_size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ for (max_order = mm.max_order; max_order >= 0; max_order--) {
+ struct i915_buddy_block *block;
+ int order;
+ LIST_HEAD(blocks);
+ u64 total;
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort\n");
+ break;
+ }
+
+ pr_info("filling from max_order=%u\n", max_order);
+
+ order = max_order;
+ total = 0;
+
+ do {
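+ /*
+ * Fill from the largest order downwards; if no block of the
+ * current order is available, fall back one order and retry
+ * before giving up.
+ */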
+retry:
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ err = PTR_ERR(block);
+ if (err == -ENOMEM) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ } else {
+ if (order--) {
+ err = 0;
+ goto retry;
+ }
+
+ pr_err("buddy_alloc with order=%d failed(%d)\n",
+ order, err);
+ }
+
+ break;
+ }
+
+ list_add_tail(&block->link, &blocks);
+
+ if (i915_buddy_block_order(block) != order) {
+ pr_err("buddy_alloc order mismatch\n");
+ err = -EINVAL;
+ break;
+ }
+
+ total += i915_buddy_block_size(&mm, block);
+ } while (total < mm.size);
+
+ if (!err)
+ err = igt_check_blocks(&mm, &blocks, total, false);
+
+ i915_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+ if (err)
+ break;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ i915_buddy_fini(&mm);
+
+ return err;
+}
+
+static int igt_buddy_alloc_pessimistic(void *arg)
+{
+ const unsigned int max_order = 16;
+ struct i915_buddy_block *block, *bn;
+ struct i915_buddy_mm mm;
+ unsigned int order;
+ LIST_HEAD(blocks);
+ int err;
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left.
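+ * (Orders 0..max_order-1 sum to 2^max_order - 1 pages out of the
+ * 2^max_order total, hence the single spare page.)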
+ */
+
+ err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order < max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* And now the last remaining block available */
+ block = i915_buddy_alloc(&mm, 0);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &blocks);
+
+ /* Should be completely full! */
+ for (order = max_order; order--; ) {
+ block = i915_buddy_alloc(&mm, order);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!\n",
+ order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ block = list_last_entry(&blocks, typeof(*block), link);
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+
+ /* As we free in increasing size, we make available larger blocks */
+ order = 1;
+ list_for_each_entry_safe(block, bn, &blocks, link) {
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ i915_buddy_free(&mm, block);
+ order++;
+ }
+
+ /* To confirm, now the whole mm should be available */
+ block = i915_buddy_alloc(&mm, max_order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ max_order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ i915_buddy_free(&mm, block);
+
+err:
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_optimistic(void *arg)
+{
+ const int max_order = 16;
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm mm;
+ LIST_HEAD(blocks);
+ int order;
+ int err;
+
+ /*
+ * Create a mm with one block of each order available, and
+ * try to allocate them all.
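+ *
+ * A size of PAGE_SIZE * (2^(max_order + 1) - 1) is all ones in
+ * binary, so the mm is rooted with exactly one block of each order.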
+ */
+
+ err = i915_buddy_init(&mm,
+ PAGE_SIZE * ((1 << (max_order + 1)) - 1),
+ PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (order = 0; order <= max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ err = PTR_ERR(block);
+ goto err;
+ }
+
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* Should be completely full! */
+ block = i915_buddy_alloc(&mm, 0);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded, it should be full!\n");
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+
+err:
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_pathological(void *arg)
+{
+ const int max_order = 16;
+ struct i915_buddy_block *block;
+ struct i915_buddy_mm mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(holes);
+ int order, top;
+ int err;
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left. Free the largest block, then whittle down again.
+ * Eventually we will have a fully 50% fragmented mm.
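+ * (A single busy page in a sub-tree is enough to stop its buddies
+ * from ever merging back into a larger block.)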
+ */
+
+ err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+ GEM_BUG_ON(mm.max_order != max_order);
+
+ for (top = max_order; top; top--) {
+ /* Make room by freeing the largest allocated block */
+ block = list_first_entry_or_null(&blocks, typeof(*block), link);
+ if (block) {
+ list_del(&block->link);
+ i915_buddy_free(&mm, block);
+ }
+
+ for (order = top; order--; ) {
+ block = i915_buddy_alloc(&mm, order);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+ order, top);
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &blocks);
+ }
+
+ /* There should be one final page for this sub-allocation */
+ block = i915_buddy_alloc(&mm, 0);
+ if (IS_ERR(block)) {
+ pr_info("buddy_alloc hit -ENOMEM for hole\n");
+ err = PTR_ERR(block);
+ goto err;
+ }
+ list_add_tail(&block->link, &holes);
+
+ block = i915_buddy_alloc(&mm, top);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!\n",
+ top, max_order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ i915_buddy_free_list(&mm, &holes);
+
+ /* Nothing larger than blocks of chunk_size now available */
+ for (order = 1; order <= max_order; order++) {
+ block = i915_buddy_alloc(&mm, order);
+ if (!IS_ERR(block)) {
+ pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!\n",
+ order);
+ list_add_tail(&block->link, &blocks);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ list_splice_tail(&holes, &blocks);
+ i915_buddy_free_list(&mm, &blocks);
+ i915_buddy_fini(&mm);
+ return err;
+}
+
+static int igt_buddy_alloc_range(void *arg)
+{
+ struct i915_buddy_mm mm;
+ unsigned long page_num;
+ LIST_HEAD(blocks);
+ u64 chunk_size;
+ u64 offset;
+ u64 size;
+ u64 rem;
+ int err;
+
+ igt_mm_config(&size, &chunk_size);
+
+ pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
+
+ err = i915_buddy_init(&mm, size, chunk_size);
+ if (err) {
+ pr_err("buddy_init failed(%d)\n", err);
+ return err;
+ }
+
+ err = igt_check_mm(&mm);
+ if (err) {
+ pr_err("pre-mm check failed, abort, abort, abort!\n");
+ goto err_fini;
+ }
+
+ rem = mm.size;
+ offset = 0;
+
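+ /*
+ * Carve up the whole mm into contiguous ranges, using prime page
+ * counts to vary the size and alignment of each request.
+ */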
+ for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
+ struct i915_buddy_block *block;
+ LIST_HEAD(tmp);
+
+ size = min(page_num * mm.chunk_size, rem);
+
+ err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("alloc_range hit -ENOMEM with size=%llx\n",
+ size);
+ } else {
+ pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
+ offset, size, err);
+ }
+
+ break;
+ }
+
+ block = list_first_entry_or_null(&tmp,
+ struct i915_buddy_block,
+ link);
+ if (!block) {
+ pr_err("alloc_range has no blocks\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_buddy_block_offset(block) != offset) {
+ pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
+ i915_buddy_block_offset(block), offset);
+ err = -EINVAL;
+ }
+
+ if (!err)
+ err = igt_check_blocks(&mm, &tmp, size, true);
+
+ list_splice_tail(&tmp, &blocks);
+
+ if (err)
+ break;
+
+ offset += size;
+
+ rem -= size;
+ if (!rem)
+ break;
+ }
+
+ if (err == -ENOMEM)
+ err = 0;
+
+ i915_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ err = igt_check_mm(&mm);
+ if (err)
+ pr_err("post-mm check failed\n");
+ }
+
+err_fini:
+ i915_buddy_fini(&mm);
+
+ return err;
+}
+
+int i915_buddy_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_buddy_alloc_pessimistic),
+ SUBTEST(igt_buddy_alloc_optimistic),
+ SUBTEST(igt_buddy_alloc_pathological),
+ SUBTEST(igt_buddy_alloc_smoke),
+ SUBTEST(igt_buddy_alloc_range),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index c6a01a6e87f1..bb6dd54a6ff3 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -8,6 +8,7 @@
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -115,7 +116,7 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- intel_gt_sanitize(i915, false);
+ intel_gt_sanitize(&i915->gt, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
}
@@ -154,8 +155,6 @@ static int igt_gem_suspend(void *arg)
mutex_lock(&i915->drm.struct_mutex);
err = switch_to_context(i915, ctx);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
out:
mock_file_free(i915, file);
@@ -195,8 +194,6 @@ static int igt_gem_hibernate(void *arg)
mutex_lock(&i915->drm.struct_mutex);
err = switch_to_context(i915, ctx);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
out:
mock_file_free(i915, file);
@@ -210,8 +207,8 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_hibernate),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
- return i915_subtests(tests, i915);
+ return i915_live_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index a3cb0aade6f1..cb30c669b1b7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -25,6 +25,7 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
#include "i915_selftest.h"
@@ -47,26 +48,29 @@ static int populate_ggtt(struct drm_i915_private *i915,
{
unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
- u64 size;
count = 0;
- for (size = 0;
- size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- size += I915_GTT_PAGE_SIZE) {
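+	/* Fill the GGTT one page at a time until we hit -ENOSPC */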
+ do {
struct i915_vma *vma;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- quirk_add(obj, objects);
-
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ if (vma == ERR_PTR(-ENOSPC))
+ break;
+
return PTR_ERR(vma);
+ }
+ quirk_add(obj, objects);
count++;
- }
+ } while (1);
+ pr_debug("Filled GGTT with %lu pages [%llu total]\n",
+ count, i915->ggtt.vm.total / PAGE_SIZE);
bound = 0;
unbound = 0;
@@ -557,7 +561,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 1a60b9fe8221..31a51ca1ddcb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -208,9 +208,7 @@ static int igt_ppgtt_alloc(void *arg)
}
err_ppgtt_cleanup:
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_vm_put(&ppgtt->vm);
- mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -1195,7 +1193,7 @@ static int igt_ggtt_page(void *arg)
iowrite32(n, vaddr + n);
io_mapping_unmap_atomic(vaddr);
}
- i915_gem_flush_ggtt_writes(i915);
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
i915_random_reorder(order, count, &prng);
for (n = 0; n < count; n++) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index d5dc4427d664..1ccf0f731ac0 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -12,7 +12,9 @@
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(workarounds, intel_workarounds_live_selftests)
-selftest(timelines, i915_timeline_live_selftests)
+selftest(gt_engines, intel_engine_live_selftests)
+selftest(gt_timelines, intel_timeline_live_selftests)
+selftest(gt_contexts, intel_context_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
@@ -24,7 +26,7 @@ selftest(gtt, i915_gem_gtt_live_selftests)
selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
-selftest(contexts, i915_gem_context_live_selftests)
+selftest(gem_contexts, i915_gem_context_live_selftests)
selftest(blt, i915_gem_object_blt_live_selftests)
selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 510eb176bb2c..b88084fe3269 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -15,7 +15,7 @@ selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
-selftest(timelines, i915_timeline_mock_selftests)
+selftest(timelines, intel_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
selftest(phys, i915_gem_phys_mock_selftests)
@@ -25,3 +25,4 @@ selftest(evict, i915_gem_evict_mock_selftests)
selftest(gtt, i915_gem_gtt_mock_selftests)
selftest(hugepages, i915_gem_huge_page_mock_selftests)
selftest(contexts, i915_gem_context_mock_selftests)
+selftest(buddy, i915_buddy_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 298bb7116c51..b3688543ed7d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -27,6 +27,8 @@
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
+#include "gt/intel_gt.h"
+
#include "i915_random.h"
#include "i915_selftest.h"
#include "igt_live_test.h"
@@ -44,9 +46,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0],
- i915->kernel_context,
- HZ / 10);
+ request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10);
if (!request)
goto out_unlock;
@@ -68,60 +68,63 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0]->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_unlock;
}
+ i915_request_get(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_completed(request)) {
pr_err("request completed before submit!!\n");
- goto out_unlock;
+ goto out_request;
}
i915_request_add(request);
if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_completed(request)) {
pr_err("request completed immediately!\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out!\n");
- goto out_unlock;
+ goto out_request;
}
if (!i915_request_completed(request)) {
pr_err("request not complete after waiting!\n");
- goto out_unlock;
+ goto out_request;
}
if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
- goto out_unlock;
+ goto out_request;
}
err = 0;
+out_request:
+ i915_request_put(request);
out_unlock:
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
@@ -138,7 +141,7 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0]->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_locked;
@@ -191,11 +194,15 @@ static int igt_request_rewind(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_request *request, *vip;
struct i915_gem_context *ctx[2];
+ struct intel_context *ce;
int err = -EINVAL;
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
- request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ);
+ ce = i915_gem_context_get_engine(ctx[0], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ request = mock_request(ce, 2 * HZ);
+ intel_context_put(ce);
if (!request) {
err = -ENOMEM;
goto err_context_0;
@@ -205,7 +212,10 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
- vip = mock_request(i915->engine[RCS0], ctx[1], 0);
+ ce = i915_gem_context_get_engine(ctx[1], RCS0);
+ GEM_BUG_ON(IS_ERR(ce));
+ vip = mock_request(ce, 0);
+ intel_context_put(ce);
if (!vip) {
err = -ENOMEM;
goto err_context_1;
@@ -254,22 +264,19 @@ struct smoketest {
struct i915_gem_context **contexts;
atomic_long_t num_waits, num_fences;
int ncontexts, max_batch;
- struct i915_request *(*request_alloc)(struct i915_gem_context *,
- struct intel_engine_cs *);
+ struct i915_request *(*request_alloc)(struct intel_context *ce);
};
static struct i915_request *
-__mock_request_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+__mock_request_alloc(struct intel_context *ce)
{
- return mock_request(engine, ctx, 0);
+ return mock_request(ce, 0);
}
static struct i915_request *
-__live_request_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+__live_request_alloc(struct intel_context *ce)
{
- return igt_request_alloc(ctx, engine);
+ return intel_context_create_request(ce);
}
static int __igt_breadcrumbs_smoketest(void *arg)
@@ -328,10 +335,14 @@ static int __igt_breadcrumbs_smoketest(void *arg)
struct i915_gem_context *ctx =
t->contexts[order[n] % t->ncontexts];
struct i915_request *rq;
+ struct intel_context *ce;
mutex_lock(BKL);
- rq = t->request_alloc(ctx, t->engine);
+ ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
+ GEM_BUG_ON(IS_ERR(ce));
+ rq = t->request_alloc(ce);
+ intel_context_put(ce);
if (IS_ERR(rq)) {
mutex_unlock(BKL);
err = PTR_ERR(rq);
@@ -366,14 +377,16 @@ static int __igt_breadcrumbs_smoketest(void *arg)
if (!wait_event_timeout(wait->wait,
i915_sw_fence_done(wait),
- HZ / 2)) {
+ 5 * HZ)) {
struct i915_request *rq = requests[count - 1];
- pr_err("waiting for %d fences (last %llx:%lld) on %s timed out!\n",
- count,
+ pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n",
+ atomic_read(&wait->pending), count,
rq->fence.context, rq->fence.seqno,
t->engine->name);
- i915_gem_set_wedged(t->engine->i915);
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(t->engine->gt);
GEM_BUG_ON(!i915_request_completed(rq));
i915_sw_fence_wait(wait);
err = -EIO;
@@ -622,7 +635,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(&i915->gt);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -791,7 +804,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
__i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(&i915->gt);
return vma;
@@ -809,7 +822,7 @@ static int recursive_batch_resolve(struct i915_vma *batch)
return PTR_ERR(cmd);
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(batch->vm->i915);
+ intel_gt_chipset_flush(batch->vm->gt);
i915_gem_object_unpin_map(batch->obj);
@@ -863,7 +876,9 @@ static int live_all_engines(void *arg)
request[id]->batch = batch;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_request_await_object(request[id], batch->obj, 0);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[id], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
@@ -979,7 +994,9 @@ static int live_sequential_engines(void *arg)
request[id]->batch = batch;
i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, request[id], 0);
+ err = i915_request_await_object(request[id], batch->obj, false);
+ if (err == 0)
+ err = i915_vma_move_to_active(batch, request[id], 0);
i915_vma_unlock(batch);
GEM_BUG_ON(err);
@@ -1031,7 +1048,7 @@ out_request:
I915_MAP_WC);
if (!IS_ERR(cmd)) {
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ intel_gt_chipset_flush(engine->gt);
i915_gem_object_unpin_map(request[id]->batch->obj);
}
@@ -1227,7 +1244,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_breadcrumbs_smoketest),
};
- if (i915_terminally_wedged(i915))
+ if (intel_gt_is_wedged(&i915->gt))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index b18eaefef798..438ea0eaa416 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -26,6 +26,8 @@
#include "../i915_drv.h"
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+
struct i915_selftest i915_selftest __read_mostly = {
.timeout_ms = 1000,
};
@@ -183,7 +185,7 @@ int i915_live_selftests(struct pci_dev *pdev)
if (!i915_selftest.live)
return 0;
- err = run_selftests(live, to_i915(pci_get_drvdata(pdev)));
+ err = run_selftests(live, pdev_to_i915(pdev));
if (err) {
i915_selftest.live = err;
return err;
@@ -240,7 +242,61 @@ static bool apply_subtest_filter(const char *caller, const char *name)
return result;
}
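+/*
+ * Setup/teardown hooks run around each subtest: the live variants skip
+ * the test if the GT is terminally wedged and flush outstanding work
+ * afterwards, while the nop variants leave mock selftests unchanged.
+ */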
+int __i915_nop_setup(void *data)
+{
+ return 0;
+}
+
+int __i915_nop_teardown(int err, void *data)
+{
+ return err;
+}
+
+int __i915_live_setup(void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ return intel_gt_terminally_wedged(&i915->gt);
+}
+
+int __i915_live_teardown(int err, void *data)
+{
+ struct drm_i915_private *i915 = data;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(i915);
+
+ return err;
+}
+
+int __intel_gt_live_setup(void *data)
+{
+ struct intel_gt *gt = data;
+
+ return intel_gt_terminally_wedged(gt);
+}
+
+int __intel_gt_live_teardown(int err, void *data)
+{
+ struct intel_gt *gt = data;
+
+ mutex_lock(&gt->i915->drm.struct_mutex);
+ if (igt_flush_test(gt->i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ mutex_unlock(&gt->i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(gt->i915);
+
+ return err;
+}
+
int __i915_subtests(const char *caller,
+ int (*setup)(void *data),
+ int (*teardown)(int err, void *data),
const struct i915_subtest *st,
unsigned int count,
void *data)
@@ -255,10 +311,17 @@ int __i915_subtests(const char *caller,
if (!apply_subtest_filter(caller, st->name))
continue;
+ err = setup(data);
+ if (err) {
+ pr_err(DRIVER_NAME "/%s: setup failed for %s\n",
+ caller, st->name);
+ return err;
+ }
+
pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
GEM_TRACE("Running %s/%s\n", caller, st->name);
- err = st->func(data);
+ err = teardown(st->func(data), data);
if (err && err != -EINTR) {
pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
caller, st->name, err);
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
deleted file mode 100644
index 76d3977f1d4b..000000000000
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ /dev/null
@@ -1,845 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
- */
-
-#include <linux/prime_numbers.h>
-
-#include "gem/i915_gem_pm.h"
-
-#include "i915_random.h"
-#include "i915_selftest.h"
-
-#include "igt_flush_test.h"
-#include "mock_gem_device.h"
-#include "mock_timeline.h"
-
-static struct page *hwsp_page(struct i915_timeline *tl)
-{
- struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- return sg_page(obj->mm.pages->sgl);
-}
-
-static unsigned long hwsp_cacheline(struct i915_timeline *tl)
-{
- unsigned long address = (unsigned long)page_address(hwsp_page(tl));
-
- return (address + tl->hwsp_offset) / CACHELINE_BYTES;
-}
-
-#define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES)
-
-struct mock_hwsp_freelist {
- struct drm_i915_private *i915;
- struct radix_tree_root cachelines;
- struct i915_timeline **history;
- unsigned long count, max;
- struct rnd_state prng;
-};
-
-enum {
- SHUFFLE = BIT(0),
-};
-
-static void __mock_hwsp_record(struct mock_hwsp_freelist *state,
- unsigned int idx,
- struct i915_timeline *tl)
-{
- tl = xchg(&state->history[idx], tl);
- if (tl) {
- radix_tree_delete(&state->cachelines, hwsp_cacheline(tl));
- i915_timeline_put(tl);
- }
-}
-
-static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
- unsigned int count,
- unsigned int flags)
-{
- struct i915_timeline *tl;
- unsigned int idx;
-
- while (count--) {
- unsigned long cacheline;
- int err;
-
- tl = i915_timeline_create(state->i915, NULL);
- if (IS_ERR(tl))
- return PTR_ERR(tl);
-
- cacheline = hwsp_cacheline(tl);
- err = radix_tree_insert(&state->cachelines, cacheline, tl);
- if (err) {
- if (err == -EEXIST) {
- pr_err("HWSP cacheline %lu already used; duplicate allocation!\n",
- cacheline);
- }
- i915_timeline_put(tl);
- return err;
- }
-
- idx = state->count++ % state->max;
- __mock_hwsp_record(state, idx, tl);
- }
-
- if (flags & SHUFFLE)
- i915_prandom_shuffle(state->history,
- sizeof(*state->history),
- min(state->count, state->max),
- &state->prng);
-
- count = i915_prandom_u32_max_state(min(state->count, state->max),
- &state->prng);
- while (count--) {
- idx = --state->count % state->max;
- __mock_hwsp_record(state, idx, NULL);
- }
-
- return 0;
-}
-
-static int mock_hwsp_freelist(void *arg)
-{
- struct mock_hwsp_freelist state;
- const struct {
- const char *name;
- unsigned int flags;
- } phases[] = {
- { "linear", 0 },
- { "shuffled", SHUFFLE },
- { },
- }, *p;
- unsigned int na;
- int err = 0;
-
- INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL);
- state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed);
-
- state.i915 = mock_gem_device();
- if (!state.i915)
- return -ENOMEM;
-
- /*
- * Create a bunch of timelines and check that their HWSP do not overlap.
- * Free some, and try again.
- */
-
- state.max = PAGE_SIZE / sizeof(*state.history);
- state.count = 0;
- state.history = kcalloc(state.max, sizeof(*state.history), GFP_KERNEL);
- if (!state.history) {
- err = -ENOMEM;
- goto err_put;
- }
-
- mutex_lock(&state.i915->drm.struct_mutex);
- for (p = phases; p->name; p++) {
- pr_debug("%s(%s)\n", __func__, p->name);
- for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) {
- err = __mock_hwsp_timeline(&state, na, p->flags);
- if (err)
- goto out;
- }
- }
-
-out:
- for (na = 0; na < state.max; na++)
- __mock_hwsp_record(&state, na, NULL);
- mutex_unlock(&state.i915->drm.struct_mutex);
- kfree(state.history);
-err_put:
- drm_dev_put(&state.i915->drm);
- return err;
-}
-
-struct __igt_sync {
- const char *name;
- u32 seqno;
- bool expected;
- bool set;
-};
-
-static int __igt_sync(struct i915_timeline *tl,
- u64 ctx,
- const struct __igt_sync *p,
- const char *name)
-{
- int ret;
-
- if (__i915_timeline_sync_is_later(tl, ctx, p->seqno) != p->expected) {
- pr_err("%s: %s(ctx=%llu, seqno=%u) expected passed %s but failed\n",
- name, p->name, ctx, p->seqno, yesno(p->expected));
- return -EINVAL;
- }
-
- if (p->set) {
- ret = __i915_timeline_sync_set(tl, ctx, p->seqno);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int igt_sync(void *arg)
-{
- const struct __igt_sync pass[] = {
- { "unset", 0, false, false },
- { "new", 0, false, true },
- { "0a", 0, true, true },
- { "1a", 1, false, true },
- { "1b", 1, true, true },
- { "0b", 0, true, false },
- { "2a", 2, false, true },
- { "4", 4, false, true },
- { "INT_MAX", INT_MAX, false, true },
- { "INT_MAX-1", INT_MAX-1, true, false },
- { "INT_MAX+1", (u32)INT_MAX+1, false, true },
- { "INT_MAX", INT_MAX, true, false },
- { "UINT_MAX", UINT_MAX, false, true },
- { "wrap", 0, false, true },
- { "unwrap", UINT_MAX, true, false },
- {},
- }, *p;
- struct i915_timeline tl;
- int order, offset;
- int ret = -ENODEV;
-
- mock_timeline_init(&tl, 0);
- for (p = pass; p->name; p++) {
- for (order = 1; order < 64; order++) {
- for (offset = -1; offset <= (order > 1); offset++) {
- u64 ctx = BIT_ULL(order) + offset;
-
- ret = __igt_sync(&tl, ctx, p, "1");
- if (ret)
- goto out;
- }
- }
- }
- mock_timeline_fini(&tl);
-
- mock_timeline_init(&tl, 0);
- for (order = 1; order < 64; order++) {
- for (offset = -1; offset <= (order > 1); offset++) {
- u64 ctx = BIT_ULL(order) + offset;
-
- for (p = pass; p->name; p++) {
- ret = __igt_sync(&tl, ctx, p, "2");
- if (ret)
- goto out;
- }
- }
- }
-
-out:
- mock_timeline_fini(&tl);
- return ret;
-}
-
-static unsigned int random_engine(struct rnd_state *rnd)
-{
- return i915_prandom_u32_max_state(I915_NUM_ENGINES, rnd);
-}
-
-static int bench_sync(void *arg)
-{
- struct rnd_state prng;
- struct i915_timeline tl;
- unsigned long end_time, count;
- u64 prng32_1M;
- ktime_t kt;
- int order, last_order;
-
- mock_timeline_init(&tl, 0);
-
- /* Lookups from cache are very fast and so the random number generation
- * and the loop itself becomes a significant factor in the per-iteration
- * timings. We try to compensate the results by measuring the overhead
- * of the prng and subtract it from the reported results.
- */
- prandom_seed_state(&prng, i915_selftest.random_seed);
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- u32 x;
-
- /* Make sure the compiler doesn't optimise away the prng call */
- WRITE_ONCE(x, prandom_u32_state(&prng));
-
- count++;
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- pr_debug("%s: %lu random evaluations, %lluns/prng\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
- prng32_1M = div64_ul(ktime_to_ns(kt) << 20, count);
-
- /* Benchmark (only) setting random context ids */
- prandom_seed_state(&prng, i915_selftest.random_seed);
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- u64 id = i915_prandom_u64_state(&prng);
-
- __i915_timeline_sync_set(&tl, id, 0);
- count++;
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
- pr_info("%s: %lu random insertions, %lluns/insert\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-
- /* Benchmark looking up the exact same context ids as we just set */
- prandom_seed_state(&prng, i915_selftest.random_seed);
- end_time = count;
- kt = ktime_get();
- while (end_time--) {
- u64 id = i915_prandom_u64_state(&prng);
-
- if (!__i915_timeline_sync_is_later(&tl, id, 0)) {
- mock_timeline_fini(&tl);
- pr_err("Lookup of %llu failed\n", id);
- return -EINVAL;
- }
- }
- kt = ktime_sub(ktime_get(), kt);
- kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
- pr_info("%s: %lu random lookups, %lluns/lookup\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-
- mock_timeline_fini(&tl);
- cond_resched();
-
- mock_timeline_init(&tl, 0);
-
- /* Benchmark setting the first N (in order) contexts */
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- __i915_timeline_sync_set(&tl, count++, 0);
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- pr_info("%s: %lu in-order insertions, %lluns/insert\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-
- /* Benchmark looking up the exact same context ids as we just set */
- end_time = count;
- kt = ktime_get();
- while (end_time--) {
- if (!__i915_timeline_sync_is_later(&tl, end_time, 0)) {
- pr_err("Lookup of %lu failed\n", end_time);
- mock_timeline_fini(&tl);
- return -EINVAL;
- }
- }
- kt = ktime_sub(ktime_get(), kt);
- pr_info("%s: %lu in-order lookups, %lluns/lookup\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
-
- mock_timeline_fini(&tl);
- cond_resched();
-
- mock_timeline_init(&tl, 0);
-
- /* Benchmark searching for a random context id and maybe changing it */
- prandom_seed_state(&prng, i915_selftest.random_seed);
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- u32 id = random_engine(&prng);
- u32 seqno = prandom_u32_state(&prng);
-
- if (!__i915_timeline_sync_is_later(&tl, id, seqno))
- __i915_timeline_sync_set(&tl, id, seqno);
-
- count++;
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- kt = ktime_sub_ns(kt, (count * prng32_1M * 2) >> 20);
- pr_info("%s: %lu repeated insert/lookups, %lluns/op\n",
- __func__, count, (long long)div64_ul(ktime_to_ns(kt), count));
- mock_timeline_fini(&tl);
- cond_resched();
-
- /* Benchmark searching for a known context id and changing the seqno */
- for (last_order = 1, order = 1; order < 32;
- ({ int tmp = last_order; last_order = order; order += tmp; })) {
- unsigned int mask = BIT(order) - 1;
-
- mock_timeline_init(&tl, 0);
-
- count = 0;
- kt = ktime_get();
- end_time = jiffies + HZ/10;
- do {
- /* Without assuming too many details of the underlying
- * implementation, try to identify its phase-changes
- * (if any)!
- */
- u64 id = (u64)(count & mask) << order;
-
- __i915_timeline_sync_is_later(&tl, id, 0);
- __i915_timeline_sync_set(&tl, id, 0);
-
- count++;
- } while (!time_after(jiffies, end_time));
- kt = ktime_sub(ktime_get(), kt);
- pr_info("%s: %lu cyclic/%d insert/lookups, %lluns/op\n",
- __func__, count, order,
- (long long)div64_ul(ktime_to_ns(kt), count));
- mock_timeline_fini(&tl);
- cond_resched();
- }
-
- return 0;
-}
-
-int i915_timeline_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(mock_hwsp_freelist),
- SUBTEST(igt_sync),
- SUBTEST(bench_sync),
- };
-
- return i915_subtests(tests, NULL);
-}
-
-static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
-{
- u32 *cs;
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- if (INTEL_GEN(rq->i915) >= 8) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = addr;
- *cs++ = 0;
- *cs++ = value;
- } else if (INTEL_GEN(rq->i915) >= 4) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = 0;
- *cs++ = addr;
- *cs++ = value;
- } else {
- *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cs++ = addr;
- *cs++ = value;
- *cs++ = MI_NOOP;
- }
-
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
-static struct i915_request *
-tl_write(struct i915_timeline *tl, struct intel_engine_cs *engine, u32 value)
-{
- struct i915_request *rq;
- int err;
-
- lockdep_assert_held(&tl->i915->drm.struct_mutex); /* lazy rq refs */
-
- err = i915_timeline_pin(tl);
- if (err) {
- rq = ERR_PTR(err);
- goto out;
- }
-
- rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq))
- goto out_unpin;
-
- err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
- i915_request_add(rq);
- if (err)
- rq = ERR_PTR(err);
-
-out_unpin:
- i915_timeline_unpin(tl);
-out:
- if (IS_ERR(rq))
- pr_err("Failed to write to timeline!\n");
- return rq;
-}
-
-static struct i915_timeline *
-checked_i915_timeline_create(struct drm_i915_private *i915)
-{
- struct i915_timeline *tl;
-
- tl = i915_timeline_create(i915, NULL);
- if (IS_ERR(tl))
- return tl;
-
- if (*tl->hwsp_seqno != tl->seqno) {
- pr_err("Timeline created with incorrect breadcrumb, found %x, expected %x\n",
- *tl->hwsp_seqno, tl->seqno);
- i915_timeline_put(tl);
- return ERR_PTR(-EINVAL);
- }
-
- return tl;
-}
-
-static int live_hwsp_engine(void *arg)
-{
-#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- unsigned long count, n;
- int err = 0;
-
- /*
- * Create a bunch of timelines and check we can write
- * independently to each of their breadcrumb slots.
- */
-
- timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
- sizeof(*timelines),
- GFP_KERNEL);
- if (!timelines)
- return -ENOMEM;
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- count = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- for (n = 0; n < NUM_TIMELINES; n++) {
- struct i915_timeline *tl;
- struct i915_request *rq;
-
- tl = checked_i915_timeline_create(i915);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out;
- }
-
- rq = tl_write(tl, engine, count);
- if (IS_ERR(rq)) {
- i915_timeline_put(tl);
- err = PTR_ERR(rq);
- goto out;
- }
-
- timelines[count++] = tl;
- }
- }
-
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
-
- for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
-
- if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
- err = -EINVAL;
- }
- i915_timeline_put(tl);
- }
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- kvfree(timelines);
-
- return err;
-#undef NUM_TIMELINES
-}
-
-static int live_hwsp_alternate(void *arg)
-{
-#define NUM_TIMELINES 4096
- struct drm_i915_private *i915 = arg;
- struct i915_timeline **timelines;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- unsigned long count, n;
- int err = 0;
-
- /*
- * Create a bunch of timelines and check we can write
- * independently to each of their breadcrumb slots with adjacent
- * engines.
- */
-
- timelines = kvmalloc_array(NUM_TIMELINES * I915_NUM_ENGINES,
- sizeof(*timelines),
- GFP_KERNEL);
- if (!timelines)
- return -ENOMEM;
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- count = 0;
- for (n = 0; n < NUM_TIMELINES; n++) {
- for_each_engine(engine, i915, id) {
- struct i915_timeline *tl;
- struct i915_request *rq;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- tl = checked_i915_timeline_create(i915);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out;
- }
-
- rq = tl_write(tl, engine, count);
- if (IS_ERR(rq)) {
- i915_timeline_put(tl);
- err = PTR_ERR(rq);
- goto out;
- }
-
- timelines[count++] = tl;
- }
- }
-
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
-
- for (n = 0; n < count; n++) {
- struct i915_timeline *tl = timelines[n];
-
- if (!err && *tl->hwsp_seqno != n) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- n, *tl->hwsp_seqno);
- err = -EINVAL;
- }
- i915_timeline_put(tl);
- }
-
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- kvfree(timelines);
-
- return err;
-#undef NUM_TIMELINES
-}
-
-static int live_hwsp_wrap(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
- struct i915_timeline *tl;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- int err = 0;
-
- /*
- * Across a seqno wrap, we need to keep the old cacheline alive for
- * foreign GPU references.
- */
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- tl = i915_timeline_create(i915, NULL);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out_rpm;
- }
- if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
- goto out_free;
-
- err = i915_timeline_pin(tl);
- if (err)
- goto out_free;
-
- for_each_engine(engine, i915, id) {
- const u32 *hwsp_seqno[2];
- struct i915_request *rq;
- u32 seqno[2];
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- rq = i915_request_create(engine->kernel_context);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out;
- }
-
- tl->seqno = -4u;
-
- err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
- seqno[0], tl->hwsp_offset);
-
- err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- hwsp_seqno[0] = tl->hwsp_seqno;
-
- err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
- seqno[1], tl->hwsp_offset);
-
- err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
- if (err) {
- i915_request_add(rq);
- goto out;
- }
- hwsp_seqno[1] = tl->hwsp_seqno;
-
- /* With wrap should come a new hwsp */
- GEM_BUG_ON(seqno[1] >= seqno[0]);
- GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
-
- i915_request_add(rq);
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("Wait for timeline writes timed out!\n");
- err = -EIO;
- goto out;
- }
-
- if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
- pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
- *hwsp_seqno[0], *hwsp_seqno[1],
- seqno[0], seqno[1]);
- err = -EINVAL;
- goto out;
- }
-
- i915_retire_requests(i915); /* recycle HWSP */
- }
-
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
-
- i915_timeline_unpin(tl);
-out_free:
- i915_timeline_put(tl);
-out_rpm:
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
-}
-
-static int live_hwsp_recycle(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- unsigned long count;
- int err = 0;
-
- /*
- * Check seqno writes into one timeline at a time. We expect to
- * recycle the breadcrumb slot between iterations and want to
- * confuse neither ourselves nor the GPU.
- */
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
-
- count = 0;
- for_each_engine(engine, i915, id) {
- IGT_TIMEOUT(end_time);
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- do {
- struct i915_timeline *tl;
- struct i915_request *rq;
-
- tl = checked_i915_timeline_create(i915);
- if (IS_ERR(tl)) {
- err = PTR_ERR(tl);
- goto out;
- }
-
- rq = tl_write(tl, engine, count);
- if (IS_ERR(rq)) {
- i915_timeline_put(tl);
- err = PTR_ERR(rq);
- goto out;
- }
-
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- pr_err("Wait for timeline writes timed out!\n");
- i915_timeline_put(tl);
- err = -EIO;
- goto out;
- }
-
- if (*tl->hwsp_seqno != count) {
- pr_err("Invalid seqno stored in timeline %lu, found 0x%x\n",
- count, *tl->hwsp_seqno);
- err = -EINVAL;
- }
-
- i915_timeline_put(tl);
- count++;
-
- if (err)
- goto out;
-
- i915_timelines_park(i915); /* Encourage recycling! */
- } while (!__igt_timeout(end_time, NULL));
- }
-
-out:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
-
- return err;
-}
-
-int i915_timeline_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(live_hwsp_recycle),
- SUBTEST(live_hwsp_engine),
- SUBTEST(live_hwsp_alternate),
- SUBTEST(live_hwsp_wrap),
- };
-
- if (i915_terminally_wedged(i915))
- return 0;
-
- return i915_subtests(tests, i915);
-}
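
The wrap test above seeds tl->seqno near U32_MAX so that two back-to-back
allocations straddle the 32-bit wrap, which is why it asserts that
seqno[1] >= seqno[0] must be false while both values still land in the HWSP.
The ordering primitive this relies on is the usual wrap-safe signed-delta
comparison; a minimal sketch, assuming kernel types from <linux/types.h>
(the driver's own helper is i915_seqno_passed, reproduced here only for
illustration):

	/* Wrap-safe "a is at or after b": correct while the two seqnos
	 * are within 2^31 of each other, which retirement guarantees.
	 */
	static inline bool seqno_passed(u32 a, u32 b)
	{
		return (s32)(a - b) >= 0;
	}

	/* e.g. across the wrap: a = 2, b = 0xfffffffe */
	/* (s32)(2 - 0xfffffffe) == 4 >= 0, so a "passes" b */
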
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fbc79b14823a..a5bec0a4cdcc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -193,6 +193,8 @@ static int igt_vma_create(void *arg)
list_del_init(&ctx->link);
mock_context_close(ctx);
}
+
+ cond_resched();
}
end:
@@ -341,6 +343,8 @@ static int igt_vma_pin1(void *arg)
goto out;
}
}
+
+ cond_resched();
}
err = 0;
@@ -597,6 +601,8 @@ static int igt_vma_rotate_remap(void *arg)
}
i915_vma_unpin(vma);
+
+ cond_resched();
}
}
}
@@ -752,6 +758,8 @@ static int igt_vma_partial(void *arg)
i915_vma_unpin(vma);
nvma++;
+
+ cond_resched();
}
}
@@ -961,6 +969,8 @@ static int igt_vma_remapped_gtt(void *arg)
}
}
i915_vma_unpin_iomap(vma);
+
+ cond_resched();
}
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index 5bfd1b2626a2..d3b5eb402d33 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -5,6 +5,7 @@
*/
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_selftest.h"
@@ -13,7 +14,7 @@
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
{
- int ret = i915_terminally_wedged(i915) ? -EIO : 0;
+ int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0;
int repeat = !!(flags & I915_WAIT_LOCKED);
cond_resched();
@@ -27,7 +28,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
__builtin_return_address(0));
GEM_TRACE_DUMP();
- i915_gem_set_wedged(i915);
+ intel_gt_set_wedged(&i915->gt);
repeat = 0;
ret = -EIO;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
index 587df6fd4ffe..7ec8f8b049c6 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.c
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -7,47 +7,45 @@
#include "igt_reset.h"
#include "gt/intel_engine.h"
+#include "gt/intel_gt.h"
#include "../i915_drv.h"
-void igt_global_reset_lock(struct drm_i915_private *i915)
+void igt_global_reset_lock(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- pr_debug("%s: current gpu_error=%08lx\n",
- __func__, i915->gpu_error.flags);
+ pr_debug("%s: current gpu_error=%08lx\n", __func__, gt->reset.flags);
- while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
+ while (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags))
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, gt->i915, id) {
while (test_and_set_bit(I915_RESET_ENGINE + id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_ENGINE + id,
+ &gt->reset.flags))
+ wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id,
TASK_UNINTERRUPTIBLE);
}
}
-void igt_global_reset_unlock(struct drm_i915_private *i915)
+void igt_global_reset_unlock(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ for_each_engine(engine, gt->i915, id)
+ clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
+ clear_bit(I915_RESET_BACKOFF, &gt->reset.flags);
+ wake_up_all(&gt->reset.queue);
}
-bool igt_force_reset(struct drm_i915_private *i915)
+bool igt_force_reset(struct intel_gt *gt)
{
- i915_gem_set_wedged(i915);
- i915_reset(i915, 0, NULL);
+ intel_gt_set_wedged(gt);
+ intel_gt_reset(gt, 0, NULL);
- return !i915_reset_failed(i915);
+ return !intel_gt_is_wedged(gt);
}
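
For tests migrating with this interface change, the bracketing pattern is
unchanged; only the handle moves from the device to the GT. A hypothetical
call-site update, assuming the gt is reached via &i915->gt as it is elsewhere
in this series:

	struct intel_gt *gt = &i915->gt;

	igt_global_reset_lock(gt);	/* exclude concurrent resets */
	if (!igt_force_reset(gt))	/* wedge, reset, verify recovery */
		err = -EIO;
	igt_global_reset_unlock(gt);	/* release engines, wake waiters */
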
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
index 363bd853e50f..851873b67ab3 100644
--- a/drivers/gpu/drm/i915/selftests/igt_reset.h
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -7,10 +7,12 @@
#ifndef __I915_SELFTESTS_IGT_RESET_H__
#define __I915_SELFTESTS_IGT_RESET_H__
-#include "../i915_drv.h"
+#include <linux/types.h>
-void igt_global_reset_lock(struct drm_i915_private *i915);
-void igt_global_reset_unlock(struct drm_i915_private *i915);
-bool igt_force_reset(struct drm_i915_private *i915);
+struct intel_gt;
+
+void igt_global_reset_lock(struct intel_gt *gt);
+void igt_global_reset_unlock(struct intel_gt *gt);
+bool igt_force_reset(struct intel_gt *gt);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 1e59b543cf27..11f04ad48e68 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,29 +3,30 @@
*
* Copyright © 2018 Intel Corporation
*/
+#include "gt/intel_gt.h"
#include "gem/selftests/igt_gem_utils.h"
#include "igt_spinner.h"
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
unsigned int mode;
void *vaddr;
int err;
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
+ GEM_BUG_ON(INTEL_GEN(gt->i915) < 8);
memset(spin, 0, sizeof(*spin));
- spin->i915 = i915;
+ spin->gt = gt;
- spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->hws)) {
err = PTR_ERR(spin->hws);
goto err;
}
- spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(spin->obj)) {
err = PTR_ERR(spin->obj);
goto err_hws;
@@ -39,7 +40,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
}
spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
- mode = i915_coherent_map_type(i915);
+ mode = i915_coherent_map_type(gt->i915);
vaddr = i915_gem_object_pin_map(spin->obj, mode);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -77,7 +78,10 @@ static int move_to_active(struct i915_vma *vma,
int err;
i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, flags);
+ err = i915_request_await_object(rq, vma->obj,
+ flags & EXEC_OBJECT_WRITE);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
return err;
@@ -85,20 +89,22 @@ static int move_to_active(struct i915_vma *vma,
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
u32 arbitration_command)
{
+ struct intel_engine_cs *engine = ce->engine;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
- vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
+ GEM_BUG_ON(spin->gt != ce->vm->gt);
+
+ vma = i915_vma_instance(spin->obj, ce->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
- hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
+ hws = i915_vma_instance(spin->hws, ce->vm, NULL);
if (IS_ERR(hws))
return ERR_CAST(hws);
@@ -110,7 +116,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
if (err)
goto unpin_vma;
- rq = igt_request_alloc(ctx, engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto unpin_hws;
@@ -138,7 +144,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = upper_32_bits(vma->node.start);
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
- i915_gem_chipset_flush(spin->i915);
+ intel_gt_chipset_flush(engine->gt);
if (engine->emit_init_breadcrumb &&
rq->timeline->has_initial_breadcrumb) {
@@ -172,7 +178,7 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
void igt_spinner_end(struct igt_spinner *spin)
{
*spin->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(spin->i915);
+ intel_gt_chipset_flush(spin->gt);
}
void igt_spinner_fini(struct igt_spinner *spin)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index 34a88ac9b47a..ec62c9ef320b 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -14,21 +14,22 @@
#include "i915_request.h"
#include "i915_selftest.h"
+struct intel_gt;
+
struct igt_spinner {
- struct drm_i915_private *i915;
+ struct intel_gt *gt;
struct drm_i915_gem_object *hws;
struct drm_i915_gem_object *obj;
u32 *batch;
void *seqno;
};
-int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt);
void igt_spinner_fini(struct igt_spinner *spin);
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
+ struct intel_context *ce,
u32 arbitration_command);
void igt_spinner_end(struct igt_spinner *spin);
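
Spinner users now initialise against a GT and build requests from a pinned
intel_context rather than a (ctx, engine) pair. A hedged call-site sketch:
spin, ce and engine stand in for state the test already holds, and
MI_ARB_CHECK is the arbitration command typically passed here:

	err = igt_spinner_init(&spin, engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}
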
diff --git a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h b/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
deleted file mode 100644
index 08e5ff11bbd9..000000000000
--- a/drivers/gpu/drm/i915/selftests/igt_wedge_me.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- */
-
-#ifndef IGT_WEDGE_ME_H
-#define IGT_WEDGE_ME_H
-
-#include <linux/workqueue.h>
-
-#include "../i915_gem.h"
-
-struct drm_i915_private;
-
-struct igt_wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const char *name;
-};
-
-static void __igt_wedge_me(struct work_struct *work)
-{
- struct igt_wedge_me *w = container_of(work, typeof(*w), work.work);
-
- pr_err("%s timed out, cancelling test.\n", w->name);
-
- GEM_TRACE("%s timed out.\n", w->name);
- GEM_TRACE_DUMP();
-
- i915_gem_set_wedged(w->i915);
-}
-
-static void __igt_init_wedge(struct igt_wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name)
-{
- w->i915 = i915;
- w->name = name;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __igt_fini_wedge(struct igt_wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define igt_wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__); \
- (W)->i915; \
- __igt_fini_wedge((W)))
-
-#endif /* IGT_WEDGE_ME_H */
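
The on-stack watchdog is removed outright rather than converted. Were the
same guard re-expressed against the intel_gt interface this series migrates
to, it would look roughly like the sketch below (illustrative only, not code
from this series):

	struct gt_wedge_me {
		struct delayed_work work;
		struct intel_gt *gt;
		const char *name;
	};

	static void __gt_wedge_me(struct work_struct *work)
	{
		struct gt_wedge_me *w =
			container_of(work, typeof(*w), work.work);

		pr_err("%s timed out, cancelling test.\n", w->name);
		intel_gt_set_wedged(w->gt);	/* declare the GT unusable */
	}
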
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
deleted file mode 100644
index 6ca8584cd64c..000000000000
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_selftest.h"
-#include "gem/i915_gem_pm.h"
-
-/* max doorbell number + negative test for each client type */
-#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
-
-static struct intel_guc_client *clients[ATTEMPTS];
-
-static bool available_dbs(struct intel_guc *guc, u32 priority)
-{
- unsigned long offset;
- unsigned long end;
- u16 id;
-
- /* first half is used for normal priority, second half for high */
- offset = 0;
- end = GUC_NUM_DOORBELLS / 2;
- if (priority <= GUC_CLIENT_PRIORITY_HIGH) {
- offset = end;
- end += offset;
- }
-
- id = find_next_zero_bit(guc->doorbell_bitmap, end, offset);
- if (id < end)
- return true;
-
- return false;
-}
-
-static int check_all_doorbells(struct intel_guc *guc)
-{
- u16 db_id;
-
- pr_info_once("Max number of doorbells: %d\n", GUC_NUM_DOORBELLS);
- for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id) {
- if (!doorbell_ok(guc, db_id)) {
- pr_err("doorbell %d, not ok\n", db_id);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-static int ring_doorbell_nop(struct intel_guc_client *client)
-{
- struct guc_process_desc *desc = __get_process_desc(client);
- int err;
-
- client->use_nop_wqi = true;
-
- spin_lock_irq(&client->wq_lock);
-
- guc_wq_item_append(client, 0, 0, 0, 0);
- guc_ring_doorbell(client);
-
- spin_unlock_irq(&client->wq_lock);
-
- client->use_nop_wqi = false;
-
- /*
- * If there are no issues, the GuC will update the WQ head and keep
- * the WQ in active status.
- */
- err = wait_for(READ_ONCE(desc->head) == READ_ONCE(desc->tail), 10);
- if (err) {
- pr_err("doorbell %u ring failed!\n", client->doorbell_id);
- return -EIO;
- }
-
- if (desc->wq_status != WQ_STATUS_ACTIVE) {
- pr_err("doorbell %u ring put WQ in bad state (%u)!\n",
- client->doorbell_id, desc->wq_status);
- return -EIO;
- }
-
- return 0;
-}
-
-/*
- * Basic client sanity check, handy to validate create_clients.
- */
-static int validate_client(struct intel_guc_client *client,
- int client_priority,
- bool is_preempt_client)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
- struct i915_gem_context *ctx_owner = is_preempt_client ?
- dev_priv->preempt_context : dev_priv->kernel_context;
-
- if (client->owner != ctx_owner ||
- client->engines != INTEL_INFO(dev_priv)->engine_mask ||
- client->priority != client_priority ||
- client->doorbell_id == GUC_DOORBELL_INVALID)
- return -EINVAL;
- else
- return 0;
-}
-
-static bool client_doorbell_in_sync(struct intel_guc_client *client)
-{
- return !client || doorbell_ok(client->guc, client->doorbell_id);
-}
-
-/*
- * Check that we're able to synchronize guc_clients with their doorbells
- *
- * We're creating clients and reserving doorbells once, at module load. During
- * module lifetime, GuC, doorbell HW, and i915 state may go out of sync due to
- * GuC being reset. In other words - GuC clients are still around, but the
- * status of their doorbells may be incorrect. This is the reason behind
- * validating that the doorbells status expected by the driver matches what the
- * GuC/HW have.
- */
-static int igt_guc_clients(void *args)
-{
- struct drm_i915_private *dev_priv = args;
- intel_wakeref_t wakeref;
- struct intel_guc *guc;
- int err = 0;
-
- GEM_BUG_ON(!HAS_GUC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
-
- err = check_all_doorbells(guc);
- if (err)
- goto unlock;
-
- /*
- * Get rid of clients created during driver load because the test will
- * recreate them.
- */
- guc_clients_disable(guc);
- guc_clients_destroy(guc);
- if (guc->execbuf_client || guc->preempt_client) {
- pr_err("guc_clients_destroy lied!\n");
- err = -EINVAL;
- goto unlock;
- }
-
- err = guc_clients_create(guc);
- if (err) {
- pr_err("Failed to create clients\n");
- goto unlock;
- }
- GEM_BUG_ON(!guc->execbuf_client);
-
- err = validate_client(guc->execbuf_client,
- GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
- if (err) {
- pr_err("execbug client validation failed\n");
- goto out;
- }
-
- if (guc->preempt_client) {
- err = validate_client(guc->preempt_client,
- GUC_CLIENT_PRIORITY_KMD_HIGH, true);
- if (err) {
- pr_err("preempt client validation failed\n");
- goto out;
- }
- }
-
- /* each client should now have reserved a doorbell */
- if (!has_doorbell(guc->execbuf_client) ||
- (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
- pr_err("guc_clients_create didn't reserve doorbells\n");
- err = -EINVAL;
- goto out;
- }
-
- /* Now enable the clients */
- guc_clients_enable(guc);
-
- /* each client should now have received a doorbell */
- if (!client_doorbell_in_sync(guc->execbuf_client) ||
- !client_doorbell_in_sync(guc->preempt_client)) {
- pr_err("failed to initialize the doorbells\n");
- err = -EINVAL;
- goto out;
- }
-
- /*
- * Basic test - an attempt to reallocate a valid doorbell to the
- * client it is currently assigned should not cause a failure.
- */
- err = create_doorbell(guc->execbuf_client);
-
-out:
- /*
- * Leave clean state for the other tests; the driver always destroys
- * the clients during unload.
- */
- guc_clients_disable(guc);
- guc_clients_destroy(guc);
- guc_clients_create(guc);
- guc_clients_enable(guc);
-unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- return err;
-}
-
-/*
- * Create as many clients as there are doorbells. Note that client(s)/
- * doorbell(s) are already created during driver load, but this test creates
- * its own and does not interact with the existing ones.
- */
-static int igt_guc_doorbells(void *arg)
-{
- struct drm_i915_private *dev_priv = arg;
- intel_wakeref_t wakeref;
- struct intel_guc *guc;
- int i, err = 0;
- u16 db_id;
-
- GEM_BUG_ON(!HAS_GUC(dev_priv));
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- guc = &dev_priv->guc;
- if (!guc) {
- pr_err("No guc object!\n");
- err = -EINVAL;
- goto unlock;
- }
-
- err = check_all_doorbells(guc);
- if (err)
- goto unlock;
-
- for (i = 0; i < ATTEMPTS; i++) {
- clients[i] = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->engine_mask,
- i % GUC_CLIENT_PRIORITY_NUM,
- dev_priv->kernel_context);
-
- if (!clients[i]) {
- pr_err("[%d] No guc client\n", i);
- err = -EINVAL;
- goto out;
- }
-
- if (IS_ERR(clients[i])) {
- if (PTR_ERR(clients[i]) != -ENOSPC) {
- pr_err("[%d] unexpected error\n", i);
- err = PTR_ERR(clients[i]);
- goto out;
- }
-
- if (available_dbs(guc, i % GUC_CLIENT_PRIORITY_NUM)) {
- pr_err("[%d] non-db related alloc fail\n", i);
- err = -EINVAL;
- goto out;
- }
-
- /* expected, ran out of dbs for this client type */
- continue;
- }
-
- /*
- * The check below is only valid because we keep a doorbell
- * assigned during the whole life of the client.
- */
- if (clients[i]->stage_id >= GUC_NUM_DOORBELLS) {
- pr_err("[%d] more clients than doorbells (%d >= %d)\n",
- i, clients[i]->stage_id, GUC_NUM_DOORBELLS);
- err = -EINVAL;
- goto out;
- }
-
- err = validate_client(clients[i],
- i % GUC_CLIENT_PRIORITY_NUM, false);
- if (err) {
- pr_err("[%d] client_alloc sanity check failed!\n", i);
- err = -EINVAL;
- goto out;
- }
-
- db_id = clients[i]->doorbell_id;
-
- err = __guc_client_enable(clients[i]);
- if (err) {
- pr_err("[%d] Failed to create a doorbell\n", i);
- goto out;
- }
-
- /* doorbell id shouldn't change while we hold the mutex */
- if (db_id != clients[i]->doorbell_id) {
- pr_err("[%d] doorbell id changed (%d != %d)\n",
- i, db_id, clients[i]->doorbell_id);
- err = -EINVAL;
- goto out;
- }
-
- err = check_all_doorbells(guc);
- if (err)
- goto out;
-
- err = ring_doorbell_nop(clients[i]);
- if (err)
- goto out;
- }
-
-out:
- for (i = 0; i < ATTEMPTS; i++)
- if (!IS_ERR_OR_NULL(clients[i])) {
- __guc_client_disable(clients[i]);
- guc_client_free(clients[i]);
- }
-unlock:
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- return err;
-}
-
-int intel_guc_live_selftest(struct drm_i915_private *dev_priv)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_guc_clients),
- SUBTEST(igt_guc_doorbells),
- };
-
- if (!USES_GUC_SUBMISSION(dev_priv))
- return 0;
-
- return i915_subtests(tests, dev_priv);
-}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
index b976c12817c5..080b90b63d16 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c
@@ -40,6 +40,7 @@ void __onstack_fence_init(struct i915_sw_fence *fence,
__init_waitqueue_head(&fence->wait, name, key);
atomic_set(&fence->pending, 1);
+ fence->error = 0;
fence->flags = (unsigned long)nop_fence_notify;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 64bc51400ae7..01a89c071bf5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,6 +25,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include "gt/intel_gt.h"
#include "gt/mock_engine.h"
#include "mock_request.h"
@@ -67,7 +68,7 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_contexts_fini(i915);
mutex_unlock(&i915->drm.struct_mutex);
- i915_timelines_fini(i915);
+ intel_timelines_fini(i915);
drain_workqueue(i915->wq);
i915_gem_drain_freed_objects(i915);
@@ -179,14 +180,9 @@ struct drm_i915_private *mock_gem_device(void)
mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
- intel_gt_pm_init(i915);
+ intel_gt_init_early(&i915->gt, i915);
atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
- init_waitqueue_head(&i915->gpu_error.wait_queue);
- init_waitqueue_head(&i915->gpu_error.reset_queue);
- init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
- mutex_init(&i915->gpu_error.wedge_mutex);
-
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
goto err_drv;
@@ -198,11 +194,7 @@ struct drm_i915_private *mock_gem_device(void)
i915->gt.awake = true;
- i915_timelines_init(i915);
-
- INIT_LIST_HEAD(&i915->gt.active_rings);
- INIT_LIST_HEAD(&i915->gt.closed_vma);
- spin_lock_init(&i915->gt.closed_lock);
+ intel_timelines_init(i915);
mutex_lock(&i915->drm.struct_mutex);
@@ -221,6 +213,7 @@ struct drm_i915_private *mock_gem_device(void)
if (mock_engine_init(i915->engine[RCS0]))
goto err_context;
+ intel_engines_driver_register(i915);
mutex_unlock(&i915->drm.struct_mutex);
WARN_ON(i915_gemfs_init(i915));
@@ -233,7 +226,7 @@ err_engine:
mock_engine_free(i915->engine[RCS0]);
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
- i915_timelines_fini(i915);
+ intel_timelines_fini(i915);
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);
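
The mock device stops open-coding the reset waitqueue, wedge mutex and GT
lists; intel_gt_init_early() is now expected to cover that state. A rough
sketch of the shape implied by the fields this diff touches (inferred, not
the function's actual body):

	void intel_gt_init_early(struct intel_gt *gt,
				 struct drm_i915_private *i915)
	{
		gt->i915 = i915;

		INIT_LIST_HEAD(&gt->active_rings);
		INIT_LIST_HEAD(&gt->closed_vma);
		spin_lock_init(&gt->closed_lock);

		init_waitqueue_head(&gt->reset.queue);	/* was gpu_error.reset_queue */
		mutex_init(&gt->reset.mutex);		/* was gpu_error.wedge_mutex */
	}
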
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index f625c307a406..e62a67e0f79c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -98,6 +98,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
{
memset(ggtt, 0, sizeof(*ggtt));
+ ggtt->vm.gt = &i915->gt;
ggtt->vm.i915 = i915;
ggtt->vm.is_ggtt = true;
@@ -116,6 +117,8 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.clear_pages = clear_pages;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
+
+ intel_gt_init_hw(i915);
}
void mock_fini_ggtt(struct i915_ggtt *ggtt)
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 9390fc09984b..09f747228dff 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -28,14 +28,12 @@
#include "mock_request.h"
struct i915_request *
-mock_request(struct intel_engine_cs *engine,
- struct i915_gem_context *context,
- unsigned long delay)
+mock_request(struct intel_context *ce, unsigned long delay)
{
struct i915_request *request;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
- request = igt_request_alloc(context, engine);
+ request = intel_context_create_request(ce);
if (IS_ERR(request))
return NULL;
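
Call sites follow the same context-first migration as the spinner; a
hypothetical caller update, where ce is a pinned mock context the test
already owns:

	-	request = mock_request(engine, ctx, 2 * HZ);
	+	request = mock_request(ce, 2 * HZ);
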
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 4acf0211df20..8907b60c290d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -30,9 +30,7 @@
#include "../i915_request.h"
struct i915_request *
-mock_request(struct intel_engine_cs *engine,
- struct i915_gem_context *context,
- unsigned long delay);
+mock_request(struct intel_context *ce, unsigned long delay);
bool mock_cancel_request(struct i915_request *request);
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
deleted file mode 100644
index 65b52be23d42..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
- */
-
-#include "../i915_timeline.h"
-
-#include "mock_timeline.h"
-
-void mock_timeline_init(struct i915_timeline *timeline, u64 context)
-{
- timeline->i915 = NULL;
- timeline->fence_context = context;
-
- mutex_init(&timeline->mutex);
-
- INIT_ACTIVE_REQUEST(&timeline->last_request);
- INIT_LIST_HEAD(&timeline->requests);
-
- i915_syncmap_init(&timeline->sync);
-
- INIT_LIST_HEAD(&timeline->link);
-}
-
-void mock_timeline_fini(struct i915_timeline *timeline)
-{
- i915_syncmap_free(&timeline->sync);
-}
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.h b/drivers/gpu/drm/i915/selftests/mock_timeline.h
deleted file mode 100644
index b6deaa61110d..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2017-2018 Intel Corporation
- */
-
-#ifndef __MOCK_TIMELINE__
-#define __MOCK_TIMELINE__
-
-struct i915_timeline;
-
-void mock_timeline_init(struct i915_timeline *timeline, u64 context);
-void mock_timeline_fini(struct i915_timeline *timeline);
-
-#endif /* !__MOCK_TIMELINE__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index ff8999c63a12..49585f16d4a2 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -41,6 +41,6 @@ __nop_read(64)
void mock_uncore_init(struct intel_uncore *uncore)
{
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
- ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop);
}