Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_gem_object.c     | 135
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_gem_object.h     | 45
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c  | 385
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c    | 465
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c     | 303
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c      | 350
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c        | 1562
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c     | 600
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_request.c    | 882
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h | 19
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h | 20
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.c         | 63
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_random.h         | 50
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_selftest.c       | 250
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c            | 746
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c   | 481
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c     | 542
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c        | 182
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c        | 78
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.h        | 34
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_dmabuf.c         | 176
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_dmabuf.h         | 41
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_drm.c            | 73
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_drm.h            | 31
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c         | 206
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.h         | 54
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c     | 226
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.h     | 9
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_object.h     | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c            | 138
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.h            | 35
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.c        | 63
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.h        | 46
-rw-r--r--  drivers/gpu/drm/i915/selftests/scatterlist.c         | 364
34 files changed, 8662 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
new file mode 100644
index 000000000000..4e681fc13be4
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "huge_gem_object.h"
+
+static void huge_free_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ unsigned long nreal = obj->scratch / PAGE_SIZE;
+ struct scatterlist *sg;
+
+ for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
+ __free_page(sg_page(sg));
+
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static struct sg_table *
+huge_get_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+ const unsigned long nreal = obj->scratch / PAGE_SIZE;
+ const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct scatterlist *sg, *src, *end;
+ struct sg_table *pages;
+ unsigned long n;
+
+ pages = kmalloc(sizeof(*pages), GFP);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(pages, npages, GFP)) {
+ kfree(pages);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = pages->sgl;
+ for (n = 0; n < nreal; n++) {
+ struct page *page;
+
+ page = alloc_page(GFP | __GFP_HIGHMEM);
+ if (!page) {
+ sg_mark_end(sg);
+ goto err;
+ }
+
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ sg = __sg_next(sg);
+ }
+ if (nreal < npages) {
+ for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
+ sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
+ src = __sg_next(src);
+ if (src == end)
+ src = pages->sgl;
+ }
+ }
+
+ if (i915_gem_gtt_prepare_pages(obj, pages))
+ goto err;
+
+ return pages;
+
+err:
+ huge_free_pages(obj, pages);
+ return ERR_PTR(-ENOMEM);
+#undef GFP
+}
+
+static void huge_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_gem_gtt_finish_pages(obj, pages);
+ huge_free_pages(obj, pages);
+
+ obj->mm.dirty = false;
+}
+
+static const struct drm_i915_gem_object_ops huge_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = huge_get_pages,
+ .put_pages = huge_put_pages,
+};
+
+struct drm_i915_gem_object *
+huge_gem_object(struct drm_i915_private *i915,
+ phys_addr_t phys_size,
+ dma_addr_t dma_size)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!phys_size || phys_size > dma_size);
+ GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
+ GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));
+
+ if (overflows_type(dma_size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
+ i915_gem_object_init(obj, &huge_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ obj->scratch = phys_size;
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/selftests/huge_gem_object.h
new file mode 100644
index 000000000000..a6133a9e8029
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HUGE_GEM_OBJECT_H
+#define __HUGE_GEM_OBJECT_H
+
+struct drm_i915_gem_object *
+huge_gem_object(struct drm_i915_private *i915,
+ phys_addr_t phys_size,
+ dma_addr_t dma_size);
+
+static inline phys_addr_t
+huge_gem_object_phys_size(struct drm_i915_gem_object *obj)
+{
+ return obj->scratch;
+}
+
+static inline dma_addr_t
+huge_gem_object_dma_size(struct drm_i915_gem_object *obj)
+{
+ return obj->base.size;
+}
+
+#endif /* !__HUGE_GEM_OBJECT_H */
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
new file mode 100644
index 000000000000..f08d0179b3df
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+static int cpu_set(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 v)
+{
+ unsigned int needs_clflush;
+ struct page *page;
+ typeof(v) *map;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ if (err)
+ return err;
+
+ page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ map = kmap_atomic(page);
+ if (needs_clflush & CLFLUSH_BEFORE)
+ clflush(map+offset_in_page(offset) / sizeof(*map));
+ map[offset_in_page(offset) / sizeof(*map)] = v;
+ if (needs_clflush & CLFLUSH_AFTER)
+ clflush(map+offset_in_page(offset) / sizeof(*map));
+ kunmap_atomic(map);
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return 0;
+}
+
+static int cpu_get(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 *v)
+{
+ unsigned int needs_clflush;
+ struct page *page;
+ typeof(v) map;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+ if (err)
+ return err;
+
+ page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ map = kmap_atomic(page);
+ if (needs_clflush & CLFLUSH_BEFORE)
+ clflush(map+offset_in_page(offset) / sizeof(*map));
+ *v = map[offset_in_page(offset) / sizeof(*map)];
+ kunmap_atomic(map);
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return 0;
+}
+
+static int gtt_set(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 v)
+{
+ struct i915_vma *vma;
+ typeof(v) *map;
+ int err;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ return err;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ map[offset / sizeof(*map)] = v;
+ i915_vma_unpin_iomap(vma);
+
+ return 0;
+}
+
+static int gtt_get(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 *v)
+{
+ struct i915_vma *vma;
+ typeof(v) map;
+ int err;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ return err;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ *v = map[offset / sizeof(*map)];
+ i915_vma_unpin_iomap(vma);
+
+ return 0;
+}
+
+static int wc_set(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 v)
+{
+ typeof(v) *map;
+ int err;
+
+ /* XXX GTT write followed by WC write go missing */
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ return err;
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ map[offset / sizeof(*map)] = v;
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
+
+static int wc_get(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 *v)
+{
+ typeof(v) map;
+ int err;
+
+ /* XXX WC write followed by GTT write go missing */
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ return err;
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ *v = map[offset / sizeof(*map)];
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
+
+static int gpu_set(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 v)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_i915_gem_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ return err;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ if (IS_ERR(rq)) {
+ i915_vma_unpin(vma);
+ return PTR_ERR(rq);
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ __i915_add_request(rq, false);
+ i915_vma_unpin(vma);
+ return PTR_ERR(cs);
+ }
+
+ if (INTEL_GEN(i915) >= 8) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
+ *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
+ *cs++ = v;
+ } else if (INTEL_GEN(i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + offset;
+ *cs++ = v;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cs++ = i915_ggtt_offset(vma) + offset;
+ *cs++ = v;
+ *cs++ = MI_NOOP;
+ }
+ intel_ring_advance(rq, cs);
+
+ i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unpin(vma);
+
+ reservation_object_lock(obj->resv, NULL);
+ reservation_object_add_excl_fence(obj->resv, &rq->fence);
+ reservation_object_unlock(obj->resv);
+
+ __i915_add_request(rq, true);
+
+ return 0;
+}
+
+static bool always_valid(struct drm_i915_private *i915)
+{
+ return true;
+}
+
+static bool needs_mi_store_dword(struct drm_i915_private *i915)
+{
+ return igt_can_mi_store_dword_imm(i915);
+}
+
+static const struct igt_coherency_mode {
+ const char *name;
+ int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
+ int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
+ bool (*valid)(struct drm_i915_private *i915);
+} igt_coherency_mode[] = {
+ { "cpu", cpu_set, cpu_get, always_valid },
+ { "gtt", gtt_set, gtt_get, always_valid },
+ { "wc", wc_set, wc_get, always_valid },
+ { "gpu", gpu_set, NULL, needs_mi_store_dword },
+ { },
+};
+
+static int igt_gem_coherency(void *arg)
+{
+ const unsigned int ncachelines = PAGE_SIZE/64;
+ I915_RND_STATE(prng);
+ struct drm_i915_private *i915 = arg;
+ const struct igt_coherency_mode *read, *write, *over;
+ struct drm_i915_gem_object *obj;
+ unsigned long count, n;
+ u32 *offsets, *values;
+ int err = 0;
+
+ /* We repeatedly write, overwrite and read from a sequence of
+ * cachelines in order to try and detect incoherency (unflushed writes
+ * from either the CPU or GPU). Each setter/getter uses our cache
+ * domain API which should prevent incoherency.
+ */
+
+ offsets = kmalloc_array(ncachelines, 2*sizeof(u32), GFP_KERNEL);
+ if (!offsets)
+ return -ENOMEM;
+ for (count = 0; count < ncachelines; count++)
+ offsets[count] = count * 64 + 4 * (count % 16);
+
+ values = offsets + ncachelines;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for (over = igt_coherency_mode; over->name; over++) {
+ if (!over->set)
+ continue;
+
+ if (!over->valid(i915))
+ continue;
+
+ for (write = igt_coherency_mode; write->name; write++) {
+ if (!write->set)
+ continue;
+
+ if (!write->valid(i915))
+ continue;
+
+ for (read = igt_coherency_mode; read->name; read++) {
+ if (!read->get)
+ continue;
+
+ if (!read->valid(i915))
+ continue;
+
+ for_each_prime_number_from(count, 1, ncachelines) {
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto unlock;
+ }
+
+ i915_random_reorder(offsets, ncachelines, &prng);
+ for (n = 0; n < count; n++)
+ values[n] = prandom_u32_state(&prng);
+
+ for (n = 0; n < count; n++) {
+ err = over->set(obj, offsets[n], ~values[n]);
+ if (err) {
+ pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
+ n, count, over->name, err);
+ goto put_object;
+ }
+ }
+
+ for (n = 0; n < count; n++) {
+ err = write->set(obj, offsets[n], values[n]);
+ if (err) {
+ pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
+ n, count, write->name, err);
+ goto put_object;
+ }
+ }
+
+ for (n = 0; n < count; n++) {
+ u32 found;
+
+ err = read->get(obj, offsets[n], &found);
+ if (err) {
+ pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
+ n, count, read->name, err);
+ goto put_object;
+ }
+
+ if (found != values[n]) {
+ pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n",
+ n, count, over->name,
+ write->name, values[n],
+ read->name, found,
+ ~values[n], offsets[n]);
+ err = -EINVAL;
+ goto put_object;
+ }
+ }
+
+ __i915_gem_object_release_unless_active(obj);
+ }
+ }
+ }
+ }
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ kfree(offsets);
+ return err;
+
+put_object:
+ __i915_gem_object_release_unless_active(obj);
+ goto unlock;
+}
+
+int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_coherency),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
new file mode 100644
index 000000000000..12b85b3278cd
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_drm.h"
+#include "huge_gem_object.h"
+
+#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
+
+static struct i915_vma *
+gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
+{
+ struct drm_i915_gem_object *obj;
+ const int gen = INTEL_GEN(vma->vm->i915);
+ unsigned long n, size;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(!igt_can_mi_store_dword_imm(vma->vm->i915));
+
+ size = (4 * count + 1) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vma->vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
+ offset += vma->node.start;
+
+ for (n = 0; n < count; n++) {
+ if (gen >= 8) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = value;
+ } else if (gen >= 4) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+ (gen < 6 ? 1 << 22 : 0);
+ *cmd++ = 0;
+ *cmd++ = offset;
+ *cmd++ = value;
+ } else {
+ *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = offset;
+ *cmd++ = value;
+ }
+ offset += PAGE_SIZE;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static unsigned long real_page_count(struct drm_i915_gem_object *obj)
+{
+ return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
+}
+
+static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
+{
+ return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
+}
+
+static int gpu_fill(struct drm_i915_gem_object *obj,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ unsigned int dw)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_request *rq;
+ struct i915_vma *vma;
+ struct i915_vma *batch;
+ unsigned int flags;
+ int err;
+
+ GEM_BUG_ON(obj->base.size > vm->total);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
+ if (err)
+ return err;
+
+ /* Within the GTT the huge object maps every page onto
+ * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
+ * We set the nth dword within the page using the nth
+ * mapping via the GTT - this should exercise the GTT mapping
+ * whilst checking that each context provides a unique view
+ * into the object.
+ */
+ batch = gpu_fill_dw(vma,
+ (dw * real_page_count(obj)) << PAGE_SHIFT |
+ (dw * sizeof(u32)),
+ real_page_count(obj),
+ dw);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_vma;
+ }
+
+ rq = i915_gem_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto err_request;
+
+ err = i915_switch_context(rq);
+ if (err)
+ goto err_request;
+
+ flags = 0;
+ if (INTEL_GEN(vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
+ if (err)
+ goto err_request;
+
+ i915_vma_move_to_active(batch, rq, 0);
+ i915_gem_object_set_active_reference(batch->obj);
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
+
+ i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unpin(vma);
+
+ reservation_object_lock(obj->resv, NULL);
+ reservation_object_add_excl_fence(obj->resv, &rq->fence);
+ reservation_object_unlock(obj->resv);
+
+ __i915_add_request(rq, true);
+
+ return 0;
+
+err_request:
+ __i915_add_request(rq, false);
+err_batch:
+ i915_vma_unpin(batch);
+err_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
+{
+ const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
+ unsigned int n, m, need_flush;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
+ if (err)
+ return err;
+
+ for (n = 0; n < real_page_count(obj); n++) {
+ u32 *map;
+
+ map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ for (m = 0; m < DW_PER_PAGE; m++)
+ map[m] = value;
+ if (!has_llc)
+ drm_clflush_virt_range(map, PAGE_SIZE);
+ kunmap_atomic(map);
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = 0;
+ return 0;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
+{
+ unsigned int n, m, needs_flush;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+ if (err)
+ return err;
+
+ for (n = 0; n < real_page_count(obj); n++) {
+ u32 *map;
+
+ map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ if (needs_flush & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(map, PAGE_SIZE);
+
+ for (m = 0; m < max; m++) {
+ if (map[m] != m) {
+ pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
+ n, m, map[m], m);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+ }
+
+ for (; m < DW_PER_PAGE; m++) {
+ if (map[m] != 0xdeadbeef) {
+ pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
+ n, m, map[m], 0xdeadbeef);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+ }
+
+out_unmap:
+ kunmap_atomic(map);
+ if (err)
+ break;
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return err;
+}
+
+static struct drm_i915_gem_object *
+create_test_object(struct i915_gem_context *ctx,
+ struct drm_file *file,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ u64 size;
+ u32 handle;
+ int err;
+
+ size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
+ size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
+
+ obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
+ if (IS_ERR(obj))
+ return obj;
+
+ /* tie the handle to the drm_file for easy reaping */
+ err = drm_gem_handle_create(file, &obj->base, &handle);
+ i915_gem_object_put(obj);
+ if (err)
+ return ERR_PTR(err);
+
+ err = cpu_fill(obj, 0xdeadbeef);
+ if (err) {
+ pr_err("Failed to fill object with cpu, err=%d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ list_add_tail(&obj->st_link, objects);
+ return obj;
+}
+
+static unsigned long max_dwords(struct drm_i915_gem_object *obj)
+{
+ unsigned long npages = fake_page_count(obj);
+
+ GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
+ return npages / DW_PER_PAGE;
+}
+
+static int igt_ctx_exec(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj = NULL;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
+ unsigned long ncontexts, ndwords, dw;
+ bool first_shared_gtt = true;
+ int err;
+
+ /* Create a few different contexts (with different mm) and write
+ * through each ctx/mm using the GPU making sure those writes end
+ * up in the expected pages of our obj.
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ncontexts = 0;
+ ndwords = 0;
+ dw = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ unsigned int id;
+
+ if (first_shared_gtt) {
+ ctx = __create_hw_context(i915, file->driver_priv);
+ first_shared_gtt = false;
+ } else {
+ ctx = i915_gem_create_context(i915, file->driver_priv);
+ }
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (!obj) {
+ obj = create_test_object(ctx, file, &objects);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+ }
+
+ err = gpu_fill(obj, ctx, engine, dw);
+ if (err) {
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ ndwords, dw, max_dwords(obj),
+ engine->name, ctx->hw_id,
+ yesno(!!ctx->ppgtt), err);
+ goto out_unlock;
+ }
+
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
+ dw = 0;
+ }
+ ndwords++;
+ }
+ ncontexts++;
+ }
+ pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
+ ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
+
+ dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
+
+ err = cpu_check(obj, rem);
+ if (err)
+ break;
+
+ dw += rem;
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ return err;
+}
+
+static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ err = i915_gem_init_aliasing_ppgtt(i915);
+ if (err)
+ return err;
+
+ list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ vma->flags &= ~I915_VMA_LOCAL_BIND;
+ }
+
+ return 0;
+}
+
+static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
+{
+ i915_gem_fini_aliasing_ppgtt(i915);
+}
+
+int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_ctx_exec),
+ };
+ bool fake_alias = false;
+ int err;
+
+ /* Install a fake aliasing gtt for exercise */
+ if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ err = fake_aliasing_ppgtt_enable(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
+ fake_alias = true;
+ }
+
+ err = i915_subtests(tests, dev_priv);
+
+ if (fake_alias) {
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ fake_aliasing_ppgtt_disable(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ }
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
new file mode 100644
index 000000000000..817bef74bbcb
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_gem_device.h"
+#include "mock_dmabuf.h"
+
+static int igt_dmabuf_export(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ i915_gem_object_put(obj);
+ if (IS_ERR(dmabuf)) {
+ pr_err("i915_gem_prime_export failed with err=%d\n",
+ (int)PTR_ERR(dmabuf));
+ return PTR_ERR(dmabuf);
+ }
+
+ dma_buf_put(dmabuf);
+ return 0;
+}
+
+static int igt_dmabuf_import_self(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct drm_gem_object *import;
+ struct dma_buf *dmabuf;
+ int err;
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ if (IS_ERR(dmabuf)) {
+ pr_err("i915_gem_prime_export failed with err=%d\n",
+ (int)PTR_ERR(dmabuf));
+ err = PTR_ERR(dmabuf);
+ goto out;
+ }
+
+ import = i915_gem_prime_import(&i915->drm, dmabuf);
+ if (IS_ERR(import)) {
+ pr_err("i915_gem_prime_import failed with err=%d\n",
+ (int)PTR_ERR(import));
+ err = PTR_ERR(import);
+ goto out_dmabuf;
+ }
+
+ if (import != &obj->base) {
+ pr_err("i915_gem_prime_import created a new object!\n");
+ err = -EINVAL;
+ goto out_import;
+ }
+
+ err = 0;
+out_import:
+ i915_gem_object_put(to_intel_bo(import));
+out_dmabuf:
+ dma_buf_put(dmabuf);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_dmabuf_import(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ void *obj_map, *dma_map;
+ u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
+ int err, i;
+
+ dmabuf = mock_dmabuf(1);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
+ if (IS_ERR(obj)) {
+ pr_err("i915_gem_prime_import failed with err=%d\n",
+ (int)PTR_ERR(obj));
+ err = PTR_ERR(obj);
+ goto out_dmabuf;
+ }
+
+ if (obj->base.dev != &i915->drm) {
+ pr_err("i915_gem_prime_import created a non-i915 object!\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ if (obj->base.size != PAGE_SIZE) {
+ pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n",
+ (long long)obj->base.size, PAGE_SIZE);
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ dma_map = dma_buf_vmap(dmabuf);
+ if (!dma_map) {
+ pr_err("dma_buf_vmap failed\n");
+ err = -ENOMEM;
+ goto out_obj;
+ }
+
+ if (0) { /* Can not yet map dmabuf */
+ obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(obj_map)) {
+ err = PTR_ERR(obj_map);
+ pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
+ goto out_dma_map;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pattern); i++) {
+ memset(dma_map, pattern[i], PAGE_SIZE);
+ if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
+ err = -EINVAL;
+ pr_err("imported vmap not all set to %x!\n", pattern[i]);
+ i915_gem_object_unpin_map(obj);
+ goto out_dma_map;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pattern); i++) {
+ memset(obj_map, pattern[i], PAGE_SIZE);
+ if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
+ err = -EINVAL;
+ pr_err("exported vmap not all set to %x!\n", pattern[i]);
+ i915_gem_object_unpin_map(obj);
+ goto out_dma_map;
+ }
+ }
+
+ i915_gem_object_unpin_map(obj);
+ }
+
+ err = 0;
+out_dma_map:
+ dma_buf_vunmap(dmabuf, dma_map);
+out_obj:
+ i915_gem_object_put(obj);
+out_dmabuf:
+ dma_buf_put(dmabuf);
+ return err;
+}
+
+static int igt_dmabuf_import_ownership(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ void *ptr;
+ int err;
+
+ dmabuf = mock_dmabuf(1);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ ptr = dma_buf_vmap(dmabuf);
+ if (!ptr) {
+ pr_err("dma_buf_vmap failed\n");
+ err = -ENOMEM;
+ goto err_dmabuf;
+ }
+
+ memset(ptr, 0xc5, PAGE_SIZE);
+ dma_buf_vunmap(dmabuf, ptr);
+
+ obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
+ if (IS_ERR(obj)) {
+ pr_err("i915_gem_prime_import failed with err=%d\n",
+ (int)PTR_ERR(obj));
+ err = PTR_ERR(obj);
+ goto err_dmabuf;
+ }
+
+ dma_buf_put(dmabuf);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
+ goto out_obj;
+ }
+
+ err = 0;
+ i915_gem_object_unpin_pages(obj);
+out_obj:
+ i915_gem_object_put(obj);
+ return err;
+
+err_dmabuf:
+ dma_buf_put(dmabuf);
+ return err;
+}
+
+static int igt_dmabuf_export_vmap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ void *ptr;
+ int err;
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ if (IS_ERR(dmabuf)) {
+ pr_err("i915_gem_prime_export failed with err=%d\n",
+ (int)PTR_ERR(dmabuf));
+ err = PTR_ERR(dmabuf);
+ goto err_obj;
+ }
+ i915_gem_object_put(obj);
+
+ ptr = dma_buf_vmap(dmabuf);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ pr_err("dma_buf_vmap failed with err=%d\n", err);
+ goto out;
+ }
+
+ if (memchr_inv(ptr, 0, dmabuf->size)) {
+ pr_err("Exported object not initialiased to zero!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(ptr, 0xc5, dmabuf->size);
+
+ err = 0;
+ dma_buf_vunmap(dmabuf, ptr);
+out:
+ dma_buf_put(dmabuf);
+ return err;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+int i915_gem_dmabuf_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_dmabuf_export),
+ SUBTEST(igt_dmabuf_import_self),
+ SUBTEST(igt_dmabuf_import),
+ SUBTEST(igt_dmabuf_import_ownership),
+ SUBTEST(igt_dmabuf_export_vmap),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
+
+int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_dmabuf_export),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
new file mode 100644
index 000000000000..14e9c2fbc4e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_gem_device.h"
+
+static int populate_ggtt(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ u64 size;
+
+ for (size = 0;
+ size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ size += I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+ }
+
+ if (!list_empty(&i915->mm.unbound_list)) {
+ size = 0;
+ list_for_each_entry(obj, &i915->mm.unbound_list, global_link)
+ size++;
+
+ pr_err("Found %lld objects unbound!\n", size);
+ return -EINVAL;
+ }
+
+ if (list_empty(&i915->ggtt.base.inactive_list)) {
+ pr_err("No objects on the GGTT inactive list!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void unpin_ggtt(struct drm_i915_private *i915)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
+ i915_vma_unpin(vma);
+}
+
+static void cleanup_objects(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, global_link)
+ i915_gem_object_put(obj);
+
+ list_for_each_entry_safe(obj, on, &i915->mm.bound_list, global_link)
+ i915_gem_object_put(obj);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+}
+
+static int igt_evict_something(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ int err;
+
+ /* Fill the GGTT with pinned objects and try to evict one. */
+
+ err = populate_ggtt(i915);
+ if (err)
+ goto cleanup;
+
+ /* Everything is pinned, nothing should happen */
+ err = i915_gem_evict_something(&ggtt->base,
+ I915_GTT_PAGE_SIZE, 0, 0,
+ 0, U64_MAX,
+ 0);
+ if (err != -ENOSPC) {
+ pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+ unpin_ggtt(i915);
+
+ /* Everything is unpinned, we should be able to evict something */
+ err = i915_gem_evict_something(&ggtt->base,
+ I915_GTT_PAGE_SIZE, 0, 0,
+ 0, U64_MAX,
+ 0);
+ if (err) {
+ pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(i915);
+ return err;
+}
+
+static int igt_overcommit(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ /* Fill the GGTT with pinned objects and then try to pin one more.
+ * We expect it to fail.
+ */
+
+ err = populate_ggtt(i915);
+ if (err)
+ goto cleanup;
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto cleanup;
+ }
+
+ list_move(&obj->global_link, &i915->mm.unbound_list);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
+ pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(i915);
+ return err;
+}
+
+static int igt_evict_for_vma(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_mm_node target = {
+ .start = 0,
+ .size = 4096,
+ };
+ int err;
+
+ /* Fill the GGTT with pinned objects and try to evict a range. */
+
+ err = populate_ggtt(i915);
+ if (err)
+ goto cleanup;
+
+ /* Everything is pinned, nothing should happen */
+ err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ if (err != -ENOSPC) {
+ pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+ unpin_ggtt(i915);
+
+ /* Everything is unpinned, we should be able to evict the node */
+ err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ if (err) {
+ pr_err("i915_gem_evict_for_node returned err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(i915);
+ return err;
+}
+
+static void mock_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
+{
+}
+
+static int igt_evict_for_cache_color(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ const unsigned long flags = PIN_OFFSET_FIXED;
+ struct drm_mm_node target = {
+ .start = I915_GTT_PAGE_SIZE * 2,
+ .size = I915_GTT_PAGE_SIZE,
+ .color = I915_CACHE_LLC,
+ };
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ /* Currently the use of color_adjust is limited to cache domains within
+ * the ggtt, and so the presence of mm.color_adjust is assumed to be
+ * i915_gtt_color_adjust throughout our driver, so using a mock color
+ * adjust will work just fine for our purposes.
+ */
+ ggtt->base.mm.color_adjust = mock_color_adjust;
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto cleanup;
+ }
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ I915_GTT_PAGE_SIZE | flags);
+ if (IS_ERR(vma)) {
+ pr_err("[0]i915_gem_object_ggtt_pin failed\n");
+ err = PTR_ERR(vma);
+ goto cleanup;
+ }
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto cleanup;
+ }
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ /* Neighbouring; same colour - should fit */
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ (I915_GTT_PAGE_SIZE * 2) | flags);
+ if (IS_ERR(vma)) {
+ pr_err("[1]i915_gem_object_ggtt_pin failed\n");
+ err = PTR_ERR(vma);
+ goto cleanup;
+ }
+
+ i915_vma_unpin(vma);
+
+ /* Remove just the second vma */
+ err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ if (err) {
+ pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
+ goto cleanup;
+ }
+
+ /* Attempt to remove the first *pinned* vma, by removing the (empty)
+ * neighbour -- this should fail.
+ */
+ target.color = I915_CACHE_L3_LLC;
+
+ err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ if (!err) {
+ pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ err = 0;
+
+cleanup:
+ unpin_ggtt(i915);
+ cleanup_objects(i915);
+ ggtt->base.mm.color_adjust = NULL;
+ return err;
+}
+
+static int igt_evict_vm(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ int err;
+
+ /* Fill the GGTT with pinned objects and try to evict everything. */
+
+ err = populate_ggtt(i915);
+ if (err)
+ goto cleanup;
+
+ /* Everything is pinned, nothing should happen */
+ err = i915_gem_evict_vm(&ggtt->base, false);
+ if (err) {
+ pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
+ err);
+ goto cleanup;
+ }
+
+ unpin_ggtt(i915);
+
+ err = i915_gem_evict_vm(&ggtt->base, false);
+ if (err) {
+ pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
+ err);
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(i915);
+ return err;
+}
+
+int i915_gem_evict_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_evict_something),
+ SUBTEST(igt_evict_for_vma),
+ SUBTEST(igt_evict_for_cache_color),
+ SUBTEST(igt_evict_vm),
+ SUBTEST(igt_overcommit),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
new file mode 100644
index 000000000000..50710e3f1caa
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -0,0 +1,1562 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/list_sort.h>
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#include "mock_context.h"
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+
+static void fake_free_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static struct sg_table *
+fake_get_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+#define PFN_BIAS 0x1000
+ struct sg_table *pages;
+ struct scatterlist *sg;
+ typeof(obj->base.size) rem;
+
+ pages = kmalloc(sizeof(*pages), GFP);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ rem = round_up(obj->base.size, BIT(31)) >> 31;
+ if (sg_alloc_table(pages, rem, GFP)) {
+ kfree(pages);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rem = obj->base.size;
+ for (sg = pages->sgl; sg; sg = sg_next(sg)) {
+ unsigned long len = min_t(typeof(rem), rem, BIT(31));
+
+ GEM_BUG_ON(!len);
+ sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
+ sg_dma_address(sg) = page_to_phys(sg_page(sg));
+ sg_dma_len(sg) = len;
+
+ rem -= len;
+ }
+ GEM_BUG_ON(rem);
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+ return pages;
+#undef GFP
+}
+
+static void fake_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ fake_free_pages(obj, pages);
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_pages,
+ .put_pages = fake_put_pages,
+};
+
+static struct drm_i915_gem_object *
+fake_dma_object(struct drm_i915_private *i915, u64 size)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ goto err;
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &fake_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ /* Preallocate the "backing storage" */
+ if (i915_gem_object_pin_pages(obj))
+ goto err_obj;
+
+ i915_gem_object_unpin_pages(obj);
+ return obj;
+
+err_obj:
+ i915_gem_object_put(obj);
+err:
+ return ERR_PTR(-ENOMEM);
+}
+
+static int igt_ppgtt_alloc(void *arg)
+{
+ struct drm_i915_private *dev_priv = arg;
+ struct i915_hw_ppgtt *ppgtt;
+ u64 size, last;
+ int err;
+
+ /* Allocate a ppgtt and try to fill the entire range */
+
+ if (!USES_PPGTT(dev_priv))
+ return 0;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return -ENOMEM;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ err = __hw_ppgtt_init(ppgtt, dev_priv);
+ if (err)
+ goto err_ppgtt;
+
+ if (!ppgtt->base.allocate_va_range)
+ goto err_ppgtt_cleanup;
+
+ /* Check we can allocate the entire range */
+ for (size = 4096;
+ size <= ppgtt->base.total;
+ size <<= 2) {
+ err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
+ size, ilog2(size));
+ err = 0; /* virtual space too large! */
+ }
+ goto err_ppgtt_cleanup;
+ }
+
+ ppgtt->base.clear_range(&ppgtt->base, 0, size);
+ }
+
+ /* Check we can incrementally allocate the entire range */
+ for (last = 0, size = 4096;
+ size <= ppgtt->base.total;
+ last = size, size <<= 2) {
+ err = ppgtt->base.allocate_va_range(&ppgtt->base,
+ last, size - last);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
+ last, size - last, ilog2(size));
+ err = 0; /* virtual space too large! */
+ }
+ goto err_ppgtt_cleanup;
+ }
+ }
+
+err_ppgtt_cleanup:
+ ppgtt->base.cleanup(&ppgtt->base);
+err_ppgtt:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ kfree(ppgtt);
+ return err;
+}
+
+static int lowlevel_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ I915_RND_STATE(seed_prng);
+ unsigned int size;
+
+ /* Keep creating larger objects until one cannot fit into the hole */
+ for (size = 12; (hole_end - hole_start) >> size; size++) {
+ I915_RND_SUBSTATE(prng, seed_prng);
+ struct drm_i915_gem_object *obj;
+ unsigned int *order, count, n;
+ u64 hole_size;
+
+ hole_size = (hole_end - hole_start) >> size;
+ if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
+ hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
+ count = hole_size;
+ do {
+ count >>= 1;
+ order = i915_random_order(count, &prng);
+ } while (!order && count);
+ if (!order)
+ break;
+
+ GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
+ GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
+
+ /* Ignore allocation failures (i.e. don't report them as
+ * a test failure) as we are purposefully allocating very
+ * large objects without checking that we have sufficient
+ * memory. We expect to hit -ENOMEM.
+ */
+
+ obj = fake_dma_object(i915, BIT_ULL(size));
+ if (IS_ERR(obj)) {
+ kfree(order);
+ break;
+ }
+
+ GEM_BUG_ON(obj->base.size != BIT_ULL(size));
+
+ if (i915_gem_object_pin_pages(obj)) {
+ i915_gem_object_put(obj);
+ kfree(order);
+ break;
+ }
+
+ for (n = 0; n < count; n++) {
+ u64 addr = hole_start + order[n] * BIT_ULL(size);
+
+ GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
+
+ if (igt_timeout(end_time,
+ "%s timed out before %d/%d\n",
+ __func__, n, count)) {
+ hole_end = hole_start; /* quit */
+ break;
+ }
+
+ if (vm->allocate_va_range &&
+ vm->allocate_va_range(vm, addr, BIT_ULL(size)))
+ break;
+
+ vm->insert_entries(vm, obj->mm.pages, addr,
+ I915_CACHE_NONE, 0);
+ }
+ count = n;
+
+ i915_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ u64 addr = hole_start + order[n] * BIT_ULL(size);
+
+ GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
+ vm->clear_range(vm, addr, BIT_ULL(size));
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+
+ kfree(order);
+ }
+
+ return 0;
+}
+
+static void close_object_list(struct list_head *objects,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_gem_object *obj, *on;
+ int ignored;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (!IS_ERR(vma))
+ ignored = i915_vma_unbind(vma);
+ /* Only ppgtt vma may be closed before the object is freed */
+ if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+ }
+}
+
+static int fill_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ const u64 hole_size = hole_end - hole_start;
+ struct drm_i915_gem_object *obj;
+ const unsigned long max_pages =
+ min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
+ const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
+ unsigned long npages, prime, flags;
+ struct i915_vma *vma;
+ LIST_HEAD(objects);
+ int err;
+
+ /* Try binding many VMA working inwards from either edge */
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ for_each_prime_number_from(prime, 2, max_step) {
+ for (npages = 1; npages <= max_pages; npages *= prime) {
+ const u64 full_size = npages << PAGE_SHIFT;
+ const struct {
+ const char *name;
+ u64 offset;
+ int step;
+ } phases[] = {
+ { "top-down", hole_end, -1, },
+ { "bottom-up", hole_start, 1, },
+ { }
+ }, *p;
+
+ obj = fake_dma_object(i915, full_size);
+ if (IS_ERR(obj))
+ break;
+
+ list_add(&obj->st_link, &objects);
+
+ /* Align differing sized objects against the edges, and
+ * check we don't walk off into the void when binding
+ * them into the GTT.
+ */
+ for (p = phases; p->name; p++) {
+ u64 offset;
+
+ offset = p->offset;
+ list_for_each_entry(obj, &objects, st_link) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + obj->base.size)
+ break;
+ offset -= obj->base.size;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, offset | flags);
+ if (err) {
+ pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+ __func__, p->name, err, npages, prime, offset);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (p->step > 0) {
+ if (offset + obj->base.size > hole_end)
+ break;
+ offset += obj->base.size;
+ }
+ }
+
+ offset = p->offset;
+ list_for_each_entry(obj, &objects, st_link) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + obj->base.size)
+ break;
+ offset -= obj->base.size;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size,
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+ __func__, p->name, vma->node.start, vma->node.size,
+ err);
+ goto err;
+ }
+
+ if (p->step > 0) {
+ if (offset + obj->base.size > hole_end)
+ break;
+ offset += obj->base.size;
+ }
+ }
+
+ offset = p->offset;
+ list_for_each_entry_reverse(obj, &objects, st_link) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + obj->base.size)
+ break;
+ offset -= obj->base.size;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, offset | flags);
+ if (err) {
+ pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+ __func__, p->name, err, npages, prime, offset);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (p->step > 0) {
+ if (offset + obj->base.size > hole_end)
+ break;
+ offset += obj->base.size;
+ }
+ }
+
+ offset = p->offset;
+ list_for_each_entry_reverse(obj, &objects, st_link) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + obj->base.size)
+ break;
+ offset -= obj->base.size;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+ __func__, p->name, vma->node.start, vma->node.size,
+ err);
+ goto err;
+ }
+
+ if (p->step > 0) {
+ if (offset + obj->base.size > hole_end)
+ break;
+ offset += obj->base.size;
+ }
+ }
+ }
+
+ if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
+ __func__, npages, prime)) {
+ err = -EINTR;
+ goto err;
+ }
+ }
+
+ close_object_list(&objects, vm);
+ }
+
+ return 0;
+
+err:
+ close_object_list(&objects, vm);
+ return err;
+}
+
+static int walk_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ const u64 hole_size = hole_end - hole_start;
+ const unsigned long max_pages =
+ min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
+ unsigned long flags;
+ u64 size;
+
+ /* Try binding a single VMA in different positions within the hole */
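+ /* For each prime number of pages, a single object is stepped through
+ * the hole one object-size at a time; every step must land exactly at
+ * the requested offset and unbind cleanly before the next.
+ */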
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ for_each_prime_number_from(size, 1, max_pages) {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u64 addr;
+ int err = 0;
+
+ obj = fake_dma_object(i915, size << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ break;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put;
+ }
+
+ for (addr = hole_start;
+ addr + obj->base.size < hole_end;
+ addr += obj->base.size) {
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
+ __func__, addr, vma->size,
+ hole_start, hole_end, err);
+ goto err_close;
+ }
+ i915_vma_unpin(vma);
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, vma->size);
+ err = -EINVAL;
+ goto err_close;
+ }
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("%s unbind failed at %llx + %llx with err=%d\n",
+ __func__, addr, vma->size, err);
+ goto err_close;
+ }
+
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+ if (igt_timeout(end_time,
+ "%s timed out at %llx\n",
+ __func__, addr)) {
+ err = -EINTR;
+ goto err_close;
+ }
+ }
+
+err_close:
+ if (!i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+err_put:
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int pot_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned long flags;
+ unsigned int pot;
+ int err = 0;
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ /* Insert a pair of pages across every pot boundary within the hole */
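+ /* For each power-of-two step down to (but excluding) the object size,
+ * the two-page object is pinned so that it straddles the step boundary
+ * (one page on either side), checking the fixed-offset placement.
+ */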
+ for (pot = fls64(hole_end - 1) - 1;
+ pot > ilog2(2 * I915_GTT_PAGE_SIZE);
+ pot--) {
+ u64 step = BIT_ULL(pot);
+ u64 addr;
+
+ for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
+ addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
+ addr += step) {
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
+ __func__,
+ addr,
+ hole_start, hole_end,
+ err);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, vma->size);
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ GEM_BUG_ON(err);
+ }
+
+ if (igt_timeout(end_time,
+ "%s timed out after %d/%d\n",
+ __func__, pot, fls64(hole_end - 1) - 1)) {
+ err = -EINTR;
+ goto err;
+ }
+ }
+
+err:
+ if (!i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int drunk_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ I915_RND_STATE(prng);
+ unsigned int size;
+ unsigned long flags;
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ /* Keep creating larger objects until one cannot fit into the hole */
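+ /* For each object size (doubling from a single page upwards), pin and
+ * unbind the object at a randomised selection of the size-strided
+ * slots starting from the bottom of the hole.
+ */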
+ for (size = 12; (hole_end - hole_start) >> size; size++) {
+ struct drm_i915_gem_object *obj;
+ unsigned int *order, count, n;
+ struct i915_vma *vma;
+ u64 hole_size;
+ int err;
+
+ hole_size = (hole_end - hole_start) >> size;
+ if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
+ hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
+ count = hole_size;
+ do {
+ count >>= 1;
+ order = i915_random_order(count, &prng);
+ } while (!order && count);
+ if (!order)
+ break;
+
+ /* Ignore allocation failures (i.e. don't report them as
+ * a test failure) as we are purposefully allocating very
+ * large objects without checking that we have sufficient
+ * memory. We expect to hit -ENOMEM.
+ */
+
+ obj = fake_dma_object(i915, BIT_ULL(size));
+ if (IS_ERR(obj)) {
+ kfree(order);
+ break;
+ }
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ GEM_BUG_ON(vma->size != BIT_ULL(size));
+
+ for (n = 0; n < count; n++) {
+ u64 addr = hole_start + order[n] * BIT_ULL(size);
+
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
+ __func__,
+ addr, BIT_ULL(size),
+ hole_start, hole_end,
+ err);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, BIT_ULL(size));
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ GEM_BUG_ON(err);
+
+ if (igt_timeout(end_time,
+ "%s timed out after %d/%d\n",
+ __func__, n, count)) {
+ err = -EINTR;
+ goto err;
+ }
+ }
+
+err:
+ if (!i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+err_obj:
+ i915_gem_object_put(obj);
+ kfree(order);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __shrink_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ struct drm_i915_gem_object *obj;
+ unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
+ unsigned int order = 12;
+ LIST_HEAD(objects);
+ int err = 0;
+ u64 addr;
+
+ /* Fill the hole with ever larger objects, clamping the last to fit */
+ for (addr = hole_start; addr < hole_end; ) {
+ struct i915_vma *vma;
+ u64 size = BIT_ULL(order++);
+
+ size = min(size, hole_end - addr);
+ obj = fake_dma_object(i915, size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ break;
+ }
+
+ GEM_BUG_ON(vma->size != size);
+
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
+ __func__, addr, size, hole_start, hole_end, err);
+ break;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, size);
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ err = -EINVAL;
+ break;
+ }
+
+ i915_vma_unpin(vma);
+ addr += size;
+
+ if (igt_timeout(end_time,
+ "%s timed out at ofset %llx [%llx - %llx]\n",
+ __func__, addr, hole_start, hole_end)) {
+ err = -EINTR;
+ break;
+ }
+ }
+
+ close_object_list(&objects, vm);
+ return err;
+}
+
+static int shrink_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ unsigned long prime;
+ int err;
+
+ vm->fault_attr.probability = 999;
+ atomic_set(&vm->fault_attr.times, -1);
+
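+ /* Sweep the fault-injection interval over the primes so allocations
+ * made on behalf of the address space fail at varying points,
+ * exercising the error-unwind paths while __shrink_hole() packs
+ * the hole.
+ */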
+ for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
+ vm->fault_attr.interval = prime;
+ err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+ if (err)
+ break;
+ }
+
+ memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+
+ return err;
+}
+
+static int exercise_ppgtt(struct drm_i915_private *dev_priv,
+ int (*func)(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time))
+{
+ struct drm_file *file;
+ struct i915_hw_ppgtt *ppgtt;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ if (!USES_FULL_PPGTT(dev_priv))
+ return 0;
+
+ file = mock_file(dev_priv);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_unlock;
+ }
+ GEM_BUG_ON(offset_in_page(ppgtt->base.total));
+ GEM_BUG_ON(ppgtt->base.closed);
+
+ err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+
+ i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_put(ppgtt);
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ mock_file_free(dev_priv, file);
+ return err;
+}
+
+static int igt_ppgtt_fill(void *arg)
+{
+ return exercise_ppgtt(arg, fill_hole);
+}
+
+static int igt_ppgtt_walk(void *arg)
+{
+ return exercise_ppgtt(arg, walk_hole);
+}
+
+static int igt_ppgtt_pot(void *arg)
+{
+ return exercise_ppgtt(arg, pot_hole);
+}
+
+static int igt_ppgtt_drunk(void *arg)
+{
+ return exercise_ppgtt(arg, drunk_hole);
+}
+
+static int igt_ppgtt_lowlevel(void *arg)
+{
+ return exercise_ppgtt(arg, lowlevel_hole);
+}
+
+static int igt_ppgtt_shrink(void *arg)
+{
+ return exercise_ppgtt(arg, shrink_hole);
+}
+
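+/* Order the GGTT holes by ascending start address so that exercise_ggtt()
+ * can resume its walk from the last hole it processed after each restart.
+ */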
+static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
+{
+ struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
+ struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
+
+ if (a->start < b->start)
+ return -1;
+ else
+ return 1;
+}
+
+static int exercise_ggtt(struct drm_i915_private *i915,
+ int (*func)(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time))
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ u64 hole_start, hole_end, last = 0;
+ struct drm_mm_node *node;
+ IGT_TIMEOUT(end_time);
+ int err = 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+restart:
+ list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
+ drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+ if (hole_start < last)
+ continue;
+
+ if (ggtt->base.mm.color_adjust)
+ ggtt->base.mm.color_adjust(node, 0,
+ &hole_start, &hole_end);
+ if (hole_start >= hole_end)
+ continue;
+
+ err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+ if (err)
+ break;
+
+ /* As we have manipulated the drm_mm, the list may be corrupt */
+ last = hole_end;
+ goto restart;
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
+static int igt_ggtt_fill(void *arg)
+{
+ return exercise_ggtt(arg, fill_hole);
+}
+
+static int igt_ggtt_walk(void *arg)
+{
+ return exercise_ggtt(arg, walk_hole);
+}
+
+static int igt_ggtt_pot(void *arg)
+{
+ return exercise_ggtt(arg, pot_hole);
+}
+
+static int igt_ggtt_drunk(void *arg)
+{
+ return exercise_ggtt(arg, drunk_hole);
+}
+
+static int igt_ggtt_lowlevel(void *arg)
+{
+ return exercise_ggtt(arg, lowlevel_hole);
+}
+
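+/* Exercise vm->insert_page() directly: map the object's single page into
+ * each slot of a reserved GGTT range in random order, write a per-slot
+ * cookie through the aperture, then revisit the slots in a new order and
+ * check that every cookie reads back intact.
+ */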
+static int igt_ggtt_page(void *arg)
+{
+ const unsigned int count = PAGE_SIZE/sizeof(u32);
+ I915_RND_STATE(prng);
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node tmp;
+ unsigned int *order, n;
+ int err;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_free;
+
+ memset(&tmp, 0, sizeof(tmp));
+ err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+ 1024 * PAGE_SIZE, 0,
+ I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ if (err)
+ goto out_unpin;
+
+ order = i915_random_order(count, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_remove;
+ }
+
+ for (n = 0; n < count; n++) {
+ u64 offset = tmp.start + order[n] * PAGE_SIZE;
+ u32 __iomem *vaddr;
+
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
+
+ vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ iowrite32(n, vaddr + n);
+ io_mapping_unmap_atomic(vaddr);
+
+ wmb();
+ ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
+ }
+
+ i915_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ u64 offset = tmp.start + order[n] * PAGE_SIZE;
+ u32 __iomem *vaddr;
+ u32 val;
+
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
+
+ vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ val = ioread32(vaddr + n);
+ io_mapping_unmap_atomic(vaddr);
+
+ ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
+
+ if (val != n) {
+ pr_err("insert page failed: found %d, expected %d\n",
+ val, n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ kfree(order);
+out_remove:
+ drm_mm_remove_node(&tmp);
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_free:
+ i915_gem_object_put(obj);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
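+/* Mimic the bookkeeping of a real bind so the mock node is visible to the
+ * eviction logic: bump the object's bind count, pin its backing pages and
+ * move the vma onto the inactive list.
+ */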
+static void track_vma_bind(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ obj->bind_count++; /* track for eviction later */
+ __i915_gem_object_pin_pages(obj);
+
+ vma->pages = obj->mm.pages;
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+}
+
+static int exercise_mock(struct drm_i915_private *i915,
+ int (*func)(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time))
+{
+ struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ctx = mock_context(i915, "mock");
+ if (!ctx)
+ return -ENOMEM;
+
+ ppgtt = ctx->ppgtt;
+ GEM_BUG_ON(!ppgtt);
+
+ err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+
+ mock_context_close(ctx);
+ return err;
+}
+
+static int igt_mock_fill(void *arg)
+{
+ return exercise_mock(arg, fill_hole);
+}
+
+static int igt_mock_walk(void *arg)
+{
+ return exercise_mock(arg, walk_hole);
+}
+
+static int igt_mock_pot(void *arg)
+{
+ return exercise_mock(arg, pot_hole);
+}
+
+static int igt_mock_drunk(void *arg)
+{
+ return exercise_mock(arg, drunk_hole);
+}
+
+static int igt_gtt_reserve(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj, *on;
+ LIST_HEAD(objects);
+ u64 total;
+ int err;
+
+ /* i915_gem_gtt_reserve() tries to reserve the precise range
+ * for the node, and evicts if it has to. So our test checks that
+ * it can give us the requested space and prevent overlaps.
+ */
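+ /* Three passes: back-to-back reservations filling the GGTT, offset
+ * reservations that must evict the first set, and finally reservations
+ * at random offsets after explicitly unbinding each vma.
+ */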
+
+ /* Start by filling the GGTT */
+ for (total = 0;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total += 2*I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ obj->base.size,
+ total,
+ obj->cache_level,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != total ||
+ vma->node.size != 2*I915_GTT_PAGE_SIZE) {
+ pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ vma->node.start, vma->node.size,
+ total, 2*I915_GTT_PAGE_SIZE);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* Now we start forcing evictions */
+ for (total = I915_GTT_PAGE_SIZE;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total += 2*I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ obj->base.size,
+ total,
+ obj->cache_level,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != total ||
+ vma->node.size != 2*I915_GTT_PAGE_SIZE) {
+ pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ vma->node.start, vma->node.size,
+ total, 2*I915_GTT_PAGE_SIZE);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* And then try at random */
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ struct i915_vma *vma;
+ u64 offset;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("i915_vma_unbind failed with err=%d!\n", err);
+ goto out;
+ }
+
+ offset = random_offset(0, i915->ggtt.base.total,
+ 2*I915_GTT_PAGE_SIZE,
+ I915_GTT_MIN_ALIGNMENT);
+
+ err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ obj->base.size,
+ offset,
+ obj->cache_level,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != offset ||
+ vma->node.size != 2*I915_GTT_PAGE_SIZE) {
+ pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ vma->node.start, vma->node.size,
+ offset, 2*I915_GTT_PAGE_SIZE);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ return err;
+}
+
+static int igt_gtt_insert(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj, *on;
+ struct drm_mm_node tmp = {};
+ const struct invalid_insert {
+ u64 size;
+ u64 alignment;
+ u64 start, end;
+ } invalid_insert[] = {
+ {
+ i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
+ 0, i915->ggtt.base.total,
+ },
+ {
+ 2*I915_GTT_PAGE_SIZE, 0,
+ 0, I915_GTT_PAGE_SIZE,
+ },
+ {
+ -(u64)I915_GTT_PAGE_SIZE, 0,
+ 0, 4*I915_GTT_PAGE_SIZE,
+ },
+ {
+ -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
+ 0, 4*I915_GTT_PAGE_SIZE,
+ },
+ {
+ I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
+ I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
+ },
+ {}
+ }, *ii;
+ LIST_HEAD(objects);
+ u64 total;
+ int err;
+
+ /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
+ * to the node, evicting if required.
+ */
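+ /* After rejecting a few impossible requests, fill the GGTT with fresh
+ * insertions, check that reinserting an unbound vma lands back in the
+ * only free hole, and then keep inserting to force evictions.
+ */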
+
+ /* Check a couple of obviously invalid requests */
+ for (ii = invalid_insert; ii->size; ii++) {
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+ ii->size, ii->alignment,
+ I915_COLOR_UNEVICTABLE,
+ ii->start, ii->end,
+ 0);
+ if (err != -ENOSPC) {
+ pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
+ ii->size, ii->alignment, ii->start, ii->end,
+ err);
+ return -EINVAL;
+ }
+ }
+
+ /* Start by filling the GGTT */
+ for (total = 0;
+ total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total += I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ obj->base.size, 0, obj->cache_level,
+ 0, i915->ggtt.base.total,
+ 0);
+ if (err == -ENOSPC) {
+ /* maxed out the GGTT space */
+ i915_gem_object_put(obj);
+ break;
+ }
+ if (err) {
+ pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+ __i915_vma_pin(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ }
+
+ list_for_each_entry(obj, &objects, st_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node)) {
+ pr_err("VMA was unexpectedly evicted!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ __i915_vma_unpin(vma);
+ }
+
+ /* If we then reinsert, we should find the same hole */
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ struct i915_vma *vma;
+ u64 offset;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ offset = vma->node.start;
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("i915_vma_unbind failed with err=%d!\n", err);
+ goto out;
+ }
+
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ obj->base.size, 0, obj->cache_level,
+ 0, i915->ggtt.base.total,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != offset) {
+ pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
+ offset, vma->node.start);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* And then force evictions */
+ for (total = 0;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total += 2*I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ obj->base.size, 0, obj->cache_level,
+ 0, i915->ggtt.base.total,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ }
+
+out:
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ return err;
+}
+
+int i915_gem_gtt_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_drunk),
+ SUBTEST(igt_mock_walk),
+ SUBTEST(igt_mock_pot),
+ SUBTEST(igt_mock_fill),
+ SUBTEST(igt_gtt_reserve),
+ SUBTEST(igt_gtt_insert),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
+
+int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_ppgtt_alloc),
+ SUBTEST(igt_ppgtt_lowlevel),
+ SUBTEST(igt_ppgtt_drunk),
+ SUBTEST(igt_ppgtt_walk),
+ SUBTEST(igt_ppgtt_pot),
+ SUBTEST(igt_ppgtt_fill),
+ SUBTEST(igt_ppgtt_shrink),
+ SUBTEST(igt_ggtt_lowlevel),
+ SUBTEST(igt_ggtt_drunk),
+ SUBTEST(igt_ggtt_walk),
+ SUBTEST(igt_ggtt_pot),
+ SUBTEST(igt_ggtt_fill),
+ SUBTEST(igt_ggtt_page),
+ };
+
+ GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
new file mode 100644
index 000000000000..67d82bf1407f
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_gem_device.h"
+#include "huge_gem_object.h"
+
+static int igt_gem_object(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err = -ENOMEM;
+
+ /* Basic test to ensure we can create an object */
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_err("i915_gem_object_create failed, err=%d\n", err);
+ goto out;
+ }
+
+ err = 0;
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+static int igt_phys_object(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ /* Create an object and bind it to a contiguous set of physical pages,
+ * i.e. exercise the i915_gem_object_phys API.
+ */
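+ /* A successful attach should switch obj->ops to the phys backend and
+ * leave the replacement pages pinned; both are checked below.
+ */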
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_err("i915_gem_object_create failed, err=%d\n", err);
+ goto out;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err) {
+ pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
+ goto out_obj;
+ }
+
+ if (obj->ops != &i915_gem_phys_ops) {
+ pr_err("i915_gem_object_attach_phys did not create a phys object\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ if (!atomic_read(&obj->mm.pages_pin_count)) {
+ pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ /* Make the object dirty so that put_pages must copy the data back */
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err) {
+ pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
+ err);
+ goto out_obj;
+ }
+
+out_obj:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+static int igt_gem_huge(void *arg)
+{
+ const unsigned int nreal = 509; /* just to be awkward */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned int n;
+ int err;
+
+ /* Basic sanitycheck of our huge fake object allocation */
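+ /* The huge object is only backed by nreal pages; any page index beyond
+ * that must alias onto one of the real pages (index modulo nreal).
+ */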
+
+ obj = huge_gem_object(i915,
+ nreal * PAGE_SIZE,
+ i915->ggtt.base.total + PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
+ }
+
+ for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
+ if (i915_gem_object_get_page(obj, n) !=
+ i915_gem_object_get_page(obj, n % nreal)) {
+ pr_err("Page lookup mismatch at index %u [%u]\n",
+ n, n % nreal);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+ }
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+struct tile {
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+ unsigned int size;
+ unsigned int tiling;
+ unsigned int swizzle;
+};
+
+static u64 swizzle_bit(unsigned int bit, u64 offset)
+{
+ return (offset & BIT_ULL(bit)) >> (bit - 6);
+}
+
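+/* Translate a linear byte offset into the offset the fence would use for
+ * the tiled (and optionally bit-6 swizzled) layout, so that a GTT write
+ * through the fence can be located again by the CPU.
+ */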
+static u64 tiled_offset(const struct tile *tile, u64 v)
+{
+ u64 x, y;
+
+ if (tile->tiling == I915_TILING_NONE)
+ return v;
+
+ y = div64_u64_rem(v, tile->stride, &x);
+ v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
+
+ if (tile->tiling == I915_TILING_X) {
+ v += y * tile->width;
+ v += div64_u64_rem(x, tile->width, &x) << tile->size;
+ v += x;
+ } else {
+ const unsigned int ytile_span = 16;
+ const unsigned int ytile_height = 32 * ytile_span;
+
+ v += y * ytile_span;
+ v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+ v += x;
+ }
+
+ switch (tile->swizzle) {
+ case I915_BIT_6_SWIZZLE_9:
+ v ^= swizzle_bit(9, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_10:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_11:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
+ break;
+ }
+
+ return v;
+}
+
+static int check_partial_mapping(struct drm_i915_gem_object *obj,
+ const struct tile *tile,
+ unsigned long end_time)
+{
+ const unsigned int nreal = obj->scratch / PAGE_SIZE;
+ const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_vma *vma;
+ unsigned long page;
+ int err;
+
+ if (igt_timeout(end_time,
+ "%s: timed out before tiling=%d stride=%d\n",
+ __func__, tile->tiling, tile->stride))
+ return -EINTR;
+
+ err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
+ GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
+
+ for_each_prime_number_from(page, 1, npages) {
+ struct i915_ggtt_view view =
+ compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+ u32 __iomem *io;
+ struct page *p;
+ unsigned int n;
+ u64 offset;
+ u32 *cpu;
+
+ GEM_BUG_ON(view.partial.size > nreal);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ return err;
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ pr_err("Failed to pin partial view: offset=%lu\n",
+ page);
+ return PTR_ERR(vma);
+ }
+
+ n = page - view.partial.offset;
+ GEM_BUG_ON(n >= view.partial.size);
+
+ io = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(io)) {
+ pr_err("Failed to iomap partial view: offset=%lu\n",
+ page);
+ return PTR_ERR(io);
+ }
+
+ err = i915_vma_get_fence(vma);
+ if (err) {
+ pr_err("Failed to get fence for partial view: offset=%lu\n",
+ page);
+ i915_vma_unpin_iomap(vma);
+ return err;
+ }
+
+ iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
+ i915_vma_unpin_iomap(vma);
+
+ offset = tiled_offset(tile, page << PAGE_SHIFT);
+ if (offset >= obj->base.size)
+ continue;
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ cpu = kmap(p) + offset_in_page(offset);
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ if (*cpu != (u32)page) {
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ page, n,
+ view.partial.offset,
+ view.partial.size,
+ vma->size >> PAGE_SHIFT,
+ tile_row_pages(obj),
+ vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
+ offset >> PAGE_SHIFT,
+ (unsigned int)offset_in_page(offset),
+ offset,
+ (u32)page, *cpu);
+ err = -EINVAL;
+ }
+ *cpu = 0;
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ kunmap(p);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int igt_partial_tiling(void *arg)
+{
+ const unsigned int nreal = 1 << 12; /* largest tile row x2 */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int tiling;
+ int err;
+
+ /* We want to check the page mapping and fencing of a large object
+ * mmapped through the GTT. The object we create is larger than can
+ * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
+ * We then check that a write through each partial GGTT vma ends up
+ * in the right set of pages within the object, and with the expected
+ * tiling, which we verify by manual swizzling.
+ */
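+ /* check_partial_mapping() writes a per-page cookie through each fenced
+ * partial view and then reads it back with the CPU, using tiled_offset()
+ * to predict which backing page and offset the write should hit.
+ */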
+
+ obj = huge_gem_object(i915,
+ nreal << PAGE_SHIFT,
+ (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (1) {
+ IGT_TIMEOUT(end);
+ struct tile tile;
+
+ tile.height = 1;
+ tile.width = 1;
+ tile.size = 0;
+ tile.stride = 0;
+ tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
+ tile.tiling = I915_TILING_NONE;
+
+ err = check_partial_mapping(obj, &tile, end);
+ if (err && err != -EINTR)
+ goto out_unlock;
+ }
+
+ for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
+ IGT_TIMEOUT(end);
+ unsigned int max_pitch;
+ unsigned int pitch;
+ struct tile tile;
+
+ tile.tiling = tiling;
+ switch (tiling) {
+ case I915_TILING_X:
+ tile.swizzle = i915->mm.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ tile.swizzle = i915->mm.bit_6_swizzle_y;
+ break;
+ }
+
+ if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
+ tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
+ continue;
+
+ if (INTEL_GEN(i915) <= 2) {
+ tile.height = 16;
+ tile.width = 128;
+ tile.size = 11;
+ } else if (tile.tiling == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(i915)) {
+ tile.height = 32;
+ tile.width = 128;
+ tile.size = 12;
+ } else {
+ tile.height = 8;
+ tile.width = 512;
+ tile.size = 12;
+ }
+
+ if (INTEL_GEN(i915) < 4)
+ max_pitch = 8192 / tile.width;
+ else if (INTEL_GEN(i915) < 7)
+ max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
+ else
+ max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;
+
+ for (pitch = max_pitch; pitch; pitch >>= 1) {
+ tile.stride = tile.width * pitch;
+ err = check_partial_mapping(obj, &tile, end);
+ if (err == -EINTR)
+ goto next_tiling;
+ if (err)
+ goto out_unlock;
+
+ if (pitch > 2 && INTEL_GEN(i915) >= 4) {
+ tile.stride = tile.width * (pitch - 1);
+ err = check_partial_mapping(obj, &tile, end);
+ if (err == -EINTR)
+ goto next_tiling;
+ if (err)
+ goto out_unlock;
+ }
+
+ if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
+ tile.stride = tile.width * (pitch + 1);
+ err = check_partial_mapping(obj, &tile, end);
+ if (err == -EINTR)
+ goto next_tiling;
+ if (err)
+ goto out_unlock;
+ }
+ }
+
+ if (INTEL_GEN(i915) >= 4) {
+ for_each_prime_number(pitch, max_pitch) {
+ tile.stride = tile.width * pitch;
+ err = check_partial_mapping(obj, &tile, end);
+ if (err == -EINTR)
+ goto next_tiling;
+ if (err)
+ goto out_unlock;
+ }
+ }
+
+next_tiling: ;
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int make_obj_busy(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_i915_gem_request *rq;
+ struct i915_vma *vma;
+ int err;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ if (IS_ERR(rq)) {
+ i915_vma_unpin(vma);
+ return PTR_ERR(rq);
+ }
+
+ i915_vma_move_to_active(vma, rq, 0);
+ i915_add_request(rq);
+
+ i915_gem_object_set_active_reference(obj);
+ i915_vma_unpin(vma);
+ return 0;
+}
+
+static bool assert_mmap_offset(struct drm_i915_private *i915,
+ unsigned long size,
+ int expected)
+{
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return false; /* don't report a negative errno as a "pass" from a bool */
+
+ err = i915_gem_object_create_mmap_offset(obj);
+ i915_gem_object_put(obj);
+
+ return err == expected;
+}
+
+static int igt_mmap_offset_exhaustion(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node resv, *hole;
+ u64 hole_start, hole_end;
+ int loop, err;
+
+ /* Trim the device mmap space to only a page */
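+ /* Reserve all but one page-sized unit of the first hole in the vma
+ * manager, leaving just a single mmap offset free for the tests below.
+ */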
+ memset(&resv, 0, sizeof(resv));
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ resv.start = hole_start;
+ resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
+ err = drm_mm_reserve_node(mm, &resv);
+ if (err) {
+ pr_err("Failed to trim VMA manager, err=%d\n", err);
+ return err;
+ }
+ break;
+ }
+
+ /* Just fits! */
+ if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
+ pr_err("Unable to insert object into single page hole\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Too large */
+ if (!assert_mmap_offset(i915, 2*PAGE_SIZE, -ENOSPC)) {
+ pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Fill the hole, further allocation attempts should then fail */
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_create_mmap_offset(obj);
+ if (err) {
+ pr_err("Unable to insert object into reclaimed hole\n");
+ goto err_obj;
+ }
+
+ if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+ pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
+ err = -EINVAL;
+ goto err_obj;
+ }
+
+ i915_gem_object_put(obj);
+
+ /* Now fill with busy dead objects that we expect to reap */
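+ /* Each object is dropped while still busy on the GPU, so creating the
+ * next mmap offset in the single free slot relies on the earlier
+ * objects being retired and reaped on demand.
+ */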
+ for (loop = 0; loop < 3; loop++) {
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = make_obj_busy(obj);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err) {
+ pr_err("[loop %d] Failed to busy the object\n", loop);
+ goto err_obj;
+ }
+
+ GEM_BUG_ON(!i915_gem_object_is_active(obj));
+ err = i915_gem_object_create_mmap_offset(obj);
+ if (err) {
+ pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
+ loop, err);
+ goto out;
+ }
+ }
+
+out:
+ drm_mm_remove_node(&resv);
+ return err;
+err_obj:
+ i915_gem_object_put(obj);
+ goto out;
+}
+
+int i915_gem_object_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_object),
+ SUBTEST(igt_phys_object),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
+
+int i915_gem_object_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_huge),
+ SUBTEST(igt_partial_tiling),
+ SUBTEST(igt_mmap_offset_exhaustion),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
new file mode 100644
index 000000000000..98b7aac41eec
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -0,0 +1,882 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_context.h"
+#include "mock_gem_device.h"
+
+static int igt_add_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request;
+ int err = -ENOMEM;
+
+ /* Basic preliminary test to create a request and let it loose! */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ request = mock_request(i915->engine[RCS],
+ i915->kernel_context,
+ HZ / 10);
+ if (!request)
+ goto out_unlock;
+
+ i915_add_request(request);
+
+ err = 0;
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int igt_wait_request(void *arg)
+{
+ const long T = HZ / 4;
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request;
+ int err = -EINVAL;
+
+ /* Submit a request, then wait upon it */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ if (!request) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
+ pr_err("request wait succeeded (expected timeout before submit!)\n");
+ goto out_unlock;
+ }
+
+ if (i915_gem_request_completed(request)) {
+ pr_err("request completed before submit!!\n");
+ goto out_unlock;
+ }
+
+ i915_add_request(request);
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
+ goto out_unlock;
+ }
+
+ if (i915_gem_request_completed(request)) {
+ pr_err("request completed immediately!\n");
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
+ pr_err("request wait succeeded (expected timeout!)\n");
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ pr_err("request wait timed out!\n");
+ goto out_unlock;
+ }
+
+ if (!i915_gem_request_completed(request)) {
+ pr_err("request not complete after waiting!\n");
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ pr_err("request wait timed out when already complete!\n");
+ goto out_unlock;
+ }
+
+ err = 0;
+out_unlock:
+ mock_device_flush(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int igt_fence_wait(void *arg)
+{
+ const long T = HZ / 4;
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request;
+ int err = -EINVAL;
+
+ /* Submit a request, treat it as a fence and wait upon it */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ if (!request) {
+ err = -ENOMEM;
+ goto out_locked;
+ }
+ mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */
+
+ if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
+ pr_err("fence wait success before submit (expected timeout)!\n");
+ goto out_device;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_add_request(request);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (dma_fence_is_signaled(&request->fence)) {
+ pr_err("fence signaled immediately!\n");
+ goto out_device;
+ }
+
+ if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
+ pr_err("fence wait success after submit (expected timeout)!\n");
+ goto out_device;
+ }
+
+ if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
+ pr_err("fence wait timed out (expected success)!\n");
+ goto out_device;
+ }
+
+ if (!dma_fence_is_signaled(&request->fence)) {
+ pr_err("fence unsignaled after waiting!\n");
+ goto out_device;
+ }
+
+ if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
+ pr_err("fence wait timed out when complete (expected success)!\n");
+ goto out_device;
+ }
+
+ err = 0;
+out_device:
+ mutex_lock(&i915->drm.struct_mutex);
+out_locked:
+ mock_device_flush(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int igt_request_rewind(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request, *vip;
+ struct i915_gem_context *ctx[2];
+ int err = -EINVAL;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx[0] = mock_context(i915, "A");
+ request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
+ if (!request) {
+ err = -ENOMEM;
+ goto err_context_0;
+ }
+
+ i915_gem_request_get(request);
+ i915_add_request(request);
+
+ ctx[1] = mock_context(i915, "B");
+ vip = mock_request(i915->engine[RCS], ctx[1], 0);
+ if (!vip) {
+ err = -ENOMEM;
+ goto err_context_1;
+ }
+
+ /* Simulate preemption by manual reordering */
+ if (!mock_cancel_request(request)) {
+ pr_err("failed to cancel request (already executed)!\n");
+ i915_add_request(vip);
+ goto err_context_1;
+ }
+ i915_gem_request_get(vip);
+ i915_add_request(vip);
+ request->engine->submit_request(request);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (i915_wait_request(vip, 0, HZ) == -ETIME) {
+ pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
+ vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
+ goto err;
+ }
+
+ if (i915_gem_request_completed(request)) {
+ pr_err("low priority request already completed\n");
+ goto err;
+ }
+
+ err = 0;
+err:
+ i915_gem_request_put(vip);
+ mutex_lock(&i915->drm.struct_mutex);
+err_context_1:
+ mock_context_close(ctx[1]);
+ i915_gem_request_put(request);
+err_context_0:
+ mock_context_close(ctx[0]);
+ mock_device_flush(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+int i915_gem_request_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_add_request),
+ SUBTEST(igt_wait_request),
+ SUBTEST(igt_fence_wait),
+ SUBTEST(igt_request_rewind),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+ drm_dev_unref(&i915->drm);
+
+ return err;
+}
+
+struct live_test {
+ struct drm_i915_private *i915;
+ const char *func;
+ const char *name;
+
+ unsigned int reset_count;
+};
+
+static int begin_live_test(struct live_test *t,
+ struct drm_i915_private *i915,
+ const char *func,
+ const char *name)
+{
+ int err;
+
+ t->i915 = i915;
+ t->func = func;
+ t->name = name;
+
+ err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ if (err) {
+ pr_err("%s(%s): failed to idle before, with err=%d!",
+ func, name, err);
+ return err;
+ }
+
+ i915->gpu_error.missed_irq_rings = 0;
+ t->reset_count = i915_reset_count(&i915->gpu_error);
+
+ return 0;
+}
+
+static int end_live_test(struct live_test *t)
+{
+ struct drm_i915_private *i915 = t->i915;
+
+ i915_gem_retire_requests(i915);
+
+ if (wait_for(intel_engines_are_idle(i915), 10)) {
+ pr_err("%s(%s): GPU not idle\n", t->func, t->name);
+ return -EIO;
+ }
+
+ if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
+ pr_err("%s(%s): GPU was reset %d times!\n",
+ t->func, t->name,
+ i915_reset_count(&i915->gpu_error) - t->reset_count);
+ return -EIO;
+ }
+
+ if (i915->gpu_error.missed_irq_rings) {
+ pr_err("%s(%s): Missed interrupts on engines %lx\n",
+ t->func, t->name, i915->gpu_error.missed_irq_rings);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int live_nop_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_test t;
+ unsigned int id;
+ int err;
+
+ /* Submit various sized batches of empty requests, to each engine
+ * (individually), and wait for the batch to complete. We can check
+ * the overhead of submitting requests to the hardware.
+ */
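+ /* Batch sizes step through the primes up to 8192; the single-request
+ * batch gives a baseline latency, and the largest completed batch the
+ * amortised per-request cost, both reported at the end.
+ */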
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+ struct drm_i915_gem_request *request;
+ unsigned long n, prime;
+ ktime_t times[2] = {};
+
+ err = begin_live_test(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_unlock;
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
+ for (n = 0; n < prime; n++) {
+ request = i915_gem_request_alloc(engine,
+ i915->kernel_context);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ goto out_unlock;
+ }
+
+ /* This space is left intentionally blank.
+ *
+ * We do not actually want to perform any
+ * action with this request, we just want
+ * to measure the latency in allocation
+ * and submission of our breadcrumbs -
+ * ensuring that the bare request is sufficient
+ * for the system to work (i.e. proper HEAD
+ * tracking of the rings, interrupt handling,
+ * etc). It also gives us the lowest bounds
+ * for latency.
+ */
+
+ i915_add_request(request);
+ }
+ i915_wait_request(request,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = end_live_test(&t);
+ if (err)
+ goto out_unlock;
+
+ pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ engine->name,
+ ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static struct i915_vma *empty_batch(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
+ if (err)
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static struct drm_i915_gem_request *
+empty_request(struct intel_engine_cs *engine,
+ struct i915_vma *batch)
+{
+ struct drm_i915_gem_request *request;
+ int err;
+
+ request = i915_gem_request_alloc(engine,
+ engine->i915->kernel_context);
+ if (IS_ERR(request))
+ return request;
+
+ err = engine->emit_flush(request, EMIT_INVALIDATE);
+ if (err)
+ goto out_request;
+
+ err = i915_switch_context(request);
+ if (err)
+ goto out_request;
+
+ err = engine->emit_bb_start(request,
+ batch->node.start,
+ batch->node.size,
+ I915_DISPATCH_SECURE);
+ if (err)
+ goto out_request;
+
+out_request:
+ __i915_add_request(request, err == 0);
+ return err ? ERR_PTR(err) : request;
+}
+
+static int live_empty_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_test t;
+ struct i915_vma *batch;
+ unsigned int id;
+ int err = 0;
+
+ /* Submit various sized batches of empty requests, to each engine
+ * (individually), and wait for the batch to complete. We can check
+ * the overhead of submitting requests to the hardware.
+ */
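+ /* Unlike live_nop_request, every request here also emits a flush, a
+ * context switch and a (single MI_BATCH_BUFFER_END) batch, so the
+ * measured latency includes real batch dispatch.
+ */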
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ batch = empty_batch(i915);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unlock;
+ }
+
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+ struct drm_i915_gem_request *request;
+ unsigned long n, prime;
+ ktime_t times[2] = {};
+
+ err = begin_live_test(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_batch;
+
+ /* Warmup / preload */
+ request = empty_request(engine, batch);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ goto out_batch;
+ }
+ i915_wait_request(request,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
+ for (n = 0; n < prime; n++) {
+ request = empty_request(engine, batch);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ goto out_batch;
+ }
+ }
+ i915_wait_request(request,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = end_live_test(&t);
+ if (err)
+ goto out_batch;
+
+ pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ engine->name,
+ ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+ }
+
+out_batch:
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
+{
+ struct i915_gem_context *ctx = i915->kernel_context;
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ const int gen = INTEL_GEN(i915);
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ goto err;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ if (gen >= 8) {
+ *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *cmd++ = lower_32_bits(vma->node.start);
+ *cmd++ = upper_32_bits(vma->node.start);
+ } else if (gen >= 6) {
+ *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
+ *cmd++ = lower_32_bits(vma->node.start);
+ } else if (gen >= 4) {
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cmd++ = lower_32_bits(vma->node.start);
+ } else {
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
+ *cmd++ = lower_32_bits(vma->node.start);
+ }
+ *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
+
+ wmb();
+ i915_gem_object_unpin_map(obj);
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
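+/* Terminate the self-referencing batch by overwriting its first dword with
+ * MI_BATCH_BUFFER_END, allowing the queued requests to complete.
+ */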
+static int recursive_batch_resolve(struct i915_vma *batch)
+{
+ u32 *cmd;
+
+ cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
+
+ *cmd = MI_BATCH_BUFFER_END;
+ wmb();
+
+ i915_gem_object_unpin_map(batch->obj);
+
+ return 0;
+}
+
+static int live_all_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_request *request[I915_NUM_ENGINES];
+ struct i915_vma *batch;
+ struct live_test t;
+ unsigned int id;
+ int err;
+
+ /* Check we can submit requests to all engines simultaneously. We
+ * send a recursive batch to each engine - checking that we don't
+ * block doing so, and that they don't complete too soon.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
+ batch = recursive_batch(i915);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
+ goto out_unlock;
+ }
+
+ for_each_engine(engine, i915, id) {
+ request[id] = i915_gem_request_alloc(engine,
+ i915->kernel_context);
+ if (IS_ERR(request[id])) {
+ err = PTR_ERR(request[id]);
+ pr_err("%s: Request allocation failed with err=%d\n",
+ __func__, err);
+ goto out_request;
+ }
+
+ err = engine->emit_flush(request[id], EMIT_INVALIDATE);
+ GEM_BUG_ON(err);
+
+ err = i915_switch_context(request[id]);
+ GEM_BUG_ON(err);
+
+ err = engine->emit_bb_start(request[id],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[id]->batch = batch;
+
+ if (!i915_gem_object_has_active_reference(batch->obj)) {
+ i915_gem_object_get(batch->obj);
+ i915_gem_object_set_active_reference(batch->obj);
+ }
+
+ i915_vma_move_to_active(batch, request[id], 0);
+ i915_gem_request_get(request[id]);
+ i915_add_request(request[id]);
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (i915_gem_request_completed(request[id])) {
+ pr_err("%s(%s): request completed too early!\n",
+ __func__, engine->name);
+ err = -EINVAL;
+ goto out_request;
+ }
+ }
+
+ err = recursive_batch_resolve(batch);
+ if (err) {
+ pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
+ goto out_request;
+ }
+
+ for_each_engine(engine, i915, id) {
+ long timeout;
+
+ timeout = i915_wait_request(request[id],
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0) {
+ err = timeout;
+ pr_err("%s: error waiting for request on %s, err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+
+ GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+ i915_gem_request_put(request[id]);
+ request[id] = NULL;
+ }
+
+ err = end_live_test(&t);
+
+out_request:
+ for_each_engine(engine, i915, id)
+ if (request[id])
+ i915_gem_request_put(request[id]);
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int live_sequential_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
+ struct drm_i915_gem_request *prev = NULL;
+ struct intel_engine_cs *engine;
+ struct live_test t;
+ unsigned int id;
+ int err;
+
+ /* Check we can submit requests to all engines sequentially, such
+ * that each successive request waits for the earlier ones. This
+ * tests that we don't execute requests out of order, even though
+ * they are running on independent engines.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_vma *batch;
+
+ batch = recursive_batch(i915);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ pr_err("%s: Unable to create batch for %s, err=%d\n",
+ __func__, engine->name, err);
+ goto out_unlock;
+ }
+
+ request[id] = i915_gem_request_alloc(engine,
+ i915->kernel_context);
+ if (IS_ERR(request[id])) {
+ err = PTR_ERR(request[id]);
+ pr_err("%s: Request allocation failed for %s with err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+
+ if (prev) {
+ err = i915_gem_request_await_dma_fence(request[id],
+ &prev->fence);
+ if (err) {
+ i915_add_request(request[id]);
+ pr_err("%s: Request await failed for %s with err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+ }
+
+ err = engine->emit_flush(request[id], EMIT_INVALIDATE);
+ GEM_BUG_ON(err);
+
+ err = i915_switch_context(request[id]);
+ GEM_BUG_ON(err);
+
+ err = engine->emit_bb_start(request[id],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[id]->batch = batch;
+
+ i915_vma_move_to_active(batch, request[id], 0);
+ i915_gem_object_set_active_reference(batch->obj);
+ i915_vma_get(batch);
+
+ i915_gem_request_get(request[id]);
+ i915_add_request(request[id]);
+
+ prev = request[id];
+ }
+
+ for_each_engine(engine, i915, id) {
+ long timeout;
+
+ if (i915_gem_request_completed(request[id])) {
+ pr_err("%s(%s): request completed too early!\n",
+ __func__, engine->name);
+ err = -EINVAL;
+ goto out_request;
+ }
+
+ err = recursive_batch_resolve(request[id]->batch);
+ if (err) {
+ pr_err("%s: failed to resolve batch, err=%d\n",
+ __func__, err);
+ goto out_request;
+ }
+
+ timeout = i915_wait_request(request[id],
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0) {
+ err = timeout;
+ pr_err("%s: error waiting for request on %s, err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+
+ GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+ }
+
+ err = end_live_test(&t);
+
+out_request:
+ for_each_engine(engine, i915, id) {
+ u32 *cmd;
+
+ if (!request[id])
+ break;
+
+ cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+ I915_MAP_WC);
+ if (!IS_ERR(cmd)) {
+ *cmd = MI_BATCH_BUFFER_END;
+ wmb();
+ i915_gem_object_unpin_map(request[id]->batch->obj);
+ }
+
+ i915_vma_put(request[id]->batch);
+ i915_gem_request_put(request[id]);
+ }
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+int i915_gem_request_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_nop_request),
+ SUBTEST(live_all_engines),
+ SUBTEST(live_sequential_engines),
+ SUBTEST(live_empty_request),
+ };
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
new file mode 100644
index 000000000000..18b174d855ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -0,0 +1,19 @@
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and a legal C identifier.
+ *
+ * The function should be of type int function(struct drm_i915_private *i915).
+ * It may be conditionally compiled using
+ * #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/drv_selftest
+ */
+selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
+selftest(uncore, intel_uncore_live_selftests)
+selftest(requests, i915_gem_request_live_selftests)
+selftest(objects, i915_gem_object_live_selftests)
+selftest(dmabuf, i915_gem_dmabuf_live_selftests)
+selftest(coherency, i915_gem_coherency_live_selftests)
+selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(contexts, i915_gem_context_live_selftests)
+selftest(hangcheck, intel_hangcheck_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
new file mode 100644
index 000000000000..be9a9ebf5692
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -0,0 +1,20 @@
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and a legal C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/drv_selftest
+ */
+selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
+selftest(scatterlist, scatterlist_mock_selftests)
+selftest(uncore, intel_uncore_mock_selftests)
+selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
+selftest(requests, i915_gem_request_mock_selftests)
+selftest(objects, i915_gem_object_mock_selftests)
+selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
+selftest(vma, i915_vma_mock_selftests)
+selftest(evict, i915_gem_evict_mock_selftests)
+selftest(gtt, i915_gem_gtt_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
new file mode 100644
index 000000000000..c17c83c30637
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "i915_random.h"
+
+static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
+{
+ return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
+}
+
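+/* Shuffle the array in place. Note that the swap index is drawn from the
+ * full [0, count) range on every iteration, so this is a cheap approximate
+ * shuffle rather than a strict Fisher-Yates; it is only used to randomise
+ * the order in which the tests touch their resources.
+ */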
+void i915_random_reorder(unsigned int *order, unsigned int count,
+ struct rnd_state *state)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < count; i++) {
+ BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32));
+ j = i915_prandom_u32_max_state(count, state);
+ swap(order[i], order[j]);
+ }
+}
+
+unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
+{
+ unsigned int *order, i;
+
+ order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
+ if (!order)
+ return order;
+
+ for (i = 0; i < count; i++)
+ order[i] = i;
+
+ i915_random_reorder(order, count, state);
+ return order;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
new file mode 100644
index 000000000000..b9c334ce6cd9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_SELFTESTS_RANDOM_H__
+#define __I915_SELFTESTS_RANDOM_H__
+
+#include <linux/random.h>
+
+#include "../i915_selftest.h"
+
+#define I915_RND_STATE_INITIALIZER(x) ({ \
+ struct rnd_state state__; \
+ prandom_seed_state(&state__, (x)); \
+ state__; \
+})
+
+#define I915_RND_STATE(name__) \
+ struct rnd_state name__ = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed)
+
+#define I915_RND_SUBSTATE(name__, parent__) \
+ struct rnd_state name__ = I915_RND_STATE_INITIALIZER(prandom_u32_state(&(parent__)))
+
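+/* Typical usage:
+ *
+ *	I915_RND_STATE(prng);
+ *	order = i915_random_order(count, &prng);
+ *
+ * so that every test derives a reproducible sequence from the global
+ * st_random_seed module parameter.
+ */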
+unsigned int *i915_random_order(unsigned int count,
+ struct rnd_state *state);
+void i915_random_reorder(unsigned int *order,
+ unsigned int count,
+ struct rnd_state *state);
+
+#endif /* !__I915_SELFTESTS_RANDOM_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
new file mode 100644
index 000000000000..addc5a599c4a
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/random.h>
+
+#include "../i915_drv.h"
+#include "../i915_selftest.h"
+
+struct i915_selftest i915_selftest __read_mostly = {
+ .timeout_ms = 1000,
+};
+
+int i915_mock_sanitycheck(void)
+{
+ pr_info(DRIVER_NAME ": %s() - ok!\n", __func__);
+ return 0;
+}
+
+int i915_live_sanitycheck(struct drm_i915_private *i915)
+{
+ pr_info("%s: %s() - ok!\n", i915->drm.driver->name, __func__);
+ return 0;
+}
+
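+/* The selftest lists are #included several times below: first to build an
+ * enum of test ids, then to build the table of tests, and finally to create
+ * a module parameter for selecting each test individually.
+ */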
+enum {
+#define selftest(name, func) mock_##name,
+#include "i915_mock_selftests.h"
+#undef selftest
+};
+
+enum {
+#define selftest(name, func) live_##name,
+#include "i915_live_selftests.h"
+#undef selftest
+};
+
+struct selftest {
+ bool enabled;
+ const char *name;
+ union {
+ int (*mock)(void);
+ int (*live)(struct drm_i915_private *);
+ };
+};
+
+#define selftest(n, f) [mock_##n] = { .name = #n, { .mock = f } },
+static struct selftest mock_selftests[] = {
+#include "i915_mock_selftests.h"
+};
+#undef selftest
+
+#define selftest(n, f) [live_##n] = { .name = #n, { .live = f } },
+static struct selftest live_selftests[] = {
+#include "i915_live_selftests.h"
+};
+#undef selftest
+
+/* Embed the line number into the parameter name so that we can order tests */
+#define selftest(n, func) selftest_0(n, func, param(n))
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __mock_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, mock_selftests[mock_##n].enabled, bool, 0400);
+#include "i915_mock_selftests.h"
+#undef selftest_0
+#undef param
+
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __live_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, live_selftests[live_##n].enabled, bool, 0400);
+#include "i915_live_selftests.h"
+#undef selftest_0
+#undef param
+#undef selftest
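+
+/* As an illustration, the first entry of i915_mock_selftests.h expands to a
+ * module parameter named roughly "igt__11__mock_sanitycheck": embedding the
+ * header's line number means that sorting the parameter names reproduces the
+ * execution order of the tests.
+ */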
+
+static void set_default_test_all(struct selftest *st, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ if (st[i].enabled)
+ return;
+
+ for (i = 0; i < count; i++)
+ st[i].enabled = true;
+}
+
+static int __run_selftests(const char *name,
+ struct selftest *st,
+ unsigned int count,
+ void *data)
+{
+ int err = 0;
+
+ while (!i915_selftest.random_seed)
+ i915_selftest.random_seed = get_random_int();
+
+ i915_selftest.timeout_jiffies =
+ i915_selftest.timeout_ms ?
+ msecs_to_jiffies_timeout(i915_selftest.timeout_ms) :
+ MAX_SCHEDULE_TIMEOUT;
+
+ set_default_test_all(st, count);
+
+ pr_info(DRIVER_NAME ": Performing %s selftests with st_random_seed=0x%x st_timeout=%u\n",
+ name, i915_selftest.random_seed, i915_selftest.timeout_ms);
+
+ /* Tests are listed in order in i915_*_selftests.h */
+ for (; count--; st++) {
+ if (!st->enabled)
+ continue;
+
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+
+ pr_debug(DRIVER_NAME ": Running %s\n", st->name);
+ if (data)
+ err = st->live(data);
+ else
+ err = st->mock();
+ if (err == -EINTR && !signal_pending(current))
+ err = 0;
+ if (err)
+ break;
+ }
+
+ if (WARN(err > 0 || err == -ENOTTY,
+ "%s returned %d, conflicting with selftest's magic values!\n",
+ st->name, err))
+ err = -1;
+
+ return err;
+}
+
+#define run_selftests(x, data) \
+ __run_selftests(#x, x##_selftests, ARRAY_SIZE(x##_selftests), data)
+
+int i915_mock_selftests(void)
+{
+ int err;
+
+ if (!i915_selftest.mock)
+ return 0;
+
+ err = run_selftests(mock, NULL);
+ if (err) {
+ i915_selftest.mock = err;
+ return err;
+ }
+
+ if (i915_selftest.mock < 0) {
+ i915_selftest.mock = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
+int i915_live_selftests(struct pci_dev *pdev)
+{
+ int err;
+
+ if (!i915_selftest.live)
+ return 0;
+
+ err = run_selftests(live, to_i915(pci_get_drvdata(pdev)));
+ if (err) {
+ i915_selftest.live = err;
+ return err;
+ }
+
+ if (i915_selftest.live < 0) {
+ i915_selftest.live = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
+int __i915_subtests(const char *caller,
+ const struct i915_subtest *st,
+ unsigned int count,
+ void *data)
+{
+ int err;
+
+ for (; count--; st++) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+
+ pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ err = st->func(data);
+ if (err && err != -EINTR) {
+ pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
+ caller, st->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
+{
+ va_list va;
+
+ if (!signal_pending(current)) {
+ cond_resched();
+ if (time_before(jiffies, timeout))
+ return false;
+ }
+
+ if (fmt) {
+ va_start(va, fmt);
+ vprintk(fmt, va);
+ va_end(va);
+ }
+
+ return true;
+}
+
+module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400);
+module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400);
+
+module_param_named_unsafe(mock_selftests, i915_selftest.mock, int, 0400);
+MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardware (0:disabled [default], 1:run tests then load driver, -1:run tests then exit module)");
+
+module_param_named_unsafe(live_selftests, i915_selftest.live, int, 0400);
+MODULE_PARM_DESC(live_selftests, "Run selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
new file mode 100644
index 000000000000..ad56566e24db
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -0,0 +1,746 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_gem_device.h"
+#include "mock_context.h"
+
+static bool assert_vma(struct i915_vma *vma,
+ struct drm_i915_gem_object *obj,
+ struct i915_gem_context *ctx)
+{
+ bool ok = true;
+
+ if (vma->vm != &ctx->ppgtt->base) {
+ pr_err("VMA created with wrong VM\n");
+ ok = false;
+ }
+
+ if (vma->size != obj->base.size) {
+ pr_err("VMA created with wrong size, found %llu, expected %zu\n",
+ vma->size, obj->base.size);
+ ok = false;
+ }
+
+ if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ pr_err("VMA created with wrong type [%d]\n",
+ vma->ggtt_view.type);
+ ok = false;
+ }
+
+ return ok;
+}
+
+static struct i915_vma *
+checked_vma_instance(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ struct i915_ggtt_view *view)
+{
+ struct i915_vma *vma;
+ bool ok = true;
+
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma))
+ return vma;
+
+ /* Manual checks, will be reinforced by i915_vma_compare! */
+ if (vma->vm != vm) {
+ pr_err("VMA's vm [%p] does not match request [%p]\n",
+ vma->vm, vm);
+ ok = false;
+ }
+
+ if (i915_is_ggtt(vm) != i915_vma_is_ggtt(vma)) {
+ pr_err("VMA ggtt status [%d] does not match parent [%d]\n",
+ i915_vma_is_ggtt(vma), i915_is_ggtt(vm));
+ ok = false;
+ }
+
+ if (i915_vma_compare(vma, vm, view)) {
+		pr_err("i915_vma_compare failed with creation parameters!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (i915_vma_compare(vma, vma->vm,
+ i915_vma_is_ggtt(vma) ? &vma->ggtt_view : NULL)) {
+ pr_err("i915_vma_compare failed with itself\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!ok) {
+ pr_err("i915_vma_compare failed to detect the difference!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return vma;
+}
+
+static int create_vmas(struct drm_i915_private *i915,
+ struct list_head *objects,
+ struct list_head *contexts)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ int pinned;
+
+ list_for_each_entry(obj, objects, st_link) {
+ for (pinned = 0; pinned <= 1; pinned++) {
+ list_for_each_entry(ctx, contexts, link) {
+ struct i915_address_space *vm =
+ &ctx->ppgtt->base;
+ struct i915_vma *vma;
+ int err;
+
+ vma = checked_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ if (!assert_vma(vma, obj, ctx)) {
+ pr_err("VMA lookup/create failed\n");
+ return -EINVAL;
+ }
+
+ if (!pinned) {
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ pr_err("Failed to pin VMA\n");
+ return err;
+ }
+ } else {
+ i915_vma_unpin(vma);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int igt_vma_create(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj, *on;
+ struct i915_gem_context *ctx, *cn;
+ unsigned long num_obj, num_ctx;
+ unsigned long no, nc;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(contexts);
+ LIST_HEAD(objects);
+ int err;
+
+	/* Exercise creating many vma amongst many objects, checking the
+ * vma creation and lookup routines.
+ */
+
+ no = 0;
+ for_each_prime_number(num_obj, ULONG_MAX - 1) {
+ for (; no < num_obj; no++) {
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ goto out;
+
+ list_add(&obj->st_link, &objects);
+ }
+
+ nc = 0;
+ for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
+ for (; nc < num_ctx; nc++) {
+ ctx = mock_context(i915, "mock");
+ if (!ctx)
+ goto out;
+
+ list_move(&ctx->link, &contexts);
+ }
+
+ err = create_vmas(i915, &objects, &contexts);
+ if (err)
+ goto out;
+
+ if (igt_timeout(end_time,
+ "%s timed out: after %lu objects in %lu contexts\n",
+ __func__, no, nc))
+ goto end;
+ }
+
+ list_for_each_entry_safe(ctx, cn, &contexts, link)
+ mock_context_close(ctx);
+ }
+
+end:
+ /* Final pass to lookup all created contexts */
+ err = create_vmas(i915, &objects, &contexts);
+out:
+ list_for_each_entry_safe(ctx, cn, &contexts, link)
+ mock_context_close(ctx);
+
+ list_for_each_entry_safe(obj, on, &objects, st_link)
+ i915_gem_object_put(obj);
+ return err;
+}
+
+struct pin_mode {
+ u64 size;
+ u64 flags;
+ bool (*assert)(const struct i915_vma *,
+ const struct pin_mode *mode,
+ int result);
+ const char *string;
+};
+
+static bool assert_pin_valid(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ if (result)
+ return false;
+
+ if (i915_vma_misplaced(vma, mode->size, 0, mode->flags))
+ return false;
+
+ return true;
+}
+
+__maybe_unused
+static bool assert_pin_e2big(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ return result == -E2BIG;
+}
+
+__maybe_unused
+static bool assert_pin_enospc(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ return result == -ENOSPC;
+}
+
+__maybe_unused
+static bool assert_pin_einval(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ return result == -EINVAL;
+}
+
+static int igt_vma_pin1(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const struct pin_mode modes[] = {
+#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
+#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
+#define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL)
+#define TOOBIG(sz, fl) __INVALID(sz, fl, assert_pin_e2big, E2BIG)
+#define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC)
+ VALID(0, PIN_GLOBAL),
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE),
+
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
+
+ VALID(4096, PIN_GLOBAL),
+ VALID(8192, PIN_GLOBAL),
+ VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
+ TOOBIG(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.base.total, PIN_GLOBAL),
+ TOOBIG(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+ TOOBIG(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
+ INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
+
+ VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+
+#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ /* Misusing BIAS is a programming error (it is not controllable
+		 * from userspace), so when debugging is enabled it explodes.
+ * However, the tests are still quite interesting for checking
+ * variable start, end and size.
+ */
+ NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
+ NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
+ NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+ NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+#endif
+ { },
+#undef NOSPACE
+#undef TOOBIG
+#undef INVALID
+#undef __INVALID
+#undef VALID
+ }, *m;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err = -EINVAL;
+
+ /* Exercise all the weird and wonderful i915_vma_pin requests,
+ * focusing on error handling of boundary conditions.
+ */
+
+ GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ goto out;
+
+ for (m = modes; m->assert; m++) {
+ err = i915_vma_pin(vma, m->size, 0, m->flags);
+ if (!m->assert(vma, m, err)) {
+ pr_err("%s to pin single page into GGTT with mode[%d:%s]: size=%llx flags=%llx, err=%d\n",
+ m->assert == assert_pin_valid ? "Failed" : "Unexpectedly succeeded",
+ (int)(m - modes), m->string, m->size, m->flags,
+ err);
+ if (!err)
+ i915_vma_unpin(vma);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!err) {
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("Failed to unbind single page from GGTT, err=%d\n", err);
+ goto out;
+ }
+ }
+ }
+
+ err = 0;
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
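+/* A 90° rotated view reads each column of the source bottom-up: the page
+ * backing position (x, y) of plane n sits at the plane offset, plus x,
+ * plus (height - y - 1) rows of stride pages.
+ */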
+static unsigned long rotated_index(const struct intel_rotation_info *r,
+ unsigned int n,
+ unsigned int x,
+ unsigned int y)
+{
+ return (r->plane[n].stride * (r->plane[n].height - y - 1) +
+ r->plane[n].offset + x);
+}
+
+static struct scatterlist *
+assert_rotated(struct drm_i915_gem_object *obj,
+ const struct intel_rotation_info *r, unsigned int n,
+ struct scatterlist *sg)
+{
+ unsigned int x, y;
+
+ for (x = 0; x < r->plane[n].width; x++) {
+ for (y = 0; y < r->plane[n].height; y++) {
+ unsigned long src_idx;
+ dma_addr_t src;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ src_idx = rotated_index(r, n, x, y);
+ src = i915_gem_object_get_dma_address(obj, src_idx);
+
+ if (sg_dma_len(sg) != PAGE_SIZE) {
+ pr_err("Invalid sg.length, found %d, expected %lu for rotated page (%d, %d) [src index %lu]\n",
+ sg_dma_len(sg), PAGE_SIZE,
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) != src) {
+ pr_err("Invalid address for rotated page (%d, %d) [src index %lu]\n",
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ sg = sg_next(sg);
+ }
+ }
+
+ return sg;
+}
+
+static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
+ const struct intel_rotation_plane_info *b)
+{
+ return a->width * a->height + b->width * b->height;
+}
+
+static int igt_vma_rotate(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_address_space *vm = &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ const struct intel_rotation_plane_info planes[] = {
+ { .width = 1, .height = 1, .stride = 1 },
+ { .width = 2, .height = 2, .stride = 2 },
+ { .width = 4, .height = 4, .stride = 4 },
+ { .width = 8, .height = 8, .stride = 8 },
+
+ { .width = 3, .height = 5, .stride = 3 },
+ { .width = 3, .height = 5, .stride = 4 },
+ { .width = 3, .height = 5, .stride = 5 },
+
+ { .width = 5, .height = 3, .stride = 5 },
+ { .width = 5, .height = 3, .stride = 7 },
+ { .width = 5, .height = 3, .stride = 9 },
+
+ { .width = 4, .height = 6, .stride = 6 },
+ { .width = 6, .height = 4, .stride = 6 },
+ { }
+ }, *a, *b;
+ const unsigned int max_pages = 64;
+ int err = -ENOMEM;
+
+ /* Create VMA for many different combinations of planes and check
+	 * that the page layout within the rotated VMA matches our expectations.
+ */
+
+ obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE);
+ if (IS_ERR(obj))
+ goto out;
+
+ for (a = planes; a->width; a++) {
+ for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
+ struct i915_ggtt_view view;
+ unsigned int n, max_offset;
+
+ max_offset = max(a->stride * a->height,
+ b->stride * b->height);
+ GEM_BUG_ON(max_offset > max_pages);
+ max_offset = max_pages - max_offset;
+
+ view.type = I915_GGTT_VIEW_ROTATED;
+ view.rotated.plane[0] = *a;
+ view.rotated.plane[1] = *b;
+
+ for_each_prime_number_from(view.rotated.plane[0].offset, 0, max_offset) {
+ for_each_prime_number_from(view.rotated.plane[1].offset, 0, max_offset) {
+ struct scatterlist *sg;
+ struct i915_vma *vma;
+
+ vma = checked_vma_instance(obj, vm, &view);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ pr_err("Failed to pin VMA, err=%d\n", err);
+ goto out_object;
+ }
+
+ if (vma->size != rotated_size(a, b) * PAGE_SIZE) {
+ pr_err("VMA is wrong size, expected %lu, found %llu\n",
+ PAGE_SIZE * rotated_size(a, b), vma->size);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->pages->nents != rotated_size(a, b)) {
+					pr_err("sg table is wrong size, expected %u, found %u nents\n",
+ rotated_size(a, b), vma->pages->nents);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->node.size < vma->size) {
+ pr_err("VMA binding too small, expected %llu, found %llu\n",
+ vma->size, vma->node.size);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->pages == obj->mm.pages) {
+ pr_err("VMA using unrotated object pages!\n");
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ sg = vma->pages->sgl;
+ for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
+ sg = assert_rotated(obj, &view.rotated, n, sg);
+ if (IS_ERR(sg)) {
+ pr_err("Inconsistent VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", n,
+ view.rotated.plane[0].width,
+ view.rotated.plane[0].height,
+ view.rotated.plane[0].stride,
+ view.rotated.plane[0].offset,
+ view.rotated.plane[1].width,
+ view.rotated.plane[1].height,
+ view.rotated.plane[1].stride,
+ view.rotated.plane[1].offset);
+ err = -EINVAL;
+ goto out_object;
+ }
+ }
+
+ i915_vma_unpin(vma);
+ }
+ }
+ }
+ }
+
+out_object:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+static bool assert_partial(struct drm_i915_gem_object *obj,
+ struct i915_vma *vma,
+ unsigned long offset,
+ unsigned long size)
+{
+ struct sgt_iter sgt;
+ dma_addr_t dma;
+
+ for_each_sgt_dma(dma, sgt, vma->pages) {
+ dma_addr_t src;
+
+ if (!size) {
+ pr_err("Partial scattergather list too long\n");
+ return false;
+ }
+
+ src = i915_gem_object_get_dma_address(obj, offset);
+ if (src != dma) {
+ pr_err("DMA mismatch for partial page offset %lu\n",
+ offset);
+ return false;
+ }
+
+ offset++;
+ size--;
+ }
+
+ return true;
+}
+
+static bool assert_pin(struct i915_vma *vma,
+ struct i915_ggtt_view *view,
+ u64 size,
+ const char *name)
+{
+ bool ok = true;
+
+ if (vma->size != size) {
+ pr_err("(%s) VMA is wrong size, expected %llu, found %llu\n",
+ name, size, vma->size);
+ ok = false;
+ }
+
+ if (vma->node.size < vma->size) {
+ pr_err("(%s) VMA binding too small, expected %llu, found %llu\n",
+ name, vma->size, vma->node.size);
+ ok = false;
+ }
+
+ if (view && view->type != I915_GGTT_VIEW_NORMAL) {
+ if (memcmp(&vma->ggtt_view, view, sizeof(*view))) {
+ pr_err("(%s) VMA mismatch upon creation!\n",
+ name);
+ ok = false;
+ }
+
+ if (vma->pages == vma->obj->mm.pages) {
+ pr_err("(%s) VMA using original object pages!\n",
+ name);
+ ok = false;
+ }
+ } else {
+ if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ pr_err("Not the normal ggtt view! Found %d\n",
+ vma->ggtt_view.type);
+ ok = false;
+ }
+
+ if (vma->pages != vma->obj->mm.pages) {
+ pr_err("VMA not using object pages!\n");
+ ok = false;
+ }
+ }
+
+ return ok;
+}
+
+static int igt_vma_partial(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_address_space *vm = &i915->ggtt.base;
+ const unsigned int npages = 1021; /* prime! */
+ struct drm_i915_gem_object *obj;
+ const struct phase {
+ const char *name;
+ } phases[] = {
+ { "create" },
+ { "lookup" },
+ { },
+ }, *p;
+ unsigned int sz, offset;
+ struct i915_vma *vma;
+ int err = -ENOMEM;
+
+ /* Create lots of different VMA for the object and check that
+ * we are returned the same VMA when we later request the same range.
+ */
+
+ obj = i915_gem_object_create_internal(i915, npages*PAGE_SIZE);
+ if (IS_ERR(obj))
+ goto out;
+
+ for (p = phases; p->name; p++) { /* exercise both create/lookup */
+ unsigned int count, nvma;
+
+ nvma = 0;
+ for_each_prime_number_from(sz, 1, npages) {
+ for_each_prime_number_from(offset, 0, npages - sz) {
+ struct i915_ggtt_view view;
+
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.partial.offset = offset;
+ view.partial.size = sz;
+
+ if (sz == npages)
+ view.type = I915_GGTT_VIEW_NORMAL;
+
+ vma = checked_vma_instance(obj, vm, &view);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_object;
+
+ if (!assert_pin(vma, &view, sz*PAGE_SIZE, p->name)) {
+ pr_err("(%s) Inconsistent partial pinning for (offset=%d, size=%d)\n",
+ p->name, offset, sz);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (!assert_partial(obj, vma, offset, sz)) {
+ pr_err("(%s) Inconsistent partial pages for (offset=%d, size=%d)\n",
+ p->name, offset, sz);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ i915_vma_unpin(vma);
+ nvma++;
+ }
+ }
+
+ count = 0;
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
+ count++;
+ if (count != nvma) {
+			pr_err("(%s) Not all partial vma were recorded on the obj->vma_list: found %u, expected %u\n",
+ p->name, count, nvma);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ /* Check that we did create the whole object mapping */
+ vma = checked_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_object;
+
+ if (!assert_pin(vma, NULL, obj->base.size, p->name)) {
+ pr_err("(%s) inconsistent full pin\n", p->name);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ i915_vma_unpin(vma);
+
+ count = 0;
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
+ count++;
+ if (count != nvma) {
+ pr_err("(%s) allocated an extra full vma!\n", p->name);
+ err = -EINVAL;
+ goto out_object;
+ }
+ }
+
+out_object:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+int i915_vma_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_vma_create),
+ SUBTEST(igt_vma_pin1),
+ SUBTEST(igt_vma_rotate),
+ SUBTEST(igt_vma_partial),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
+
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
new file mode 100644
index 000000000000..19860a372d90
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#include "mock_gem_device.h"
+#include "mock_engine.h"
+
+static int check_rbtree(struct intel_engine_cs *engine,
+ const unsigned long *bitmap,
+ const struct intel_wait *waiters,
+ const int count)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *rb;
+ int n;
+
+ if (&b->irq_wait->node != rb_first(&b->waiters)) {
+ pr_err("First waiter does not match first element of wait-tree\n");
+ return -EINVAL;
+ }
+
+ n = find_first_bit(bitmap, count);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+ int idx = w - waiters;
+
+ if (!test_bit(idx, bitmap)) {
+ pr_err("waiter[%d, seqno=%d] removed but still in wait-tree\n",
+ idx, w->seqno);
+ return -EINVAL;
+ }
+
+ if (n != idx) {
+ pr_err("waiter[%d, seqno=%d] does not match expected next element in tree [%d]\n",
+ idx, w->seqno, n);
+ return -EINVAL;
+ }
+
+ n = find_next_bit(bitmap, count, n + 1);
+ }
+
+ return 0;
+}
+
+static int check_completion(struct intel_engine_cs *engine,
+ const unsigned long *bitmap,
+ const struct intel_wait *waiters,
+ const int count)
+{
+ int n;
+
+ for (n = 0; n < count; n++) {
+ if (intel_wait_complete(&waiters[n]) != !!test_bit(n, bitmap))
+ continue;
+
+ pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n",
+ n, waiters[n].seqno,
+ intel_wait_complete(&waiters[n]) ? "complete" : "active",
+ test_bit(n, bitmap) ? "active" : "complete");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_rbtree_empty(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ if (b->irq_wait) {
+ pr_err("Empty breadcrumbs still has a waiter\n");
+ return -EINVAL;
+ }
+
+ if (!RB_EMPTY_ROOT(&b->waiters)) {
+ pr_err("Empty breadcrumbs, but wait-tree not empty\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igt_random_insert_remove(void *arg)
+{
+ const u32 seqno_bias = 0x1000;
+ I915_RND_STATE(prng);
+ struct intel_engine_cs *engine = arg;
+ struct intel_wait *waiters;
+ const int count = 4096;
+ unsigned int *order;
+ unsigned long *bitmap;
+ int err = -ENOMEM;
+ int n;
+
+ mock_engine_reset(engine);
+
+ waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ if (!waiters)
+ goto out_engines;
+
+ bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
+ GFP_TEMPORARY);
+ if (!bitmap)
+ goto out_waiters;
+
+ order = i915_random_order(count, &prng);
+ if (!order)
+ goto out_bitmap;
+
+ for (n = 0; n < count; n++)
+ intel_wait_init_for_seqno(&waiters[n], seqno_bias + n);
+
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err)
+ goto out_order;
+
+	/* Add waiters to, and then remove them from, the rbtree in random
+	 * order. At each step, we verify that the rbtree is correctly ordered.
+	 */
+ for (n = 0; n < count; n++) {
+ int i = order[n];
+
+ intel_engine_add_wait(engine, &waiters[i]);
+ __set_bit(i, bitmap);
+
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err)
+ goto out_order;
+ }
+
+ i915_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ int i = order[n];
+
+ intel_engine_remove_wait(engine, &waiters[i]);
+ __clear_bit(i, bitmap);
+
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err)
+ goto out_order;
+ }
+
+ err = check_rbtree_empty(engine);
+out_order:
+ kfree(order);
+out_bitmap:
+ kfree(bitmap);
+out_waiters:
+ drm_free_large(waiters);
+out_engines:
+ mock_engine_flush(engine);
+ return err;
+}
+
+static int igt_insert_complete(void *arg)
+{
+ const u32 seqno_bias = 0x1000;
+ struct intel_engine_cs *engine = arg;
+ struct intel_wait *waiters;
+ const int count = 4096;
+ unsigned long *bitmap;
+ int err = -ENOMEM;
+ int n, m;
+
+ mock_engine_reset(engine);
+
+ waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ if (!waiters)
+ goto out_engines;
+
+ bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
+ GFP_TEMPORARY);
+ if (!bitmap)
+ goto out_waiters;
+
+ for (n = 0; n < count; n++) {
+ intel_wait_init_for_seqno(&waiters[n], n + seqno_bias);
+ intel_engine_add_wait(engine, &waiters[n]);
+ __set_bit(n, bitmap);
+ }
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err)
+ goto out_bitmap;
+
+ /* On each step, we advance the seqno so that several waiters are then
+ * complete (we increase the seqno by increasingly larger values to
+ * retire more and more waiters at once). All retired waiters should
+	 * be woken and removed from the rbtree, which is what we then check.
+ */
+ for (n = 0; n < count; n = m) {
+ int seqno = 2 * n;
+
+ GEM_BUG_ON(find_first_bit(bitmap, count) != n);
+
+ if (intel_wait_complete(&waiters[n])) {
+ pr_err("waiter[%d, seqno=%d] completed too early\n",
+ n, waiters[n].seqno);
+ err = -EINVAL;
+ goto out_bitmap;
+ }
+
+ /* complete the following waiters */
+ mock_seqno_advance(engine, seqno + seqno_bias);
+ for (m = n; m <= seqno; m++) {
+ if (m == count)
+ break;
+
+ GEM_BUG_ON(!test_bit(m, bitmap));
+ __clear_bit(m, bitmap);
+ }
+
+ intel_engine_remove_wait(engine, &waiters[n]);
+ RB_CLEAR_NODE(&waiters[n].node);
+
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err) {
+ pr_err("rbtree corrupt after seqno advance to %d\n",
+ seqno + seqno_bias);
+ goto out_bitmap;
+ }
+
+ err = check_completion(engine, bitmap, waiters, count);
+ if (err) {
+ pr_err("completions after seqno advance to %d failed\n",
+ seqno + seqno_bias);
+ goto out_bitmap;
+ }
+ }
+
+ err = check_rbtree_empty(engine);
+out_bitmap:
+ kfree(bitmap);
+out_waiters:
+ drm_free_large(waiters);
+out_engines:
+ mock_engine_flush(engine);
+ return err;
+}
+
+struct igt_wakeup {
+ struct task_struct *tsk;
+ atomic_t *ready, *set, *done;
+ struct intel_engine_cs *engine;
+ unsigned long flags;
+#define STOP 0
+#define IDLE 1
+ wait_queue_head_t *wq;
+ u32 seqno;
+};
+
+static int wait_atomic(atomic_t *p)
+{
+ schedule();
+ return 0;
+}
+
+static int wait_atomic_timeout(atomic_t *p)
+{
+ return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
+}
+
+static bool wait_for_ready(struct igt_wakeup *w)
+{
+ DEFINE_WAIT(ready);
+
+ set_bit(IDLE, &w->flags);
+ if (atomic_dec_and_test(w->done))
+ wake_up_atomic_t(w->done);
+
+ if (test_bit(STOP, &w->flags))
+ goto out;
+
+ for (;;) {
+ prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE);
+ if (atomic_read(w->ready) == 0)
+ break;
+
+ schedule();
+ }
+ finish_wait(w->wq, &ready);
+
+out:
+ clear_bit(IDLE, &w->flags);
+ if (atomic_dec_and_test(w->set))
+ wake_up_atomic_t(w->set);
+
+ return !test_bit(STOP, &w->flags);
+}
+
+static int igt_wakeup_thread(void *arg)
+{
+ struct igt_wakeup *w = arg;
+ struct intel_wait wait;
+
+ while (wait_for_ready(w)) {
+ GEM_BUG_ON(kthread_should_stop());
+
+ intel_wait_init_for_seqno(&wait, w->seqno);
+ intel_engine_add_wait(w->engine, &wait);
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (i915_seqno_passed(intel_engine_get_seqno(w->engine),
+ w->seqno))
+ break;
+
+ if (test_bit(STOP, &w->flags)) /* emergency escape */
+ break;
+
+ schedule();
+ }
+ intel_engine_remove_wait(w->engine, &wait);
+ __set_current_state(TASK_RUNNING);
+ }
+
+ return 0;
+}
+
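+/* Release every waiter thread and wait for all of them to leave their idle
+ * wait: "ready" gates the threads, "set" counts them as they resume, and
+ * "done" is re-armed so the next phase can wait for them to park again.
+ */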
+static void igt_wake_all_sync(atomic_t *ready,
+ atomic_t *set,
+ atomic_t *done,
+ wait_queue_head_t *wq,
+ int count)
+{
+ atomic_set(set, count);
+ atomic_set(ready, 0);
+ wake_up_all(wq);
+
+ wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE);
+ atomic_set(ready, count);
+ atomic_set(done, count);
+}
+
+static int igt_wakeup(void *arg)
+{
+ I915_RND_STATE(prng);
+ const int state = TASK_UNINTERRUPTIBLE;
+ struct intel_engine_cs *engine = arg;
+ struct igt_wakeup *waiters;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ const int count = 4096;
+ const u32 max_seqno = count / 4;
+ atomic_t ready, set, done;
+ int err = -ENOMEM;
+ int n, step;
+
+ mock_engine_reset(engine);
+
+ waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ if (!waiters)
+ goto out_engines;
+
+ /* Create a large number of threads, each waiting on a random seqno.
+ * Multiple waiters will be waiting for the same seqno.
+ */
+ atomic_set(&ready, count);
+ for (n = 0; n < count; n++) {
+ waiters[n].wq = &wq;
+ waiters[n].ready = &ready;
+ waiters[n].set = &set;
+ waiters[n].done = &done;
+ waiters[n].engine = engine;
+ waiters[n].flags = BIT(IDLE);
+
+ waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
+ "i915/igt:%d", n);
+ if (IS_ERR(waiters[n].tsk))
+ goto out_waiters;
+
+ get_task_struct(waiters[n].tsk);
+ }
+
+ for (step = 1; step <= max_seqno; step <<= 1) {
+ u32 seqno;
+
+ /* The waiter threads start paused as we assign them a random
+ * seqno and reset the engine. Once the engine is reset,
+ * we signal that the threads may begin their wait upon their
+ * seqno.
+ */
+ for (n = 0; n < count; n++) {
+ GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
+ waiters[n].seqno =
+ 1 + prandom_u32_state(&prng) % max_seqno;
+ }
+ mock_seqno_advance(engine, 0);
+ igt_wake_all_sync(&ready, &set, &done, &wq, count);
+
+ /* Simulate the GPU doing chunks of work, with one or more
+ * seqno appearing to finish at the same time. A random number
+ * of threads will be waiting upon the update and hopefully be
+ * woken.
+ */
+ for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
+ usleep_range(50, 500);
+ mock_seqno_advance(engine, seqno);
+ }
+ GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);
+
+ /* With the seqno now beyond any of the waiting threads, they
+ * should all be woken, see that they are complete and signal
+ * that they are ready for the next test. We wait until all
+ * threads are complete and waiting for us (i.e. not a seqno).
+ */
+ err = wait_on_atomic_t(&done, wait_atomic_timeout, state);
+ if (err) {
+ pr_err("Timed out waiting for %d remaining waiters\n",
+ atomic_read(&done));
+ break;
+ }
+
+ err = check_rbtree_empty(engine);
+ if (err)
+ break;
+ }
+
+out_waiters:
+ for (n = 0; n < count; n++) {
+ if (IS_ERR(waiters[n].tsk))
+ break;
+
+ set_bit(STOP, &waiters[n].flags);
+ }
+ mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
+ igt_wake_all_sync(&ready, &set, &done, &wq, n);
+
+ for (n = 0; n < count; n++) {
+ if (IS_ERR(waiters[n].tsk))
+ break;
+
+ kthread_stop(waiters[n].tsk);
+ put_task_struct(waiters[n].tsk);
+ }
+
+ drm_free_large(waiters);
+out_engines:
+ mock_engine_flush(engine);
+ return err;
+}
+
+int intel_breadcrumbs_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_random_insert_remove),
+ SUBTEST(igt_insert_complete),
+ SUBTEST(igt_wakeup),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915->engine[RCS]);
+ drm_dev_unref(&i915->drm);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
new file mode 100644
index 000000000000..aa31d6c0cdfb
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+struct hang {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ u32 *seqno;
+ u32 *batch;
+};
+
+static int hang_init(struct hang *h, struct drm_i915_private *i915)
+{
+ void *vaddr;
+ int err;
+
+ memset(h, 0, sizeof(*h));
+ h->i915 = i915;
+
+ h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(h->hws))
+ return PTR_ERR(h->hws);
+
+ h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(h->obj)) {
+ err = PTR_ERR(h->obj);
+ goto err_hws;
+ }
+
+ i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+ vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+ h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+ vaddr = i915_gem_object_pin_map(h->obj,
+ HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_unpin_hws;
+ }
+ h->batch = vaddr;
+
+ return 0;
+
+err_unpin_hws:
+ i915_gem_object_unpin_map(h->hws);
+err_obj:
+ i915_gem_object_put(h->obj);
+err_hws:
+ i915_gem_object_put(h->hws);
+ return err;
+}
+
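+/* Each request's breadcrumb is written to a per-context u32 slot within the
+ * single HWS page, selected by the request's fence.context and wrapped to
+ * the page size, matching the read in hws_seqno() below.
+ */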
+static u64 hws_address(const struct i915_vma *hws,
+ const struct drm_i915_gem_request *rq)
+{
+ return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
+}
+
+static int emit_recurse_batch(struct hang *h,
+ struct drm_i915_gem_request *rq)
+{
+ struct drm_i915_private *i915 = h->i915;
+ struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_vma *hws, *vma;
+ unsigned int flags;
+ u32 *batch;
+ int err;
+
+ vma = i915_vma_instance(h->obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ hws = i915_vma_instance(h->hws, vm, NULL);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(hws, 0, 0, PIN_USER);
+ if (err)
+ goto unpin_vma;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto unpin_hws;
+
+ err = i915_switch_context(rq);
+ if (err)
+ goto unpin_hws;
+
+ i915_vma_move_to_active(vma, rq, 0);
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ i915_vma_move_to_active(hws, rq, 0);
+ if (!i915_gem_object_has_active_reference(hws->obj)) {
+ i915_gem_object_get(hws->obj);
+ i915_gem_object_set_active_reference(hws->obj);
+ }
+
+ batch = h->batch;
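+	/* Store the request's seqno into its HWS slot, then jump back to the
+	 * start of this batch; the MI_STORE_DWORD_IMM and
+	 * MI_BATCH_BUFFER_START encodings differ between generations.
+	 */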
+ if (INTEL_GEN(i915) >= 8) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+ } else if (INTEL_GEN(i915) >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8;
+ *batch++ = lower_32_bits(vma->node.start);
+ } else if (INTEL_GEN(i915) >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *batch++ = 0;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
+ *batch++ = lower_32_bits(vma->node.start);
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ }
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+ flags = 0;
+ if (INTEL_GEN(vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+
+ err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+
+unpin_hws:
+ i915_vma_unpin(hws);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
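+/*
+ * If the previous batch object is still active, the GPU may still be
+ * executing it, so allocate a fresh object rather than rewriting the
+ * commands under the GPU's nose.
+ */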
+static struct drm_i915_gem_request *
+hang_create_request(struct hang *h,
+ struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_request *rq;
+ int err;
+
+ if (i915_gem_object_is_active(h->obj)) {
+ struct drm_i915_gem_object *obj;
+ void *vaddr;
+
+ obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vaddr = i915_gem_object_pin_map(obj,
+ HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vaddr);
+ }
+
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
+
+ h->obj = obj;
+ h->batch = vaddr;
+ }
+
+ rq = i915_gem_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = emit_recurse_batch(h, rq);
+ if (err) {
+ __i915_add_request(rq, false);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
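+/* Read back the seqno the hanging batch wrote into this context's HWS slot */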
+static u32 hws_seqno(const struct hang *h,
+ const struct drm_i915_gem_request *rq)
+{
+ return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
+}
+
+static void hang_fini(struct hang *h)
+{
+ *h->batch = MI_BATCH_BUFFER_END;
+ wmb();
+
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
+
+ i915_gem_object_unpin_map(h->hws);
+ i915_gem_object_put(h->hws);
+
+ i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
+}
+
+static int igt_hang_sanitycheck(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *rq;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct hang h;
+ int err;
+
+ /* Basic check that we can execute our hanging batch */
+
+ if (!igt_can_mi_store_dword_imm(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ if (err)
+ goto unlock;
+
+ for_each_engine(engine, i915, id) {
+ long timeout;
+
+ rq = hang_create_request(&h, engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("Failed to create request for %s, err=%d\n",
+ engine->name, err);
+ goto fini;
+ }
+
+ i915_gem_request_get(rq);
+
+ *h.batch = MI_BATCH_BUFFER_END;
+ __i915_add_request(rq, true);
+
+ timeout = i915_wait_request(rq,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ i915_gem_request_put(rq);
+
+ if (timeout < 0) {
+ err = timeout;
+ pr_err("Wait for request failed on %s, err=%d\n",
+ engine->name, err);
+ goto fini;
+ }
+ }
+
+fini:
+ hang_fini(&h);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int igt_global_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ unsigned int reset_count;
+ int err = 0;
+
+ /* Check that we can issue a global GPU reset */
+
+ set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ reset_count = i915_reset_count(&i915->gpu_error);
+
+ i915_reset(i915);
+
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ err = -EINVAL;
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ err = -EIO;
+
+ return err;
+}
+
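+/*
+ * Pretend hangcheck has declared the engine hung: mark it stalled at its
+ * current seqno, then hand the reset off to any waiter.
+ */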
+static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
+{
+ u32 reset_count;
+
+ rq->engine->hangcheck.stalled = true;
+ rq->engine->hangcheck.seqno = intel_engine_get_seqno(rq->engine);
+
+ reset_count = i915_reset_count(&rq->i915->gpu_error);
+
+ set_bit(I915_RESET_HANDOFF, &rq->i915->gpu_error.flags);
+ wake_up_all(&rq->i915->gpu_error.wait_queue);
+
+ return reset_count;
+}
+
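+/*
+ * Wait for the hanging batch to start executing: busy-wait for up to 10us,
+ * then sleep-wait for up to 1s for its seqno to appear in the HWS.
+ */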
+static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
+{
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 1000));
+}
+
+static int igt_wait_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *rq;
+ unsigned int reset_count;
+ struct hang h;
+ long timeout;
+ int err;
+
+ /* Check that we detect a stuck waiter and issue a reset */
+
+ set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ if (err)
+ goto unlock;
+
+ rq = hang_create_request(&h, i915->engine[RCS], i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto fini;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+
+ if (!wait_for_hang(&h, rq)) {
+ pr_err("Failed to start request %x\n", rq->fence.seqno);
+ err = -EIO;
+ goto out_rq;
+ }
+
+ reset_count = fake_hangcheck(rq);
+
+ timeout = i915_wait_request(rq, I915_WAIT_LOCKED, 10);
+ if (timeout < 0) {
+ pr_err("i915_wait_request failed on a stuck request: err=%ld\n",
+ timeout);
+ err = timeout;
+ goto out_rq;
+ }
+
+ GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ err = -EINVAL;
+ goto out_rq;
+ }
+
+out_rq:
+ i915_gem_request_put(rq);
+fini:
+ hang_fini(&h);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO;
+
+ return err;
+}
+
+static int igt_reset_queue(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct hang h;
+ int err;
+
+ /* Check that we replay pending requests following a hang */
+
+ if (!igt_can_mi_store_dword_imm(i915))
+ return 0;
+
+ set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ if (err)
+ goto unlock;
+
+ for_each_engine(engine, i915, id) {
+ struct drm_i915_gem_request *prev;
+ IGT_TIMEOUT(end_time);
+ unsigned int count;
+
+ prev = hang_create_request(&h, engine, i915->kernel_context);
+ if (IS_ERR(prev)) {
+ err = PTR_ERR(prev);
+ goto fini;
+ }
+
+ i915_gem_request_get(prev);
+ __i915_add_request(prev, true);
+
+ count = 0;
+ do {
+ struct drm_i915_gem_request *rq;
+ unsigned int reset_count;
+
+ rq = hang_create_request(&h,
+ engine,
+ i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto fini;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+
+ if (!wait_for_hang(&h, prev)) {
+ pr_err("Failed to start request %x\n",
+ prev->fence.seqno);
+ i915_gem_request_put(rq);
+ i915_gem_request_put(prev);
+ err = -EIO;
+ goto fini;
+ }
+
+ reset_count = fake_hangcheck(prev);
+
+ i915_reset(i915);
+
+ GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
+ &i915->gpu_error.flags));
+
+ if (prev->fence.error != -EIO) {
+ pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
+ prev->fence.error);
+ i915_gem_request_put(rq);
+ i915_gem_request_put(prev);
+ err = -EINVAL;
+ goto fini;
+ }
+
+ if (rq->fence.error) {
+ pr_err("Fence error status not zero [%d] after unrelated reset\n",
+ rq->fence.error);
+ i915_gem_request_put(rq);
+ i915_gem_request_put(prev);
+ err = -EINVAL;
+ goto fini;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ i915_gem_request_put(rq);
+ i915_gem_request_put(prev);
+ err = -EINVAL;
+ goto fini;
+ }
+
+ i915_gem_request_put(prev);
+ prev = rq;
+ count++;
+ } while (time_before(jiffies, end_time));
+ pr_info("%s: Completed %d resets\n", engine->name, count);
+
+ *h.batch = MI_BATCH_BUFFER_END;
+ wmb();
+
+ i915_gem_request_put(prev);
+ }
+
+fini:
+ hang_fini(&h);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO;
+
+ return err;
+}
+
+int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_hang_sanitycheck),
+ SUBTEST(igt_global_reset),
+ SUBTEST(igt_wait_reset),
+ SUBTEST(igt_reset_queue),
+ };
+
+ if (!intel_has_gpu_reset(i915))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
new file mode 100644
index 000000000000..2d0fef2cfca6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
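+/*
+ * Validate a forcewake range table: entries must be sorted, non-overlapping
+ * and of positive length; if is_watertight, consecutive entries must also
+ * leave no gaps between them.
+ */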
+static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
+ unsigned int num_ranges,
+ bool is_watertight)
+{
+ unsigned int i;
+ s32 prev;
+
+ for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
+ /* Check that the table is watertight */
+ if (is_watertight && (prev + 1) != (s32)ranges->start) {
+ pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
+ __func__, i, ranges->start, ranges->end, prev);
+ return -EINVAL;
+ }
+
+ /* Check that the table never goes backwards */
+ if (prev >= (s32)ranges->start) {
+ pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
+ __func__, i, ranges->start, ranges->end, prev);
+ return -EINVAL;
+ }
+
+ /* Check that the entry is valid */
+ if (ranges->start >= ranges->end) {
+ pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
+ __func__, i, ranges->start, ranges->end);
+ return -EINVAL;
+ }
+
+ prev = ranges->end;
+ }
+
+ return 0;
+}
+
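+/* The shadowed register table must be sorted by strictly ascending offset */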
+static int intel_shadow_table_check(void)
+{
+ const i915_reg_t *reg = gen8_shadowed_regs;
+ unsigned int i;
+ s32 prev;
+
+ for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
+ u32 offset = i915_mmio_reg_offset(*reg);
+
+ if (prev >= (s32)offset) {
+ pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
+ __func__, i, offset, prev);
+ return -EINVAL;
+ }
+
+ prev = offset;
+ }
+
+ return 0;
+}
+
+int intel_uncore_mock_selftests(void)
+{
+ struct {
+ const struct intel_forcewake_range *ranges;
+ unsigned int num_ranges;
+ bool is_watertight;
+ } fw[] = {
+ { __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
+ { __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
+ { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
+ };
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(fw); i++) {
+ err = intel_fw_table_check(fw[i].ranges,
+ fw[i].num_ranges,
+ fw[i].is_watertight);
+ if (err)
+ return err;
+ }
+
+ err = intel_shadow_table_check();
+ if (err)
+ return err;
+
+ return 0;
+}
+
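+/*
+ * With every forcewake domain held, read each register in the low 256KiB of
+ * mmio space and note which reads are not flagged as unclaimed; then release
+ * forcewake and re-read those registers through the normal accessors,
+ * checking that the automatic forcewake handling covers them all.
+ */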
+static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
+{
+#define FW_RANGE 0x40000
+ unsigned long *valid;
+ u32 offset;
+ int err;
+
+ if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
+ !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv))
+ return 0;
+
+ if (IS_VALLEYVIEW(dev_priv)) /* XXX system lockup! */
+ return 0;
+
+ if (IS_BROADWELL(dev_priv)) /* XXX random GPU hang afterwards! */
+ return 0;
+
+ valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid),
+ GFP_TEMPORARY);
+ if (!valid)
+ return -ENOMEM;
+
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ check_for_unclaimed_mmio(dev_priv);
+ for (offset = 0; offset < FW_RANGE; offset += 4) {
+ i915_reg_t reg = { offset };
+
+ (void)I915_READ_FW(reg);
+ if (!check_for_unclaimed_mmio(dev_priv))
+ set_bit(offset, valid);
+ }
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+ err = 0;
+ for_each_set_bit(offset, valid, FW_RANGE) {
+ i915_reg_t reg = { offset };
+
+ intel_uncore_forcewake_reset(dev_priv, false);
+ check_for_unclaimed_mmio(dev_priv);
+
+ (void)I915_READ(reg);
+ if (check_for_unclaimed_mmio(dev_priv)) {
+ pr_err("Unclaimed mmio read to register 0x%04x\n",
+ offset);
+ err = -EINVAL;
+ }
+ }
+
+ kfree(valid);
+ return err;
+}
+
+int intel_uncore_live_selftests(struct drm_i915_private *i915)
+{
+ int err;
+
+ /* Confirm the table we load is still valid */
+ err = intel_fw_table_check(i915->uncore.fw_domains_table,
+ i915->uncore.fw_domains_table_entries,
+ INTEL_GEN(i915) >= 9);
+ if (err)
+ return err;
+
+ err = intel_uncore_check_forcewake_domains(i915);
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
new file mode 100644
index 000000000000..8d3a90c3f8ac
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_context.h"
+#include "mock_gtt.h"
+
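+/*
+ * Create a bare i915_gem_context for the mock device: a refcount, a hw_id
+ * and (when given a name) a mock ppgtt, which is enough to exercise the GEM
+ * request paths without real hardware.
+ */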
+struct i915_gem_context *
+mock_context(struct drm_i915_private *i915,
+ const char *name)
+{
+ struct i915_gem_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ kref_init(&ctx->ref);
+ INIT_LIST_HEAD(&ctx->link);
+ ctx->i915 = i915;
+
+ ret = ida_simple_get(&i915->context_hw_ida,
+ 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free;
+ ctx->hw_id = ret;
+
+ if (name) {
+ ctx->name = kstrdup(name, GFP_KERNEL);
+ if (!ctx->name)
+ goto err_put;
+
+ ctx->ppgtt = mock_ppgtt(i915, name);
+ if (!ctx->ppgtt)
+ goto err_put;
+ }
+
+ return ctx;
+
+err_free:
+ kfree(ctx);
+ return NULL;
+
+err_put:
+ i915_gem_context_set_closed(ctx);
+ i915_gem_context_put(ctx);
+ return NULL;
+}
+
+void mock_context_close(struct i915_gem_context *ctx)
+{
+ i915_gem_context_set_closed(ctx);
+
+ i915_ppgtt_close(&ctx->ppgtt->base);
+
+ i915_gem_context_put(ctx);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h
new file mode 100644
index 000000000000..2427e5c0916a
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_context.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_CONTEXT_H
+#define __MOCK_CONTEXT_H
+
+struct i915_gem_context *
+mock_context(struct drm_i915_private *i915,
+ const char *name);
+
+void mock_context_close(struct i915_gem_context *ctx);
+
+#endif /* !__MOCK_CONTEXT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
new file mode 100644
index 000000000000..302f7d103635
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_dmabuf.h"
+
+static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int i, err;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
+ if (err)
+ goto err_free;
+
+ sg = st->sgl;
+ for (i = 0; i < mock->npages; i++) {
+ sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ }
+
+ if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ err = -ENOMEM;
+ goto err_st;
+ }
+
+ return st;
+
+err_st:
+ sg_free_table(st);
+err_free:
+ kfree(st);
+ return ERR_PTR(err);
+}
+
+static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *st,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
+ sg_free_table(st);
+ kfree(st);
+}
+
+static void mock_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+ int i;
+
+ for (i = 0; i < mock->npages; i++)
+ put_page(mock->pages[i]);
+
+ kfree(mock);
+}
+
+static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
+}
+
+static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ vm_unmap_ram(vaddr, mock->npages);
+}
+
+static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ return kmap_atomic(mock->pages[page_num]);
+}
+
+static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+ kunmap_atomic(addr);
+}
+
+static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ return kmap(mock->pages[page_num]);
+}
+
+static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ kunmap(mock->pages[page_num]);
+}
+
+static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -ENODEV;
+}
+
+static const struct dma_buf_ops mock_dmabuf_ops = {
+ .map_dma_buf = mock_map_dma_buf,
+ .unmap_dma_buf = mock_unmap_dma_buf,
+ .release = mock_dmabuf_release,
+ .map = mock_dmabuf_kmap,
+ .map_atomic = mock_dmabuf_kmap_atomic,
+ .unmap = mock_dmabuf_kunmap,
+ .unmap_atomic = mock_dmabuf_kunmap_atomic,
+ .mmap = mock_dmabuf_mmap,
+ .vmap = mock_dmabuf_vmap,
+ .vunmap = mock_dmabuf_vunmap,
+};
+
+static struct dma_buf *mock_dmabuf(int npages)
+{
+ struct mock_dmabuf *mock;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dmabuf;
+ int i;
+
+ mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!mock)
+ return ERR_PTR(-ENOMEM);
+
+ mock->npages = npages;
+ for (i = 0; i < npages; i++) {
+ mock->pages[i] = alloc_page(GFP_KERNEL);
+ if (!mock->pages[i])
+ goto err;
+ }
+
+ exp_info.ops = &mock_dmabuf_ops;
+ exp_info.size = npages * PAGE_SIZE;
+ exp_info.flags = O_CLOEXEC;
+ exp_info.priv = mock;
+
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf))
+ goto err;
+
+ return dmabuf;
+
+err:
+ while (i--)
+ put_page(mock->pages[i]);
+ kfree(mock);
+ return ERR_PTR(-ENOMEM);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
new file mode 100644
index 000000000000..ec80613159b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_DMABUF_H__
+#define __MOCK_DMABUF_H__
+
+#include <linux/dma-buf.h>
+
+struct mock_dmabuf {
+ int npages;
+ struct page *pages[];
+};
+
+static inline struct mock_dmabuf *to_mock(struct dma_buf *buf)
+{
+ return buf->priv;
+}
+
+#endif /* !__MOCK_DMABUF_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c
new file mode 100644
index 000000000000..09c704153456
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_drm.h"
+
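+/*
+ * Open an anonymous DRM client on the mock device by faking up the inode
+ * and file that drm_open() expects, then discarding them once the drm_file
+ * has been created.
+ */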
+struct drm_file *mock_file(struct drm_i915_private *i915)
+{
+ struct file *filp;
+ struct inode *inode;
+ struct drm_file *file;
+ int err;
+
+ inode = kzalloc(sizeof(*inode), GFP_KERNEL);
+ if (!inode) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ inode->i_rdev = i915->drm.primary->index;
+
+ filp = kzalloc(sizeof(*filp), GFP_KERNEL);
+ if (!filp) {
+ err = -ENOMEM;
+ goto err_inode;
+ }
+
+ err = drm_open(inode, filp);
+ if (err)
+ goto err_filp;
+
+ file = filp->private_data;
+ memset(&file->filp, POISON_INUSE, sizeof(file->filp));
+ file->authenticated = true;
+
+ kfree(filp);
+ kfree(inode);
+ return file;
+
+err_filp:
+ kfree(filp);
+err_inode:
+ kfree(inode);
+err:
+ return ERR_PTR(err);
+}
+
+void mock_file_free(struct drm_i915_private *i915, struct drm_file *file)
+{
+ struct file filp = { .private_data = file };
+
+ drm_release(NULL, &filp);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.h b/drivers/gpu/drm/i915/selftests/mock_drm.h
new file mode 100644
index 000000000000..b39beee9f8f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_DRM_H
+#define __MOCK_DRM_H
+
+struct drm_file *mock_file(struct drm_i915_private *i915);
+void mock_file_free(struct drm_i915_private *i915, struct drm_file *file);
+
+#endif /* !__MOCK_DRM_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
new file mode 100644
index 000000000000..0ad624a1db90
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_engine.h"
+#include "mock_request.h"
+
+static struct mock_request *first_request(struct mock_engine *engine)
+{
+ return list_first_entry_or_null(&engine->hw_queue,
+ struct mock_request,
+ link);
+}
+
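+/*
+ * Timer callback standing in for the hardware: complete the request at the
+ * head of the software queue by advancing the engine seqno, then rearm the
+ * timer for the next queued request (honouring its artificial delay).
+ */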
+static void hw_delay_complete(unsigned long data)
+{
+ struct mock_engine *engine = (typeof(engine))data;
+ struct mock_request *request;
+
+ spin_lock(&engine->hw_lock);
+
+ request = first_request(engine);
+ if (request) {
+ list_del_init(&request->link);
+ mock_seqno_advance(&engine->base, request->base.global_seqno);
+ }
+
+ request = first_request(engine);
+ if (request)
+ mod_timer(&engine->hw_delay, jiffies + request->delay);
+
+ spin_unlock(&engine->hw_lock);
+}
+
+static int mock_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
+{
+ i915_gem_context_get(ctx);
+ return 0;
+}
+
+static void mock_context_unpin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
+{
+ i915_gem_context_put(ctx);
+}
+
+static int mock_request_alloc(struct drm_i915_gem_request *request)
+{
+ struct mock_request *mock = container_of(request, typeof(*mock), base);
+
+ INIT_LIST_HEAD(&mock->link);
+ mock->delay = 0;
+
+ request->ring = request->engine->buffer;
+ return 0;
+}
+
+static int mock_emit_flush(struct drm_i915_gem_request *request,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static void mock_emit_breadcrumb(struct drm_i915_gem_request *request,
+ u32 *flags)
+{
+}
+
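+/*
+ * "Submit" to the mock engine: queue the request on the software hw_queue
+ * and, if it is now the only entry, start the delay timer that will
+ * eventually complete it.
+ */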
+static void mock_submit_request(struct drm_i915_gem_request *request)
+{
+ struct mock_request *mock = container_of(request, typeof(*mock), base);
+ struct mock_engine *engine =
+ container_of(request->engine, typeof(*engine), base);
+
+ i915_gem_request_submit(request);
+ GEM_BUG_ON(!request->global_seqno);
+
+ spin_lock_irq(&engine->hw_lock);
+ list_add_tail(&mock->link, &engine->hw_queue);
+ if (mock->link.prev == &engine->hw_queue)
+ mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ spin_unlock_irq(&engine->hw_lock);
+}
+
+static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
+{
+ const unsigned long sz = roundup_pow_of_two(sizeof(struct intel_ring));
+ struct intel_ring *ring;
+
+ ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ ring->engine = engine;
+ ring->size = sz;
+ ring->effective_size = sz;
+ ring->vaddr = (void *)(ring + 1);
+
+ INIT_LIST_HEAD(&ring->request_list);
+ intel_ring_update_space(ring);
+
+ return ring;
+}
+
+struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
+ const char *name)
+{
+ struct mock_engine *engine;
+ static int id;
+
+ engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
+ if (!engine)
+ return NULL;
+
+ engine->base.buffer = mock_ring(&engine->base);
+ if (!engine->base.buffer) {
+ kfree(engine);
+ return NULL;
+ }
+
+ /* minimal engine setup for requests */
+ engine->base.i915 = i915;
+ engine->base.name = name;
+ engine->base.id = id++;
+ engine->base.status_page.page_addr = (void *)(engine + 1);
+
+ engine->base.context_pin = mock_context_pin;
+ engine->base.context_unpin = mock_context_unpin;
+ engine->base.request_alloc = mock_request_alloc;
+ engine->base.emit_flush = mock_emit_flush;
+ engine->base.emit_breadcrumb = mock_emit_breadcrumb;
+ engine->base.submit_request = mock_submit_request;
+
+ engine->base.timeline =
+ &i915->gt.global_timeline.engine[engine->base.id];
+
+ intel_engine_init_breadcrumbs(&engine->base);
+ engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
+
+ /* fake hw queue */
+ spin_lock_init(&engine->hw_lock);
+ setup_timer(&engine->hw_delay,
+ hw_delay_complete,
+ (unsigned long)engine);
+ INIT_LIST_HEAD(&engine->hw_queue);
+
+ return &engine->base;
+}
+
+void mock_engine_flush(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+ struct mock_request *request, *rn;
+
+ del_timer_sync(&mock->hw_delay);
+
+ spin_lock_irq(&mock->hw_lock);
+ list_for_each_entry_safe(request, rn, &mock->hw_queue, link) {
+ list_del_init(&request->link);
+ mock_seqno_advance(&mock->base, request->base.global_seqno);
+ }
+ spin_unlock_irq(&mock->hw_lock);
+}
+
+void mock_engine_reset(struct intel_engine_cs *engine)
+{
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0);
+}
+
+void mock_engine_free(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+
+ GEM_BUG_ON(timer_pending(&mock->hw_delay));
+
+ if (engine->last_retired_context)
+ engine->context_unpin(engine, engine->last_retired_context);
+
+ intel_engine_fini_breadcrumbs(engine);
+
+ kfree(engine->buffer);
+ kfree(engine);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/selftests/mock_engine.h
new file mode 100644
index 000000000000..e5e240216ba3
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_ENGINE_H__
+#define __MOCK_ENGINE_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+
+#include "../intel_ringbuffer.h"
+
+struct mock_engine {
+ struct intel_engine_cs base;
+
+ spinlock_t hw_lock;
+ struct list_head hw_queue;
+ struct timer_list hw_delay;
+};
+
+struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
+ const char *name);
+void mock_engine_flush(struct intel_engine_cs *engine);
+void mock_engine_reset(struct intel_engine_cs *engine);
+void mock_engine_free(struct intel_engine_cs *engine);
+
+static inline void mock_seqno_advance(struct intel_engine_cs *engine, u32 seqno)
+{
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+ intel_engine_wakeup(engine);
+}
+
+#endif /* !__MOCK_ENGINE_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
new file mode 100644
index 000000000000..9f24c5da3f8d
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "mock_engine.h"
+#include "mock_context.h"
+#include "mock_request.h"
+#include "mock_gem_device.h"
+#include "mock_gem_object.h"
+#include "mock_gtt.h"
+
+void mock_device_flush(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ for_each_engine(engine, i915, id)
+ mock_engine_flush(engine);
+
+ i915_gem_retire_requests(i915);
+}
+
+static void mock_device_release(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ cancel_delayed_work_sync(&i915->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gt.idle_work);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for_each_engine(engine, i915, id)
+ mock_engine_free(engine);
+ i915_gem_context_fini(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ drain_workqueue(i915->wq);
+ i915_gem_drain_freed_objects(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ mock_fini_ggtt(i915);
+ i915_gem_timeline_fini(&i915->gt.global_timeline);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ destroy_workqueue(i915->wq);
+
+ kmem_cache_destroy(i915->dependencies);
+ kmem_cache_destroy(i915->requests);
+ kmem_cache_destroy(i915->vmas);
+ kmem_cache_destroy(i915->objects);
+
+ drm_dev_fini(&i915->drm);
+ put_device(&i915->drm.pdev->dev);
+}
+
+static struct drm_driver mock_driver = {
+ .name = "mock",
+ .driver_features = DRIVER_GEM,
+ .release = mock_device_release,
+
+ .gem_close_object = i915_gem_close_object,
+ .gem_free_object_unlocked = i915_gem_free_object,
+};
+
+static void release_dev(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ kfree(pdev);
+}
+
+static void mock_retire_work_handler(struct work_struct *work)
+{
+}
+
+static void mock_idle_work_handler(struct work_struct *work)
+{
+}
+
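+/*
+ * Construct a minimal drm_i915_private wrapped around a fake PCI device:
+ * no mmio, a mock GGTT and a single mock engine, which is sufficient for
+ * the mock selftests to exercise the GEM code paths.
+ */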
+struct drm_i915_private *mock_gem_device(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct pci_dev *pdev;
+ int err;
+
+ pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
+ if (!pdev)
+ goto err;
+
+ device_initialize(&pdev->dev);
+ pdev->dev.release = release_dev;
+ dev_set_name(&pdev->dev, "mock");
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ i915 = (struct drm_i915_private *)(pdev + 1);
+ pci_set_drvdata(pdev, i915);
+
+ err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
+ if (err) {
+ pr_err("Failed to initialise mock GEM device: err=%d\n", err);
+ goto put_device;
+ }
+ i915->drm.pdev = pdev;
+ i915->drm.dev_private = i915;
+
+ /* Using the global GTT may ask questions about KMS users, so prepare */
+ drm_mode_config_init(&i915->drm);
+
+ mkwrite_device_info(i915)->gen = -1;
+
+ spin_lock_init(&i915->mm.object_stat_lock);
+
+ init_waitqueue_head(&i915->gpu_error.wait_queue);
+ init_waitqueue_head(&i915->gpu_error.reset_queue);
+
+ i915->wq = alloc_ordered_workqueue("mock", 0);
+ if (!i915->wq)
+ goto put_device;
+
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ init_llist_head(&i915->mm.free_list);
+ INIT_LIST_HEAD(&i915->mm.unbound_list);
+ INIT_LIST_HEAD(&i915->mm.bound_list);
+
+ ida_init(&i915->context_hw_ida);
+
+ INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
+ INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
+
+ i915->gt.awake = true;
+
+ i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
+ if (!i915->objects)
+ goto err_wq;
+
+ i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!i915->vmas)
+ goto err_objects;
+
+ i915->requests = KMEM_CACHE(mock_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+ if (!i915->requests)
+ goto err_vmas;
+
+ i915->dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!i915->dependencies)
+ goto err_requests;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ INIT_LIST_HEAD(&i915->gt.timelines);
+ err = i915_gem_timeline_init__global(i915);
+ if (err) {
+ mutex_unlock(&i915->drm.struct_mutex);
+ goto err_dependencies;
+ }
+
+ mock_init_ggtt(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mkwrite_device_info(i915)->ring_mask = BIT(0);
+ i915->engine[RCS] = mock_engine(i915, "mock");
+ if (!i915->engine[RCS])
+ goto err_dependencies;
+
+ i915->kernel_context = mock_context(i915, NULL);
+ if (!i915->kernel_context)
+ goto err_engine;
+
+ return i915;
+
+err_engine:
+ for_each_engine(engine, i915, id)
+ mock_engine_free(engine);
+err_dependencies:
+ kmem_cache_destroy(i915->dependencies);
+err_requests:
+ kmem_cache_destroy(i915->requests);
+err_vmas:
+ kmem_cache_destroy(i915->vmas);
+err_objects:
+ kmem_cache_destroy(i915->objects);
+err_wq:
+ destroy_workqueue(i915->wq);
+put_device:
+ put_device(&pdev->dev);
+err:
+ return NULL;
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.h b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
new file mode 100644
index 000000000000..4cca4d57f52c
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
@@ -0,0 +1,9 @@
+#ifndef __MOCK_GEM_DEVICE_H__
+#define __MOCK_GEM_DEVICE_H__
+
+struct drm_i915_private;
+
+struct drm_i915_private *mock_gem_device(void);
+void mock_device_flush(struct drm_i915_private *i915);
+
+#endif /* !__MOCK_GEM_DEVICE_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/selftests/mock_gem_object.h
new file mode 100644
index 000000000000..9fbf67321662
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_object.h
@@ -0,0 +1,8 @@
+#ifndef __MOCK_GEM_OBJECT_H__
+#define __MOCK_GEM_OBJECT_H__
+
+struct mock_object {
+ struct drm_i915_gem_object base;
+};
+
+#endif /* !__MOCK_GEM_OBJECT_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
new file mode 100644
index 000000000000..a61309c7cb3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_gtt.h"
+
+static void mock_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+}
+
+static void mock_insert_entries(struct i915_address_space *vm,
+ struct sg_table *st,
+ u64 start,
+ enum i915_cache_level level, u32 flags)
+{
+}
+
+static int mock_bind_ppgtt(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
+ vma->pages = vma->obj->mm.pages;
+ vma->flags |= I915_VMA_LOCAL_BIND;
+ return 0;
+}
+
+static void mock_unbind_ppgtt(struct i915_vma *vma)
+{
+}
+
+static void mock_cleanup(struct i915_address_space *vm)
+{
+}
+
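+/* Create a ppgtt that tracks VMAs but whose page-table operations are no-ops */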
+struct i915_hw_ppgtt *
+mock_ppgtt(struct drm_i915_private *i915,
+ const char *name)
+{
+ struct i915_hw_ppgtt *ppgtt;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return NULL;
+
+ kref_init(&ppgtt->ref);
+ ppgtt->base.i915 = i915;
+ ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
+ ppgtt->base.file = ERR_PTR(-ENODEV);
+
+ INIT_LIST_HEAD(&ppgtt->base.active_list);
+ INIT_LIST_HEAD(&ppgtt->base.inactive_list);
+ INIT_LIST_HEAD(&ppgtt->base.unbound_list);
+
+ INIT_LIST_HEAD(&ppgtt->base.global_link);
+ drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
+ i915_gem_timeline_init(i915, &ppgtt->base.timeline, name);
+
+ ppgtt->base.clear_range = nop_clear_range;
+ ppgtt->base.insert_page = mock_insert_page;
+ ppgtt->base.insert_entries = mock_insert_entries;
+ ppgtt->base.bind_vma = mock_bind_ppgtt;
+ ppgtt->base.unbind_vma = mock_unbind_ppgtt;
+ ppgtt->base.cleanup = mock_cleanup;
+
+ return ppgtt;
+}
+
+static int mock_bind_ggtt(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ int err;
+
+ err = i915_get_ggtt_vma_pages(vma);
+ if (err)
+ return err;
+
+ vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ return 0;
+}
+
+static void mock_unbind_ggtt(struct i915_vma *vma)
+{
+}
+
+void mock_init_ggtt(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ INIT_LIST_HEAD(&i915->vm_list);
+
+ ggtt->base.i915 = i915;
+
+ ggtt->mappable_base = 0;
+ ggtt->mappable_end = 2048 * PAGE_SIZE;
+ ggtt->base.total = 4096 * PAGE_SIZE;
+
+ ggtt->base.clear_range = nop_clear_range;
+ ggtt->base.insert_page = mock_insert_page;
+ ggtt->base.insert_entries = mock_insert_entries;
+ ggtt->base.bind_vma = mock_bind_ggtt;
+ ggtt->base.unbind_vma = mock_unbind_ggtt;
+ ggtt->base.cleanup = mock_cleanup;
+
+ i915_address_space_init(&ggtt->base, i915, "global");
+}
+
+void mock_fini_ggtt(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ i915_address_space_fini(&ggtt->base);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
new file mode 100644
index 000000000000..9a0a833bb545
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_GTT_H
+#define __MOCK_GTT_H
+
+void mock_init_ggtt(struct drm_i915_private *i915);
+void mock_fini_ggtt(struct drm_i915_private *i915);
+
+struct i915_hw_ppgtt *
+mock_ppgtt(struct drm_i915_private *i915,
+ const char *name);
+
+#endif /* !__MOCK_GTT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
new file mode 100644
index 000000000000..8097e3693ec4
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_engine.h"
+#include "mock_request.h"
+
+struct drm_i915_gem_request *
+mock_request(struct intel_engine_cs *engine,
+ struct i915_gem_context *context,
+ unsigned long delay)
+{
+ struct drm_i915_gem_request *request;
+ struct mock_request *mock;
+
+ /* NB the i915->requests slab cache is enlarged to fit mock_request */
+ request = i915_gem_request_alloc(engine, context);
+ if (IS_ERR(request))
+ return NULL;
+
+ mock = container_of(request, typeof(*mock), base);
+ mock->delay = delay;
+
+ return &mock->base;
+}
+
+bool mock_cancel_request(struct drm_i915_gem_request *request)
+{
+ struct mock_request *mock = container_of(request, typeof(*mock), base);
+ struct mock_engine *engine =
+ container_of(request->engine, typeof(*engine), base);
+ bool was_queued;
+
+ spin_lock_irq(&engine->hw_lock);
+ was_queued = !list_empty(&mock->link);
+ list_del_init(&mock->link);
+ spin_unlock_irq(&engine->hw_lock);
+
+ if (was_queued)
+ i915_gem_request_unsubmit(request);
+
+ return was_queued;
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
new file mode 100644
index 000000000000..4dea74c8e96d
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_REQUEST__
+#define __MOCK_REQUEST__
+
+#include <linux/list.h>
+
+#include "../i915_gem_request.h"
+
+struct mock_request {
+ struct drm_i915_gem_request base;
+
+ struct list_head link;
+ unsigned long delay;
+};
+
+struct drm_i915_gem_request *
+mock_request(struct intel_engine_cs *engine,
+ struct i915_gem_context *context,
+ unsigned long delay);
+
+bool mock_cancel_request(struct drm_i915_gem_request *request);
+
+#endif /* !__MOCK_REQUEST__ */
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
new file mode 100644
index 000000000000..1cc5d2931753
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/prime_numbers.h>
+#include <linux/random.h>
+
+#include "../i915_selftest.h"
+
+#define PFN_BIAS (1 << 10)
+
+struct pfn_table {
+ struct sg_table st;
+ unsigned long start, end;
+};
+
+typedef unsigned int (*npages_fn_t)(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd);
+
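+/*
+ * Walk the table with for_each_sg() and check that the pages appear in the
+ * expected pfn order and that each chunk has the length the generator
+ * function said it should.
+ */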
+static noinline int expect_pfn_sg(struct pfn_table *pt,
+ npages_fn_t npages_fn,
+ struct rnd_state *rnd,
+ const char *who,
+ unsigned long timeout)
+{
+ struct scatterlist *sg;
+ unsigned long pfn, n;
+
+ pfn = pt->start;
+ for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
+ struct page *page = sg_page(sg);
+ unsigned int npages = npages_fn(n, pt->st.nents, rnd);
+
+ if (page_to_pfn(page) != pfn) {
+ pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
+ __func__, who, pfn, page_to_pfn(page));
+ return -EINVAL;
+ }
+
+ if (sg->length != npages * PAGE_SIZE) {
+ pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
+ __func__, who, npages * PAGE_SIZE, sg->length);
+ return -EINVAL;
+ }
+
+ if (igt_timeout(timeout, "%s timed out\n", who))
+ return -EINTR;
+
+ pfn += npages;
+ }
+ if (pfn != pt->end) {
+ pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
+ __func__, who, pt->end, pfn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
+ const char *who,
+ unsigned long timeout)
+{
+ struct sg_page_iter sgiter;
+ unsigned long pfn;
+
+ pfn = pt->start;
+ for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
+ struct page *page = sg_page_iter_page(&sgiter);
+
+ if (page != pfn_to_page(pfn)) {
+ pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
+ __func__, who, pfn, page_to_pfn(page));
+ return -EINVAL;
+ }
+
+ if (igt_timeout(timeout, "%s timed out\n", who))
+ return -EINTR;
+
+ pfn++;
+ }
+ if (pfn != pt->end) {
+ pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
+ __func__, who, pt->end, pfn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
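+/*
+ * Repeat the page-by-page check using the i915-local for_each_sgt_page()
+ * iterator.
+ */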
+static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
+ const char *who,
+ unsigned long timeout)
+{
+ struct sgt_iter sgt;
+ struct page *page;
+ unsigned long pfn;
+
+ pfn = pt->start;
+ for_each_sgt_page(page, sgt, &pt->st) {
+ if (page != pfn_to_page(pfn)) {
+ pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
+ __func__, who, pfn, page_to_pfn(page));
+ return -EINVAL;
+ }
+
+ if (igt_timeout(timeout, "%s timed out\n", who))
+ return -EINTR;
+
+ pfn++;
+ }
+ if (pfn != pt->end) {
+ pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
+ __func__, who, pt->end, pfn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
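+/*
+ * Verify the table with all three iterators: per-sg, per-page and the
+ * i915 sgt iterator.
+ */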
+static int expect_pfn_sgtable(struct pfn_table *pt,
+ npages_fn_t npages_fn,
+ struct rnd_state *rnd,
+ const char *who,
+ unsigned long timeout)
+{
+ int err;
+
+ err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
+ if (err)
+ return err;
+
+ err = expect_pfn_sg_page_iter(pt, who, timeout);
+ if (err)
+ return err;
+
+ err = expect_pfn_sgtiter(pt, who, timeout);
+ if (err)
+ return err;
+
+ return 0;
+}
+
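+/*
+ * Chunk-size strategies for building the table: a single page per entry,
+ * steadily growing entries, steadily shrinking entries, and random sizes
+ * between 1 and 1024 pages.
+ */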
+static unsigned int one(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return 1;
+}
+
+static unsigned int grow(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return n + 1;
+}
+
+static unsigned int shrink(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return count - n;
+}
+
+static unsigned int random(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return 1 + (prandom_u32_state(rnd) % 1024);
+}
+
+static inline bool page_contiguous(struct page *first,
+ struct page *last,
+ unsigned long npages)
+{
+ return first + npages == last;
+}
+
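+/*
+ * Build an sg_table with room for max entries, filling the first count
+ * entries with physically contiguous page runs starting at PFN_BIAS and
+ * sized by npages_fn(). Returns -ENOSPC if the memmap is not contiguous
+ * over the requested pfn range.
+ */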
+static int alloc_table(struct pfn_table *pt,
+ unsigned long count, unsigned long max,
+ npages_fn_t npages_fn,
+ struct rnd_state *rnd,
+ int alloc_error)
+{
+ struct scatterlist *sg;
+ unsigned long n, pfn;
+
+ if (sg_alloc_table(&pt->st, max,
+ GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
+ return alloc_error;
+
+ /*
+  * count * PAGE_SIZE must fit in sg->length (an unsigned int), i.e.
+  * count must stay below 2^20 with 4KiB pages. Only the type of
+  * sg->length matters to overflows_type(), so the check is safe even
+  * though sg has not been assigned yet.
+  */
+ GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));
+
+ /* Construct a table where each scatterlist contains different number
+ * of entries. The idea is to check that we can iterate the individual
+ * pages from inside the coalesced lists.
+ */
+ pt->start = PFN_BIAS;
+ pfn = pt->start;
+ sg = pt->st.sgl;
+ for (n = 0; n < count; n++) {
+ unsigned long npages = npages_fn(n, count, rnd);
+
+ /* Nobody expects the Sparse Memmap! */
+ if (!page_contiguous(pfn_to_page(pfn),
+ pfn_to_page(pfn + npages),
+ npages)) {
+ sg_free_table(&pt->st);
+ return -ENOSPC;
+ }
+
+ if (n)
+ sg = sg_next(sg);
+ sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);
+
+ GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
+ GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
+ GEM_BUG_ON(sg->offset != 0);
+
+ pfn += npages;
+ }
+ sg_mark_end(sg);
+ pt->st.nents = n;
+ pt->end = pfn;
+
+ return 0;
+}
+
+static const npages_fn_t npages_funcs[] = {
+ one,
+ grow,
+ shrink,
+ random,
+ NULL,
+};
+
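+/*
+ * For each prime p below 20, build tables of 2^p and 2^p +/- 1 entries
+ * with every chunk-size strategy and verify that all iterators see the
+ * expected pages. Once a table is large enough to need sg chaining
+ * (size > SG_MAX_SINGLE_ALLOC), an allocation failure is tolerated
+ * rather than reported as an error.
+ */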
+static int igt_sg_alloc(void *ignored)
+{
+ IGT_TIMEOUT(end_time);
+ const unsigned long max_order = 20; /* approximating a 4GiB object */
+ struct rnd_state prng;
+ unsigned long prime;
+ int alloc_error = -ENOMEM;
+
+ for_each_prime_number(prime, max_order) {
+ unsigned long size = BIT(prime);
+ int offset;
+
+ for (offset = -1; offset <= 1; offset++) {
+ unsigned long sz = size + offset;
+ const npages_fn_t *npages;
+ struct pfn_table pt;
+ int err;
+
+ for (npages = npages_funcs; *npages; npages++) {
+ prandom_seed_state(&prng,
+ i915_selftest.random_seed);
+ err = alloc_table(&pt, sz, sz, *npages, &prng,
+ alloc_error);
+ if (err == -ENOSPC)
+ break;
+ if (err)
+ return err;
+
+ prandom_seed_state(&prng,
+ i915_selftest.random_seed);
+ err = expect_pfn_sgtable(&pt, *npages, &prng,
+ "sg_alloc_table",
+ end_time);
+ sg_free_table(&pt.st);
+ if (err)
+ return err;
+ }
+ }
+
+ /* Test at least one continuation before accepting oom */
+ if (size > SG_MAX_SINGLE_ALLOC)
+ alloc_error = -ENOSPC;
+ }
+
+ return 0;
+}
+
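+/*
+ * Allocate tables with room for PAGE_SIZE entries but only a prime number
+ * of them populated, then check that i915_sg_trim(), when it does trim,
+ * leaves exactly that many entries and does not disturb iteration.
+ */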
+static int igt_sg_trim(void *ignored)
+{
+ IGT_TIMEOUT(end_time);
+ const unsigned long max = PAGE_SIZE; /* not prime! */
+ struct pfn_table pt;
+ unsigned long prime;
+ int alloc_error = -ENOMEM;
+
+ for_each_prime_number(prime, max) {
+ const npages_fn_t *npages;
+ int err;
+
+ for (npages = npages_funcs; *npages; npages++) {
+ struct rnd_state prng;
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ err = alloc_table(&pt, prime, max, *npages, &prng,
+ alloc_error);
+ if (err == -ENOSPC)
+ break;
+ if (err)
+ return err;
+
+ if (i915_sg_trim(&pt.st)) {
+ if (pt.st.orig_nents != prime ||
+ pt.st.nents != prime) {
+ pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
+ pt.st.nents, pt.st.orig_nents, prime);
+ err = -EINVAL;
+ } else {
+ prandom_seed_state(&prng,
+ i915_selftest.random_seed);
+ err = expect_pfn_sgtable(&pt,
+ *npages, &prng,
+ "i915_sg_trim",
+ end_time);
+ }
+ }
+ sg_free_table(&pt.st);
+ if (err)
+ return err;
+ }
+
+ /* Test at least one continuation before accepting oom */
+ if (prime > SG_MAX_SINGLE_ALLOC)
+ alloc_error = -ENOSPC;
+ }
+
+ return 0;
+}
+
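+/* Entry point for the mock selftest runner; runs both subtests in order. */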
+int scatterlist_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_sg_alloc),
+ SUBTEST(igt_sg_trim),
+ };
+
+ return i915_subtests(tests, NULL);
+}