Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_gem_object.c      139
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_gem_object.h       45
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c           1793
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_active.c            12
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c               18
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c    397
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c     1752
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c       404
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c         33
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c           31
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c       658
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_live_selftests.h     3
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_mock_selftests.h     1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c           73
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_timeline.c          26
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c               16
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_flush_test.c          6
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_gem_utils.c          34
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_gem_utils.h          17
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c            19
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.h             9
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c              11
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c            4
-rw-r--r--  drivers/gpu/drm/i915/selftests/lib_sw_fence.h            4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c           129
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.h            42
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_dmabuf.c            162
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_dmabuf.h             41
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c         11
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_object.h          9
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c                 7
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.h                 4
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.c             2
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_timeline.c            1
-rw-r--r--  drivers/gpu/drm/i915/selftests/scatterlist.c              3
35 files changed, 146 insertions, 5770 deletions
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
deleted file mode 100644
index 419fd4d6a8f0..000000000000
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "huge_gem_object.h"
-
-static void huge_free_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- unsigned long nreal = obj->scratch / PAGE_SIZE;
- struct scatterlist *sg;
-
- for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
- __free_page(sg_page(sg));
-
- sg_free_table(pages);
- kfree(pages);
-}
-
-static int huge_get_pages(struct drm_i915_gem_object *obj)
-{
-#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
- const unsigned long nreal = obj->scratch / PAGE_SIZE;
- const unsigned long npages = obj->base.size / PAGE_SIZE;
- struct scatterlist *sg, *src, *end;
- struct sg_table *pages;
- unsigned long n;
-
- pages = kmalloc(sizeof(*pages), GFP);
- if (!pages)
- return -ENOMEM;
-
- if (sg_alloc_table(pages, npages, GFP)) {
- kfree(pages);
- return -ENOMEM;
- }
-
- sg = pages->sgl;
- for (n = 0; n < nreal; n++) {
- struct page *page;
-
- page = alloc_page(GFP | __GFP_HIGHMEM);
- if (!page) {
- sg_mark_end(sg);
- goto err;
- }
-
- sg_set_page(sg, page, PAGE_SIZE, 0);
- sg = __sg_next(sg);
- }
- if (nreal < npages) {
- for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
- sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
- src = __sg_next(src);
- if (src == end)
- src = pages->sgl;
- }
- }
-
- if (i915_gem_gtt_prepare_pages(obj, pages))
- goto err;
-
- __i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
-
- return 0;
-
-err:
- huge_free_pages(obj, pages);
-
- return -ENOMEM;
-#undef GFP
-}
-
-static void huge_put_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- i915_gem_gtt_finish_pages(obj, pages);
- huge_free_pages(obj, pages);
-
- obj->mm.dirty = false;
-}
-
-static const struct drm_i915_gem_object_ops huge_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
- .get_pages = huge_get_pages,
- .put_pages = huge_put_pages,
-};
-
-struct drm_i915_gem_object *
-huge_gem_object(struct drm_i915_private *i915,
- phys_addr_t phys_size,
- dma_addr_t dma_size)
-{
- struct drm_i915_gem_object *obj;
- unsigned int cache_level;
-
- GEM_BUG_ON(!phys_size || phys_size > dma_size);
- GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
- GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));
-
- if (overflows_type(dma_size, obj->base.size))
- return ERR_PTR(-E2BIG);
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
- i915_gem_object_init(obj, &huge_ops);
-
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
- i915_gem_object_set_cache_coherency(obj, cache_level);
- obj->scratch = phys_size;
-
- return obj;
-}
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/selftests/huge_gem_object.h
deleted file mode 100644
index a6133a9e8029..000000000000
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HUGE_GEM_OBJECT_H
-#define __HUGE_GEM_OBJECT_H
-
-struct drm_i915_gem_object *
-huge_gem_object(struct drm_i915_private *i915,
- phys_addr_t phys_size,
- dma_addr_t dma_size);
-
-static inline phys_addr_t
-huge_gem_object_phys_size(struct drm_i915_gem_object *obj)
-{
- return obj->scratch;
-}
-
-static inline dma_addr_t
-huge_gem_object_dma_size(struct drm_i915_gem_object *obj)
-{
- return obj->base.size;
-}
-
-#endif /* !__HUGE_GEM_OBJECT_H */
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
deleted file mode 100644
index 1e1f83326a96..000000000000
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ /dev/null
@@ -1,1793 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "../i915_selftest.h"
-
-#include <linux/prime_numbers.h>
-
-#include "igt_gem_utils.h"
-#include "mock_drm.h"
-#include "i915_random.h"
-
-static const unsigned int page_sizes[] = {
- I915_GTT_PAGE_SIZE_2M,
- I915_GTT_PAGE_SIZE_64K,
- I915_GTT_PAGE_SIZE_4K,
-};
-
-static unsigned int get_largest_page_size(struct drm_i915_private *i915,
- u64 rem)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
- unsigned int page_size = page_sizes[i];
-
- if (HAS_PAGE_SIZES(i915, page_size) && rem >= page_size)
- return page_size;
- }
-
- return 0;
-}
-
-static void huge_pages_free_pages(struct sg_table *st)
-{
- struct scatterlist *sg;
-
- for (sg = st->sgl; sg; sg = __sg_next(sg)) {
- if (sg_page(sg))
- __free_pages(sg_page(sg), get_order(sg->length));
- }
-
- sg_free_table(st);
- kfree(st);
-}
-
-static int get_huge_pages(struct drm_i915_gem_object *obj)
-{
-#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
- unsigned int page_mask = obj->mm.page_mask;
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int sg_page_sizes;
- u64 rem;
-
- st = kmalloc(sizeof(*st), GFP);
- if (!st)
- return -ENOMEM;
-
- if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
- kfree(st);
- return -ENOMEM;
- }
-
- rem = obj->base.size;
- sg = st->sgl;
- st->nents = 0;
- sg_page_sizes = 0;
-
- /*
- * Our goal here is simple, we want to greedily fill the object from
- * largest to smallest page-size, while ensuring that we use *every*
- * page-size as per the given page-mask.
- */
- do {
- unsigned int bit = ilog2(page_mask);
- unsigned int page_size = BIT(bit);
- int order = get_order(page_size);
-
- do {
- struct page *page;
-
- GEM_BUG_ON(order >= MAX_ORDER);
- page = alloc_pages(GFP | __GFP_ZERO, order);
- if (!page)
- goto err;
-
- sg_set_page(sg, page, page_size, 0);
- sg_page_sizes |= page_size;
- st->nents++;
-
- rem -= page_size;
- if (!rem) {
- sg_mark_end(sg);
- break;
- }
-
- sg = __sg_next(sg);
- } while ((rem - ((page_size-1) & page_mask)) >= page_size);
-
- page_mask &= (page_size-1);
- } while (page_mask);
-
- if (i915_gem_gtt_prepare_pages(obj, st))
- goto err;
-
- obj->mm.madv = I915_MADV_DONTNEED;
-
- GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
- __i915_gem_object_set_pages(obj, st, sg_page_sizes);
-
- return 0;
-
-err:
- sg_set_page(sg, NULL, 0, 0);
- sg_mark_end(sg);
- huge_pages_free_pages(st);
-
- return -ENOMEM;
-}
-
-static void put_huge_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- i915_gem_gtt_finish_pages(obj, pages);
- huge_pages_free_pages(pages);
-
- obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
-}
-
-static const struct drm_i915_gem_object_ops huge_page_ops = {
- .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
- .get_pages = get_huge_pages,
- .put_pages = put_huge_pages,
-};
-
-static struct drm_i915_gem_object *
-huge_pages_object(struct drm_i915_private *i915,
- u64 size,
- unsigned int page_mask)
-{
- struct drm_i915_gem_object *obj;
-
- GEM_BUG_ON(!size);
- GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
-
- if (size >> PAGE_SHIFT > INT_MAX)
- return ERR_PTR(-E2BIG);
-
- if (overflows_type(size, obj->base.size))
- return ERR_PTR(-E2BIG);
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &huge_page_ops);
-
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_NONE;
-
- obj->mm.page_mask = page_mask;
-
- return obj;
-}
-
-static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- const u64 max_len = rounddown_pow_of_two(UINT_MAX);
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int sg_page_sizes;
- u64 rem;
-
- st = kmalloc(sizeof(*st), GFP);
- if (!st)
- return -ENOMEM;
-
- if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
- kfree(st);
- return -ENOMEM;
- }
-
- /* Use optimal page sized chunks to fill in the sg table */
- rem = obj->base.size;
- sg = st->sgl;
- st->nents = 0;
- sg_page_sizes = 0;
- do {
- unsigned int page_size = get_largest_page_size(i915, rem);
- unsigned int len = min(page_size * div_u64(rem, page_size),
- max_len);
-
- GEM_BUG_ON(!page_size);
-
- sg->offset = 0;
- sg->length = len;
- sg_dma_len(sg) = len;
- sg_dma_address(sg) = page_size;
-
- sg_page_sizes |= len;
-
- st->nents++;
-
- rem -= len;
- if (!rem) {
- sg_mark_end(sg);
- break;
- }
-
- sg = sg_next(sg);
- } while (1);
-
- i915_sg_trim(st);
-
- obj->mm.madv = I915_MADV_DONTNEED;
-
- __i915_gem_object_set_pages(obj, st, sg_page_sizes);
-
- return 0;
-}
-
-static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int page_size;
-
- st = kmalloc(sizeof(*st), GFP);
- if (!st)
- return -ENOMEM;
-
- if (sg_alloc_table(st, 1, GFP)) {
- kfree(st);
- return -ENOMEM;
- }
-
- sg = st->sgl;
- st->nents = 1;
-
- page_size = get_largest_page_size(i915, obj->base.size);
- GEM_BUG_ON(!page_size);
-
- sg->offset = 0;
- sg->length = obj->base.size;
- sg_dma_len(sg) = obj->base.size;
- sg_dma_address(sg) = page_size;
-
- obj->mm.madv = I915_MADV_DONTNEED;
-
- __i915_gem_object_set_pages(obj, st, sg->length);
-
- return 0;
-#undef GFP
-}
-
-static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- sg_free_table(pages);
- kfree(pages);
-}
-
-static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
- struct sg_table *pages)
-{
- fake_free_huge_pages(obj, pages);
- obj->mm.dirty = false;
- obj->mm.madv = I915_MADV_WILLNEED;
-}
-
-static const struct drm_i915_gem_object_ops fake_ops = {
- .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
- .get_pages = fake_get_huge_pages,
- .put_pages = fake_put_huge_pages,
-};
-
-static const struct drm_i915_gem_object_ops fake_ops_single = {
- .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
- .get_pages = fake_get_huge_pages_single,
- .put_pages = fake_put_huge_pages,
-};
-
-static struct drm_i915_gem_object *
-fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
-{
- struct drm_i915_gem_object *obj;
-
- GEM_BUG_ON(!size);
- GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
-
- if (size >> PAGE_SHIFT > UINT_MAX)
- return ERR_PTR(-E2BIG);
-
- if (overflows_type(size, obj->base.size))
- return ERR_PTR(-E2BIG);
-
- obj = i915_gem_object_alloc();
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- drm_gem_private_object_init(&i915->drm, &obj->base, size);
-
- if (single)
- i915_gem_object_init(obj, &fake_ops_single);
- else
- i915_gem_object_init(obj, &fake_ops);
-
- obj->write_domain = I915_GEM_DOMAIN_CPU;
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_NONE;
-
- return obj;
-}
-
-static int igt_check_page_sizes(struct i915_vma *vma)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- unsigned int supported = INTEL_INFO(i915)->page_sizes;
- struct drm_i915_gem_object *obj = vma->obj;
- int err = 0;
-
- if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
- pr_err("unsupported page_sizes.sg=%u, supported=%u\n",
- vma->page_sizes.sg & ~supported, supported);
- err = -EINVAL;
- }
-
- if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
- pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
- vma->page_sizes.gtt & ~supported, supported);
- err = -EINVAL;
- }
-
- if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
- pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
- vma->page_sizes.phys, obj->mm.page_sizes.phys);
- err = -EINVAL;
- }
-
- if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
- pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
- vma->page_sizes.sg, obj->mm.page_sizes.sg);
- err = -EINVAL;
- }
-
- if (obj->mm.page_sizes.gtt) {
- pr_err("obj->page_sizes.gtt(%u) should never be set\n",
- obj->mm.page_sizes.gtt);
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int igt_mock_exhaust_device_supported_pages(void *arg)
-{
- struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int i, j, single;
- int err;
-
- /*
- * Sanity check creating objects with every valid page support
- * combination for our mock device.
- */
-
- for (i = 1; i < BIT(ARRAY_SIZE(page_sizes)); i++) {
- unsigned int combination = 0;
-
- for (j = 0; j < ARRAY_SIZE(page_sizes); j++) {
- if (i & BIT(j))
- combination |= page_sizes[j];
- }
-
- mkwrite_device_info(i915)->page_sizes = combination;
-
- for (single = 0; single <= 1; ++single) {
- obj = fake_huge_pages_object(i915, combination, !!single);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_device;
- }
-
- if (obj->base.size != combination) {
- pr_err("obj->base.size=%zu, expected=%u\n",
- obj->base.size, combination);
- err = -EINVAL;
- goto out_put;
- }
-
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto out_close;
-
- err = igt_check_page_sizes(vma);
-
- if (vma->page_sizes.sg != combination) {
- pr_err("page_sizes.sg=%u, expected=%u\n",
- vma->page_sizes.sg, combination);
- err = -EINVAL;
- }
-
- i915_vma_unpin(vma);
- i915_vma_close(vma);
-
- i915_gem_object_put(obj);
-
- if (err)
- goto out_device;
- }
- }
-
- goto out_device;
-
-out_close:
- i915_vma_close(vma);
-out_put:
- i915_gem_object_put(obj);
-out_device:
- mkwrite_device_info(i915)->page_sizes = saved_mask;
-
- return err;
-}
-
-static int igt_mock_ppgtt_misaligned_dma(void *arg)
-{
- struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
- struct drm_i915_gem_object *obj;
- int bit;
- int err;
-
- /*
- * Sanity check dma misalignment for huge pages -- the dma addresses we
- * insert into the paging structures need to always respect the page
- * size alignment.
- */
-
- bit = ilog2(I915_GTT_PAGE_SIZE_64K);
-
- for_each_set_bit_from(bit, &supported,
- ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
- IGT_TIMEOUT(end_time);
- unsigned int page_size = BIT(bit);
- unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
- unsigned int offset;
- unsigned int size =
- round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
- struct i915_vma *vma;
-
- obj = fake_huge_pages_object(i915, size, true);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- if (obj->base.size != size) {
- pr_err("obj->base.size=%zu, expected=%u\n",
- obj->base.size, size);
- err = -EINVAL;
- goto out_put;
- }
-
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out_put;
-
- /* Force the page size for this object */
- obj->mm.page_sizes.sg = page_size;
-
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_unpin;
- }
-
- err = i915_vma_pin(vma, 0, 0, flags);
- if (err) {
- i915_vma_close(vma);
- goto out_unpin;
- }
-
-
- err = igt_check_page_sizes(vma);
-
- if (vma->page_sizes.gtt != page_size) {
- pr_err("page_sizes.gtt=%u, expected %u\n",
- vma->page_sizes.gtt, page_size);
- err = -EINVAL;
- }
-
- i915_vma_unpin(vma);
-
- if (err) {
- i915_vma_close(vma);
- goto out_unpin;
- }
-
- /*
- * Try all the other valid offsets until the next
- * boundary -- should always fall back to using 4K
- * pages.
- */
- for (offset = 4096; offset < page_size; offset += 4096) {
- err = i915_vma_unbind(vma);
- if (err) {
- i915_vma_close(vma);
- goto out_unpin;
- }
-
- err = i915_vma_pin(vma, 0, 0, flags | offset);
- if (err) {
- i915_vma_close(vma);
- goto out_unpin;
- }
-
- err = igt_check_page_sizes(vma);
-
- if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
- pr_err("page_sizes.gtt=%u, expected %llu\n",
- vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
- err = -EINVAL;
- }
-
- i915_vma_unpin(vma);
-
- if (err) {
- i915_vma_close(vma);
- goto out_unpin;
- }
-
- if (igt_timeout(end_time,
- "%s timed out at offset %x with page-size %x\n",
- __func__, offset, page_size))
- break;
- }
-
- i915_vma_close(vma);
-
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
- }
-
- return 0;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
-
- return err;
-}
-
-static void close_object_list(struct list_head *objects,
- struct i915_hw_ppgtt *ppgtt)
-{
- struct drm_i915_gem_object *obj, *on;
-
- list_for_each_entry_safe(obj, on, objects, st_link) {
- struct i915_vma *vma;
-
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (!IS_ERR(vma))
- i915_vma_close(vma);
-
- list_del(&obj->st_link);
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
- }
-}
-
-static int igt_mock_ppgtt_huge_fill(void *arg)
-{
- struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->vm.i915;
- unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
- unsigned long page_num;
- bool single = false;
- LIST_HEAD(objects);
- IGT_TIMEOUT(end_time);
- int err = -ENODEV;
-
- for_each_prime_number_from(page_num, 1, max_pages) {
- struct drm_i915_gem_object *obj;
- u64 size = page_num << PAGE_SHIFT;
- struct i915_vma *vma;
- unsigned int expected_gtt = 0;
- int i;
-
- obj = fake_huge_pages_object(i915, size, single);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- break;
- }
-
- if (obj->base.size != size) {
- pr_err("obj->base.size=%zd, expected=%llu\n",
- obj->base.size, size);
- i915_gem_object_put(obj);
- err = -EINVAL;
- break;
- }
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- i915_gem_object_put(obj);
- break;
- }
-
- list_add(&obj->st_link, &objects);
-
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- break;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- break;
-
- err = igt_check_page_sizes(vma);
- if (err) {
- i915_vma_unpin(vma);
- break;
- }
-
- /*
- * Figure out the expected gtt page size knowing that we go from
- * largest to smallest page size sg chunks, and that we align to
- * the largest page size.
- */
- for (i = 0; i < ARRAY_SIZE(page_sizes); ++i) {
- unsigned int page_size = page_sizes[i];
-
- if (HAS_PAGE_SIZES(i915, page_size) &&
- size >= page_size) {
- expected_gtt |= page_size;
- size &= page_size-1;
- }
- }
-
- GEM_BUG_ON(!expected_gtt);
- GEM_BUG_ON(size);
-
- if (expected_gtt & I915_GTT_PAGE_SIZE_4K)
- expected_gtt &= ~I915_GTT_PAGE_SIZE_64K;
-
- i915_vma_unpin(vma);
-
- if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
- if (!IS_ALIGNED(vma->node.start,
- I915_GTT_PAGE_SIZE_2M)) {
- pr_err("node.start(%llx) not aligned to 2M\n",
- vma->node.start);
- err = -EINVAL;
- break;
- }
-
- if (!IS_ALIGNED(vma->node.size,
- I915_GTT_PAGE_SIZE_2M)) {
- pr_err("node.size(%llx) not aligned to 2M\n",
- vma->node.size);
- err = -EINVAL;
- break;
- }
- }
-
- if (vma->page_sizes.gtt != expected_gtt) {
- pr_err("gtt=%u, expected=%u, size=%zd, single=%s\n",
- vma->page_sizes.gtt, expected_gtt,
- obj->base.size, yesno(!!single));
- err = -EINVAL;
- break;
- }
-
- if (igt_timeout(end_time,
- "%s timed out at size %zd\n",
- __func__, obj->base.size))
- break;
-
- single = !single;
- }
-
- close_object_list(&objects, ppgtt);
-
- if (err == -ENOMEM || err == -ENOSPC)
- err = 0;
-
- return err;
-}
-
-static int igt_mock_ppgtt_64K(void *arg)
-{
- struct i915_hw_ppgtt *ppgtt = arg;
- struct drm_i915_private *i915 = ppgtt->vm.i915;
- struct drm_i915_gem_object *obj;
- const struct object_info {
- unsigned int size;
- unsigned int gtt;
- unsigned int offset;
- } objects[] = {
- /* Cases with forced padding/alignment */
- {
- .size = SZ_64K,
- .gtt = I915_GTT_PAGE_SIZE_64K,
- .offset = 0,
- },
- {
- .size = SZ_64K + SZ_4K,
- .gtt = I915_GTT_PAGE_SIZE_4K,
- .offset = 0,
- },
- {
- .size = SZ_64K - SZ_4K,
- .gtt = I915_GTT_PAGE_SIZE_4K,
- .offset = 0,
- },
- {
- .size = SZ_2M,
- .gtt = I915_GTT_PAGE_SIZE_64K,
- .offset = 0,
- },
- {
- .size = SZ_2M - SZ_4K,
- .gtt = I915_GTT_PAGE_SIZE_4K,
- .offset = 0,
- },
- {
- .size = SZ_2M + SZ_4K,
- .gtt = I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_4K,
- .offset = 0,
- },
- {
- .size = SZ_2M + SZ_64K,
- .gtt = I915_GTT_PAGE_SIZE_64K,
- .offset = 0,
- },
- {
- .size = SZ_2M - SZ_64K,
- .gtt = I915_GTT_PAGE_SIZE_64K,
- .offset = 0,
- },
- /* Try without any forced padding/alignment */
- {
- .size = SZ_64K,
- .offset = SZ_2M,
- .gtt = I915_GTT_PAGE_SIZE_4K,
- },
- {
- .size = SZ_128K,
- .offset = SZ_2M - SZ_64K,
- .gtt = I915_GTT_PAGE_SIZE_4K,
- },
- };
- struct i915_vma *vma;
- int i, single;
- int err;
-
- /*
- * Sanity check some of the trickiness with 64K pages -- either we can
- * safely mark the whole page-table (2M block) as 64K, or we have to
- * always fall back to 4K.
- */
-
- if (!HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K))
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(objects); ++i) {
- unsigned int size = objects[i].size;
- unsigned int expected_gtt = objects[i].gtt;
- unsigned int offset = objects[i].offset;
- unsigned int flags = PIN_USER;
-
- for (single = 0; single <= 1; single++) {
- obj = fake_huge_pages_object(i915, size, !!single);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out_object_put;
-
- /*
- * Disable 2M pages -- We only want to use 64K/4K pages
- * for this test.
- */
- obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
-
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_object_unpin;
- }
-
- if (offset)
- flags |= PIN_OFFSET_FIXED | offset;
-
- err = i915_vma_pin(vma, 0, 0, flags);
- if (err)
- goto out_vma_close;
-
- err = igt_check_page_sizes(vma);
- if (err)
- goto out_vma_unpin;
-
- if (!offset && vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K) {
- if (!IS_ALIGNED(vma->node.start,
- I915_GTT_PAGE_SIZE_2M)) {
- pr_err("node.start(%llx) not aligned to 2M\n",
- vma->node.start);
- err = -EINVAL;
- goto out_vma_unpin;
- }
-
- if (!IS_ALIGNED(vma->node.size,
- I915_GTT_PAGE_SIZE_2M)) {
- pr_err("node.size(%llx) not aligned to 2M\n",
- vma->node.size);
- err = -EINVAL;
- goto out_vma_unpin;
- }
- }
-
- if (vma->page_sizes.gtt != expected_gtt) {
- pr_err("gtt=%u, expected=%u, i=%d, single=%s\n",
- vma->page_sizes.gtt, expected_gtt, i,
- yesno(!!single));
- err = -EINVAL;
- goto out_vma_unpin;
- }
-
- i915_vma_unpin(vma);
- i915_vma_close(vma);
-
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
- }
- }
-
- return 0;
-
-out_vma_unpin:
- i915_vma_unpin(vma);
-out_vma_close:
- i915_vma_close(vma);
-out_object_unpin:
- i915_gem_object_unpin_pages(obj);
-out_object_put:
- i915_gem_object_put(obj);
-
- return err;
-}
-
-static struct i915_vma *
-gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
-{
- struct drm_i915_private *i915 = vma->vm->i915;
- const int gen = INTEL_GEN(i915);
- unsigned int count = vma->size >> PAGE_SHIFT;
- struct drm_i915_gem_object *obj;
- struct i915_vma *batch;
- unsigned int size;
- u32 *cmd;
- int n;
- int err;
-
- size = (1 + 4 * count) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- offset += vma->node.start;
-
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = val;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = val;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = val;
- }
-
- offset += PAGE_SIZE;
- }
-
- *cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
-
- i915_gem_object_unpin_map(obj);
-
- batch = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err;
- }
-
- err = i915_vma_pin(batch, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return batch;
-
-err:
- i915_gem_object_put(obj);
-
- return ERR_PTR(err);
-}
-
-static int gpu_write(struct i915_vma *vma,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 dword,
- u32 value)
-{
- struct i915_request *rq;
- struct i915_vma *batch;
- int err;
-
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
-
- err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (err)
- return err;
-
- batch = gpu_write_dw(vma, dword * sizeof(u32), value);
- if (IS_ERR(batch))
- return PTR_ERR(batch);
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- err = i915_vma_move_to_active(batch, rq, 0);
- if (err)
- goto err_request;
-
- i915_gem_object_set_active_reference(batch->obj);
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- if (err)
- goto err_request;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
-err_request:
- if (err)
- i915_request_skip(rq, err);
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_close(batch);
-
- return err;
-}
-
-static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
-{
- unsigned int needs_flush;
- unsigned long n;
- int err;
-
- err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
- if (err)
- return err;
-
- for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
- u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
-
- if (needs_flush & CLFLUSH_BEFORE)
- drm_clflush_virt_range(ptr, PAGE_SIZE);
-
- if (ptr[dword] != val) {
- pr_err("n=%lu ptr[%u]=%u, val=%u\n",
- n, dword, ptr[dword], val);
- kunmap_atomic(ptr);
- err = -EINVAL;
- break;
- }
-
- kunmap_atomic(ptr);
- }
-
- i915_gem_obj_finish_shmem_access(obj);
-
- return err;
-}
-
-static int __igt_write_huge(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct drm_i915_gem_object *obj,
- u64 size, u64 offset,
- u32 dword, u32 val)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
- unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
- struct i915_vma *vma;
- int err;
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_vma_unbind(vma);
- if (err)
- goto out_vma_close;
-
- err = i915_vma_pin(vma, size, 0, flags | offset);
- if (err) {
- /*
- * The ggtt may have some pages reserved so
- * refrain from erroring out.
- */
- if (err == -ENOSPC && i915_is_ggtt(vm))
- err = 0;
-
- goto out_vma_close;
- }
-
- err = igt_check_page_sizes(vma);
- if (err)
- goto out_vma_unpin;
-
- err = gpu_write(vma, ctx, engine, dword, val);
- if (err) {
- pr_err("gpu-write failed at offset=%llx\n", offset);
- goto out_vma_unpin;
- }
-
- err = cpu_check(obj, dword, val);
- if (err) {
- pr_err("cpu-check failed at offset=%llx\n", offset);
- goto out_vma_unpin;
- }
-
-out_vma_unpin:
- i915_vma_unpin(vma);
-out_vma_close:
- i915_vma_destroy(vma);
-
- return err;
-}
-
-static int igt_write_huge(struct i915_gem_context *ctx,
- struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
- static struct intel_engine_cs *engines[I915_NUM_ENGINES];
- struct intel_engine_cs *engine;
- I915_RND_STATE(prng);
- IGT_TIMEOUT(end_time);
- unsigned int max_page_size;
- unsigned int id;
- u64 max;
- u64 num;
- u64 size;
- int *order;
- int i, n;
- int err = 0;
-
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- size = obj->base.size;
- if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
- size = round_up(size, I915_GTT_PAGE_SIZE_2M);
-
- max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
- max = div_u64((vm->total - size), max_page_size);
-
- n = 0;
- for_each_engine(engine, i915, id) {
- if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n",
- id);
- continue;
- }
- engines[n++] = engine;
- }
-
- if (!n)
- return 0;
-
- /*
- * To keep things interesting when alternating between engines in our
- * randomized order, let's also make feeding to the same engine a few
- * times in succession a possibility by enlarging the permutation array.
- */
- order = i915_random_order(n * I915_NUM_ENGINES, &prng);
- if (!order)
- return -ENOMEM;
-
- /*
- * Try various offsets in an ascending/descending fashion until we
- * timeout -- we want to avoid issues hidden by effectively always using
- * offset = 0.
- */
- i = 0;
- for_each_prime_number_from(num, 0, max) {
- u64 offset_low = num * max_page_size;
- u64 offset_high = (max - num) * max_page_size;
- u32 dword = offset_in_page(num) / 4;
-
- engine = engines[order[i] % n];
- i = (i + 1) % (n * I915_NUM_ENGINES);
-
- /*
- * In order to utilize 64K pages we need to both pad the vma
- * size and ensure the vma offset is at the start of the pt
- * boundary, however to improve coverage we opt for testing both
- * aligned and unaligned offsets.
- */
- if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
- offset_low = round_down(offset_low,
- I915_GTT_PAGE_SIZE_2M);
-
- err = __igt_write_huge(ctx, engine, obj, size, offset_low,
- dword, num + 1);
- if (err)
- break;
-
- err = __igt_write_huge(ctx, engine, obj, size, offset_high,
- dword, num + 1);
- if (err)
- break;
-
- if (igt_timeout(end_time,
- "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
- __func__, engine->id, offset_low, offset_high,
- max_page_size))
- break;
- }
-
- kfree(order);
-
- return err;
-}
-
-static int igt_ppgtt_exhaust_huge(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
- static unsigned int pages[ARRAY_SIZE(page_sizes)];
- struct drm_i915_gem_object *obj;
- unsigned int size_mask;
- unsigned int page_mask;
- int n, i;
- int err = -ENODEV;
-
- if (supported == I915_GTT_PAGE_SIZE_4K)
- return 0;
-
- /*
- * Sanity check creating objects with a varying mix of page sizes --
- * ensuring that our writes land in the right place.
- */
-
- n = 0;
- for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
- pages[n++] = BIT(i);
-
- for (size_mask = 2; size_mask < BIT(n); size_mask++) {
- unsigned int size = 0;
-
- for (i = 0; i < n; i++) {
- if (size_mask & BIT(i))
- size |= pages[i];
- }
-
- /*
- * For our page mask we want to enumerate all the page-size
- * combinations which will fit into our chosen object size.
- */
- for (page_mask = 2; page_mask <= size_mask; page_mask++) {
- unsigned int page_sizes = 0;
-
- for (i = 0; i < n; i++) {
- if (page_mask & BIT(i))
- page_sizes |= pages[i];
- }
-
- /*
- * Ensure that we can actually fill the given object
- * with our chosen page mask.
- */
- if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
- continue;
-
- obj = huge_pages_object(i915, size, page_sizes);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_device;
- }
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- i915_gem_object_put(obj);
-
- if (err == -ENOMEM) {
- pr_info("unable to get pages, size=%u, pages=%u\n",
- size, page_sizes);
- err = 0;
- break;
- }
-
- pr_err("pin_pages failed, size=%u, pages=%u\n",
- size_mask, page_mask);
-
- goto out_device;
- }
-
- /* Force the page-size for the gtt insertion */
- obj->mm.page_sizes.sg = page_sizes;
-
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("exhaust write-huge failed with size=%u\n",
- size);
- goto out_unpin;
- }
-
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
- }
- }
-
- goto out_device;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
- i915_gem_object_put(obj);
-out_device:
- mkwrite_device_info(i915)->page_sizes = supported;
-
- return err;
-}
-
-static int igt_ppgtt_internal_huge(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_64K,
- SZ_128K,
- SZ_256K,
- SZ_512K,
- SZ_1M,
- SZ_2M,
- };
- int i;
- int err;
-
- /*
- * Sanity check that the HW uses huge pages correctly through internal
- * -- ensure that our writes land in the right place.
- */
-
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
-
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out_put;
-
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
- pr_info("internal unable to allocate huge-page(s) with size=%u\n",
- size);
- goto out_unpin;
- }
-
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("internal write-huge failed with size=%u\n",
- size);
- goto out_unpin;
- }
-
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
- }
-
- return 0;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
-
- return err;
-}
-
-static inline bool igt_can_allocate_thp(struct drm_i915_private *i915)
-{
- return i915->mm.gemfs && has_transparent_hugepage();
-}
-
-static int igt_ppgtt_gemfs_huge(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
- static const unsigned int sizes[] = {
- SZ_2M,
- SZ_4M,
- SZ_8M,
- SZ_16M,
- SZ_32M,
- };
- int i;
- int err;
-
- /*
- * Sanity check that the HW uses huge pages correctly through gemfs --
- * ensure that our writes land in the right place.
- */
-
- if (!igt_can_allocate_thp(i915)) {
- pr_info("missing THP support, skipping\n");
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
- unsigned int size = sizes[i];
-
- obj = i915_gem_object_create(i915, size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- err = i915_gem_object_pin_pages(obj);
- if (err)
- goto out_put;
-
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
- pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
- size);
- goto out_unpin;
- }
-
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("gemfs write-huge failed with size=%u\n",
- size);
- goto out_unpin;
- }
-
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- i915_gem_object_put(obj);
- }
-
- return 0;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out_put:
- i915_gem_object_put(obj);
-
- return err;
-}
-
-static int igt_ppgtt_pin_update(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *dev_priv = ctx->i915;
- unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
- int first, last;
- int err;
-
- /*
- * Make sure there's no funny business when doing a PIN_UPDATE -- in the
- * past we had a subtle issue with being able to incorrectly do multiple
- * alloc va ranges on the same object when doing a PIN_UPDATE, which
- * resulted in some pretty nasty bugs, though only when using
- * huge-gtt-pages.
- */
-
- if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
- pr_info("48b PPGTT not supported, skipping\n");
- return 0;
- }
-
- first = ilog2(I915_GTT_PAGE_SIZE_64K);
- last = ilog2(I915_GTT_PAGE_SIZE_2M);
-
- for_each_set_bit_from(first, &supported, last + 1) {
- unsigned int page_size = BIT(first);
-
- obj = i915_gem_object_create_internal(dev_priv, page_size);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
-
- err = i915_vma_pin(vma, SZ_2M, 0, flags);
- if (err)
- goto out_close;
-
- if (vma->page_sizes.sg < page_size) {
- pr_info("Unable to allocate page-size %x, finishing test early\n",
- page_size);
- goto out_unpin;
- }
-
- err = igt_check_page_sizes(vma);
- if (err)
- goto out_unpin;
-
- if (vma->page_sizes.gtt != page_size) {
- dma_addr_t addr = i915_gem_object_get_dma_address(obj, 0);
-
- /*
- * The only valid reason for this to ever fail would be
- * if the dma-mapper screwed us over when we did the
- * dma_map_sg(), since it has the final say over the dma
- * address.
- */
- if (IS_ALIGNED(addr, page_size)) {
- pr_err("page_sizes.gtt=%u, expected=%u\n",
- vma->page_sizes.gtt, page_size);
- err = -EINVAL;
- } else {
- pr_info("dma address misaligned, finishing test early\n");
- }
-
- goto out_unpin;
- }
-
- err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE);
- if (err)
- goto out_unpin;
-
- i915_vma_unpin(vma);
- i915_vma_close(vma);
-
- i915_gem_object_put(obj);
- }
-
- obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
-
- err = i915_vma_pin(vma, 0, 0, flags);
- if (err)
- goto out_close;
-
- /*
- * Make sure we don't end up with something like where the pde is still
- * pointing to the 2M page, and the pt we just filled-in is dangling --
- * we can check this by writing to the first page where it would then
- * land in the now stale 2M page.
- */
-
- err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
- if (err)
- goto out_unpin;
-
- err = cpu_check(obj, 0, 0xdeadbeaf);
-
-out_unpin:
- i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
-out_put:
- i915_gem_object_put(obj);
-
- return err;
-}
-
-static int igt_tmpfs_fallback(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 *vaddr;
- int err = 0;
-
- /*
- * Make sure that we don't burst into a ball of flames upon falling back
- * to tmpfs, which we rely on if, on the off chance, we encounter a failure
- * when setting up gemfs.
- */
-
- i915->mm.gemfs = NULL;
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_restore;
- }
-
- vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto out_put;
- }
- *vaddr = 0xdeadbeaf;
-
- __i915_gem_object_flush_map(obj, 0, 64);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto out_close;
-
- err = igt_check_page_sizes(vma);
-
- i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
-out_put:
- i915_gem_object_put(obj);
-out_restore:
- i915->mm.gemfs = gemfs;
-
- return err;
-}
-
-static int igt_shrink_thp(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- unsigned int flags = PIN_USER;
- int err;
-
- /*
- * Sanity check shrinking huge-paged object -- make sure nothing blows
- * up.
- */
-
- if (!igt_can_allocate_thp(i915)) {
- pr_info("missing THP support, skipping\n");
- return 0;
- }
-
- obj = i915_gem_object_create(i915, SZ_2M);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_put;
- }
-
- err = i915_vma_pin(vma, 0, 0, flags);
- if (err)
- goto out_close;
-
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
- pr_info("failed to allocate THP, finishing test early\n");
- goto out_unpin;
- }
-
- err = igt_check_page_sizes(vma);
- if (err)
- goto out_unpin;
-
- err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
- if (err)
- goto out_unpin;
-
- i915_vma_unpin(vma);
-
- /*
- * Now that the pages are *unpinned* shrink-all should invoke
- * shmem to truncate our pages.
- */
- i915_gem_shrink_all(i915);
- if (i915_gem_object_has_pages(obj)) {
- pr_err("shrink-all didn't truncate the pages\n");
- err = -EINVAL;
- goto out_close;
- }
-
- if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
- pr_err("residual page-size bits left\n");
- err = -EINVAL;
- goto out_close;
- }
-
- err = i915_vma_pin(vma, 0, 0, flags);
- if (err)
- goto out_close;
-
- err = cpu_check(obj, 0, 0xdeadbeaf);
-
-out_unpin:
- i915_vma_unpin(vma);
-out_close:
- i915_vma_close(vma);
-out_put:
- i915_gem_object_put(obj);
-
- return err;
-}
-
-int i915_gem_huge_page_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_mock_exhaust_device_supported_pages),
- SUBTEST(igt_mock_ppgtt_misaligned_dma),
- SUBTEST(igt_mock_ppgtt_huge_fill),
- SUBTEST(igt_mock_ppgtt_64K),
- };
- struct drm_i915_private *dev_priv;
- struct i915_hw_ppgtt *ppgtt;
- int err;
-
- dev_priv = mock_gem_device();
- if (!dev_priv)
- return -ENOMEM;
-
- /* Pretend to be a device which supports the 48b PPGTT */
- mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
- mkwrite_device_info(dev_priv)->ppgtt_size = 48;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv);
- if (IS_ERR(ppgtt)) {
- err = PTR_ERR(ppgtt);
- goto out_unlock;
- }
-
- if (!i915_vm_is_4lvl(&ppgtt->vm)) {
- pr_err("failed to create 48b PPGTT\n");
- err = -EINVAL;
- goto out_close;
- }
-
- /* If we ever hit this then it's time to mock the 64K scratch */
- if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
- pr_err("PPGTT missing 64K scratch page\n");
- err = -EINVAL;
- goto out_close;
- }
-
- err = i915_subtests(tests, ppgtt);
-
-out_close:
- i915_ppgtt_put(ppgtt);
-
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
- drm_dev_put(&dev_priv->drm);
-
- return err;
-}
-
-int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_shrink_thp),
- SUBTEST(igt_ppgtt_pin_update),
- SUBTEST(igt_tmpfs_fallback),
- SUBTEST(igt_ppgtt_exhaust_huge),
- SUBTEST(igt_ppgtt_gemfs_huge),
- SUBTEST(igt_ppgtt_internal_huge),
- };
- struct drm_file *file;
- struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
- int err;
-
- if (!HAS_PPGTT(dev_priv)) {
- pr_info("PPGTT not supported, skipping live-selftests\n");
- return 0;
- }
-
- if (i915_terminally_wedged(dev_priv))
- return 0;
-
- file = mock_file(dev_priv);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
-
- ctx = live_context(dev_priv, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_unlock;
- }
-
- if (ctx->ppgtt)
- ctx->ppgtt->vm.scrub_64K = true;
-
- err = i915_subtests(tests, ctx);
-
-out_unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- mock_file_free(dev_priv, file);
-
- return err;
-}
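
Note: get_huge_pages() in the file deleted above carves the object greedily, from the largest page size set in obj->mm.page_mask down to the smallest, and its inner-loop condition reserves enough of the remainder for every smaller size still pending so each requested size gets used at least once. A standalone userspace sketch of that carving loop, with printf standing in for sg_set_page() and a hard-coded 2M + 64K + 4K case (the names and sizes here are illustrative assumptions, not the driver's):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int page_mask = (1u << 21) | (1u << 16) | (1u << 12);
	uint64_t rem = (2u << 20) + (64u << 10) + 4096; /* 2M + 64K + 4K */

	do {
		unsigned int bit = 31 - __builtin_clz(page_mask);
		uint64_t page_size = 1ull << bit;

		do {
			printf("chunk of %llu bytes\n",
			       (unsigned long long)page_size);
			rem -= page_size;
			if (!rem)
				break;
			/* keep carving this size only while enough remains
			 * for the smaller sizes still in the mask */
		} while ((rem - ((page_size - 1) & page_mask)) >= page_size);

		page_mask &= page_size - 1;
	} while (page_mask);

	return 0;
}

With all three bits set this prints one chunk of each size, mirroring the GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask) invariant in the original.
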
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index eee838dc0634..c0b3537a5fa6 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -4,7 +4,9 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+
+#include "i915_selftest.h"
#include "igt_flush_test.h"
#include "lib_sw_fence.h"
@@ -95,7 +97,7 @@ static int live_active_wait(void *arg)
/* Check that we get a callback when requests retire upon waiting */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = __live_active_setup(i915, &active);
@@ -109,7 +111,7 @@ static int live_active_wait(void *arg)
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -124,7 +126,7 @@ static int live_active_retire(void *arg)
/* Check that we get a callback when requests are indirectly retired */
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = __live_active_setup(i915, &active);
@@ -138,7 +140,7 @@ static int live_active_retire(void *arg)
}
i915_active_fini(&active.base);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
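
Note: the i915_active.c hunks above (and the i915_gem.c hunks that follow) are mechanical: intel_runtime_pm_get()/intel_runtime_pm_put() and with_intel_runtime_pm() now take &i915->runtime_pm instead of the whole drm_i915_private. The underlying pattern is a get/put pair keyed by a wakeref cookie, with the with_* macro scoping the pair around a block. A self-contained userspace mock of that shape -- the struct body, the counting, and the macro here are simplified stand-ins, not the driver's real internals:

#include <stdio.h>

struct intel_runtime_pm { int count; };
typedef int intel_wakeref_t;

static intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return ++rpm->count;	/* stand-in for waking the device */
}

static void intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				 intel_wakeref_t wakeref)
{
	(void)wakeref;		/* the real code tracks the cookie */
	--rpm->count;
}

/* Scoped form mirroring with_intel_runtime_pm(): run the body once
 * with the device held awake, releasing on exit. */
#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

int main(void)
{
	struct intel_runtime_pm rpm = { 0 };
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&rpm, wakeref)
		printf("awake, count=%d\n", rpm.count);

	printf("asleep, count=%d\n", rpm.count);
	return 0;
}

Because only the first argument changed, every hunk in these files swaps i915 for &i915->runtime_pm without touching the body of the guarded block.
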
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index c6a9bff85311..c6a01a6e87f1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -6,11 +6,13 @@
#include <linux/random.h>
-#include "../i915_selftest.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
+
+#include "i915_selftest.h"
-#include "igt_gem_utils.h"
#include "igt_flush_test.h"
-#include "mock_context.h"
+#include "mock_drm.h"
static int switch_to_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
@@ -61,7 +63,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/*
* As a final sting in the tail, invalidate stolen. Under a real S4,
@@ -72,7 +74,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
*/
trash_stolen(i915);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
static int pm_prepare(struct drm_i915_private *i915)
@@ -86,7 +88,7 @@ static void pm_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_suspend_late(i915);
}
@@ -96,7 +98,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_gem_suspend_gtt_mappings(i915);
i915_gem_freeze(i915);
@@ -112,7 +114,7 @@ static void pm_resume(struct drm_i915_private *i915)
* Both suspend and hibernate follow the same wakeup path and assume
* that runtime-pm just works.
*/
- with_intel_runtime_pm(i915, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
intel_gt_sanitize(i915, false);
i915_gem_sanitize(i915);
i915_gem_resume(i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
deleted file mode 100644
index 046a38743152..000000000000
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <linux/prime_numbers.h>
-
-#include "../i915_selftest.h"
-#include "i915_random.h"
-
-static int cpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
-{
- unsigned int needs_clflush;
- struct page *page;
- void *map;
- u32 *cpu;
- int err;
-
- err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
- if (err)
- return err;
-
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
- map = kmap_atomic(page);
- cpu = map + offset_in_page(offset);
-
- if (needs_clflush & CLFLUSH_BEFORE)
- drm_clflush_virt_range(cpu, sizeof(*cpu));
-
- *cpu = v;
-
- if (needs_clflush & CLFLUSH_AFTER)
- drm_clflush_virt_range(cpu, sizeof(*cpu));
-
- kunmap_atomic(map);
- i915_gem_obj_finish_shmem_access(obj);
-
- return 0;
-}
-
-static int cpu_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
-{
- unsigned int needs_clflush;
- struct page *page;
- void *map;
- u32 *cpu;
- int err;
-
- err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
- if (err)
- return err;
-
- page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
- map = kmap_atomic(page);
- cpu = map + offset_in_page(offset);
-
- if (needs_clflush & CLFLUSH_BEFORE)
- drm_clflush_virt_range(cpu, sizeof(*cpu));
-
- *v = *cpu;
-
- kunmap_atomic(map);
- i915_gem_obj_finish_shmem_access(obj);
-
- return 0;
-}
-
-static int gtt_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
-{
- struct i915_vma *vma;
- u32 __iomem *map;
- int err;
-
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- if (err)
- return err;
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- map = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- iowrite32(v, &map[offset / sizeof(*map)]);
- i915_vma_unpin_iomap(vma);
-
- return 0;
-}
-
-static int gtt_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
-{
- struct i915_vma *vma;
- u32 __iomem *map;
- int err;
-
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- return err;
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- map = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- *v = ioread32(&map[offset / sizeof(*map)]);
- i915_vma_unpin_iomap(vma);
-
- return 0;
-}
-
-static int wc_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
-{
- u32 *map;
- int err;
-
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- return err;
-
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- map[offset / sizeof(*map)] = v;
- i915_gem_object_unpin_map(obj);
-
- return 0;
-}
-
-static int wc_get(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 *v)
-{
- u32 *map;
- int err;
-
- err = i915_gem_object_set_to_wc_domain(obj, false);
- if (err)
- return err;
-
- map = i915_gem_object_pin_map(obj, I915_MAP_WC);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- *v = map[offset / sizeof(*map)];
- i915_gem_object_unpin_map(obj);
-
- return 0;
-}
-
-static int gpu_set(struct drm_i915_gem_object *obj,
- unsigned long offset,
- u32 v)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_request *rq;
- struct i915_vma *vma;
- u32 *cs;
- int err;
-
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- if (err)
- return err;
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- rq = i915_request_create(i915->engine[RCS0]->kernel_context);
- if (IS_ERR(rq)) {
- i915_vma_unpin(vma);
- return PTR_ERR(rq);
- }
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- i915_vma_unpin(vma);
- return PTR_ERR(cs);
- }
-
- if (INTEL_GEN(i915) >= 8) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
- *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
- *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
- *cs++ = v;
- } else if (INTEL_GEN(i915) >= 4) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = 0;
- *cs++ = i915_ggtt_offset(vma) + offset;
- *cs++ = v;
- } else {
- *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cs++ = i915_ggtt_offset(vma) + offset;
- *cs++ = v;
- *cs++ = MI_NOOP;
- }
- intel_ring_advance(rq, cs);
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unpin(vma);
-
- i915_request_add(rq);
-
- return err;
-}
-
-static bool always_valid(struct drm_i915_private *i915)
-{
- return true;
-}
-
-static bool needs_fence_registers(struct drm_i915_private *i915)
-{
- return !i915_terminally_wedged(i915);
-}
-
-static bool needs_mi_store_dword(struct drm_i915_private *i915)
-{
- if (i915_terminally_wedged(i915))
- return false;
-
- return intel_engine_can_store_dword(i915->engine[RCS0]);
-}
-
-static const struct igt_coherency_mode {
- const char *name;
- int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
- int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
- bool (*valid)(struct drm_i915_private *i915);
-} igt_coherency_mode[] = {
- { "cpu", cpu_set, cpu_get, always_valid },
- { "gtt", gtt_set, gtt_get, needs_fence_registers },
- { "wc", wc_set, wc_get, always_valid },
- { "gpu", gpu_set, NULL, needs_mi_store_dword },
- { },
-};
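The table is deliberately closed by an empty entry: the nested loops below rely on a NULL ->name as the sentinel, and the "gpu" mode's NULL ->get is why each walker tests both ->set and ->get. A minimal standalone version of the same idiom:

	#include <stdio.h>

	struct mode {
		const char *name;
		int (*get)(void);	/* may be NULL, like "gpu"'s ->get above */
	};

	static int get_one(void) { return 1; }

	static const struct mode modes[] = {
		{ "cpu", get_one },
		{ "gpu", NULL },
		{ },	/* name == NULL is the sentinel */
	};

	int main(void)
	{
		const struct mode *m;

		for (m = modes; m->name; m++)	/* no ARRAY_SIZE needed */
			printf("%s: %s\n", m->name,
			       m->get ? "readable" : "write-only");
		return 0;
	}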
-
-static int igt_gem_coherency(void *arg)
-{
- const unsigned int ncachelines = PAGE_SIZE/64;
- I915_RND_STATE(prng);
- struct drm_i915_private *i915 = arg;
- const struct igt_coherency_mode *read, *write, *over;
- struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
- unsigned long count, n;
- u32 *offsets, *values;
- int err = 0;
-
- /* We repeatedly write, overwrite and read from a sequence of
-	 * cachelines in order to try to detect incoherency (unflushed writes
- * from either the CPU or GPU). Each setter/getter uses our cache
- * domain API which should prevent incoherency.
- */
-
-	offsets = kmalloc_array(ncachelines, 2 * sizeof(u32), GFP_KERNEL);
- if (!offsets)
- return -ENOMEM;
- for (count = 0; count < ncachelines; count++)
- offsets[count] = count * 64 + 4 * (count % 16);
-
- values = offsets + ncachelines;
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
- for (over = igt_coherency_mode; over->name; over++) {
- if (!over->set)
- continue;
-
- if (!over->valid(i915))
- continue;
-
- for (write = igt_coherency_mode; write->name; write++) {
- if (!write->set)
- continue;
-
- if (!write->valid(i915))
- continue;
-
- for (read = igt_coherency_mode; read->name; read++) {
- if (!read->get)
- continue;
-
- if (!read->valid(i915))
- continue;
-
- for_each_prime_number_from(count, 1, ncachelines) {
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto unlock;
- }
-
- i915_random_reorder(offsets, ncachelines, &prng);
- for (n = 0; n < count; n++)
- values[n] = prandom_u32_state(&prng);
-
- for (n = 0; n < count; n++) {
- err = over->set(obj, offsets[n], ~values[n]);
- if (err) {
- pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
- n, count, over->name, err);
- goto put_object;
- }
- }
-
- for (n = 0; n < count; n++) {
- err = write->set(obj, offsets[n], values[n]);
- if (err) {
- pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
- n, count, write->name, err);
- goto put_object;
- }
- }
-
- for (n = 0; n < count; n++) {
- u32 found;
-
- err = read->get(obj, offsets[n], &found);
- if (err) {
- pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
- n, count, read->name, err);
- goto put_object;
- }
-
- if (found != values[n]) {
-					pr_err("Value[%ld/%ld] mismatch (overwrite with %s): wrote [%s] %x, read [%s] %x (inverse %x) at offset %x\n",
- n, count, over->name,
- write->name, values[n],
- read->name, found,
- ~values[n], offsets[n]);
- err = -EINVAL;
- goto put_object;
- }
- }
-
- __i915_gem_object_release_unless_active(obj);
- }
- }
- }
- }
-unlock:
- intel_runtime_pm_put(i915, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
- kfree(offsets);
- return err;
-
-put_object:
- __i915_gem_object_release_unless_active(obj);
- goto unlock;
-}
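The offset pattern seeded at the top of igt_gem_coherency(), offsets[n] = n * 64 + 4 * (n % 16), gives each 64-byte cacheline exactly one u32 slot while cycling through all 16 dword positions within a line. A standalone check of both properties (64-byte cachelines and a 4KiB page assumed for the demonstration):

	#include <assert.h>
	#include <stdio.h>

	#define CACHELINE	64
	#define NCACHELINES	(4096 / CACHELINE)	/* one 4KiB page */

	int main(void)
	{
		unsigned int n;

		for (n = 0; n < NCACHELINES; n++) {
			unsigned int offset = n * CACHELINE + 4 * (n % 16);

			assert(offset / CACHELINE == n);	/* stays in its own line */
			assert(offset % 4 == 0);		/* u32 aligned */
			printf("line %2u -> dword slot %2u\n",
			       n, (offset % CACHELINE) / 4);
		}
		return 0;
	}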
-
-int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_gem_coherency),
- };
-
- return i915_subtests(tests, i915);
-}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
deleted file mode 100644
index 34ac5cc6d59f..000000000000
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ /dev/null
@@ -1,1752 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <linux/prime_numbers.h>
-
-#include "gt/intel_reset.h"
-#include "i915_selftest.h"
-
-#include "i915_random.h"
-#include "igt_flush_test.h"
-#include "igt_gem_utils.h"
-#include "igt_live_test.h"
-#include "igt_reset.h"
-#include "igt_spinner.h"
-
-#include "mock_drm.h"
-#include "mock_gem_device.h"
-#include "huge_gem_object.h"
-
-#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
-
-static int live_nop_switch(void *arg)
-{
- const unsigned int nctx = 1024;
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
- struct i915_gem_context **ctx;
- enum intel_engine_id id;
- intel_wakeref_t wakeref;
- struct igt_live_test t;
- struct drm_file *file;
- unsigned long n;
- int err = -ENODEV;
-
- /*
- * Create as many contexts as we can feasibly get away with
- * and check we can switch between them rapidly.
- *
-	 * Serves as a very simple stress test for submission and HW switching
- * between contexts.
- */
-
- if (!DRIVER_CAPS(i915)->has_logical_contexts)
- return 0;
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
-
- ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- err = -ENOMEM;
- goto out_unlock;
- }
-
- for (n = 0; n < nctx; n++) {
- ctx[n] = live_context(i915, file);
- if (IS_ERR(ctx[n])) {
- err = PTR_ERR(ctx[n]);
- goto out_unlock;
- }
- }
-
- for_each_engine(engine, i915, id) {
- struct i915_request *rq;
- unsigned long end_time, prime;
- ktime_t times[2] = {};
-
- times[0] = ktime_get_raw();
- for (n = 0; n < nctx; n++) {
- rq = igt_request_alloc(ctx[n], engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_unlock;
- }
- i915_request_add(rq);
- }
- if (i915_request_wait(rq,
- I915_WAIT_LOCKED,
- HZ / 5) < 0) {
-			pr_err("Failed to populate %d contexts\n", nctx);
- i915_gem_set_wedged(i915);
- err = -EIO;
- goto out_unlock;
- }
-
- times[1] = ktime_get_raw();
-
- pr_info("Populated %d contexts on %s in %lluns\n",
- nctx, engine->name, ktime_to_ns(times[1] - times[0]));
-
- err = igt_live_test_begin(&t, i915, __func__, engine->name);
- if (err)
- goto out_unlock;
-
- end_time = jiffies + i915_selftest.timeout_jiffies;
- for_each_prime_number_from(prime, 2, 8192) {
- times[1] = ktime_get_raw();
-
- for (n = 0; n < prime; n++) {
- rq = igt_request_alloc(ctx[n % nctx], engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto out_unlock;
- }
-
- /*
- * This space is left intentionally blank.
- *
- * We do not actually want to perform any
- * action with this request, we just want
- * to measure the latency in allocation
- * and submission of our breadcrumbs -
- * ensuring that the bare request is sufficient
- * for the system to work (i.e. proper HEAD
- * tracking of the rings, interrupt handling,
-			 * etc). It also gives us a lower bound
-			 * on the latency.
- */
-
- i915_request_add(rq);
- }
- if (i915_request_wait(rq,
- I915_WAIT_LOCKED,
- HZ / 5) < 0) {
- pr_err("Switching between %ld contexts timed out\n",
- prime);
- i915_gem_set_wedged(i915);
- break;
- }
-
- times[1] = ktime_sub(ktime_get_raw(), times[1]);
- if (prime == 2)
- times[0] = times[1];
-
- if (__igt_timeout(end_time, NULL))
- break;
- }
-
- err = igt_live_test_end(&t);
- if (err)
- goto out_unlock;
-
- pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
- engine->name,
- ktime_to_ns(times[0]),
- prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
- }
-
-out_unlock:
- intel_runtime_pm_put(i915, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
- mock_file_free(i915, file);
- return err;
-}
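The measurement loop above is a classic amortisation pattern: submit a batch of n nop requests, wait once, and divide the elapsed time by the batch size (the selftest steps n through primes via for_each_prime_number_from() and amortises over prime - 1). A simplified userspace analogue, with clock_gettime() standing in for ktime_get_raw() and op() standing in for request submission:

	#include <stdio.h>
	#include <time.h>

	static long long elapsed_ns(const struct timespec *a,
				    const struct timespec *b)
	{
		return (b->tv_sec - a->tv_sec) * 1000000000LL +
		       (b->tv_nsec - a->tv_nsec);
	}

	/* Amortised cost of op() over a batch of n calls. */
	static long long time_batch(unsigned long n, void (*op)(void))
	{
		struct timespec t0, t1;
		unsigned long i;

		clock_gettime(CLOCK_MONOTONIC_RAW, &t0);
		for (i = 0; i < n; i++)
			op();
		clock_gettime(CLOCK_MONOTONIC_RAW, &t1);

		return elapsed_ns(&t0, &t1) / n;
	}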
-
-static struct i915_vma *
-gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
-{
- struct drm_i915_gem_object *obj;
- const int gen = INTEL_GEN(vma->vm->i915);
- unsigned long n, size;
- u32 *cmd;
- int err;
-
- size = (4 * count + 1) * sizeof(u32);
- size = round_up(size, PAGE_SIZE);
- obj = i915_gem_object_create_internal(vma->vm->i915, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
- offset += vma->node.start;
-
- for (n = 0; n < count; n++) {
- if (gen >= 8) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = value;
- } else if (gen >= 4) {
- *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
- (gen < 6 ? MI_USE_GGTT : 0);
- *cmd++ = 0;
- *cmd++ = offset;
- *cmd++ = value;
- } else {
- *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *cmd++ = offset;
- *cmd++ = value;
- }
- offset += PAGE_SIZE;
- }
- *cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
- vma = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
-static unsigned long real_page_count(struct drm_i915_gem_object *obj)
-{
- return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
-}
-
-static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
-{
- return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
-}
-
-static int gpu_fill(struct drm_i915_gem_object *obj,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- unsigned int dw)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
- struct i915_request *rq;
- struct i915_vma *vma;
- struct i915_vma *batch;
- unsigned int flags;
- int err;
-
- GEM_BUG_ON(obj->base.size > vm->total);
- GEM_BUG_ON(!intel_engine_can_store_dword(engine));
-
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- return err;
-
- err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
- if (err)
- return err;
-
-	/* Within the GTT the huge object maps every page onto
- * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
- * We set the nth dword within the page using the nth
- * mapping via the GTT - this should exercise the GTT mapping
- * whilst checking that each context provides a unique view
- * into the object.
- */
- batch = gpu_fill_dw(vma,
- (dw * real_page_count(obj)) << PAGE_SHIFT |
- (dw * sizeof(u32)),
- real_page_count(obj),
- dw);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_vma;
- }
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
- flags |= I915_DISPATCH_SECURE;
-
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
- if (err)
- goto err_request;
-
- err = i915_vma_move_to_active(batch, rq, 0);
- if (err)
- goto skip_request;
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- if (err)
- goto skip_request;
-
- i915_gem_object_set_active_reference(batch->obj);
- i915_vma_unpin(batch);
- i915_vma_close(batch);
-
- i915_vma_unpin(vma);
-
- i915_request_add(rq);
-
- return 0;
-
-skip_request:
- i915_request_skip(rq, err);
-err_request:
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
- i915_vma_put(batch);
-err_vma:
- i915_vma_unpin(vma);
- return err;
-}
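The aliasing described in the comment above is easy to model: with NREAL real pages backing NFAKE fake GTT pages (fake page p aliases real page p % NREAL), write pass dw touches fake pages dw*NREAL through dw*NREAL + NREAL - 1, so every real page receives exactly one store, at dword slot dw. A toy simulation with shrunken sizes (the constants here are illustrative, not the test's 1024-page geometry):

	#include <assert.h>
	#include <string.h>

	#define NREAL		8			/* real (phys) pages */
	#define DW_PER_PAGE	8			/* shrunk page: 8 dwords */
	#define NFAKE		(NREAL * DW_PER_PAGE)	/* fake (GTT) pages */

	int main(void)
	{
		unsigned int page[NREAL][DW_PER_PAGE];
		unsigned int dw, n;

		memset(page, 0xff, sizeof(page));

		for (dw = 0; dw < NFAKE / NREAL; dw++) {	/* one pass per dword */
			for (n = 0; n < NREAL; n++) {
				unsigned int fake = dw * NREAL + n;
				unsigned int real = fake % NREAL;	/* huge-object aliasing */

				assert(page[real][dw] == 0xffffffff);	/* never overwritten */
				page[real][dw] = dw;
			}
		}

		for (n = 0; n < NREAL; n++)	/* every slot hit exactly once */
			for (dw = 0; dw < DW_PER_PAGE; dw++)
				assert(page[n][dw] == dw);
		return 0;
	}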
-
-static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
-{
- const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
- unsigned int n, m, need_flush;
- int err;
-
- err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
- if (err)
- return err;
-
- for (n = 0; n < real_page_count(obj); n++) {
- u32 *map;
-
- map = kmap_atomic(i915_gem_object_get_page(obj, n));
- for (m = 0; m < DW_PER_PAGE; m++)
- map[m] = value;
- if (!has_llc)
- drm_clflush_virt_range(map, PAGE_SIZE);
- kunmap_atomic(map);
- }
-
- i915_gem_obj_finish_shmem_access(obj);
- obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
- obj->write_domain = 0;
- return 0;
-}
-
-static noinline int cpu_check(struct drm_i915_gem_object *obj,
- unsigned int idx, unsigned int max)
-{
- unsigned int n, m, needs_flush;
- int err;
-
- err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
- if (err)
- return err;
-
- for (n = 0; n < real_page_count(obj); n++) {
- u32 *map;
-
- map = kmap_atomic(i915_gem_object_get_page(obj, n));
- if (needs_flush & CLFLUSH_BEFORE)
- drm_clflush_virt_range(map, PAGE_SIZE);
-
- for (m = 0; m < max; m++) {
- if (map[m] != m) {
- pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
- __builtin_return_address(0), idx,
- n, real_page_count(obj), m, max,
- map[m], m);
- err = -EINVAL;
- goto out_unmap;
- }
- }
-
- for (; m < DW_PER_PAGE; m++) {
- if (map[m] != STACK_MAGIC) {
- pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
- __builtin_return_address(0), idx, n, m,
- map[m], STACK_MAGIC);
- err = -EINVAL;
- goto out_unmap;
- }
- }
-
-out_unmap:
- kunmap_atomic(map);
- if (err)
- break;
- }
-
- i915_gem_obj_finish_shmem_access(obj);
- return err;
-}
-
-static int file_add_object(struct drm_file *file,
- struct drm_i915_gem_object *obj)
-{
- int err;
-
- GEM_BUG_ON(obj->base.handle_count);
-
- /* tie the object to the drm_file for easy reaping */
- err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
- if (err < 0)
- return err;
-
- i915_gem_object_get(obj);
- obj->base.handle_count++;
- return 0;
-}
-
-static struct drm_i915_gem_object *
-create_test_object(struct i915_gem_context *ctx,
- struct drm_file *file,
- struct list_head *objects)
-{
- struct drm_i915_gem_object *obj;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
- u64 size;
- int err;
-
- size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
- size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
-
- obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
- if (IS_ERR(obj))
- return obj;
-
- err = file_add_object(file, obj);
- i915_gem_object_put(obj);
- if (err)
- return ERR_PTR(err);
-
- err = cpu_fill(obj, STACK_MAGIC);
- if (err) {
- pr_err("Failed to fill object with cpu, err=%d\n",
- err);
- return ERR_PTR(err);
- }
-
- list_add_tail(&obj->st_link, objects);
- return obj;
-}
-
-static unsigned long max_dwords(struct drm_i915_gem_object *obj)
-{
- unsigned long npages = fake_page_count(obj);
-
- GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
- return npages / DW_PER_PAGE;
-}
-
-static int igt_ctx_exec(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENODEV;
-
- /*
- * Create a few different contexts (with different mm) and write
- * through each ctx/mm using the GPU making sure those writes end
- * up in the expected pages of our obj.
- */
-
- if (!DRIVER_CAPS(i915)->has_logical_contexts)
- return 0;
-
- for_each_engine(engine, i915, id) {
- struct drm_i915_gem_object *obj = NULL;
- unsigned long ncontexts, ndwords, dw;
- struct igt_live_test t;
- struct drm_file *file;
- IGT_TIMEOUT(end_time);
- LIST_HEAD(objects);
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (!engine->context_size)
- continue; /* No logical context support in HW */
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- err = igt_live_test_begin(&t, i915, __func__, engine->name);
- if (err)
- goto out_unlock;
-
- ncontexts = 0;
- ndwords = 0;
- dw = 0;
- while (!time_after(jiffies, end_time)) {
- struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_unlock;
- }
-
- if (!obj) {
- obj = create_test_object(ctx, file, &objects);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_unlock;
- }
- }
-
- with_intel_runtime_pm(i915, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
- if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
- ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->ppgtt), err);
- goto out_unlock;
- }
-
- if (++dw == max_dwords(obj)) {
- obj = NULL;
- dw = 0;
- }
-
- ndwords++;
- ncontexts++;
- }
-
- pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
- ncontexts, engine->name, ndwords);
-
- ncontexts = dw = 0;
- list_for_each_entry(obj, &objects, st_link) {
- unsigned int rem =
- min_t(unsigned int, ndwords - dw, max_dwords(obj));
-
- err = cpu_check(obj, ncontexts++, rem);
- if (err)
- break;
-
- dw += rem;
- }
-
-out_unlock:
- if (igt_live_test_end(&t))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
- mock_file_free(i915, file);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int igt_shared_ctx_exec(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct i915_gem_context *parent;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- struct igt_live_test t;
- struct drm_file *file;
- int err = 0;
-
- /*
- * Create a few different contexts with the same mm and write
- * through each ctx using the GPU making sure those writes end
- * up in the expected pages of our obj.
- */
- if (!DRIVER_CAPS(i915)->has_logical_contexts)
- return 0;
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- parent = live_context(i915, file);
- if (IS_ERR(parent)) {
- err = PTR_ERR(parent);
- goto out_unlock;
- }
-
- if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
- err = 0;
- goto out_unlock;
- }
-
- err = igt_live_test_begin(&t, i915, __func__, "");
- if (err)
- goto out_unlock;
-
- for_each_engine(engine, i915, id) {
- unsigned long ncontexts, ndwords, dw;
- struct drm_i915_gem_object *obj = NULL;
- IGT_TIMEOUT(end_time);
- LIST_HEAD(objects);
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- dw = 0;
- ndwords = 0;
- ncontexts = 0;
- while (!time_after(jiffies, end_time)) {
- struct i915_gem_context *ctx;
- intel_wakeref_t wakeref;
-
- ctx = kernel_context(i915);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_test;
- }
-
- __assign_ppgtt(ctx, parent->ppgtt);
-
- if (!obj) {
- obj = create_test_object(parent, file, &objects);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- kernel_context_close(ctx);
- goto out_test;
- }
- }
-
- err = 0;
- with_intel_runtime_pm(i915, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
- if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
- ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->ppgtt), err);
- kernel_context_close(ctx);
- goto out_test;
- }
-
- if (++dw == max_dwords(obj)) {
- obj = NULL;
- dw = 0;
- }
-
- ndwords++;
- ncontexts++;
-
- kernel_context_close(ctx);
- }
- pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
- ncontexts, engine->name, ndwords);
-
- ncontexts = dw = 0;
- list_for_each_entry(obj, &objects, st_link) {
- unsigned int rem =
- min_t(unsigned int, ndwords - dw, max_dwords(obj));
-
- err = cpu_check(obj, ncontexts++, rem);
- if (err)
- goto out_test;
-
- dw += rem;
- }
- }
-out_test:
- if (igt_live_test_end(&t))
- err = -EIO;
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
- mock_file_free(i915, file);
- return err;
-}
-
-static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
-{
- struct drm_i915_gem_object *obj;
- u32 *cmd;
- int err;
-
- if (INTEL_GEN(vma->vm->i915) < 8)
- return ERR_PTR(-EINVAL);
-
- obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
- *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
- *cmd++ = lower_32_bits(vma->node.start);
- *cmd++ = upper_32_bits(vma->node.start);
- *cmd = MI_BATCH_BUFFER_END;
-
- __i915_gem_object_flush_map(obj, 0, 64);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, vma->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- goto err;
-
- return vma;
-
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(err);
-}
-
-static int
-emit_rpcs_query(struct drm_i915_gem_object *obj,
- struct intel_context *ce,
- struct i915_request **rq_out)
-{
- struct i915_request *rq;
- struct i915_vma *batch;
- struct i915_vma *vma;
- int err;
-
- GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
-
- vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- return err;
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
-
- batch = rpcs_query_batch(vma);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_vma;
- }
-
- rq = i915_request_create(ce);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_batch;
- }
-
- err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- 0);
- if (err)
- goto err_request;
-
- err = i915_vma_move_to_active(batch, rq, 0);
- if (err)
- goto skip_request;
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- if (err)
- goto skip_request;
-
- i915_gem_object_set_active_reference(batch->obj);
- i915_vma_unpin(batch);
- i915_vma_close(batch);
-
- i915_vma_unpin(vma);
-
- *rq_out = i915_request_get(rq);
-
- i915_request_add(rq);
-
- return 0;
-
-skip_request:
- i915_request_skip(rq, err);
-err_request:
- i915_request_add(rq);
-err_batch:
- i915_vma_unpin(batch);
-err_vma:
- i915_vma_unpin(vma);
-
- return err;
-}
-
-#define TEST_IDLE BIT(0)
-#define TEST_BUSY BIT(1)
-#define TEST_RESET BIT(2)
-
-static int
-__sseu_prepare(struct drm_i915_private *i915,
- const char *name,
- unsigned int flags,
- struct intel_context *ce,
- struct igt_spinner **spin)
-{
- struct i915_request *rq;
- int ret;
-
- *spin = NULL;
- if (!(flags & (TEST_BUSY | TEST_RESET)))
- return 0;
-
- *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
- if (!*spin)
- return -ENOMEM;
-
- ret = igt_spinner_init(*spin, i915);
- if (ret)
- goto err_free;
-
- rq = igt_spinner_create_request(*spin,
- ce->gem_context,
- ce->engine,
- MI_NOOP);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- goto err_fini;
- }
-
- i915_request_add(rq);
-
- if (!igt_wait_for_spinner(*spin, rq)) {
- pr_err("%s: Spinner failed to start!\n", name);
- ret = -ETIMEDOUT;
- goto err_end;
- }
-
- return 0;
-
-err_end:
- igt_spinner_end(*spin);
-err_fini:
- igt_spinner_fini(*spin);
-err_free:
- kfree(fetch_and_zero(spin));
- return ret;
-}
-
-static int
-__read_slice_count(struct drm_i915_private *i915,
- struct intel_context *ce,
- struct drm_i915_gem_object *obj,
- struct igt_spinner *spin,
- u32 *rpcs)
-{
- struct i915_request *rq = NULL;
- u32 s_mask, s_shift;
- unsigned int cnt;
- u32 *buf, val;
- long ret;
-
- ret = emit_rpcs_query(obj, ce, &rq);
- if (ret)
- return ret;
-
- if (spin)
- igt_spinner_end(spin);
-
- ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
- i915_request_put(rq);
- if (ret < 0)
- return ret;
-
- buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(buf)) {
- ret = PTR_ERR(buf);
- return ret;
- }
-
- if (INTEL_GEN(i915) >= 11) {
- s_mask = GEN11_RPCS_S_CNT_MASK;
- s_shift = GEN11_RPCS_S_CNT_SHIFT;
- } else {
- s_mask = GEN8_RPCS_S_CNT_MASK;
- s_shift = GEN8_RPCS_S_CNT_SHIFT;
- }
-
- val = *buf;
- cnt = (val & s_mask) >> s_shift;
- *rpcs = val;
-
- i915_gem_object_unpin_map(obj);
-
- return cnt;
-}
-
-static int
-__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
- const char *prefix, const char *suffix)
-{
- if (slices == expected)
- return 0;
-
- if (slices < 0) {
- pr_err("%s: %s read slice count failed with %d%s\n",
- name, prefix, slices, suffix);
- return slices;
- }
-
- pr_err("%s: %s slice count %d is not %u%s\n",
- name, prefix, slices, expected, suffix);
-
- pr_info("RPCS=0x%x; %u%sx%u%s\n",
- rpcs, slices,
- (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
- (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
- (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");
-
- return -EINVAL;
-}
-
-static int
-__sseu_finish(struct drm_i915_private *i915,
- const char *name,
- unsigned int flags,
- struct intel_context *ce,
- struct drm_i915_gem_object *obj,
- unsigned int expected,
- struct igt_spinner *spin)
-{
- unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
- u32 rpcs = 0;
- int ret = 0;
-
- if (flags & TEST_RESET) {
- ret = i915_reset_engine(ce->engine, "sseu");
- if (ret)
- goto out;
- }
-
- ret = __read_slice_count(i915, ce, obj,
- flags & TEST_RESET ? NULL : spin, &rpcs);
- ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
- if (ret)
- goto out;
-
- ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
- NULL, &rpcs);
- ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
-
-out:
- if (spin)
- igt_spinner_end(spin);
-
- if ((flags & TEST_IDLE) && ret == 0) {
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
- ret = __check_rpcs(name, rpcs, ret, expected,
- "Context", " after idle!");
- }
-
- return ret;
-}
-
-static int
-__sseu_test(struct drm_i915_private *i915,
- const char *name,
- unsigned int flags,
- struct intel_context *ce,
- struct drm_i915_gem_object *obj,
- struct intel_sseu sseu)
-{
- struct igt_spinner *spin = NULL;
- int ret;
-
- ret = __sseu_prepare(i915, name, flags, ce, &spin);
- if (ret)
- return ret;
-
- ret = __intel_context_reconfigure_sseu(ce, sseu);
- if (ret)
- goto out_spin;
-
- ret = __sseu_finish(i915, name, flags, ce, obj,
- hweight32(sseu.slice_mask), spin);
-
-out_spin:
- if (spin) {
- igt_spinner_end(spin);
- igt_spinner_fini(spin);
- kfree(spin);
- }
- return ret;
-}
-
-static int
-__igt_ctx_sseu(struct drm_i915_private *i915,
- const char *name,
- unsigned int flags)
-{
- struct intel_engine_cs *engine = i915->engine[RCS0];
- struct intel_sseu default_sseu = engine->sseu;
- struct drm_i915_gem_object *obj;
- struct i915_gem_context *ctx;
- struct intel_context *ce;
- struct intel_sseu pg_sseu;
- intel_wakeref_t wakeref;
- struct drm_file *file;
- int ret;
-
- if (INTEL_GEN(i915) < 9)
- return 0;
-
- if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
- return 0;
-
- if (hweight32(default_sseu.slice_mask) < 2)
- return 0;
-
- /*
-	 * Gen11 VME-friendly power-gated configuration with half the
-	 * sub-slices enabled.
- */
- pg_sseu = default_sseu;
- pg_sseu.slice_mask = 1;
- pg_sseu.subslice_mask =
- ~(~0 << (hweight32(default_sseu.subslice_mask) / 2));
-
- pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
- name, flags, hweight32(default_sseu.slice_mask),
- hweight32(pg_sseu.slice_mask));
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- if (flags & TEST_RESET)
- igt_global_reset_lock(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto out_unlock;
- }
- i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
-
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- ret = PTR_ERR(obj);
- goto out_unlock;
- }
-
- wakeref = intel_runtime_pm_get(i915);
-
- ce = i915_gem_context_get_engine(ctx, RCS0);
- if (IS_ERR(ce)) {
- ret = PTR_ERR(ce);
- goto out_rpm;
- }
-
- ret = intel_context_pin(ce);
- if (ret)
- goto out_context;
-
- /* First set the default mask. */
- ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
- if (ret)
- goto out_fail;
-
- /* Then set a power-gated configuration. */
- ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
-
- /* Back to defaults. */
- ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
- if (ret)
- goto out_fail;
-
- /* One last power-gated configuration for the road. */
- ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
- if (ret)
- goto out_fail;
-
-out_fail:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- ret = -EIO;
-
- intel_context_unpin(ce);
-out_context:
- intel_context_put(ce);
-out_rpm:
- intel_runtime_pm_put(i915, wakeref);
- i915_gem_object_put(obj);
-
-out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
-
- if (flags & TEST_RESET)
- igt_global_reset_unlock(i915);
-
- mock_file_free(i915, file);
-
- if (ret)
- pr_err("%s: Failed with %d!\n", name, ret);
-
- return ret;
-}
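Two bit idioms above deserve spelling out: hweight32() is a population count (slice and sub-slice counts are just the set bits of the mask), and ~(~0 << k) builds a mask of the k lowest bits, which is how pg_sseu's half-enabled sub-slice mask is derived. A standalone check, with GCC's __builtin_popcount() standing in for hweight32():

	#include <assert.h>

	int main(void)
	{
		unsigned int subslices = __builtin_popcount(0xff);	/* hweight32() */
		unsigned int half = ~(~0u << (subslices / 2));		/* low-bit mask */

		assert(subslices == 8);
		assert(half == 0x0f);	/* half of the sub-slices enabled */
		return 0;
	}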
-
-static int igt_ctx_sseu(void *arg)
-{
- struct {
- const char *name;
- unsigned int flags;
- } *phase, phases[] = {
- { .name = "basic", .flags = 0 },
- { .name = "idle", .flags = TEST_IDLE },
- { .name = "busy", .flags = TEST_BUSY },
- { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
- { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
- { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
- };
- unsigned int i;
- int ret = 0;
-
- for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
- i++, phase++)
- ret = __igt_ctx_sseu(arg, phase->name, phase->flags);
-
- return ret;
-}
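The phase loop bakes its early exit into the loop condition, ret == 0 && i < ARRAY_SIZE(phases), so the first failing phase stops the walk without a break. A minimal standalone version (ARRAY_SIZE defined locally, as the kernel does):

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	struct phase {
		const char *name;
		unsigned int flags;
	};

	static int run_phase(const struct phase *p)
	{
		printf("running %s (flags=%x)\n", p->name, p->flags);
		return 0;	/* a non-zero return stops the walk below */
	}

	int main(void)
	{
		static const struct phase phases[] = {
			{ "basic", 0 },
			{ "idle", 1 },
		};
		unsigned int i;
		int ret = 0;

		/* the early exit lives in the condition, not in a break */
		for (i = 0; ret == 0 && i < ARRAY_SIZE(phases); i++)
			ret = run_phase(&phases[i]);
		return ret;
	}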
-
-static int igt_ctx_readonly(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj = NULL;
- struct i915_gem_context *ctx;
- struct i915_hw_ppgtt *ppgtt;
- unsigned long idx, ndwords, dw;
- struct igt_live_test t;
- struct drm_file *file;
- I915_RND_STATE(prng);
- IGT_TIMEOUT(end_time);
- LIST_HEAD(objects);
- int err = -ENODEV;
-
- /*
- * Create a few read-only objects (with the occasional writable object)
-	 * and try to write into these objects, checking that the GPU discards
- * any write to a read-only object.
- */
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- err = igt_live_test_begin(&t, i915, __func__, "");
- if (err)
- goto out_unlock;
-
- ctx = live_context(i915, file);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_unlock;
- }
-
- ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
- if (!ppgtt || !ppgtt->vm.has_read_only) {
- err = 0;
- goto out_unlock;
- }
-
- ndwords = 0;
- dw = 0;
- while (!time_after(jiffies, end_time)) {
- struct intel_engine_cs *engine;
- unsigned int id;
-
- for_each_engine(engine, i915, id) {
- intel_wakeref_t wakeref;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- if (!obj) {
- obj = create_test_object(ctx, file, &objects);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_unlock;
- }
-
- if (prandom_u32_state(&prng) & 1)
- i915_gem_object_set_readonly(obj);
- }
-
- err = 0;
- with_intel_runtime_pm(i915, wakeref)
- err = gpu_fill(obj, ctx, engine, dw);
- if (err) {
- pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
- ndwords, dw, max_dwords(obj),
- engine->name, ctx->hw_id,
- yesno(!!ctx->ppgtt), err);
- goto out_unlock;
- }
-
- if (++dw == max_dwords(obj)) {
- obj = NULL;
- dw = 0;
- }
- ndwords++;
- }
- }
- pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, RUNTIME_INFO(i915)->num_engines);
-
- dw = 0;
- idx = 0;
- list_for_each_entry(obj, &objects, st_link) {
- unsigned int rem =
- min_t(unsigned int, ndwords - dw, max_dwords(obj));
- unsigned int num_writes;
-
- num_writes = rem;
- if (i915_gem_object_is_readonly(obj))
- num_writes = 0;
-
- err = cpu_check(obj, idx++, num_writes);
- if (err)
- break;
-
- dw += rem;
- }
-
-out_unlock:
- if (igt_live_test_end(&t))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
- mock_file_free(i915, file);
- return err;
-}
-
-static int check_scratch(struct i915_gem_context *ctx, u64 offset)
-{
- struct drm_mm_node *node =
- __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
- offset, offset + sizeof(u32) - 1);
- if (!node || node->start > offset)
- return 0;
-
- GEM_BUG_ON(offset >= node->start + node->size);
-
- pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
- upper_32_bits(offset), lower_32_bits(offset));
- return -EINVAL;
-}
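check_scratch() only has to decide whether the 4-byte target intersects an allocated node, i.e. the usual half-open interval-overlap test that __drm_mm_interval_first() performs over the mm's interval tree. The predicate in isolation (a sketch over bare integers, not the drm_mm API):

	#include <assert.h>
	#include <stdint.h>

	/* [a, a + alen) and [b, b + blen) overlap iff each starts before
	 * the other ends. */
	static int overlaps(uint64_t a, uint64_t alen,
			    uint64_t b, uint64_t blen)
	{
		return a < b + blen && b < a + alen;
	}

	int main(void)
	{
		const uint64_t node_start = 0x1000, node_size = 0x1000;

		assert(overlaps(0x1800, 4, node_start, node_size));	/* inside */
		assert(!overlaps(0x2000, 4, node_start, node_size));	/* just past */
		return 0;
	}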
-
-static int write_to_scratch(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u64 offset, u32 value)
-{
- struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
- struct i915_request *rq;
- struct i915_vma *vma;
- u32 *cmd;
- int err;
-
- GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
-
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- *cmd++ = MI_STORE_DWORD_IMM_GEN4;
- if (INTEL_GEN(i915) >= 8) {
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- } else {
- *cmd++ = 0;
- *cmd++ = offset;
- }
- *cmd++ = value;
- *cmd = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(obj, 0, 64);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
- if (err)
- goto err;
-
- err = check_scratch(ctx, offset);
- if (err)
- goto err_unpin;
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_unpin;
- }
-
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
- if (err)
- goto err_request;
-
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
- goto skip_request;
-
- i915_gem_object_set_active_reference(obj);
- i915_vma_unpin(vma);
- i915_vma_close(vma);
-
- i915_request_add(rq);
-
- return 0;
-
-skip_request:
- i915_request_skip(rq, err);
-err_request:
- i915_request_add(rq);
-err_unpin:
- i915_vma_unpin(vma);
-err:
- i915_gem_object_put(obj);
- return err;
-}
-
-static int read_from_scratch(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u64 offset, u32 *value)
-{
- struct drm_i915_private *i915 = ctx->i915;
- struct drm_i915_gem_object *obj;
- const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
- const u32 result = 0x100;
- struct i915_request *rq;
- struct i915_vma *vma;
- u32 *cmd;
- int err;
-
- GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
-
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- memset(cmd, POISON_INUSE, PAGE_SIZE);
- if (INTEL_GEN(i915) >= 8) {
- *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
- *cmd++ = RCS_GPR0;
- *cmd++ = lower_32_bits(offset);
- *cmd++ = upper_32_bits(offset);
- *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
- *cmd++ = RCS_GPR0;
- *cmd++ = result;
- *cmd++ = 0;
- } else {
- *cmd++ = MI_LOAD_REGISTER_MEM;
- *cmd++ = RCS_GPR0;
- *cmd++ = offset;
- *cmd++ = MI_STORE_REGISTER_MEM;
- *cmd++ = RCS_GPR0;
- *cmd++ = result;
- }
- *cmd = MI_BATCH_BUFFER_END;
-
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
- if (err)
- goto err;
-
- err = check_scratch(ctx, offset);
- if (err)
- goto err_unpin;
-
- rq = igt_request_alloc(ctx, engine);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_unpin;
- }
-
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
- if (err)
- goto err_request;
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- if (err)
- goto skip_request;
-
- i915_vma_unpin(vma);
- i915_vma_close(vma);
-
- i915_request_add(rq);
-
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- if (err)
- goto err;
-
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err;
- }
-
- *value = cmd[result / sizeof(*cmd)];
- i915_gem_object_unpin_map(obj);
- i915_gem_object_put(obj);
-
- return 0;
-
-skip_request:
- i915_request_skip(rq, err);
-err_request:
- i915_request_add(rq);
-err_unpin:
- i915_vma_unpin(vma);
-err:
- i915_gem_object_put(obj);
- return err;
-}
-
-static int igt_vm_isolation(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct i915_gem_context *ctx_a, *ctx_b;
- struct intel_engine_cs *engine;
- intel_wakeref_t wakeref;
- struct igt_live_test t;
- struct drm_file *file;
- I915_RND_STATE(prng);
- unsigned long count;
- unsigned int id;
- u64 vm_total;
- int err;
-
- if (INTEL_GEN(i915) < 7)
- return 0;
-
- /*
- * The simple goal here is that a write into one context is not
- * observed in a second (separate page tables and scratch).
- */
-
- file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
- mutex_lock(&i915->drm.struct_mutex);
-
- err = igt_live_test_begin(&t, i915, __func__, "");
- if (err)
- goto out_unlock;
-
- ctx_a = live_context(i915, file);
- if (IS_ERR(ctx_a)) {
- err = PTR_ERR(ctx_a);
- goto out_unlock;
- }
-
- ctx_b = live_context(i915, file);
- if (IS_ERR(ctx_b)) {
- err = PTR_ERR(ctx_b);
- goto out_unlock;
- }
-
-	/* We can only test vm isolation if the vms are distinct */
- if (ctx_a->ppgtt == ctx_b->ppgtt)
- goto out_unlock;
-
- vm_total = ctx_a->ppgtt->vm.total;
- GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
- vm_total -= I915_GTT_PAGE_SIZE;
-
- wakeref = intel_runtime_pm_get(i915);
-
- count = 0;
- for_each_engine(engine, i915, id) {
- IGT_TIMEOUT(end_time);
- unsigned long this = 0;
-
- if (!intel_engine_can_store_dword(engine))
- continue;
-
- while (!__igt_timeout(end_time, NULL)) {
- u32 value = 0xc5c5c5c5;
- u64 offset;
-
- div64_u64_rem(i915_prandom_u64_state(&prng),
- vm_total, &offset);
- offset &= -sizeof(u32);
- offset += I915_GTT_PAGE_SIZE;
-
- err = write_to_scratch(ctx_a, engine,
- offset, 0xdeadbeef);
- if (err == 0)
- err = read_from_scratch(ctx_b, engine,
- offset, &value);
- if (err)
- goto out_rpm;
-
- if (value) {
- pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
- engine->name, value,
- upper_32_bits(offset),
- lower_32_bits(offset),
- this);
- err = -EINVAL;
- goto out_rpm;
- }
-
- this++;
- }
- count += this;
- }
- pr_info("Checked %lu scratch offsets across %d engines\n",
- count, RUNTIME_INFO(i915)->num_engines);
-
-out_rpm:
- intel_runtime_pm_put(i915, wakeref);
-out_unlock:
- if (igt_live_test_end(&t))
- err = -EIO;
- mutex_unlock(&i915->drm.struct_mutex);
-
- mock_file_free(i915, file);
- return err;
-}
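The offset selection in the loop above packs three constraints into three lines: uniform over the usable range (div64_u64_rem), u32-aligned (offset &= -sizeof(u32)), and clear of the first GTT page, which write_to_scratch()/read_from_scratch() refuse via GEM_BUG_ON. A userspace sketch, with rand64() as an illustrative stand-in for i915_prandom_u64_state():

	#include <stdint.h>
	#include <stdlib.h>

	#define GTT_PAGE_SIZE	4096ull

	static uint64_t rand64(void)	/* toy PRNG, not the kernel's */
	{
		return ((uint64_t)rand() << 32) | (unsigned int)rand();
	}

	/* Returns a dword-aligned offset in [GTT_PAGE_SIZE, vm_total). */
	static uint64_t pick_offset(uint64_t vm_total)
	{
		uint64_t offset = rand64() % (vm_total - GTT_PAGE_SIZE);

		offset &= -(uint64_t)sizeof(uint32_t);	/* align down to a u32 */
		offset += GTT_PAGE_SIZE;		/* skip the first page */
		return offset;
	}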
-
-static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
-{
- struct intel_engine_cs *engine;
- intel_engine_mask_t tmp;
-
- if (engines == ALL_ENGINES)
- return "all";
-
- for_each_engine_masked(engine, i915, engines, tmp)
- return engine->name;
-
- return "none";
-}
-
-static void mock_barrier_task(void *data)
-{
- unsigned int *counter = data;
-
- ++*counter;
-}
-
-static int mock_context_barrier(void *arg)
-{
-#undef pr_fmt
-#define pr_fmt(x) "context_barrier_task():" # x
- struct drm_i915_private *i915 = arg;
- struct i915_gem_context *ctx;
- struct i915_request *rq;
- unsigned int counter;
- int err;
-
- /*
- * The context barrier provides us with a callback after it emits
- * a request; useful for retiring old state after loading new.
- */
-
- mutex_lock(&i915->drm.struct_mutex);
-
- ctx = mock_context(i915, "mock");
- if (!ctx) {
- err = -ENOMEM;
- goto unlock;
- }
-
- counter = 0;
- err = context_barrier_task(ctx, 0,
- NULL, mock_barrier_task, &counter);
- if (err) {
- pr_err("Failed at line %d, err=%d\n", __LINE__, err);
- goto out;
- }
- if (counter == 0) {
- pr_err("Did not retire immediately with 0 engines\n");
- err = -EINVAL;
- goto out;
- }
-
- counter = 0;
- err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, mock_barrier_task, &counter);
- if (err) {
- pr_err("Failed at line %d, err=%d\n", __LINE__, err);
- goto out;
- }
- if (counter == 0) {
- pr_err("Did not retire immediately for all unused engines\n");
- err = -EINVAL;
- goto out;
- }
-
- rq = igt_request_alloc(ctx, i915->engine[RCS0]);
- if (IS_ERR(rq)) {
-		pr_err("Request allocation failed!\n");
-		err = PTR_ERR(rq); /* propagate; returning 0 would mask the failure */
-		goto out;
- }
- i915_request_add(rq);
-
- counter = 0;
- context_barrier_inject_fault = BIT(RCS0);
- err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, mock_barrier_task, &counter);
- context_barrier_inject_fault = 0;
-	if (err == -ENXIO) {
-		err = 0;
-	} else {
-		pr_err("Did not hit fault injection!\n");
-		if (err == 0)
-			err = -EINVAL; /* missed the fault yet reported success */
-	}
- if (counter != 0) {
- pr_err("Invoked callback on error!\n");
- err = -EIO;
- }
- if (err)
- goto out;
-
- counter = 0;
- err = context_barrier_task(ctx, ALL_ENGINES,
- NULL, mock_barrier_task, &counter);
- if (err) {
- pr_err("Failed at line %d, err=%d\n", __LINE__, err);
- goto out;
- }
- mock_device_flush(i915);
- if (counter == 0) {
-		pr_err("Did not retire on each active engine\n");
- err = -EINVAL;
- goto out;
- }
-
-out:
- mock_context_close(ctx);
-unlock:
- mutex_unlock(&i915->drm.struct_mutex);
- return err;
-#undef pr_fmt
-#define pr_fmt(x) x
-}
-
-int i915_gem_context_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(mock_context_barrier),
- };
- struct drm_i915_private *i915;
- int err;
-
- i915 = mock_gem_device();
- if (!i915)
- return -ENOMEM;
-
- err = i915_subtests(tests, i915);
-
- drm_dev_put(&i915->drm);
- return err;
-}
-
-int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(live_nop_switch),
- SUBTEST(igt_ctx_exec),
- SUBTEST(igt_ctx_readonly),
- SUBTEST(igt_ctx_sseu),
- SUBTEST(igt_shared_ctx_exec),
- SUBTEST(igt_vm_isolation),
- };
-
- if (i915_terminally_wedged(dev_priv))
- return 0;
-
- return i915_subtests(tests, dev_priv);
-}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
deleted file mode 100644
index 2b943ee246c9..000000000000
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "../i915_selftest.h"
-
-#include "mock_gem_device.h"
-#include "mock_dmabuf.h"
-
-static int igt_dmabuf_export(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- struct dma_buf *dmabuf;
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
- i915_gem_object_put(obj);
- if (IS_ERR(dmabuf)) {
- pr_err("i915_gem_prime_export failed with err=%d\n",
- (int)PTR_ERR(dmabuf));
- return PTR_ERR(dmabuf);
- }
-
- dma_buf_put(dmabuf);
- return 0;
-}
-
-static int igt_dmabuf_import_self(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- struct drm_gem_object *import;
- struct dma_buf *dmabuf;
- int err;
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
- if (IS_ERR(dmabuf)) {
- pr_err("i915_gem_prime_export failed with err=%d\n",
- (int)PTR_ERR(dmabuf));
- err = PTR_ERR(dmabuf);
- goto out;
- }
-
- import = i915_gem_prime_import(&i915->drm, dmabuf);
- if (IS_ERR(import)) {
- pr_err("i915_gem_prime_import failed with err=%d\n",
- (int)PTR_ERR(import));
- err = PTR_ERR(import);
- goto out_dmabuf;
- }
-
- if (import != &obj->base) {
- pr_err("i915_gem_prime_import created a new object!\n");
- err = -EINVAL;
- goto out_import;
- }
-
- err = 0;
-out_import:
- i915_gem_object_put(to_intel_bo(import));
-out_dmabuf:
- dma_buf_put(dmabuf);
-out:
- i915_gem_object_put(obj);
- return err;
-}
-
-static int igt_dmabuf_import(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- struct dma_buf *dmabuf;
- void *obj_map, *dma_map;
- u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
- int err, i;
-
- dmabuf = mock_dmabuf(1);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
- if (IS_ERR(obj)) {
- pr_err("i915_gem_prime_import failed with err=%d\n",
- (int)PTR_ERR(obj));
- err = PTR_ERR(obj);
- goto out_dmabuf;
- }
-
- if (obj->base.dev != &i915->drm) {
- pr_err("i915_gem_prime_import created a non-i915 object!\n");
- err = -EINVAL;
- goto out_obj;
- }
-
- if (obj->base.size != PAGE_SIZE) {
-		pr_err("i915_gem_prime_import returned the wrong size: found %lld, expected %ld\n",
- (long long)obj->base.size, PAGE_SIZE);
- err = -EINVAL;
- goto out_obj;
- }
-
- dma_map = dma_buf_vmap(dmabuf);
- if (!dma_map) {
- pr_err("dma_buf_vmap failed\n");
- err = -ENOMEM;
- goto out_obj;
- }
-
-	if (0) { /* Cannot yet map dmabuf */
- obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(obj_map)) {
- err = PTR_ERR(obj_map);
- pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
- goto out_dma_map;
- }
-
- for (i = 0; i < ARRAY_SIZE(pattern); i++) {
- memset(dma_map, pattern[i], PAGE_SIZE);
- if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
- err = -EINVAL;
- pr_err("imported vmap not all set to %x!\n", pattern[i]);
- i915_gem_object_unpin_map(obj);
- goto out_dma_map;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(pattern); i++) {
- memset(obj_map, pattern[i], PAGE_SIZE);
- if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
- err = -EINVAL;
- pr_err("exported vmap not all set to %x!\n", pattern[i]);
- i915_gem_object_unpin_map(obj);
- goto out_dma_map;
- }
- }
-
- i915_gem_object_unpin_map(obj);
- }
-
- err = 0;
-out_dma_map:
- dma_buf_vunmap(dmabuf, dma_map);
-out_obj:
- i915_gem_object_put(obj);
-out_dmabuf:
- dma_buf_put(dmabuf);
- return err;
-}
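memchr_inv(ptr, c, n) returns NULL exactly when every byte in the range equals c, which is what keeps the pattern checks above to one line each. A portable stand-in built on the self-overlapping memcmp idiom (comparing a buffer against itself shifted by one byte proves all its bytes are equal):

	#include <stddef.h>
	#include <string.h>

	/* Non-zero iff all n bytes of p equal c (n must be > 0). */
	static int all_bytes_are(const void *p, int c, size_t n)
	{
		const unsigned char *b = p;

		return b[0] == (unsigned char)c && !memcmp(b, b + 1, n - 1);
	}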
-
-static int igt_dmabuf_import_ownership(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- struct dma_buf *dmabuf;
- void *ptr;
- int err;
-
- dmabuf = mock_dmabuf(1);
- if (IS_ERR(dmabuf))
- return PTR_ERR(dmabuf);
-
- ptr = dma_buf_vmap(dmabuf);
- if (!ptr) {
- pr_err("dma_buf_vmap failed\n");
- err = -ENOMEM;
- goto err_dmabuf;
- }
-
- memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_vunmap(dmabuf, ptr);
-
- obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
- if (IS_ERR(obj)) {
- pr_err("i915_gem_prime_import failed with err=%d\n",
- (int)PTR_ERR(obj));
- err = PTR_ERR(obj);
- goto err_dmabuf;
- }
-
- dma_buf_put(dmabuf);
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
- goto out_obj;
- }
-
- err = 0;
- i915_gem_object_unpin_pages(obj);
-out_obj:
- i915_gem_object_put(obj);
- return err;
-
-err_dmabuf:
- dma_buf_put(dmabuf);
- return err;
-}
-
-static int igt_dmabuf_export_vmap(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- struct dma_buf *dmabuf;
- void *ptr;
- int err;
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
- if (IS_ERR(dmabuf)) {
- pr_err("i915_gem_prime_export failed with err=%d\n",
- (int)PTR_ERR(dmabuf));
- err = PTR_ERR(dmabuf);
- goto err_obj;
- }
- i915_gem_object_put(obj);
-
- ptr = dma_buf_vmap(dmabuf);
- if (!ptr) {
- pr_err("dma_buf_vmap failed\n");
- err = -ENOMEM;
- goto out;
- }
-
- if (memchr_inv(ptr, 0, dmabuf->size)) {
-		pr_err("Exported object not initialised to zero!\n");
- err = -EINVAL;
- goto out;
- }
-
- memset(ptr, 0xc5, dmabuf->size);
-
- err = 0;
- dma_buf_vunmap(dmabuf, ptr);
-out:
- dma_buf_put(dmabuf);
- return err;
-
-err_obj:
- i915_gem_object_put(obj);
- return err;
-}
-
-static int igt_dmabuf_export_kmap(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- struct dma_buf *dmabuf;
- void *ptr;
- int err;
-
- obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
- i915_gem_object_put(obj);
- if (IS_ERR(dmabuf)) {
- err = PTR_ERR(dmabuf);
- pr_err("i915_gem_prime_export failed with err=%d\n", err);
- return err;
- }
-
- ptr = dma_buf_kmap(dmabuf, 0);
- if (!ptr) {
- pr_err("dma_buf_kmap failed\n");
- err = -ENOMEM;
- goto err;
- }
-
- if (memchr_inv(ptr, 0, PAGE_SIZE)) {
- dma_buf_kunmap(dmabuf, 0, ptr);
-		pr_err("Exported page[0] not initialised to zero!\n");
- err = -EINVAL;
- goto err;
- }
-
- memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_kunmap(dmabuf, 0, ptr);
-
- ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
- goto err;
- }
- memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
-
- ptr = dma_buf_kmap(dmabuf, 1);
- if (!ptr) {
- pr_err("dma_buf_kmap failed\n");
- err = -ENOMEM;
- goto err;
- }
-
- if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
- dma_buf_kunmap(dmabuf, 1, ptr);
- pr_err("Exported page[1] not set to 0xaa!\n");
- err = -EINVAL;
- goto err;
- }
-
- memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_kunmap(dmabuf, 1, ptr);
-
- ptr = dma_buf_kmap(dmabuf, 0);
- if (!ptr) {
- pr_err("dma_buf_kmap failed\n");
- err = -ENOMEM;
- goto err;
- }
- if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
- dma_buf_kunmap(dmabuf, 0, ptr);
- pr_err("Exported page[0] did not retain 0xc5!\n");
- err = -EINVAL;
- goto err;
- }
- dma_buf_kunmap(dmabuf, 0, ptr);
-
- ptr = dma_buf_kmap(dmabuf, 2);
- if (ptr) {
- pr_err("Erroneously kmapped beyond the end of the object!\n");
- dma_buf_kunmap(dmabuf, 2, ptr);
- err = -EINVAL;
- goto err;
- }
-
- ptr = dma_buf_kmap(dmabuf, -1);
- if (ptr) {
- pr_err("Erroneously kmapped before the start of the object!\n");
- dma_buf_kunmap(dmabuf, -1, ptr);
- err = -EINVAL;
- goto err;
- }
-
- err = 0;
-err:
- dma_buf_put(dmabuf);
- return err;
-}
-
-int i915_gem_dmabuf_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_dmabuf_export),
- SUBTEST(igt_dmabuf_import_self),
- SUBTEST(igt_dmabuf_import),
- SUBTEST(igt_dmabuf_import_ownership),
- SUBTEST(igt_dmabuf_export_vmap),
- SUBTEST(igt_dmabuf_export_kmap),
- };
- struct drm_i915_private *i915;
- int err;
-
- i915 = mock_gem_device();
- if (!i915)
- return -ENOMEM;
-
- err = i915_subtests(tests, i915);
-
- drm_dev_put(&i915->drm);
- return err;
-}
-
-int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_dmabuf_export),
- };
-
- return i915_subtests(tests, i915);
-}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 4fc6e5445dd1..a3cb0aade6f1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -22,11 +22,14 @@
*
*/
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/selftests/igt_gem_utils.h"
+#include "gem/selftests/mock_context.h"
-#include "igt_gem_utils.h"
+#include "i915_selftest.h"
+
+#include "igt_flush_test.h"
#include "lib_sw_fence.h"
-#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -65,20 +68,24 @@ static int populate_ggtt(struct drm_i915_private *i915,
count++;
}
+ bound = 0;
unbound = 0;
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
- if (obj->mm.quirked)
+ list_for_each_entry(obj, objects, st_link) {
+ GEM_BUG_ON(!obj->mm.quirked);
+
+ if (atomic_read(&obj->bind_count))
+ bound++;
+ else
unbound++;
+ }
+ GEM_BUG_ON(bound + unbound != count);
+
if (unbound) {
pr_err("%s: Found %lu objects unbound, expected %u!\n",
__func__, unbound, 0);
return -EINVAL;
}
- bound = 0;
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
- if (obj->mm.quirked)
- bound++;
if (bound != count) {
pr_err("%s: Found %lu objects bound, expected %lu!\n",
__func__, bound, count);
@@ -398,7 +405,7 @@ static int igt_evict_contexts(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
@@ -499,6 +506,8 @@ static int igt_evict_contexts(void *arg)
mutex_lock(&i915->drm.struct_mutex);
out_locked:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
while (reserved) {
struct reserved *next = reserved->next;
@@ -509,7 +518,7 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -533,7 +542,7 @@ int i915_gem_evict_mock_selftests(void)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
mutex_unlock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 9cca66e4420a..1a60b9fe8221 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -25,10 +25,11 @@
#include <linux/list_sort.h>
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/selftests/mock_context.h"
+
#include "i915_random.h"
+#include "i915_selftest.h"
-#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -147,7 +148,7 @@ err:
static int igt_ppgtt_alloc(void *arg)
{
struct drm_i915_private *dev_priv = arg;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
u64 size, last, limit;
int err = 0;
@@ -156,7 +157,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = __hw_ppgtt_create(dev_priv);
+ ppgtt = __ppgtt_create(dev_priv);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -208,7 +209,7 @@ static int igt_ppgtt_alloc(void *arg)
err_ppgtt_cleanup:
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -294,9 +295,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
count = n;
@@ -998,7 +999,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
unsigned long end_time))
{
struct drm_file *file;
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
int err;
@@ -1020,7 +1021,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_put(ppgtt);
+ i915_vm_put(&ppgtt->vm);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1170,7 +1171,7 @@ static int igt_ggtt_page(void *arg)
if (err)
goto out_unpin;
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
@@ -1217,7 +1218,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
drm_mm_remove_node(&tmp);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -1232,7 +1233,7 @@ static void track_vma_bind(struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
- obj->bind_count++; /* track for eviction later */
+ atomic_inc(&obj->bind_count); /* track for eviction later */
__i915_gem_object_pin_pages(obj);
vma->pages = obj->mm.pages;
@@ -1250,7 +1251,6 @@ static int exercise_mock(struct drm_i915_private *i915,
{
const u64 limit = totalram_pages() << PAGE_SHIFT;
struct i915_gem_context *ctx;
- struct i915_hw_ppgtt *ppgtt;
IGT_TIMEOUT(end_time);
int err;
@@ -1258,10 +1258,7 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx)
return -ENOMEM;
- ppgtt = ctx->ppgtt;
- GEM_BUG_ON(!ppgtt);
-
- err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
+ err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
mock_context_close(ctx);
return err;
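
With i915_hw_ppgtt renamed to i915_ppgtt, the separate ppgtt refcount is gone; lifetime is tied to the embedded address space, so release goes through i915_vm_put(&ppgtt->vm). A sketch, assuming struct_mutex is held as in the hunks above (example_ppgtt_cycle is hypothetical):

```c
/* Hedged sketch: ppgtt lifetime now rides on the vm refcount. */
static int example_ppgtt_cycle(struct drm_i915_private *i915)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = __ppgtt_create(i915);	/* as in igt_ppgtt_alloc() above */
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	/* ... exercise ppgtt->vm ... */

	i915_vm_put(&ppgtt->vm);	/* replaces i915_ppgtt_put(ppgtt) */
	return 0;
}
```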
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
deleted file mode 100644
index b926d1cd165d..000000000000
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "../i915_selftest.h"
-
-#include "igt_flush_test.h"
-#include "mock_gem_device.h"
-#include "huge_gem_object.h"
-
-static int igt_gem_object(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- int err = -ENOMEM;
-
- /* Basic test to ensure we can create an object */
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- pr_err("i915_gem_object_create failed, err=%d\n", err);
- goto out;
- }
-
- err = 0;
- i915_gem_object_put(obj);
-out:
- return err;
-}
-
-static int igt_phys_object(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- int err;
-
- /* Create an object and bind it to a contiguous set of physical pages,
- * i.e. exercise the i915_gem_object_phys API.
- */
-
- obj = i915_gem_object_create(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- pr_err("i915_gem_object_create failed, err=%d\n", err);
- goto out;
- }
-
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err) {
- pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
- goto out_obj;
- }
-
- if (obj->ops != &i915_gem_phys_ops) {
- pr_err("i915_gem_object_attach_phys did not create a phys object\n");
- err = -EINVAL;
- goto out_obj;
- }
-
- if (!atomic_read(&obj->mm.pages_pin_count)) {
- pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
- err = -EINVAL;
- goto out_obj;
- }
-
- /* Make the object dirty so that put_pages must copy back the data */
- mutex_lock(&i915->drm.struct_mutex);
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err) {
- pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
- err);
- goto out_obj;
- }
-
-out_obj:
- i915_gem_object_put(obj);
-out:
- return err;
-}
-
-static int igt_gem_huge(void *arg)
-{
- const unsigned int nreal = 509; /* just to be awkward */
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- unsigned int n;
- int err;
-
- /* Basic sanity check of our huge fake object allocation */
-
- obj = huge_gem_object(i915,
- nreal * PAGE_SIZE,
- i915->ggtt.vm.total + PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
- nreal, obj->base.size / PAGE_SIZE, err);
- goto out;
- }
-
- for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
- if (i915_gem_object_get_page(obj, n) !=
- i915_gem_object_get_page(obj, n % nreal)) {
- pr_err("Page lookup mismatch at index %u [%u]\n",
- n, n % nreal);
- err = -EINVAL;
- goto out_unpin;
- }
- }
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
-out:
- i915_gem_object_put(obj);
- return err;
-}
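
The loop above leans on a property of huge_gem_object() that is easy to miss: only nreal pages are actually allocated, and page n of the object aliases backing page n % nreal. An illustrative check of that invariant (pages_alias is a hypothetical helper, not driver code):

```c
/* Hedged illustration of the aliasing that igt_gem_huge() verifies. */
static bool pages_alias(struct drm_i915_gem_object *obj,
			unsigned int n, unsigned int nreal)
{
	/* Page n should resolve to the same backing page as n % nreal. */
	return i915_gem_object_get_page(obj, n) ==
	       i915_gem_object_get_page(obj, n % nreal);
}
```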
-
-struct tile {
- unsigned int width;
- unsigned int height;
- unsigned int stride;
- unsigned int size;
- unsigned int tiling;
- unsigned int swizzle;
-};
-
-static u64 swizzle_bit(unsigned int bit, u64 offset)
-{
- return (offset & BIT_ULL(bit)) >> (bit - 6);
-}
-
-static u64 tiled_offset(const struct tile *tile, u64 v)
-{
- u64 x, y;
-
- if (tile->tiling == I915_TILING_NONE)
- return v;
-
- y = div64_u64_rem(v, tile->stride, &x);
- v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
-
- if (tile->tiling == I915_TILING_X) {
- v += y * tile->width;
- v += div64_u64_rem(x, tile->width, &x) << tile->size;
- v += x;
- } else if (tile->width == 128) {
- const unsigned int ytile_span = 16;
- const unsigned int ytile_height = 512;
-
- v += y * ytile_span;
- v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
- v += x;
- } else {
- const unsigned int ytile_span = 32;
- const unsigned int ytile_height = 256;
-
- v += y * ytile_span;
- v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
- v += x;
- }
-
- switch (tile->swizzle) {
- case I915_BIT_6_SWIZZLE_9:
- v ^= swizzle_bit(9, v);
- break;
- case I915_BIT_6_SWIZZLE_9_10:
- v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
- break;
- case I915_BIT_6_SWIZZLE_9_11:
- v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
- break;
- case I915_BIT_6_SWIZZLE_9_10_11:
- v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
- break;
- }
-
- return v;
-}
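
The swizzle cases above all reduce to the same operation: swizzle_bit(b, v) extracts address bit b and shifts it down to bit position 6, so XORing it in flips bit 6 of the offset whenever bit b is set. A standalone arithmetic example (illustrative only, not driver code):

```c
/* Hedged worked example of I915_BIT_6_SWIZZLE_9: bit 9 is folded
 * into bit 6 of the tiled offset.
 */
static u64 example_swizzle_9(u64 offset)
{
	u64 bit9 = (offset & BIT_ULL(9)) >> (9 - 6);

	/* 0x200 -> 0x240 (bit 9 set flips bit 6); 0x100 -> 0x100. */
	return offset ^ bit9;
}
```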
-
-static int check_partial_mapping(struct drm_i915_gem_object *obj,
- const struct tile *tile,
- unsigned long end_time)
-{
- const unsigned int nreal = obj->scratch / PAGE_SIZE;
- const unsigned long npages = obj->base.size / PAGE_SIZE;
- struct i915_vma *vma;
- unsigned long page;
- int err;
-
- if (igt_timeout(end_time,
- "%s: timed out before tiling=%d stride=%d\n",
- __func__, tile->tiling, tile->stride))
- return -EINTR;
-
- err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
- if (err) {
- pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
- tile->tiling, tile->stride, err);
- return err;
- }
-
- GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
- GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
-
- for_each_prime_number_from(page, 1, npages) {
- struct i915_ggtt_view view =
- compute_partial_view(obj, page, MIN_CHUNK_PAGES);
- u32 __iomem *io;
- struct page *p;
- unsigned int n;
- u64 offset;
- u32 *cpu;
-
- GEM_BUG_ON(view.partial.size > nreal);
- cond_resched();
-
- err = i915_gem_object_set_to_gtt_domain(obj, true);
- if (err) {
- pr_err("Failed to flush to GTT write domain; err=%d\n",
- err);
- return err;
- }
-
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
- if (IS_ERR(vma)) {
- pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
- page, (int)PTR_ERR(vma));
- return PTR_ERR(vma);
- }
-
- n = page - view.partial.offset;
- GEM_BUG_ON(n >= view.partial.size);
-
- io = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
- if (IS_ERR(io)) {
- pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
- page, (int)PTR_ERR(io));
- return PTR_ERR(io);
- }
-
- iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
- i915_vma_unpin_iomap(vma);
-
- offset = tiled_offset(tile, page << PAGE_SHIFT);
- if (offset >= obj->base.size)
- continue;
-
- flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
-
- p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
- cpu = kmap(p) + offset_in_page(offset);
- drm_clflush_virt_range(cpu, sizeof(*cpu));
- if (*cpu != (u32)page) {
- pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
- page, n,
- view.partial.offset,
- view.partial.size,
- vma->size >> PAGE_SHIFT,
- tile->tiling ? tile_row_pages(obj) : 0,
- vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
- offset >> PAGE_SHIFT,
- (unsigned int)offset_in_page(offset),
- offset,
- (u32)page, *cpu);
- err = -EINVAL;
- }
- *cpu = 0;
- drm_clflush_virt_range(cpu, sizeof(*cpu));
- kunmap(p);
- if (err)
- return err;
-
- i915_vma_destroy(vma);
- }
-
- return 0;
-}
-
-static int igt_partial_tiling(void *arg)
-{
- const unsigned int nreal = 1 << 12; /* largest tile row x2 */
- struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj;
- intel_wakeref_t wakeref;
- int tiling;
- int err;
-
- /* We want to check the page mapping and fencing of a large object
- * mmapped through the GTT. The object we create is larger than can
- * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
- * We then check that a write through each partial GGTT vma ends up
- * in the right set of pages within the object, and with the expected
- * tiling, which we verify by manual swizzling.
- */
-
- obj = huge_gem_object(i915,
- nreal << PAGE_SHIFT,
- (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
- nreal, obj->base.size / PAGE_SIZE, err);
- goto out;
- }
-
- mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
-
- if (1) {
- IGT_TIMEOUT(end);
- struct tile tile;
-
- tile.height = 1;
- tile.width = 1;
- tile.size = 0;
- tile.stride = 0;
- tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
- tile.tiling = I915_TILING_NONE;
-
- err = check_partial_mapping(obj, &tile, end);
- if (err && err != -EINTR)
- goto out_unlock;
- }
-
- for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
- IGT_TIMEOUT(end);
- unsigned int max_pitch;
- unsigned int pitch;
- struct tile tile;
-
- if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
- /*
- * The swizzling pattern is actually unknown as it
- * varies based on physical address of each page.
- * See i915_gem_detect_bit_6_swizzle().
- */
- break;
-
- tile.tiling = tiling;
- switch (tiling) {
- case I915_TILING_X:
- tile.swizzle = i915->mm.bit_6_swizzle_x;
- break;
- case I915_TILING_Y:
- tile.swizzle = i915->mm.bit_6_swizzle_y;
- break;
- }
-
- GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
- if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
- tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
- continue;
-
- if (INTEL_GEN(i915) <= 2) {
- tile.height = 16;
- tile.width = 128;
- tile.size = 11;
- } else if (tile.tiling == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(i915)) {
- tile.height = 32;
- tile.width = 128;
- tile.size = 12;
- } else {
- tile.height = 8;
- tile.width = 512;
- tile.size = 12;
- }
-
- if (INTEL_GEN(i915) < 4)
- max_pitch = 8192 / tile.width;
- else if (INTEL_GEN(i915) < 7)
- max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
- else
- max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;
-
- for (pitch = max_pitch; pitch; pitch >>= 1) {
- tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
- if (err == -EINTR)
- goto next_tiling;
- if (err)
- goto out_unlock;
-
- if (pitch > 2 && INTEL_GEN(i915) >= 4) {
- tile.stride = tile.width * (pitch - 1);
- err = check_partial_mapping(obj, &tile, end);
- if (err == -EINTR)
- goto next_tiling;
- if (err)
- goto out_unlock;
- }
-
- if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
- tile.stride = tile.width * (pitch + 1);
- err = check_partial_mapping(obj, &tile, end);
- if (err == -EINTR)
- goto next_tiling;
- if (err)
- goto out_unlock;
- }
- }
-
- if (INTEL_GEN(i915) >= 4) {
- for_each_prime_number(pitch, max_pitch) {
- tile.stride = tile.width * pitch;
- err = check_partial_mapping(obj, &tile, end);
- if (err == -EINTR)
- goto next_tiling;
- if (err)
- goto out_unlock;
- }
- }
-
-next_tiling: ;
- }
-
-out_unlock:
- intel_runtime_pm_put(i915, wakeref);
- mutex_unlock(&i915->drm.struct_mutex);
- i915_gem_object_unpin_pages(obj);
-out:
- i915_gem_object_put(obj);
- return err;
-}
-
-static int make_obj_busy(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_request *rq;
- struct i915_vma *vma;
- int err;
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
-
- rq = i915_request_create(i915->engine[RCS0]->kernel_context);
- if (IS_ERR(rq)) {
- i915_vma_unpin(vma);
- return PTR_ERR(rq);
- }
-
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
- i915_request_add(rq);
-
- __i915_gem_object_release_unless_active(obj);
- i915_vma_unpin(vma);
-
- return err;
-}
-
-static bool assert_mmap_offset(struct drm_i915_private *i915,
- unsigned long size,
- int expected)
-{
- struct drm_i915_gem_object *obj;
- int err;
-
- obj = i915_gem_object_create_internal(i915, size);
- if (IS_ERR(obj))
- return false; /* object creation failed outright */
-
- err = i915_gem_object_create_mmap_offset(obj);
- i915_gem_object_put(obj);
-
- return err == expected;
-}
-
-static void disable_retire_worker(struct drm_i915_private *i915)
-{
- i915_gem_shrinker_unregister(i915);
-
- intel_gt_pm_get(i915);
-
- cancel_delayed_work_sync(&i915->gem.retire_work);
- flush_work(&i915->gem.idle_work);
-}
-
-static void restore_retire_worker(struct drm_i915_private *i915)
-{
- intel_gt_pm_put(i915);
-
- mutex_lock(&i915->drm.struct_mutex);
- igt_flush_test(i915, I915_WAIT_LOCKED);
- mutex_unlock(&i915->drm.struct_mutex);
-
- i915_gem_shrinker_register(i915);
-}
-
-static int igt_mmap_offset_exhaustion(void *arg)
-{
- struct drm_i915_private *i915 = arg;
- struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
- struct drm_i915_gem_object *obj;
- struct drm_mm_node resv, *hole;
- u64 hole_start, hole_end;
- int loop, err;
-
- /* Disable background reaper */
- disable_retire_worker(i915);
- GEM_BUG_ON(!i915->gt.awake);
-
- /* Trim the device mmap space to only a page */
- memset(&resv, 0, sizeof(resv));
- drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- resv.start = hole_start;
- resv.size = hole_end - hole_start - 1; /* PAGE_SIZE units */
- err = drm_mm_reserve_node(mm, &resv);
- if (err) {
- pr_err("Failed to trim VMA manager, err=%d\n", err);
- goto out_park;
- }
- break;
- }
-
- /* Just fits! */
- if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
- pr_err("Unable to insert object into single page hole\n");
- err = -EINVAL;
- goto out;
- }
-
- /* Too large */
- if (!assert_mmap_offset(i915, 2*PAGE_SIZE, -ENOSPC)) {
- pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
- err = -EINVAL;
- goto out;
- }
-
- /* Fill the hole; further allocation attempts should then fail */
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out;
- }
-
- err = i915_gem_object_create_mmap_offset(obj);
- if (err) {
- pr_err("Unable to insert object into reclaimed hole\n");
- goto err_obj;
- }
-
- if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
- pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
- err = -EINVAL;
- goto err_obj;
- }
-
- i915_gem_object_put(obj);
-
- /* Now fill with busy dead objects that we expect to reap */
- for (loop = 0; loop < 3; loop++) {
- intel_wakeref_t wakeref;
-
- if (i915_terminally_wedged(i915))
- break;
-
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out;
- }
-
- err = 0;
- mutex_lock(&i915->drm.struct_mutex);
- with_intel_runtime_pm(i915, wakeref)
- err = make_obj_busy(obj);
- mutex_unlock(&i915->drm.struct_mutex);
- if (err) {
- pr_err("[loop %d] Failed to busy the object\n", loop);
- goto err_obj;
- }
-
- /* NB we rely on the _active_ reference to access obj now */
- GEM_BUG_ON(!i915_gem_object_is_active(obj));
- err = i915_gem_object_create_mmap_offset(obj);
- if (err) {
- pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
- loop, err);
- goto out;
- }
- }
-
-out:
- drm_mm_remove_node(&resv);
-out_park:
- restore_retire_worker(i915);
- return err;
-err_obj:
- i915_gem_object_put(obj);
- goto out;
-}
-
-int i915_gem_object_mock_selftests(void)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_gem_object),
- SUBTEST(igt_phys_object),
- };
- struct drm_i915_private *i915;
- int err;
-
- i915 = mock_gem_device();
- if (!i915)
- return -ENOMEM;
-
- err = i915_subtests(tests, i915);
-
- drm_dev_put(&i915->drm);
- return err;
-}
-
-int i915_gem_object_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_gem_huge),
- SUBTEST(igt_partial_tiling),
- SUBTEST(igt_mmap_offset_exhaustion),
- };
-
- return i915_subtests(tests, i915);
-}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index a953125b14c4..d5dc4427d664 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -16,6 +16,7 @@ selftest(timelines, i915_timeline_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
+selftest(mman, i915_gem_mman_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)
selftest(vma, i915_vma_live_selftests)
selftest(coherency, i915_gem_coherency_live_selftests)
@@ -24,6 +25,8 @@ selftest(gem, i915_gem_live_selftests)
selftest(evict, i915_gem_evict_live_selftests)
selftest(hugepages, i915_gem_huge_page_live_selftests)
selftest(contexts, i915_gem_context_live_selftests)
+selftest(blt, i915_gem_object_blt_live_selftests)
+selftest(client, i915_gem_client_blt_live_selftests)
selftest(reset, intel_reset_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
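
These headers are x-macro lists: each is included several times with a different definition of selftest(), once to declare the entry points and once to build the dispatch table. A hedged sketch of the consuming side (the struct and field names here are illustrative, not the exact ones in i915_selftest.c):

```c
/* Hedged sketch of how i915_live_selftests.h is consumed. */
#define selftest(name, func) int func(struct drm_i915_private *i915);
#include "i915_live_selftests.h"	/* declare every entry point */
#undef selftest

struct example_selftest {
	const char *name;
	int (*live)(struct drm_i915_private *i915);
};

#define selftest(name, func) { .name = #name, .live = func },
static const struct example_selftest example_selftests[] = {
#include "i915_live_selftests.h"	/* build the dispatch table */
};
#undef selftest
```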
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 88e5ab586337..510eb176bb2c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -18,6 +18,7 @@ selftest(engine, intel_engine_cs_mock_selftests)
selftest(timelines, i915_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
+selftest(phys, i915_gem_phys_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index b60591531e4a..298bb7116c51 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -24,12 +24,14 @@
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+#include "gem/selftests/mock_context.h"
+
#include "i915_random.h"
+#include "i915_selftest.h"
#include "igt_live_test.h"
#include "lib_sw_fence.h"
-#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -72,12 +74,12 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T) != -ETIME) {
+ if (i915_request_wait(request, 0, T) != -ETIME) {
pr_err("request wait succeeded (expected timeout before submit!)\n");
goto out_unlock;
}
@@ -89,7 +91,7 @@ static int igt_wait_request(void *arg)
i915_request_add(request);
- if (i915_request_wait(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ if (i915_request_wait(request, 0, 0) != -ETIME) {
pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
goto out_unlock;
}
@@ -99,12 +101,12 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
+ if (i915_request_wait(request, 0, T / 2) != -ETIME) {
pr_err("request wait succeeded (expected timeout!)\n");
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out!\n");
goto out_unlock;
}
@@ -114,7 +116,7 @@ static int igt_wait_request(void *arg)
goto out_unlock;
}
- if (i915_request_wait(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ if (i915_request_wait(request, 0, T) == -ETIME) {
pr_err("request wait timed out when already complete!\n");
goto out_unlock;
}
@@ -512,7 +514,7 @@ int i915_request_mock_selftests(void)
if (!i915)
return -ENOMEM;
- with_intel_runtime_pm(i915, wakeref)
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
err = i915_subtests(tests, i915);
drm_dev_put(&i915->drm);
@@ -535,7 +537,7 @@ static int live_nop_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
@@ -572,9 +574,7 @@ static int live_nop_request(void *arg)
i915_request_add(request);
}
- i915_request_wait(request,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -595,7 +595,7 @@ static int live_nop_request(void *arg)
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -680,7 +680,7 @@ static int live_empty_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
@@ -704,9 +704,7 @@ static int live_empty_request(void *arg)
err = PTR_ERR(request);
goto out_batch;
}
- i915_request_wait(request,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
for_each_prime_number_from(prime, 1, 8192) {
times[1] = ktime_get_raw();
@@ -718,9 +716,7 @@ static int live_empty_request(void *arg)
goto out_batch;
}
}
- i915_request_wait(request,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT);
times[1] = ktime_sub(ktime_get_raw(), times[1]);
if (prime == 1)
@@ -744,7 +740,7 @@ out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -752,8 +748,7 @@ out_unlock:
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
@@ -838,7 +833,7 @@ static int live_all_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
@@ -867,12 +862,9 @@ static int live_all_engines(void *arg)
GEM_BUG_ON(err);
request[id]->batch = batch;
- if (!i915_gem_object_has_active_reference(batch->obj)) {
- i915_gem_object_get(batch->obj);
- i915_gem_object_set_active_reference(batch->obj);
- }
-
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, request[id], 0);
+ i915_vma_unlock(batch);
GEM_BUG_ON(err);
i915_request_get(request[id]);
@@ -897,8 +889,7 @@ static int live_all_engines(void *arg)
for_each_engine(engine, i915, id) {
long timeout;
- timeout = i915_request_wait(request[id],
- I915_WAIT_LOCKED,
+ timeout = i915_request_wait(request[id], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -921,7 +912,7 @@ out_request:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -944,7 +935,7 @@ static int live_sequential_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
@@ -987,12 +978,11 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(err);
request[id]->batch = batch;
+ i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, request[id], 0);
+ i915_vma_unlock(batch);
GEM_BUG_ON(err);
- i915_gem_object_set_active_reference(batch->obj);
- i915_vma_get(batch);
-
i915_request_get(request[id]);
i915_request_add(request[id]);
@@ -1016,8 +1006,7 @@ static int live_sequential_engines(void *arg)
goto out_request;
}
- timeout = i915_request_wait(request[id],
- I915_WAIT_LOCKED,
+ timeout = i915_request_wait(request[id], 0,
MAX_SCHEDULE_TIMEOUT);
if (timeout < 0) {
err = timeout;
@@ -1051,7 +1040,7 @@ out_request:
i915_request_put(request[id]);
}
out_unlock:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -1116,7 +1105,7 @@ static int live_breadcrumbs_smoketest(void *arg)
* On real hardware this time.
*/
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
file = mock_file(i915);
if (IS_ERR(file)) {
@@ -1223,7 +1212,7 @@ out_threads:
out_file:
mock_file_free(i915, file);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return ret;
}
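
The other systematic change in this file is that i915_request_wait() no longer takes I915_WAIT_LOCKED: waits are now legal without struct_mutex, so callers pass 0 for the flags. A minimal sketch (example_wait is hypothetical):

```c
/* Hedged sketch of the new flagless request wait. */
static long example_wait(struct i915_request *rq)
{
	long timeout;

	timeout = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;	/* negative errno on failure */

	return 0;	/* request completed */
}
```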
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index ff9ebe50fae8..76d3977f1d4b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -6,8 +6,10 @@
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/i915_gem_pm.h"
+
#include "i915_random.h"
+#include "i915_selftest.h"
#include "igt_flush_test.h"
#include "mock_gem_device.h"
@@ -513,7 +515,7 @@ static int live_hwsp_engine(void *arg)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
@@ -556,7 +558,7 @@ out:
i915_timeline_put(tl);
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
@@ -589,7 +591,7 @@ static int live_hwsp_alternate(void *arg)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for (n = 0; n < NUM_TIMELINES; n++) {
@@ -632,7 +634,7 @@ out:
i915_timeline_put(tl);
}
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kvfree(timelines);
@@ -656,7 +658,7 @@ static int live_hwsp_wrap(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl)) {
@@ -722,7 +724,7 @@ static int live_hwsp_wrap(void *arg)
i915_request_add(rq);
- if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
err = -EIO;
goto out;
@@ -747,7 +749,7 @@ out:
out_free:
i915_timeline_put(tl);
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -769,7 +771,7 @@ static int live_hwsp_recycle(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
count = 0;
for_each_engine(engine, i915, id) {
@@ -795,9 +797,7 @@ static int live_hwsp_recycle(void *arg)
goto out;
}
- if (i915_request_wait(rq,
- I915_WAIT_LOCKED,
- HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
pr_err("Wait for timeline writes timed out!\n");
i915_timeline_put(tl);
err = -EIO;
@@ -823,7 +823,7 @@ static int live_hwsp_recycle(void *arg)
out:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 0027c1fac336..fbc79b14823a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -24,10 +24,12 @@
#include <linux/prime_numbers.h>
-#include "../i915_selftest.h"
+#include "gem/selftests/mock_context.h"
+
+#include "i915_scatterlist.h"
+#include "i915_selftest.h"
#include "mock_gem_device.h"
-#include "mock_context.h"
#include "mock_gtt.h"
static bool assert_vma(struct i915_vma *vma,
@@ -36,7 +38,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != &ctx->ppgtt->vm) {
+ if (vma->vm != ctx->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -111,7 +113,7 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) {
- struct i915_address_space *vm = &ctx->ppgtt->vm;
+ struct i915_address_space *vm = ctx->vm;
struct i915_vma *vma;
int err;
@@ -871,7 +873,7 @@ static int igt_vma_remapped_gtt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for (t = types; *t; t++) {
for (p = planes; p->width; p++) {
@@ -884,7 +886,9 @@ static int igt_vma_remapped_gtt(void *arg)
unsigned int x, y;
int err;
+ i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
if (err)
goto out;
@@ -961,7 +965,7 @@ static int igt_vma_remapped_gtt(void *arg)
}
out:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_put(obj);
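
The hunk above shows the new locking rule in miniature: domain transitions now require the per-object lock. A sketch of the pattern (example_set_gtt_domain is hypothetical):

```c
/* Hedged sketch: take the object lock around a domain change. */
static int example_set_gtt_domain(struct drm_i915_gem_object *obj)
{
	int err;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);

	return err;
}
```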
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index e42f3c58536a..5bfd1b2626a2 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -4,9 +4,11 @@
* Copyright © 2018 Intel Corporation
*/
-#include "../i915_drv.h"
+#include "gem/i915_gem_context.h"
+
+#include "i915_drv.h"
+#include "i915_selftest.h"
-#include "../i915_selftest.h"
#include "igt_flush_test.h"
int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/selftests/igt_gem_utils.c
deleted file mode 100644
index 16891b1a3e50..000000000000
--- a/drivers/gpu/drm/i915/selftests/igt_gem_utils.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- */
-
-#include "igt_gem_utils.h"
-
-#include "gt/intel_context.h"
-
-#include "../i915_gem_context.h"
-#include "../i915_gem_pm.h"
-#include "../i915_request.h"
-
-struct i915_request *
-igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
-{
- struct intel_context *ce;
- struct i915_request *rq;
-
- /*
- * Pinning the contexts may generate requests in order to acquire
- * GGTT space, so do this first before we reserve a seqno for
- * ourselves.
- */
- ce = i915_gem_context_get_engine(ctx, engine->id);
- if (IS_ERR(ce))
- return ERR_CAST(ce);
-
- rq = intel_context_create_request(ce);
- intel_context_put(ce);
-
- return rq;
-}
diff --git a/drivers/gpu/drm/i915/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/selftests/igt_gem_utils.h
deleted file mode 100644
index 0f17251cf75d..000000000000
--- a/drivers/gpu/drm/i915/selftests/igt_gem_utils.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2018 Intel Corporation
- */
-
-#ifndef __IGT_GEM_UTILS_H__
-#define __IGT_GEM_UTILS_H__
-
-struct i915_request;
-struct i915_gem_context;
-struct intel_engine_cs;
-
-struct i915_request *
-igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
-
-#endif /* __IGT_GEM_UTILS_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index ece8a8a0d3b0..1e59b543cf27 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -4,7 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
-#include "igt_gem_utils.h"
+#include "gem/selftests/igt_gem_utils.h"
+
#include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
@@ -75,16 +76,11 @@ static int move_to_active(struct i915_vma *vma,
{
int err;
+ i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, flags);
- if (err)
- return err;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
+ i915_vma_unlock(vma);
- return 0;
+ return err;
}
struct i915_request *
@@ -93,17 +89,16 @@ igt_spinner_create_request(struct igt_spinner *spin,
struct intel_engine_cs *engine,
u32 arbitration_command)
{
- struct i915_address_space *vm = &ctx->ppgtt->vm;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
- vma = i915_vma_instance(spin->obj, vm, NULL);
+ vma = i915_vma_instance(spin->obj, ctx->vm, NULL);
if (IS_ERR(vma))
return ERR_CAST(vma);
- hws = i915_vma_instance(spin->hws, vm, NULL);
+ hws = i915_vma_instance(spin->hws, ctx->vm, NULL);
if (IS_ERR(hws))
return ERR_CAST(hws);
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index d312e7cdab68..34a88ac9b47a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -7,13 +7,12 @@
#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
#define __I915_SELFTESTS_IGT_SPINNER_H__
-#include "../i915_selftest.h"
-
+#include "gem/i915_gem_context.h"
#include "gt/intel_engine.h"
-#include "../i915_drv.h"
-#include "../i915_request.h"
-#include "../i915_gem_context.h"
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "i915_selftest.h"
struct igt_spinner {
struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index b05a21eaa8f4..6ca8584cd64c 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -22,7 +22,8 @@
*
*/
-#include "../i915_selftest.h"
+#include "i915_selftest.h"
+#include "gem/i915_gem_pm.h"
/* max doorbell number + negative test for each client type */
#define ATTEMPTS (GUC_NUM_DOORBELLS + GUC_CLIENT_PRIORITY_NUM)
@@ -143,7 +144,7 @@ static int igt_guc_clients(void *args)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
guc = &dev_priv->guc;
if (!guc) {
@@ -226,7 +227,7 @@ out:
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -246,7 +247,7 @@ static int igt_guc_doorbells(void *arg)
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- wakeref = intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
guc = &dev_priv->guc;
if (!guc) {
@@ -339,7 +340,7 @@ out:
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(dev_priv, wakeref);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index e0d7ebecb215..86815c6072a1 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -176,7 +176,7 @@ static int live_forcewake_ops(void *arg)
return 0;
}
- wakeref = intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
@@ -247,7 +247,7 @@ static int live_forcewake_ops(void *arg)
}
out_rpm:
- intel_runtime_pm_put(i915, wakeref);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
index 1f9927e10f3a..e54d6bc23dc3 100644
--- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
+++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.h
@@ -1,10 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* lib_sw_fence.h - library routines for testing N:M synchronisation points
*
* Copyright (C) 2017 Intel Corporation
- *
- * This file is released under the GPLv2.
- *
*/
#ifndef _LIB_SW_FENCE_H_
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
deleted file mode 100644
index 10e67c931ed1..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "mock_context.h"
-#include "mock_gtt.h"
-
-struct i915_gem_context *
-mock_context(struct drm_i915_private *i915,
- const char *name)
-{
- struct i915_gem_context *ctx;
- struct i915_gem_engines *e;
- int ret;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return NULL;
-
- kref_init(&ctx->ref);
- INIT_LIST_HEAD(&ctx->link);
- ctx->i915 = i915;
-
- mutex_init(&ctx->engines_mutex);
- e = default_engines(ctx);
- if (IS_ERR(e))
- goto err_free;
- RCU_INIT_POINTER(ctx->engines, e);
-
- INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
- INIT_LIST_HEAD(&ctx->handles_list);
- INIT_LIST_HEAD(&ctx->hw_id_link);
- mutex_init(&ctx->mutex);
-
- ret = i915_gem_context_pin_hw_id(ctx);
- if (ret < 0)
- goto err_engines;
-
- if (name) {
- struct i915_hw_ppgtt *ppgtt;
-
- ctx->name = kstrdup(name, GFP_KERNEL);
- if (!ctx->name)
- goto err_put;
-
- ppgtt = mock_ppgtt(i915, name);
- if (!ppgtt)
- goto err_put;
-
- __set_ppgtt(ctx, ppgtt);
- }
-
- return ctx;
-
-err_engines:
- free_engines(rcu_access_pointer(ctx->engines));
-err_free:
- kfree(ctx);
- return NULL;
-
-err_put:
- i915_gem_context_set_closed(ctx);
- i915_gem_context_put(ctx);
- return NULL;
-}
-
-void mock_context_close(struct i915_gem_context *ctx)
-{
- context_close(ctx);
-}
-
-void mock_init_contexts(struct drm_i915_private *i915)
-{
- init_contexts(i915);
-}
-
-struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file)
-{
- struct i915_gem_context *ctx;
- int err;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- ctx = i915_gem_create_context(i915, 0);
- if (IS_ERR(ctx))
- return ctx;
-
- err = gem_context_register(ctx, file->driver_priv);
- if (err < 0)
- goto err_ctx;
-
- return ctx;
-
-err_ctx:
- context_close(ctx);
- return ERR_PTR(err);
-}
-
-struct i915_gem_context *
-kernel_context(struct drm_i915_private *i915)
-{
- return i915_gem_context_create_kernel(i915, I915_PRIORITY_NORMAL);
-}
-
-void kernel_context_close(struct i915_gem_context *ctx)
-{
- context_close(ctx);
-}
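
For reference, callers of the (now relocated) mock context helpers pair mock_context() with mock_context_close(); passing a name also gives the context a mock ppgtt. A usage sketch (example_with_mock_ctx is hypothetical):

```c
/* Hedged usage sketch for the mock context helpers. */
static int example_with_mock_ctx(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;

	ctx = mock_context(i915, "example");	/* named: gets a mock ppgtt */
	if (!ctx)
		return -ENOMEM;

	/* ... exercise ctx->vm, create vmas, etc. ... */

	mock_context_close(ctx);
	return 0;
}
```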
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h
deleted file mode 100644
index 29b9d60a158b..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_context.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __MOCK_CONTEXT_H
-#define __MOCK_CONTEXT_H
-
-void mock_init_contexts(struct drm_i915_private *i915);
-
-struct i915_gem_context *
-mock_context(struct drm_i915_private *i915,
- const char *name);
-
-void mock_context_close(struct i915_gem_context *ctx);
-
-struct i915_gem_context *
-live_context(struct drm_i915_private *i915, struct drm_file *file);
-
-struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
-void kernel_context_close(struct i915_gem_context *ctx);
-
-#endif /* !__MOCK_CONTEXT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
deleted file mode 100644
index ca682caf1062..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "mock_dmabuf.h"
-
-static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction dir)
-{
- struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
- struct sg_table *st;
- struct scatterlist *sg;
- int i, err;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- return ERR_PTR(-ENOMEM);
-
- err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
- if (err)
- goto err_free;
-
- sg = st->sgl;
- for (i = 0; i < mock->npages; i++) {
- sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
- sg = sg_next(sg);
- }
-
- if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
- err = -ENOMEM;
- goto err_st;
- }
-
- return st;
-
-err_st:
- sg_free_table(st);
-err_free:
- kfree(st);
- return ERR_PTR(err);
-}
-
-static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *st,
- enum dma_data_direction dir)
-{
- dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
- sg_free_table(st);
- kfree(st);
-}
-
-static void mock_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
- int i;
-
- for (i = 0; i < mock->npages; i++)
- put_page(mock->pages[i]);
-
- kfree(mock);
-}
-
-static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
-}
-
-static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- vm_unmap_ram(vaddr, mock->npages);
-}
-
-static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- return kmap(mock->pages[page_num]);
-}
-
-static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
- struct mock_dmabuf *mock = to_mock(dma_buf);
-
- kunmap(mock->pages[page_num]);
-}
-
-static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
-{
- return -ENODEV;
-}
-
-static const struct dma_buf_ops mock_dmabuf_ops = {
- .map_dma_buf = mock_map_dma_buf,
- .unmap_dma_buf = mock_unmap_dma_buf,
- .release = mock_dmabuf_release,
- .map = mock_dmabuf_kmap,
- .unmap = mock_dmabuf_kunmap,
- .mmap = mock_dmabuf_mmap,
- .vmap = mock_dmabuf_vmap,
- .vunmap = mock_dmabuf_vunmap,
-};
-
-static struct dma_buf *mock_dmabuf(int npages)
-{
- struct mock_dmabuf *mock;
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *dmabuf;
- int i;
-
- mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
- GFP_KERNEL);
- if (!mock)
- return ERR_PTR(-ENOMEM);
-
- mock->npages = npages;
- for (i = 0; i < npages; i++) {
- mock->pages[i] = alloc_page(GFP_KERNEL);
- if (!mock->pages[i])
- goto err;
- }
-
- exp_info.ops = &mock_dmabuf_ops;
- exp_info.size = npages * PAGE_SIZE;
- exp_info.flags = O_CLOEXEC;
- exp_info.priv = mock;
-
- dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(dmabuf))
- goto err;
-
- return dmabuf;
-
-err:
- while (i--)
- put_page(mock->pages[i]);
- kfree(mock);
- return ERR_PTR(-ENOMEM);
-}
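
The exporter above is driven through the stock dma-buf API: a consumer attaches a device, maps the attachment to get an sg_table, and unwinds in reverse order. A hedged sketch of that round trip (example_use_mock_dmabuf is hypothetical):

```c
/* Hedged sketch of consuming the mock exporter via the dma-buf API. */
static int example_use_mock_dmabuf(struct device *dev, struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *st;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	st = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(st)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(st);
	}

	/* ... read or write through the sg_table ... */

	dma_buf_unmap_attachment(attach, st, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	return 0;
}
```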
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
deleted file mode 100644
index ec80613159b9..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
+++ /dev/null
@@ -1,41 +0,0 @@
-
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __MOCK_DMABUF_H__
-#define __MOCK_DMABUF_H__
-
-#include <linux/dma-buf.h>
-
-struct mock_dmabuf {
- int npages;
- struct page *pages[];
-};
-
-static struct mock_dmabuf *to_mock(struct dma_buf *buf)
-{
- return buf->priv;
-}
-
-#endif /* !__MOCK_DMABUF_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 9fd02025d382..64bc51400ae7 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -27,13 +27,14 @@
#include "gt/mock_engine.h"
-#include "mock_context.h"
#include "mock_request.h"
#include "mock_gem_device.h"
-#include "mock_gem_object.h"
#include "mock_gtt.h"
#include "mock_uncore.h"
+#include "gem/selftests/mock_context.h"
+#include "gem/selftests/mock_gem_object.h"
+
void mock_device_flush(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -55,7 +56,6 @@ static void mock_device_release(struct drm_device *dev)
mutex_lock(&i915->drm.struct_mutex);
mock_device_flush(i915);
- i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
flush_work(&i915->gem.idle_work);
@@ -151,8 +151,6 @@ struct drm_i915_private *mock_gem_device(void)
i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
- intel_runtime_pm_init_early(i915);
-
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
@@ -167,6 +165,8 @@ struct drm_i915_private *mock_gem_device(void)
i915->drm.pdev = pdev;
i915->drm.dev_private = i915;
+ intel_runtime_pm_init_early(&i915->runtime_pm);
+
/* Using the global GTT may ask questions about KMS users, so prepare */
drm_mode_config_init(&i915->drm);
@@ -202,6 +202,7 @@ struct drm_i915_private *mock_gem_device(void)
INIT_LIST_HEAD(&i915->gt.active_rings);
INIT_LIST_HEAD(&i915->gt.closed_vma);
+ spin_lock_init(&i915->gt.closed_lock);
mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/selftests/mock_gem_object.h
deleted file mode 100644
index 20acdbee7bd0..000000000000
--- a/drivers/gpu/drm/i915/selftests/mock_gem_object.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __MOCK_GEM_OBJECT_H__
-#define __MOCK_GEM_OBJECT_H__
-
-struct mock_object {
- struct drm_i915_gem_object base;
-};
-
-#endif /* !__MOCK_GEM_OBJECT_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index cd83929fde8e..f625c307a406 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -55,17 +55,14 @@ static void mock_cleanup(struct i915_address_space *vm)
{
}
-struct i915_hw_ppgtt *
-mock_ppgtt(struct drm_i915_private *i915,
- const char *name)
+struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
{
- struct i915_hw_ppgtt *ppgtt;
+ struct i915_ppgtt *ppgtt;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return NULL;
- kref_init(&ppgtt->ref);
ppgtt->vm.i915 = i915;
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index 40d544bde1d5..3387393286de 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -28,8 +28,6 @@
void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
void mock_fini_ggtt(struct i915_ggtt *ggtt);
-struct i915_hw_ppgtt *
-mock_ppgtt(struct drm_i915_private *i915,
- const char *name);
+struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name);
#endif /* !__MOCK_GTT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index b99f7576153c..9390fc09984b 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -22,9 +22,9 @@
*
*/
+#include "gem/selftests/igt_gem_utils.h"
#include "gt/mock_engine.h"
-#include "igt_gem_utils.h"
#include "mock_request.h"
struct i915_request *
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index e084476469ef..65b52be23d42 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -13,7 +13,6 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
timeline->i915 = NULL;
timeline->fence_context = context;
- spin_lock_init(&timeline->lock);
mutex_init(&timeline->mutex);
INIT_ACTIVE_REQUEST(&timeline->last_request);
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index cd6d2a16071f..d599186d5b71 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -24,7 +24,8 @@
#include <linux/prime_numbers.h>
#include <linux/random.h>
-#include "../i915_selftest.h"
+#include "i915_selftest.h"
+#include "i915_utils.h"
#define PFN_BIAS (1 << 10)