author		Thomas Hellström <thomas.hellstrom@linux.intel.com>	2022-01-10 18:22:19 +0100
committer	Thomas Hellström <thomas.hellstrom@linux.intel.com>	2022-01-11 10:54:11 +0100
commit		60dc43d1190db1bf80c696ab4561ed53f8f42f33
tree		89e5048947c6ae86c12eae7f45685724cf457fcc /drivers/gpu/drm/i915/i915_vma.c
parent		drm/i915: Asynchronous migration selftest
drm/i915: Use struct vma_resource instead of struct vma_snapshot
There is always a struct vma_resource guaranteed to be alive when we
access a corresponding struct vma_snapshot. So ditch the latter and,
instead of allocating vma_snapshots, reference the already existing
vma_resource. This requires a couple of extra members in struct
vma_resource, but that's a small price to pay for the simplification.

v2:
- Fix a missing include and declaration (kernel test robot <lkp@intel.com>)

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220110172219.107131-7-thomas.hellstrom@linux.intel.com
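The pattern behind the patch is easiest to see in isolation: the long-lived
vma_resource takes the one reference on the object's refcounted sg-table (the
rsgt), and short-lived users such as the bind worker pin only the resource,
reaching the backing pages through it. Below is a minimal, self-contained C
model of that ownership transfer; the type and member names (rsgt,
vma_resource, pages_rsgt) mirror identifiers in the hunks below, but the
refcount plumbing is a deliberately simplified stand-in, not the i915
implementation.

/* Simplified model: the resource, not each worker, pins the backing store. */
#include <stdio.h>

struct rsgt {			/* stand-in for struct i915_refct_sgt */
	int refs;
};

struct vma_resource {		/* stand-in for struct i915_vma_resource */
	int refs;
	struct rsgt *pages_rsgt; /* mirrors vma_res->bi.pages_rsgt */
};

static struct rsgt *rsgt_get(struct rsgt *r)
{
	r->refs++;
	return r;
}

static void rsgt_put(struct rsgt *r)
{
	if (--r->refs == 0)
		printf("backing store freed\n");
}

static struct vma_resource *resource_get(struct vma_resource *vr)
{
	vr->refs++;
	return vr;
}

static void resource_put(struct vma_resource *vr)
{
	if (--vr->refs)
		return;
	/* The resource drops its single rsgt reference on final put. */
	if (vr->pages_rsgt)
		rsgt_put(vr->pages_rsgt);
	printf("resource freed\n");
}

int main(void)
{
	struct rsgt backing = { .refs = 1 };
	/* The resource takes the reference once, at init time. */
	struct vma_resource res = { .refs = 1,
				    .pages_rsgt = rsgt_get(&backing) };
	/* A bind worker now only pins the resource; the pages stay alive
	 * transitively, so the per-worker rsgt reference (and struct
	 * vma_snapshot) can go away. */
	struct vma_resource *work_res = resource_get(&res);

	printf("pages pinned through resource, rsgt refs = %d\n",
	       work_res->pages_rsgt->refs);
	resource_put(work_res);	/* worker done */
	resource_put(&res);	/* final unbind drops the rsgt reference */
	rsgt_put(&backing);	/* object's own reference */
	return 0;
}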
Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_vma.c | 16 ++++------------
1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index b86666f653ca..9d859b0a3fbe 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -288,7 +288,6 @@ struct i915_vma_work {
 	struct i915_vma_resource *vma_res;
 	struct drm_i915_gem_object *pinned;
 	struct i915_sw_dma_fence_cb cb;
-	struct i915_refct_sgt *rsgt;
 	enum i915_cache_level cache_level;
 	unsigned int flags;
 };
@@ -314,8 +313,6 @@ static void __vma_release(struct dma_fence_work *work)
 	i915_vm_put(vw->vm);
 	if (vw->vma_res)
 		i915_vma_resource_put(vw->vma_res);
-	if (vw->rsgt)
-		i915_refct_sgt_put(vw->rsgt);
 }
 
 static const struct dma_fence_work_ops bind_ops = {
@@ -386,8 +383,8 @@ i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
 	struct drm_i915_gem_object *obj = vma->obj;
 
 	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
-			       i915_gem_object_is_readonly(obj),
-			       i915_gem_object_is_lmem(obj),
+			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
+			       i915_gem_object_is_lmem(obj), obj->mm.region,
 			       vma->ops, vma->private, vma->node.start,
 			       vma->node.size, vma->size);
 }
@@ -478,8 +475,6 @@ int i915_vma_bind(struct i915_vma *vma,
 		work->vma_res = i915_vma_resource_get(vma->resource);
 		work->cache_level = cache_level;
 		work->flags = bind_flags;
-		if (vma->obj->mm.rsgt)
-			work->rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt);
 
 		/*
 		 * Note we only want to chain up to the migration fence on
@@ -505,7 +500,7 @@ int i915_vma_bind(struct i915_vma *vma,
 		 * on the object to avoid waiting for the async bind to
 		 * complete in the object destruction path.
 		 */
-		if (!work->rsgt)
+		if (!work->vma_res->bi.pages_rsgt)
 			work->pinned = i915_gem_object_get(vma->obj);
 	} else {
 		if (vma->obj) {
@@ -1771,7 +1766,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
 	GEM_BUG_ON(i915_vma_has_userfault(vma));
 
 	/* Object backend must be async capable. */
-	GEM_WARN_ON(async && !vma->obj->mm.rsgt);
+	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);
 
 	/* If vm is not open, unbind is a nop. */
 	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
@@ -1784,9 +1779,6 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
 	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
 		   &vma->flags);
 
-	/* Object backend must be async capable. */
-	GEM_WARN_ON(async && !vma->obj->mm.rsgt);
-
 	i915_vma_detach(vma);
 
 	if (!async && unbind_fence) {