author    Thomas Hellström <thomas.hellstrom@linux.intel.com>  2022-01-10 18:22:17 +0100
committer Thomas Hellström <thomas.hellstrom@linux.intel.com>  2022-01-11 10:54:11 +0100
commit    2f6b90da919208621725d1703c0391f63724fa62 (patch)
tree      6f642fbfcc697fab154caaeb56398d441b23bc74 /drivers/gpu/drm/i915/i915_vma.c
parent    drm/i915: Don't pin the object pages during pending vma binds (diff)
drm/i915: Use vma resources for async unbinding
Implement async (non-blocking) unbinding by not syncing the vma before
calling unbind on the vma_resource. Add the resulting unbind fence to
the object's dma_resv, from where it is picked up by the TTM migration
code. Ideally these unbind fences should be coalesced with the migration
blit fence to avoid stalling the migration blit waiting for unbind, as
they can certainly go on in parallel, but since we don't yet have a
reasonable data structure to use to coalesce fences and attach the
resulting fence to a timeline, we defer that for now.

Note that with async unbinding, even though the unbind waits for the
preceding bind to complete before unbinding, the vma itself might have
been destroyed in the meantime, clearing the vma pages. Therefore we
can only allow async unbinding if we have a refcounted sg-list, and we
keep a refcount on it so that the vma resource pages stay intact until
binding occurs. If this condition is not met, a request for an async
unbind is diverted to a sync unbind.

v2:
- Use a separate kmem_cache for vma resources for now to isolate their
  memory allocation and aid debugging.
- Move the check for vm closed to the actual unbinding thread.
  Regardless of whether the vm is closed, we need the unbind fence to
  properly wait for capture.
- Clear vma_res::vm on unbind and update its documentation.
v4:
- Take cache coloring into account when searching for vma resources
  pending unbind. (Matthew Auld)
v5:
- Fix timeout and error check in i915_vma_resource_bind_dep_await().
- Avoid taking a reference on the object for async binding if async
  unbind capable.
- Fix braces around a single-line if statement.
v6:
- Fix up the cache coloring adjustment. (Kernel test robot <lkp@intel.com>)
- Don't allow async unbinding if the vma_res pages are not the same as
  the object pages. (Matthew Auld)
v7:
- s/unsigned long/u64/ in a number of places (Matthew Auld)

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220110172219.107131-5-thomas.hellstrom@linux.intel.com
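To make the control flow above concrete, here is a condensed sketch
(not part of the patch) of what __i915_vma_evict() turns into in the
diff below. The helper name evict_sketch is made up for illustration;
i915_vma_resource_unbind(), dma_fence_wait() and dma_fence_put() are
the real calls, and error handling and flag bookkeeping are omitted.

/*
 * Condensed sketch of the new __i915_vma_evict() control flow.
 * Illustration only; see the full diff below for the real code.
 */
static struct dma_fence *evict_sketch(struct i915_vma *vma, bool async)
{
	struct dma_fence *fence;

	/* Unbinding now produces a fence instead of completing inline. */
	fence = i915_vma_resource_unbind(vma->resource);
	vma->resource = NULL;

	if (!async && fence) {
		/* Sync path: wait here under the vm mutex, as before. */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		fence = NULL;
	}

	/* Async path: the caller publishes the fence in the dma_resv. */
	return fence;
}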
Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 205
1 file changed, 175 insertions(+), 30 deletions(-)
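The refcounted sg-list condition from the commit message shows up in
the diff below as work->rsgt. A minimal sketch of that ownership
pattern follows; the work struct is abbreviated (the real
struct i915_vma_work has more members), and only the get/put pairing
is the point.

/*
 * Sketch of the rsgt ownership rule for async binds: if a refcounted
 * sg-list exists, pin it so the pages outlive the vma; otherwise hold
 * a reference on the object itself. Abbreviated for illustration.
 */
struct vma_work_sketch {
	struct i915_refct_sgt *rsgt;
	struct drm_i915_gem_object *pinned;
};

static void queue_bind_sketch(struct i915_vma *vma,
			      struct vma_work_sketch *work)
{
	if (vma->obj->mm.rsgt)
		work->rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt);
	else
		work->pinned = i915_gem_object_get(vma->obj);
}

static void release_bind_sketch(struct vma_work_sketch *work)
{
	if (work->rsgt)
		i915_refct_sgt_put(work->rsgt);
	if (work->pinned)
		i915_gem_object_put(work->pinned);
}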
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9f00dfa5e022..b86666f653ca 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -285,9 +285,10 @@ struct i915_vma_work {
struct dma_fence_work base;
struct i915_address_space *vm;
struct i915_vm_pt_stash stash;
- struct i915_vma *vma;
+ struct i915_vma_resource *vma_res;
struct drm_i915_gem_object *pinned;
struct i915_sw_dma_fence_cb cb;
+ struct i915_refct_sgt *rsgt;
enum i915_cache_level cache_level;
unsigned int flags;
};
@@ -295,10 +296,11 @@ struct i915_vma_work {
static void __vma_bind(struct dma_fence_work *work)
{
struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
- struct i915_vma *vma = vw->vma;
+ struct i915_vma_resource *vma_res = vw->vma_res;
+
+ vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
+ vma_res, vw->cache_level, vw->flags);
- vma->ops->bind_vma(vw->vm, &vw->stash,
- vma->resource, vw->cache_level, vw->flags);
}
static void __vma_release(struct dma_fence_work *work)
@@ -310,6 +312,10 @@ static void __vma_release(struct dma_fence_work *work)
i915_vm_free_pt_stash(vw->vm, &vw->stash);
i915_vm_put(vw->vm);
+ if (vw->vma_res)
+ i915_vma_resource_put(vw->vma_res);
+ if (vw->rsgt)
+ i915_refct_sgt_put(vw->rsgt);
}
static const struct dma_fence_work_ops bind_ops = {
@@ -379,13 +385,11 @@ i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
{
struct drm_i915_gem_object *obj = vma->obj;
- i915_vma_resource_init(vma_res, vma->pages, &vma->page_sizes,
+ i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
i915_gem_object_is_readonly(obj),
i915_gem_object_is_lmem(obj),
- vma->private,
- vma->node.start,
- vma->node.size,
- vma->size);
+ vma->ops, vma->private, vma->node.start,
+ vma->node.size, vma->size);
}
/**
@@ -409,6 +413,7 @@ int i915_vma_bind(struct i915_vma *vma,
{
u32 bind_flags;
u32 vma_flags;
+ int ret;
lockdep_assert_held(&vma->vm->mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -417,12 +422,12 @@ int i915_vma_bind(struct i915_vma *vma,
if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
vma->vm->total))) {
- kfree(vma_res);
+ i915_vma_resource_free(vma_res);
return -ENODEV;
}
if (GEM_DEBUG_WARN_ON(!flags)) {
- kfree(vma_res);
+ i915_vma_resource_free(vma_res);
return -EINVAL;
}
@@ -434,12 +439,30 @@ int i915_vma_bind(struct i915_vma *vma,
bind_flags &= ~vma_flags;
if (bind_flags == 0) {
- kfree(vma_res);
+ i915_vma_resource_free(vma_res);
return 0;
}
GEM_BUG_ON(!atomic_read(&vma->pages_count));
+ /* Wait for or await async unbinds touching our range */
+ if (work && bind_flags & vma->vm->bind_async_flags)
+ ret = i915_vma_resource_bind_dep_await(vma->vm,
+ &work->base.chain,
+ vma->node.start,
+ vma->node.size,
+ true,
+ GFP_NOWAIT |
+ __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN);
+ else
+ ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
+ vma->node.size, true);
+ if (ret) {
+ i915_vma_resource_free(vma_res);
+ return ret;
+ }
+
if (vma->resource || !vma_res) {
/* Rebinding with an additional I915_VMA_*_BIND */
GEM_WARN_ON(!vma_flags);
@@ -452,9 +475,11 @@ int i915_vma_bind(struct i915_vma *vma,
if (work && bind_flags & vma->vm->bind_async_flags) {
struct dma_fence *prev;
- work->vma = vma;
+ work->vma_res = i915_vma_resource_get(vma->resource);
work->cache_level = cache_level;
work->flags = bind_flags;
+ if (vma->obj->mm.rsgt)
+ work->rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt);
/*
* Note we only want to chain up to the migration fence on
@@ -475,14 +500,24 @@ int i915_vma_bind(struct i915_vma *vma,
work->base.dma.error = 0; /* enable the queue_work() */
- work->pinned = i915_gem_object_get(vma->obj);
+ /*
+ * If we don't have the refcounted pages list, keep a reference
+ * on the object to avoid waiting for the async bind to
+ * complete in the object destruction path.
+ */
+ if (!work->rsgt)
+ work->pinned = i915_gem_object_get(vma->obj);
} else {
if (vma->obj) {
int ret;
ret = i915_gem_object_wait_moving_fence(vma->obj, true);
- if (ret)
+ if (ret) {
+ i915_vma_resource_free(vma->resource);
+ vma->resource = NULL;
+
return ret;
+ }
}
vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
bind_flags);
@@ -1700,8 +1735,9 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
return 0;
}
-void __i915_vma_evict(struct i915_vma *vma)
+struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
+ struct i915_vma_resource *vma_res = vma->resource;
struct dma_fence *unbind_fence;
GEM_BUG_ON(i915_vma_is_pinned(vma));
@@ -1734,27 +1770,39 @@ void __i915_vma_evict(struct i915_vma *vma)
GEM_BUG_ON(vma->fence);
GEM_BUG_ON(i915_vma_has_userfault(vma));
- if (likely(atomic_read(&vma->vm->open))) {
- trace_i915_vma_unbind(vma);
- vma->ops->unbind_vma(vma->vm, vma->resource);
- }
+ /* Object backend must be async capable. */
+ GEM_WARN_ON(async && !vma->obj->mm.rsgt);
+
+ /* If vm is not open, unbind is a nop. */
+ vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
+ atomic_read(&vma->vm->open);
+ trace_i915_vma_unbind(vma);
+
+ unbind_fence = i915_vma_resource_unbind(vma_res);
+ vma->resource = NULL;
+
atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
&vma->flags);
- unbind_fence = i915_vma_resource_unbind(vma->resource);
- i915_vma_resource_put(vma->resource);
- vma->resource = NULL;
+ /* Object backend must be async capable. */
+ GEM_WARN_ON(async && !vma->obj->mm.rsgt);
i915_vma_detach(vma);
- vma_unbind_pages(vma);
+
+ if (!async && unbind_fence) {
+ dma_fence_wait(unbind_fence, false);
+ dma_fence_put(unbind_fence);
+ unbind_fence = NULL;
+ }
/*
- * This uninterruptible wait under the vm mutex is currently
- * only ever blocking while the vma is being captured from.
- * With async unbinding, this wait here will be removed.
+ * Binding itself may not have completed until the unbind fence signals,
+ * so don't drop the pages until that happens, unless the resource is
+ * async_capable.
*/
- dma_fence_wait(unbind_fence, false);
- dma_fence_put(unbind_fence);
+
+ vma_unbind_pages(vma);
+ return unbind_fence;
}
int __i915_vma_unbind(struct i915_vma *vma)
@@ -1781,12 +1829,47 @@ int __i915_vma_unbind(struct i915_vma *vma)
return ret;
GEM_BUG_ON(i915_vma_is_active(vma));
- __i915_vma_evict(vma);
+ __i915_vma_evict(vma, false);
drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
return 0;
}
+static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
+{
+ struct dma_fence *fence;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return NULL;
+
+ if (i915_vma_is_pinned(vma) ||
+ &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
+ return ERR_PTR(-EAGAIN);
+
+ /*
+ * We probably need to replace this with awaiting the fences of the
+ * object's dma_resv when the vma active goes away. When doing that
+ * we need to be careful to not add the vma_resource unbind fence
+ * immediately to the object's dma_resv, because then unbinding
+ * the next vma from the object, in case there are many, will
+ * actually await the unbinding of the previous vmas, which is
+ * undesirable.
+ */
+ if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
+ I915_ACTIVE_AWAIT_EXCL |
+ I915_ACTIVE_AWAIT_ACTIVE) < 0) {
+ return ERR_PTR(-EBUSY);
+ }
+
+ fence = __i915_vma_evict(vma, true);
+
+ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
+
+ return fence;
+}
+
int i915_vma_unbind(struct i915_vma *vma)
{
struct i915_address_space *vm = vma->vm;
@@ -1823,6 +1906,68 @@ out_rpm:
return err;
}
+int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_address_space *vm = vma->vm;
+ intel_wakeref_t wakeref = 0;
+ struct dma_fence *fence;
+ int err;
+
+ /*
+ * We need the dma-resv lock since we add the
+ * unbind fence to the dma-resv object.
+ */
+ assert_object_held(obj);
+
+ if (!drm_mm_node_allocated(&vma->node))
+ return 0;
+
+ if (i915_vma_is_pinned(vma)) {
+ vma_print_allocator(vma, "is pinned");
+ return -EAGAIN;
+ }
+
+ if (!obj->mm.rsgt)
+ return -EBUSY;
+
+ err = dma_resv_reserve_shared(obj->base.resv, 1);
+ if (err)
+ return -EBUSY;
+
+ /*
+ * It would be great if we could grab this wakeref from the
+ * async unbind work if needed, but we can't because it uses
+ * kmalloc and it's in the dma-fence signalling critical path.
+ */
+ if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
+ wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+
+ if (trylock_vm && !mutex_trylock(&vm->mutex)) {
+ err = -EBUSY;
+ goto out_rpm;
+ } else if (!trylock_vm) {
+ err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
+ if (err)
+ goto out_rpm;
+ }
+
+ fence = __i915_vma_unbind_async(vma);
+ mutex_unlock(&vm->mutex);
+ if (IS_ERR_OR_NULL(fence)) {
+ err = PTR_ERR_OR_ZERO(fence);
+ goto out_rpm;
+ }
+
+ dma_resv_add_shared_fence(obj->base.resv, fence);
+ dma_fence_put(fence);
+
+out_rpm:
+ if (wakeref)
+ intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+ return err;
+}
+
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
i915_gem_object_make_unshrinkable(vma->obj);
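
Finally, a hypothetical caller-side pattern for the new
i915_vma_unbind_async() export. This file does not show its callers,
so the fallback policy below is an assumption, based on the commit
message's note that an impossible async unbind is diverted to a sync
one.

/* Hypothetical caller pattern; not taken from this patch. */
static int unbind_for_migration_sketch(struct i915_vma *vma)
{
	int err;

	/* The async path adds the unbind fence to the object's dma_resv. */
	assert_object_held(vma->obj);

	err = i915_vma_unbind_async(vma, false);

	/* Divert to a sync unbind when async is not possible. */
	if (err == -EBUSY || err == -EAGAIN)
		err = i915_vma_unbind(vma);

	return err;
}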