Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c  127
1 file changed, 74 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index eeaa8d0d0407..4f6db539571a 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -47,7 +47,7 @@ static inline void assert_vma_held_evict(const struct i915_vma *vma)
* This is the only exception to the requirement of the object lock
* being held.
*/
- if (atomic_read(&vma->vm->open))
+ if (kref_read(&vma->vm->ref))
assert_object_held_shared(vma->obj);
}
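
The hunk above replaces the dedicated vm open count with the vm's ordinary
refcount: a vm counts as live while kref_read() on its kref is nonzero. A
minimal sketch of that liveness test, using a hypothetical toy_vm type
rather than the real i915 structures:

    #include <linux/kref.h>

    struct toy_vm {
            struct kref ref;        /* sole lifetime counter once the open count is gone */
    };

    /* hypothetical helper: nonzero refcount means the vm is still usable */
    static bool toy_vm_is_alive(const struct toy_vm *vm)
    {
            return kref_read(&vm->ref) > 0;
    }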
@@ -113,6 +113,7 @@ vma_create(struct drm_i915_gem_object *obj,
struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
struct rb_node *rb, **p;
+ int err;
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
@@ -121,8 +122,6 @@ vma_create(struct drm_i915_gem_object *obj,
if (vma == NULL)
return ERR_PTR(-ENOMEM);
- kref_init(&vma->ref);
- vma->vm = i915_vm_get(vm);
vma->ops = &vm->vma_ops;
vma->obj = obj;
vma->size = obj->base.size;
@@ -138,6 +137,8 @@ vma_create(struct drm_i915_gem_object *obj,
}
INIT_LIST_HEAD(&vma->closed_link);
+ INIT_LIST_HEAD(&vma->obj_link);
+ RB_CLEAR_NODE(&vma->obj_node);
if (view && view->type != I915_GGTT_VIEW_NORMAL) {
vma->ggtt_view = *view;
@@ -163,8 +164,16 @@ vma_create(struct drm_i915_gem_object *obj,
GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
- spin_lock(&obj->vma.lock);
+ err = mutex_lock_interruptible(&vm->mutex);
+ if (err) {
+ pos = ERR_PTR(err);
+ goto err_vma;
+ }
+
+ vma->vm = vm;
+ list_add_tail(&vma->vm_link, &vm->unbound_list);
+ spin_lock(&obj->vma.lock);
if (i915_is_ggtt(vm)) {
if (unlikely(overflows_type(vma->size, u32)))
goto err_unlock;
@@ -222,13 +231,15 @@ vma_create(struct drm_i915_gem_object *obj,
list_add_tail(&vma->obj_link, &obj->vma.list);
spin_unlock(&obj->vma.lock);
+ mutex_unlock(&vm->mutex);
return vma;
err_unlock:
spin_unlock(&obj->vma.lock);
+ list_del_init(&vma->vm_link);
+ mutex_unlock(&vm->mutex);
err_vma:
- i915_vm_put(vm);
i915_vma_free(vma);
return pos;
}
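
With the extra vm reference gone, vma_create() now takes vm->mutex
interruptibly and parks the new vma on vm->unbound_list, and the error path
must unlink it again before freeing. The lock/insert/unwind shape, sketched
with hypothetical toy_* names:

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct toy_vm {
            struct mutex mutex;
            struct list_head unbound_list;
    };

    /* hypothetical: publish a freshly created node on the unbound list */
    static int toy_track(struct toy_vm *vm, struct list_head *link)
    {
            int err = mutex_lock_interruptible(&vm->mutex);

            if (err)
                    return err;     /* -EINTR instead of blocking on a signal */
            list_add_tail(link, &vm->unbound_list);
            mutex_unlock(&vm->mutex);
            return 0;
    }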
@@ -279,7 +290,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
- GEM_BUG_ON(!atomic_read(&vm->open));
+ GEM_BUG_ON(!kref_read(&vm->ref));
spin_lock(&obj->vma.lock);
vma = i915_vma_lookup(obj, vm, view);
@@ -322,7 +333,6 @@ static void __vma_release(struct dma_fence_work *work)
i915_gem_object_put(vw->pinned);
i915_vm_free_pt_stash(vw->vm, &vw->stash);
- i915_vm_put(vw->vm);
if (vw->vma_res)
i915_vma_resource_put(vw->vma_res);
}
@@ -515,21 +525,18 @@ int i915_vma_bind(struct i915_vma *vma,
if (!work->vma_res->bi.pages_rsgt)
work->pinned = i915_gem_object_get(vma->obj);
} else {
- if (vma->obj) {
- ret = i915_gem_object_wait_moving_fence(vma->obj, true);
- if (ret) {
- i915_vma_resource_free(vma->resource);
- vma->resource = NULL;
+ ret = i915_gem_object_wait_moving_fence(vma->obj, true);
+ if (ret) {
+ i915_vma_resource_free(vma->resource);
+ vma->resource = NULL;
- return ret;
- }
+ return ret;
}
vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
bind_flags);
}
- if (vma->obj)
- set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
atomic_or(bind_flags, &vma->flags);
return 0;
@@ -541,7 +548,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
int err;
if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
- return IO_ERR_PTR(-EINVAL);
+ return IOMEM_ERR_PTR(-EINVAL);
if (!i915_gem_object_is_lmem(vma->obj)) {
if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
@@ -594,7 +601,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
err_unpin:
__i915_vma_unpin(vma);
err:
- return IO_ERR_PTR(err);
+ return IOMEM_ERR_PTR(err);
}
void i915_vma_flush_writes(struct i915_vma *vma)
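
Both error returns in i915_vma_pin_iomap() switch from the driver-local
IO_ERR_PTR() to the generic IOMEM_ERR_PTR() from <linux/io.h>, which is
ERR_PTR() cast into the __iomem address space so sparse stays quiet. A usage
sketch with a hypothetical helper:

    #include <linux/err.h>
    #include <linux/io.h>

    /* hypothetical: return an error pointer in the __iomem domain */
    static void __iomem *toy_map(void __iomem *base, bool ok)
    {
            if (!ok)
                    return IOMEM_ERR_PTR(-EINVAL); /* callers test with IS_ERR() */
            return base;
    }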
@@ -841,7 +848,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
- list_add_tail(&vma->vm_link, &vma->vm->bound_list);
+ list_move_tail(&vma->vm_link, &vma->vm->bound_list);
return 0;
}
@@ -857,7 +864,7 @@ i915_vma_detach(struct i915_vma *vma)
* vma, we can drop its hold on the backing storage and allow
* it to be reaped by the shrinker.
*/
- list_del(&vma->vm_link);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
}
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
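
Since a vma now always sits on exactly one of the vm's lists, insertion and
detach become list_move_tail() between bound_list and unbound_list rather
than bare add/delete; list_move_tail() unlinks the entry from its current
list before appending. Sketched with hypothetical toy_* types:

    #include <linux/list.h>

    struct toy_vm {
            struct list_head bound_list;
            struct list_head unbound_list;
    };

    struct toy_vma {
            struct list_head vm_link;       /* always on exactly one vm list */
    };

    static void toy_bind(struct toy_vma *vma, struct toy_vm *vm)
    {
            list_move_tail(&vma->vm_link, &vm->bound_list);
    }

    static void toy_detach(struct toy_vma *vma, struct toy_vm *vm)
    {
            list_move_tail(&vma->vm_link, &vm->unbound_list);
    }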
@@ -1360,8 +1367,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (flags & PIN_GLOBAL)
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
- moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL;
- if (flags & vma->vm->bind_async_flags || moving) {
+ if (flags & vma->vm->bind_async_flags) {
/* lock VM */
err = i915_vm_lock_objects(vma->vm, ww);
if (err)
@@ -1373,7 +1379,11 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
goto err_rpm;
}
- work->vm = i915_vm_get(vma->vm);
+ work->vm = vma->vm;
+
+ err = i915_gem_object_get_moving_fence(vma->obj, &moving);
+ if (err)
+ goto err_rpm;
dma_fence_work_chain(&work->base, moving);
@@ -1555,9 +1565,7 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (ww)
return __i915_ggtt_pin(vma, ww, align, flags);
-#ifdef CONFIG_LOCKDEP
- WARN_ON(dma_resv_held(vma->obj->base.resv));
-#endif
+ lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
for_i915_gem_ww(&_ww, err, true) {
err = i915_gem_object_lock(vma->obj, &_ww);
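
The open-coded #ifdef CONFIG_LOCKDEP / WARN_ON() pair becomes a single
lockdep_assert_not_held(), which states the "caller must not hold the
reservation lock" rule directly and compiles away without lockdep. A tiny
sketch around a hypothetical function:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    /* hypothetical: assert the caller is outside the critical section */
    static void toy_unlocked_path(struct mutex *m)
    {
            lockdep_assert_not_held(m);     /* no-op unless CONFIG_LOCKDEP */
    }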
@@ -1618,16 +1626,6 @@ void i915_vma_reopen(struct i915_vma *vma)
spin_unlock_irq(&gt->closed_lock);
}
-void i915_vma_release(struct kref *ref)
-{
- struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
-
- i915_vm_put(vma->vm);
- i915_active_fini(&vma->active);
- GEM_WARN_ON(vma->resource);
- i915_vma_free(vma);
-}
-
static void force_unbind(struct i915_vma *vma)
{
if (!drm_mm_node_allocated(&vma->node))
@@ -1638,7 +1636,7 @@ static void force_unbind(struct i915_vma *vma)
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
}
-static void release_references(struct i915_vma *vma)
+static void release_references(struct i915_vma *vma, bool vm_ddestroy)
{
struct drm_i915_gem_object *obj = vma->obj;
struct intel_gt *gt = vma->vm->gt;
@@ -1649,13 +1647,19 @@ static void release_references(struct i915_vma *vma)
list_del(&vma->obj_link);
if (!RB_EMPTY_NODE(&vma->obj_node))
rb_erase(&vma->obj_node, &obj->vma.tree);
+
spin_unlock(&obj->vma.lock);
spin_lock_irq(&gt->closed_lock);
__i915_vma_remove_closed(vma);
spin_unlock_irq(&gt->closed_lock);
- __i915_vma_put(vma);
+ if (vm_ddestroy)
+ i915_vm_resv_put(vma->vm);
+
+ i915_active_fini(&vma->active);
+ GEM_WARN_ON(vma->resource);
+ i915_vma_free(vma);
}
/**
@@ -1670,8 +1674,12 @@ static void release_references(struct i915_vma *vma)
* - __i915_gem_object_pages_fini()
* - __i915_vm_close() - Blocks the above function by taking a reference on
* the object.
- * - __i915_vma_parked() - Blocks the above functions by taking an open-count on
- * the vm and a reference on the object.
+ * - __i915_vma_parked() - Blocks the above functions by taking a reference
+ * on the vm and a reference on the object. Also takes the object lock so
+ * destruction from __i915_vma_parked() can be blocked by holding the
+ * object lock. Since the object lock is only allowed from within i915 with
+ * an object refcount, holding the object lock also implicitly blocks the
+ * vma freeing from __i915_gem_object_pages_fini().
*
* Because of locks taken during destruction, a vma is also guaranteed to
* stay alive while the following locks are held if it was looked up while
@@ -1679,24 +1687,27 @@ static void release_references(struct i915_vma *vma)
* - vm->mutex
* - obj->vma.lock
* - gt->closed_lock
- *
- * A vma user can also temporarily keep the vma alive while holding a vma
- * reference.
*/
void i915_vma_destroy_locked(struct i915_vma *vma)
{
lockdep_assert_held(&vma->vm->mutex);
force_unbind(vma);
- release_references(vma);
+ list_del_init(&vma->vm_link);
+ release_references(vma, false);
}
void i915_vma_destroy(struct i915_vma *vma)
{
+ bool vm_ddestroy;
+
mutex_lock(&vma->vm->mutex);
force_unbind(vma);
+ list_del_init(&vma->vm_link);
+ vm_ddestroy = vma->vm_ddestroy;
+ vma->vm_ddestroy = false;
mutex_unlock(&vma->vm->mutex);
- release_references(vma);
+ release_references(vma, vm_ddestroy);
}
void i915_vma_parked(struct intel_gt *gt)
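
i915_vma_destroy() snapshots and clears vm_ddestroy under vm->mutex so that
exactly one path performs the final i915_vm_resv_put() after the lock is
dropped. The handoff pattern, as a self-contained sketch with hypothetical
toy_* names:

    #include <linux/mutex.h>

    struct toy {
            struct mutex lock;
            bool ddestroy;          /* set when vm destruction was deferred */
    };

    /* hypothetical: claim the deferred-destroy flag exactly once */
    static bool toy_claim_ddestroy(struct toy *t)
    {
            bool claimed;

            mutex_lock(&t->lock);
            claimed = t->ddestroy;  /* snapshot while serialized... */
            t->ddestroy = false;    /* ...so a racing caller sees false */
            mutex_unlock(&t->lock);
            return claimed;
    }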
@@ -1714,7 +1725,7 @@ void i915_vma_parked(struct intel_gt *gt)
if (!kref_get_unless_zero(&obj->base.refcount))
continue;
- if (!i915_vm_tryopen(vm)) {
+ if (!i915_vm_tryget(vm)) {
i915_gem_object_put(obj);
continue;
}
@@ -1740,7 +1751,7 @@ void i915_vma_parked(struct intel_gt *gt)
}
i915_gem_object_put(obj);
- i915_vm_close(vm);
+ i915_vm_put(vm);
}
}
@@ -1822,20 +1833,28 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
intel_frontbuffer_put(front);
}
+ if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
+ err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+ if (unlikely(err))
+ return err;
+ }
+
if (fence) {
- dma_resv_add_excl_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_WRITE);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
}
} else {
if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
- err = dma_resv_reserve_shared(vma->obj->base.resv, 1);
+ err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
if (unlikely(err))
return err;
}
if (fence) {
- dma_resv_add_shared_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_READ);
obj->write_domain = 0;
}
}
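
The exclusive/shared fence calls give way to the unified dma-resv API: slots
are reserved up front with dma_resv_reserve_fences() in both branches, and
dma_resv_add_fence() tags each fence with an explicit DMA_RESV_USAGE_* level
(WRITE for the render fence, READ otherwise). The pattern, sketched with the
reservation lock assumed held by the caller:

    #include <linux/dma-fence.h>
    #include <linux/dma-resv.h>

    /* sketch: publish a fence with explicit usage; caller holds dma_resv_lock() */
    static int toy_publish(struct dma_resv *resv, struct dma_fence *fence,
                           bool is_write)
    {
            int err = dma_resv_reserve_fences(resv, 1); /* may allocate, can fail */

            if (err)
                    return err;
            dma_resv_add_fence(resv, fence, is_write ?
                               DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
            return 0;
    }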
@@ -1891,7 +1910,9 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
/* If vm is not open, unbind is a nop. */
vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
- atomic_read(&vma->vm->open);
+ kref_read(&vma->vm->ref);
+ vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
+ vma->vm->skip_pte_rewrite;
trace_i915_vma_unbind(vma);
unbind_fence = i915_vma_resource_unbind(vma_res);
@@ -2047,7 +2068,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
if (!obj->mm.rsgt)
return -EBUSY;
- err = dma_resv_reserve_shared(obj->base.resv, 1);
+ err = dma_resv_reserve_fences(obj->base.resv, 1);
if (err)
return -EBUSY;
@@ -2075,7 +2096,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
goto out_rpm;
}
- dma_resv_add_shared_fence(obj->base.resv, fence);
+ dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
dma_fence_put(fence);
out_rpm: