author		Maarten Lankhorst <maarten.lankhorst@linux.intel.com>	2022-01-28 09:57:39 +0100
committer	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>	2022-01-28 12:17:51 +0100
commit		a594525c82e0b8d677a7e5fd13c7c115d41e9722 (patch)
tree		c4d527b9e66c276c5409e50676a25ac507660f01 /drivers/gpu/drm/i915/i915_vma.c
parent		drm/i915: Lock dpt_obj around set_cache_level, v2. (diff)
drm/i915: Allow dead vm to unbind vma's without lock.
i915_gem_vm_close may take the lock, and we currently have no better way
of handling this. At least for now, allow a path in which holding vm->mutex
is sufficient. This is the case, because the object destroy path will
forcefully take vm->mutex now.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220128085739.1464568-1-maarten.lankhorst@linux.intel.com
Reviewed-by: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
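For context, here is a minimal sketch of the two call paths the new assertion
distinguishes. The wrapper functions evict_from_live_vm() and
evict_from_dead_vm() and the simplified locking sequences are illustrative
assumptions, not code from this patch; only __i915_vma_evict(),
i915_gem_object_lock()/unlock() and vm->mutex come from the i915 driver
itself.

/*
 * Illustrative sketch only: how assert_vma_held_evict() separates the
 * normal eviction path from dead-vm teardown. The two wrappers below
 * are hypothetical; error handling and async eviction are omitted.
 */

/* Live vm: vm->open is still nonzero, so the caller must hold the
 * object's dma-resv lock in addition to vm->mutex. */
static void evict_from_live_vm(struct i915_vma *vma)
{
	i915_gem_object_lock(vma->obj, NULL);	/* object lock held: assertion passes */
	mutex_lock(&vma->vm->mutex);
	__i915_vma_evict(vma, false);
	mutex_unlock(&vma->vm->mutex);
	i915_gem_object_unlock(vma->obj);
}

/* Dead vm: vm->open has dropped to zero, so holding vm->mutex alone is
 * sufficient; the object-lock requirement is waived by the assertion. */
static void evict_from_dead_vm(struct i915_vma *vma)
{
	mutex_lock(&vma->vm->mutex);
	__i915_vma_evict(vma, false);	/* vm->open == 0: no object lock needed */
	mutex_unlock(&vma->vm->mutex);
}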
Diffstat (limited to 'drivers/gpu/drm/i915/i915_vma.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_vma.c | 15 ++++++++++++---
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 2a14a4e8b0bc..22cdc55c4863 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -39,6 +39,17 @@
 #include "i915_vma.h"
 #include "i915_vma_resource.h"
 
+static inline void assert_vma_held_evict(const struct i915_vma *vma)
+{
+	/*
+	 * We may be forced to unbind when the vm is dead, to clean it up.
+	 * This is the only exception to the requirement of the object lock
+	 * being held.
+	 */
+	if (atomic_read(&vma->vm->open))
+		assert_object_held_shared(vma->obj);
+}
+
 static struct kmem_cache *slab_vmas;
 
 static struct i915_vma *i915_vma_alloc(void)
@@ -1721,7 +1732,7 @@ struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
 	struct dma_fence *unbind_fence;
 
 	GEM_BUG_ON(i915_vma_is_pinned(vma));
-	assert_object_held_shared(vma->obj);
+	assert_vma_held_evict(vma);
 
 	if (i915_vma_is_map_and_fenceable(vma)) {
 		/* Force a pagefault for domain tracking on next user access */
@@ -1788,7 +1799,7 @@ int __i915_vma_unbind(struct i915_vma *vma)
 	int ret;
 
 	lockdep_assert_held(&vma->vm->mutex);
-	assert_object_held_shared(vma->obj);
+	assert_vma_held_evict(vma);
 
 	if (!drm_mm_node_allocated(&vma->node))
 		return 0;