author	Daniel Vetter <daniel.vetter@ffwll.ch>	2019-11-04 18:37:20 +0100
committer	Maarten Lankhorst <maarten.lankhorst@linux.intel.com>	2019-11-07 09:58:54 +0100
commit	74ceefd10b1f40b0b4bc71bcf6fe14d4df66c163 (patch)
tree	b01b9b7ba9e4903bdb240366e67b5a5b5b127e87 /drivers/gpu
parent	lockdep: add might_lock_nested() (diff)
drm/i915: use might_lock_nested in get_pages annotation
So strictly speaking the existing annotation is also ok, because we have
a chain of obj->mm.lock#I915_MM_GET_PAGES -> fs_reclaim -> obj->mm.lock
(the shrinker cannot get at an object while we're in get_pages, hence
this is safe). But it's confusing, so try to take the right subclass of
the lock.

This reduces our lockdep-based checking a bit, but it is also less
fragile in case we ever change the nesting around.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Will Deacon <will@kernel.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191104173720.2696-3-daniel.vetter@ffwll.ch
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index edaf7126a84d..e5750d506cc9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -271,10 +271,27 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
+ I915_MM_NORMAL = 0,
+ /*
+ * Only used by struct_mutex, when called "recursively" from
+ * direct-reclaim-esque code. Safe because there is only ever one
+ * struct_mutex in the entire system.
+ */
+ I915_MM_SHRINKER = 1,
+ /*
+ * Used for obj->mm.lock when allocating pages. Safe because the object
+ * isn't yet on any LRU, and therefore the shrinker can't deadlock on
+ * it. As soon as the object has pages, obj->mm.lock nests within
+ * fs_reclaim.
+ */
+ I915_MM_GET_PAGES = 1,
+};
+
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- might_lock(&obj->mm.lock);
+ might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
return 0;
@@ -317,23 +334,6 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(obj);
}
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
- I915_MM_NORMAL = 0,
- /*
- * Only used by struct_mutex, when called "recursively" from
- * direct-reclaim-esque code. Safe because there is only ever one
- * struct_mutex in the entire system.
- */
- I915_MM_SHRINKER = 1,
- /*
- * Used for obj->mm.lock when allocating pages. Safe because the object
- * isn't yet on any LRU, and therefore the shrinker can't deadlock on
- * it. As soon as the object has pages, obj->mm.lock nests within
- * fs_reclaim.
- */
- I915_MM_GET_PAGES = 1,
-};
-
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);