author    Ingo Molnar <mingo@kernel.org>  2020-10-09 08:55:17 +0200
committer Ingo Molnar <mingo@kernel.org>  2020-10-09 08:55:17 +0200
commit    e705d397965811ac528d7213b42d74ffe43caf38 (patch)
tree      8a5bbe85cc42e64992b97859976e307027f83e33 /include/linux/lockdep.h
parent    locking/atomics: Check atomic-arch-fallback.h too (diff)
parent    lockdep: Revert "lockdep: Use raw_cpu_*() for per-cpu variables" (diff)
Merge branch 'locking/urgent' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/lockdep.h')
-rw-r--r--  include/linux/lockdep.h | 29
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 57d642d378c7..f5594879175a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -537,19 +537,19 @@ do { \
#define lock_map_release(l) lock_release(l, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
-# define might_lock(lock) \
+# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-# define might_lock_read(lock) \
+# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
-# define might_lock_nested(lock, subclass) \
+# define might_lock_nested(lock, subclass) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
@@ -559,44 +559,39 @@ do { \
DECLARE_PER_CPU(int, hardirqs_enabled);
DECLARE_PER_CPU(int, hardirq_context);
+DECLARE_PER_CPU(unsigned int, lockdep_recursion);
-/*
- * The below lockdep_assert_*() macros use raw_cpu_read() to access the above
- * per-cpu variables. This is required because this_cpu_read() will potentially
- * call into preempt/irq-disable and that obviously isn't right. This is also
- * correct because when IRQs are enabled, it doesn't matter if we accidentally
- * read the value from our previous CPU.
- */
+#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
#define lockdep_assert_irqs_enabled() \
do { \
- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirqs_enabled)); \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
} while (0)
#define lockdep_assert_irqs_disabled() \
do { \
- WARN_ON_ONCE(debug_locks && raw_cpu_read(hardirqs_enabled)); \
+ WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
} while (0)
#define lockdep_assert_in_irq() \
do { \
- WARN_ON_ONCE(debug_locks && !raw_cpu_read(hardirq_context)); \
+ WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
} while (0)
#define lockdep_assert_preemption_enabled() \
do { \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
- debug_locks && \
+ __lockdep_enabled && \
(preempt_count() != 0 || \
- !raw_cpu_read(hardirqs_enabled))); \
+ !this_cpu_read(hardirqs_enabled))); \
} while (0)
#define lockdep_assert_preemption_disabled() \
do { \
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
- debug_locks && \
+ __lockdep_enabled && \
(preempt_count() == 0 && \
- raw_cpu_read(hardirqs_enabled))); \
+ this_cpu_read(hardirqs_enabled))); \
} while (0)
#else
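For reference, a minimal usage sketch, not part of this commit: a hypothetical per-CPU update helper that documents its required calling context with the assertion macros touched above. The helper and per-CPU variable names are invented for illustration; with CONFIG_PROVE_LOCKING and lockdep active (__lockdep_enabled true), calling it with interrupts or preemption enabled fires a one-time WARN.

#include <linux/lockdep.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_update_counter(void)
{
	/* Caller must run with local interrupts disabled ... */
	lockdep_assert_irqs_disabled();
	/* ... which also implies preemption is off on this CPU. */
	lockdep_assert_preemption_disabled();

	__this_cpu_inc(demo_counter);
}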