author    Peter Zijlstra <peterz@infradead.org>  2017-08-23 13:23:30 +0200
committer Ingo Molnar <mingo@kernel.org>  2017-08-25 11:06:33 +0200
commit    e6f3faa734a00c606b7b06c6b9f15e5627d3245b (patch)
tree      4c3f0047d1fa1796442512e03147c40c026f25a8 /include/linux/lockdep.h
parent    workqueue/lockdep: 'Fix' flush_work() annotation (diff)
locking/lockdep: Fix workqueue crossrelease annotation
The new completion/crossrelease annotations interact unfavourably with the
extant flush_work()/flush_workqueue() annotations.

The problem is that when a single work class does:

  wait_for_completion(&C)

and

  complete(&C)

in different executions, we'll build dependencies like:

  lock_map_acquire(W)
  complete_acquire(C)

and

  lock_map_acquire(W)
  complete_release(C)

which results in the dependency chain W->C->W, which lockdep thinks
spells deadlock, even though there is no deadlock potential since
works are run concurrently.

One possibility would be to change the work 'lock' to recursive-read,
but that would mean hitting a lockdep limitation on recursive locks.
Also, unconditionally switching to recursive-read here would fail to
detect the actual deadlock on single-threaded workqueues, which do
have a problem with this.

For now, forcefully disregard these locks for crossrelease.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: byungchul.park@lge.com
Cc: david@fromorbit.com
Cc: johannes@sipsolutions.net
Cc: oleg@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
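To make the false positive concrete, here is a minimal sketch of the pattern the
message describes; struct req, req_work_fn() and alloc_req() are illustrative
names only and do not appear in the patch:

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/slab.h>

/* Hypothetical request: one work class, two kinds of executions. */
struct req {
	struct work_struct	work;
	struct completion	*done;
	bool			wait;	/* true: wait for *done, false: complete it */
};

static void req_work_fn(struct work_struct *work)
{
	struct req *r = container_of(work, struct req, work);

	/*
	 * The workqueue core has already done lock_map_acquire(W) for
	 * this work class.  The waiting execution then adds
	 * complete_acquire(C), i.e. W -> C ...
	 */
	if (r->wait)
		wait_for_completion(r->done);
	else
		/*
		 * ... while the completing execution, with W still held,
		 * does complete_release(C).  Together they give lockdep
		 * the W -> C -> W chain from the commit message.
		 */
		complete(r->done);
}

static struct req *alloc_req(struct completion *done, bool wait)
{
	struct req *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return NULL;
	/* One INIT_WORK() site, so every req shares the same class W. */
	INIT_WORK(&r->work, req_work_fn);
	r->done = done;
	r->wait = wait;
	return r;
}

Queuing one waiting req and one completing req on an ordinary multi-threaded
workqueue is safe, yet the chain above makes pre-patch lockdep report a
deadlock; on a single-threaded workqueue the same pattern really can deadlock,
which is why the work 'lock' is not simply downgraded to recursive-read.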
Diffstat (limited to 'include/linux/lockdep.h')
include/linux/lockdep.h | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index fc827cab6d6e..78bb7133abed 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -18,6 +18,8 @@ extern int lock_stat;
#define MAX_LOCKDEP_SUBCLASSES 8UL
+#include <linux/types.h>
+
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
@@ -578,11 +580,11 @@ extern void lock_commit_crosslock(struct lockdep_map *lock);
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), .cross = 0, }
-extern void crossrelease_hist_start(enum xhlock_context_t c);
+extern void crossrelease_hist_start(enum xhlock_context_t c, bool force);
extern void crossrelease_hist_end(enum xhlock_context_t c);
extern void lockdep_init_task(struct task_struct *task);
extern void lockdep_free_task(struct task_struct *task);
-#else
+#else /* !CROSSRELEASE */
#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
/*
* To initialize a lockdep_map statically use this macro.
@@ -591,11 +593,11 @@ extern void lockdep_free_task(struct task_struct *task);
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
{ .name = (_name), .key = (void *)(_key), }
-static inline void crossrelease_hist_start(enum xhlock_context_t c) {}
+static inline void crossrelease_hist_start(enum xhlock_context_t c, bool force) {}
static inline void crossrelease_hist_end(enum xhlock_context_t c) {}
static inline void lockdep_init_task(struct task_struct *task) {}
static inline void lockdep_free_task(struct task_struct *task) {}
-#endif
+#endif /* CROSSRELEASE */
#ifdef CONFIG_LOCK_STAT
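The interface change above adds a 'force' argument to crossrelease_hist_start().
As a hedged sketch of how a caller might use it: the function name
example_run_work() is hypothetical, XHLOCK_PROC is assumed to be the
process-context value of enum xhlock_context_t in this kernel series, and the
real workqueue call sites are filtered out of this view of the commit:

#include <linux/lockdep.h>

static void example_run_work(void)		/* hypothetical caller */
{
	/* The work "lock" W is already held at this point. */

	/*
	 * force == true: per the commit message, forcefully disregard
	 * the locks held by this context when starting a new crosslock
	 * history window, so W does not end up in completion
	 * dependencies and the false W -> C -> W chain is never built.
	 */
	crossrelease_hist_start(XHLOCK_PROC, true);

	/* ... run the work function, possibly wait_for_completion() ... */

	crossrelease_hist_end(XHLOCK_PROC);

	/* ... W is released after the work function returns ... */
}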