Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	792
1 file changed, 535 insertions(+), 257 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef996c76..f74d2d7aa605 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
+#include <linux/bitops.h>
#include <asm/sections.h>
@@ -366,11 +367,21 @@ static int save_trace(struct stack_trace *trace)
save_stack_trace(trace);
+ /*
+ * Some daft arches put -1 at the end to indicate it's a full trace.
+ *
+ * <rant> this is buggy anyway, since it takes a whole extra entry so a
+ * complete trace that maxes out the entries provided will be reported
+ * as incomplete, friggin useless </rant>
+ */
+ if (trace->nr_entries != 0 &&
+     trace->entries[trace->nr_entries-1] == ULONG_MAX)
+ trace->nr_entries--;
+
trace->max_entries = trace->nr_entries;
nr_stack_trace_entries += trace->nr_entries;
- if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
+ if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
if (!debug_locks_off_graph_unlock())
return 0;
@@ -388,20 +399,6 @@ unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;
-unsigned int max_recursion_depth;
-
-static unsigned int lockdep_dependency_gen_id;
-
-static bool lockdep_dependency_visit(struct lock_class *source,
- unsigned int depth)
-{
- if (!depth)
- lockdep_dependency_gen_id++;
- if (source->dep_gen_id == lockdep_dependency_gen_id)
- return true;
- source->dep_gen_id = lockdep_dependency_gen_id;
- return false;
-}
#ifdef CONFIG_DEBUG_LOCKDEP
/*
@@ -431,11 +428,8 @@ atomic_t redundant_softirqs_on;
atomic_t redundant_softirqs_off;
atomic_t nr_unused_locks;
atomic_t nr_cyclic_checks;
-atomic_t nr_cyclic_check_recursions;
atomic_t nr_find_usage_forwards_checks;
-atomic_t nr_find_usage_forwards_recursions;
atomic_t nr_find_usage_backwards_checks;
-atomic_t nr_find_usage_backwards_recursions;
#endif
/*
@@ -551,58 +545,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
}
}
-static void print_lock_class_header(struct lock_class *class, int depth)
-{
- int bit;
-
- printk("%*s->", depth, "");
- print_lock_name(class);
- printk(" ops: %lu", class->ops);
- printk(" {\n");
-
- for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
- if (class->usage_mask & (1 << bit)) {
- int len = depth;
-
- len += printk("%*s %s", depth, "", usage_str[bit]);
- len += printk(" at:\n");
- print_stack_trace(class->usage_traces + bit, len);
- }
- }
- printk("%*s }\n", depth, "");
-
- printk("%*s ... key at: ",depth,"");
- print_ip_sym((unsigned long)class->key);
-}
-
-/*
- * printk all lock dependencies starting at <entry>:
- */
-static void __used
-print_lock_dependencies(struct lock_class *class, int depth)
-{
- struct lock_list *entry;
-
- if (lockdep_dependency_visit(class, depth))
- return;
-
- if (DEBUG_LOCKS_WARN_ON(depth >= 20))
- return;
-
- print_lock_class_header(class, depth);
-
- list_for_each_entry(entry, &class->locks_after, entry) {
- if (DEBUG_LOCKS_WARN_ON(!entry->class))
- return;
-
- print_lock_dependencies(entry->class, depth + 1);
-
- printk("%*s ... acquired at:\n",depth,"");
- print_stack_trace(&entry->trace, 2);
- printk("\n");
- }
-}
-
static void print_kernel_version(void)
{
printk("%s %.*s\n", init_utsname()->release,
@@ -898,22 +840,203 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
}
/*
+ * To keep the index wrap-around a cheap mask operation, the queue size is a power of 2
+ */
+#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
+#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
+
+/*
+ * The circular_queue and helpers are used to implement the
+ * breadth-first search (BFS) algorithm, with which we can build
+ * the shortest path from the next lock to be acquired back to a
+ * previously held lock, if a circular dependency exists between them.
+ */
+struct circular_queue {
+ unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+ unsigned int front, rear;
+};
+
+static struct circular_queue lock_cq;
+
+unsigned int max_bfs_queue_depth;
+
+static unsigned int lockdep_dependency_gen_id;
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+ cq->front = cq->rear = 0;
+ lockdep_dependency_gen_id++;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+ return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+ return ((cq->rear + 1) & CQ_MASK) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+ if (__cq_full(cq))
+ return -1;
+
+ cq->element[cq->rear] = elem;
+ cq->rear = (cq->rear + 1) & CQ_MASK;
+ return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+ if (__cq_empty(cq))
+ return -1;
+
+ *elem = cq->element[cq->front];
+ cq->front = (cq->front + 1) & CQ_MASK;
+ return 0;
+}
+
+static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
+{
+ return (cq->rear - cq->front) & CQ_MASK;
+}
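
The queue helpers above form a textbook power-of-two ring buffer: because MAX_CIRCULAR_QUEUE_SIZE is a power of 2, "(x + 1) & CQ_MASK" equals "(x + 1) % MAX_CIRCULAR_QUEUE_SIZE" but compiles to a single AND, and one slot is sacrificed so a full queue can be told apart from an empty one. A minimal standalone sketch of the same arithmetic (illustrative only, not part of the patch):

	#define QSIZE	8UL		/* must be a power of two */
	#define QMASK	(QSIZE - 1)

	struct ring {
		unsigned long elem[QSIZE];
		unsigned int front, rear;
	};

	/*
	 * empty:  front == rear
	 * full:   ((rear + 1) & QMASK) == front   (one slot sacrificed)
	 * count:  (rear - front) & QMASK -- correct even across index
	 *         wrap-around, since unsigned subtraction is already
	 *         modulo 2^32.
	 */
	static int ring_push(struct ring *q, unsigned long v)
	{
		if (((q->rear + 1) & QMASK) == q->front)
			return -1;			/* full */
		q->elem[q->rear] = v;
		q->rear = (q->rear + 1) & QMASK;
		return 0;
	}
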
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+ struct lock_list *parent)
+{
+ unsigned long nr;
+
+ nr = lock - list_entries;
+ WARN_ON(nr >= nr_list_entries);
+ lock->parent = parent;
+ lock->class->dep_gen_id = lockdep_dependency_gen_id;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+ unsigned long nr;
+
+ nr = lock - list_entries;
+ WARN_ON(nr >= nr_list_entries);
+ return lock->class->dep_gen_id == lockdep_dependency_gen_id;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+ return child->parent;
+}
+
+static inline int get_lock_depth(struct lock_list *child)
+{
+ int depth = 0;
+ struct lock_list *parent;
+
+ while ((parent = get_lock_parent(child))) {
+ child = parent;
+ depth++;
+ }
+ return depth;
+}
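
mark_lock_accessed() serves two purposes: the per-search generation counter (bumped by __cq_init()) acts as the "visited" mark without needing a clearing pass between searches, and the recorded parent pointers turn the visited entries into a BFS tree, from which both the path depth (get_lock_depth() above) and the shortest path itself (print_circular_bug() below) are recovered. A sketch of the path walk these helpers imply (hypothetical function, not in the patch):

	/*
	 * Walk from a found target back to the search root along the
	 * parent pointers recorded by mark_lock_accessed().
	 */
	static void walk_path_to_root(struct lock_list *target)
	{
		struct lock_list *p;

		for (p = target; p; p = get_lock_parent(p))
			print_lock_name(p->class);
	}
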
+
+static int __bfs(struct lock_list *source_entry,
+ void *data,
+ int (*match)(struct lock_list *entry, void *data),
+ struct lock_list **target_entry,
+ int forward)
+{
+ struct lock_list *entry;
+ struct list_head *head;
+ struct circular_queue *cq = &lock_cq;
+ int ret = 1;
+
+ if (match(source_entry, data)) {
+ *target_entry = source_entry;
+ ret = 0;
+ goto exit;
+ }
+
+ if (forward)
+ head = &source_entry->class->locks_after;
+ else
+ head = &source_entry->class->locks_before;
+
+ if (list_empty(head))
+ goto exit;
+
+ __cq_init(cq);
+ __cq_enqueue(cq, (unsigned long)source_entry);
+
+ while (!__cq_empty(cq)) {
+ struct lock_list *lock;
+
+ __cq_dequeue(cq, (unsigned long *)&lock);
+
+ if (!lock->class) {
+ ret = -2;
+ goto exit;
+ }
+
+ if (forward)
+ head = &lock->class->locks_after;
+ else
+ head = &lock->class->locks_before;
+
+ list_for_each_entry(entry, head, entry) {
+ if (!lock_accessed(entry)) {
+ unsigned int cq_depth;
+ mark_lock_accessed(entry, lock);
+ if (match(entry, data)) {
+ *target_entry = entry;
+ ret = 0;
+ goto exit;
+ }
+
+ if (__cq_enqueue(cq, (unsigned long)entry)) {
+ ret = -1;
+ goto exit;
+ }
+ cq_depth = __cq_get_elem_count(cq);
+ if (max_bfs_queue_depth < cq_depth)
+ max_bfs_queue_depth = cq_depth;
+ }
+ }
+ }
+exit:
+ return ret;
+}
+
+static inline int __bfs_forwards(struct lock_list *src_entry,
+ void *data,
+ int (*match)(struct lock_list *entry, void *data),
+ struct lock_list **target_entry)
+{
+ return __bfs(src_entry, data, match, target_entry, 1);
+}
+
+static inline int __bfs_backwards(struct lock_list *src_entry,
+ void *data,
+ int (*match)(struct lock_list *entry, void *data),
+ struct lock_list **target_entry)
+{
+ return __bfs(src_entry, data, match, target_entry, 0);
+}
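
__bfs() and the two direction wrappers above use a small return-code convention that every later caller in this patch relies on: 0 means a match was found and *target_entry points at it, 1 means the search ran to exhaustion without a match (*target_entry is left untouched), and negative values signal internal errors (-1: the circular queue overflowed, -2: a lock_list entry with a NULL class was encountered). A hypothetical caller dispatches on it like this (sketch only; check_noncircular() below is a real instance of the pattern):

	ret = __bfs_forwards(&root, data, match, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);	/* overflow or corruption */
	if (ret == 1)
		return 1;			/* exhausted, no match */
	/* ret == 0: target_entry points at the matching node */
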
+
+/*
* Recursive, forwards-direction lock-dependency checking, used for
* both noncyclic checking and for hardirq-unsafe/softirq-unsafe
* checking.
- *
- * (to keep the stackframe of the recursive functions small we
- * use these global variables, and we also mark various helper
- * functions as noinline.)
*/
-static struct held_lock *check_source, *check_target;
/*
* Print a dependency chain entry (this is only done when a deadlock
* has been detected):
*/
static noinline int
-print_circular_bug_entry(struct lock_list *target, unsigned int depth)
+print_circular_bug_entry(struct lock_list *target, int depth)
{
if (debug_locks_silent)
return 0;
@@ -930,11 +1053,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
* header first:
*/
static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth)
+print_circular_bug_header(struct lock_list *entry, unsigned int depth,
+ struct held_lock *check_src,
+ struct held_lock *check_tgt)
{
struct task_struct *curr = current;
- if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+ if (debug_locks_silent)
return 0;
printk("\n=======================================================\n");
@@ -943,9 +1068,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
printk( "-------------------------------------------------------\n");
printk("%s/%d is trying to acquire lock:\n",
curr->comm, task_pid_nr(curr));
- print_lock(check_source);
+ print_lock(check_src);
printk("\nbut task is already holding lock:\n");
- print_lock(check_target);
+ print_lock(check_tgt);
printk("\nwhich lock already depends on the new lock.\n\n");
printk("\nthe existing dependency chain (in reverse order) is:\n");
@@ -954,19 +1079,36 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
return 0;
}
-static noinline int print_circular_bug_tail(void)
+static inline int class_equal(struct lock_list *entry, void *data)
+{
+ return entry->class == data;
+}
+
+static noinline int print_circular_bug(struct lock_list *this,
+ struct lock_list *target,
+ struct held_lock *check_src,
+ struct held_lock *check_tgt)
{
struct task_struct *curr = current;
- struct lock_list this;
+ struct lock_list *parent;
+ int depth;
- if (debug_locks_silent)
+ if (!debug_locks_off_graph_unlock() || debug_locks_silent)
return 0;
- this.class = hlock_class(check_source);
- if (!save_trace(&this.trace))
+ if (!save_trace(&this->trace))
return 0;
- print_circular_bug_entry(&this, 0);
+ depth = get_lock_depth(target);
+
+ print_circular_bug_header(target, depth, check_src, check_tgt);
+
+ parent = get_lock_parent(target);
+
+ while (parent) {
+ print_circular_bug_entry(parent, --depth);
+ parent = get_lock_parent(parent);
+ }
printk("\nother info that might help us debug this:\n\n");
lockdep_print_held_locks(curr);
@@ -977,73 +1119,69 @@ static noinline int print_circular_bug_tail(void)
return 0;
}
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
+static noinline int print_bfs_bug(int ret)
{
if (!debug_locks_off_graph_unlock())
return 0;
- WARN_ON(1);
+ WARN(1, "lockdep bfs error: %d\n", ret);
return 0;
}
-unsigned long __lockdep_count_forward_deps(struct lock_class *class,
- unsigned int depth)
+static int noop_count(struct lock_list *entry, void *data)
{
- struct lock_list *entry;
- unsigned long ret = 1;
+ (*(unsigned long *)data)++;
+ return 0;
+}
- if (lockdep_dependency_visit(class, depth))
- return 0;
+unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+{
+ unsigned long count = 0;
+ struct lock_list *uninitialized_var(target_entry);
- /*
- * Recurse this class's dependency list:
- */
- list_for_each_entry(entry, &class->locks_after, entry)
- ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+ __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
- return ret;
+ return count;
}
-
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
unsigned long ret, flags;
+ struct lock_list this;
+
+ this.parent = NULL;
+ this.class = class;
local_irq_save(flags);
__raw_spin_lock(&lockdep_lock);
- ret = __lockdep_count_forward_deps(class, 0);
+ ret = __lockdep_count_forward_deps(&this);
__raw_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
return ret;
}
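
Counting reuses the search machinery instead of a dedicated walker: noop_count() always reports "no match", so the BFS visits every node reachable from the root exactly once while the visitor bumps the accumulator passed through the opaque data pointer. Note that __bfs() calls match() on the root entry before walking its children, so the starting class itself is included in the count, mirroring the old recursive version which initialized its result to 1. A compact sketch of the visitor-with-accumulator idiom (hypothetical names, same shape as the code above):

	/* Visitor: count every visited entry, never terminate the walk. */
	static int count_visitor(struct lock_list *entry, void *data)
	{
		(*(unsigned long *)data)++;
		return 0;	/* 0 == "no match", keep searching */
	}

	static unsigned long count_reachable(struct lock_list *root)
	{
		unsigned long n = 0;
		struct lock_list *unused;

		__bfs_forwards(root, &n, count_visitor, &unused);
		return n;	/* includes the root class itself */
	}
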
-unsigned long __lockdep_count_backward_deps(struct lock_class *class,
- unsigned int depth)
+unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
- struct lock_list *entry;
- unsigned long ret = 1;
+ unsigned long count = 0;
+ struct lock_list *uninitialized_var(target_entry);
- if (lockdep_dependency_visit(class, depth))
- return 0;
- /*
- * Recurse this class's dependency list:
- */
- list_for_each_entry(entry, &class->locks_before, entry)
- ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+ __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
- return ret;
+ return count;
}
unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
unsigned long ret, flags;
+ struct lock_list this;
+
+ this.parent = NULL;
+ this.class = class;
local_irq_save(flags);
__raw_spin_lock(&lockdep_lock);
- ret = __lockdep_count_backward_deps(class, 0);
+ ret = __lockdep_count_backward_deps(&this);
__raw_spin_unlock(&lockdep_lock);
local_irq_restore(flags);
@@ -1055,29 +1193,16 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
* lead to <target>. Print an error and return 0 if it does.
*/
static noinline int
-check_noncircular(struct lock_class *source, unsigned int depth)
+check_noncircular(struct lock_list *root, struct lock_class *target,
+ struct lock_list **target_entry)
{
- struct lock_list *entry;
+ int result;
- if (lockdep_dependency_visit(source, depth))
- return 1;
+ debug_atomic_inc(&nr_cyclic_checks);
- debug_atomic_inc(&nr_cyclic_check_recursions);
- if (depth > max_recursion_depth)
- max_recursion_depth = depth;
- if (depth >= RECURSION_LIMIT)
- return print_infinite_recursion_bug();
- /*
- * Check this lock's dependency list:
- */
- list_for_each_entry(entry, &source->locks_after, entry) {
- if (entry->class == hlock_class(check_target))
- return print_circular_bug_header(entry, depth+1);
- debug_atomic_inc(&nr_cyclic_checks);
- if (!check_noncircular(entry->class, depth+1))
- return print_circular_bug_entry(entry, depth+1);
- }
- return 1;
+ result = __bfs_forwards(root, target, class_equal, target_entry);
+
+ return result;
}
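
Cycle detection is thus a single forward BFS from the class about to be acquired (@root) looking for the class already held (@target), replacing the depth-limited recursion. A worked miniature with hypothetical lock classes A and B:

	/*
	 * Held: A; about to acquire B, and the graph already contains
	 * a path B -> ... -> A (B must be taken before A somewhere).
	 *
	 *   root.class = hlock_class(next) = B
	 *   target     = hlock_class(prev) = A
	 *
	 * The forward BFS from B reaches A, so ret == 0 and the new
	 * dependency A -> B would close the cycle; print_circular_bug()
	 * then reports the path recorded in the BFS tree.
	 */
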
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
@@ -1086,103 +1211,121 @@ check_noncircular(struct lock_class *source, unsigned int depth)
* proving that two subgraphs can be connected by a new dependency
* without creating any illegal irq-safe -> irq-unsafe lock dependency.
*/
-static enum lock_usage_bit find_usage_bit;
-static struct lock_class *forwards_match, *backwards_match;
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+ return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
/*
* Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
*
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
*
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
*/
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+ struct lock_list **target_entry)
{
- struct lock_list *entry;
- int ret;
-
- if (lockdep_dependency_visit(source, depth))
- return 1;
-
- if (depth > max_recursion_depth)
- max_recursion_depth = depth;
- if (depth >= RECURSION_LIMIT)
- return print_infinite_recursion_bug();
+ int result;
debug_atomic_inc(&nr_find_usage_forwards_checks);
- if (source->usage_mask & (1 << find_usage_bit)) {
- forwards_match = source;
- return 2;
- }
- /*
- * Check this lock's dependency list:
- */
- list_for_each_entry(entry, &source->locks_after, entry) {
- debug_atomic_inc(&nr_find_usage_forwards_recursions);
- ret = find_usage_forwards(entry->class, depth+1);
- if (ret == 2 || ret == 0)
- return ret;
- }
- return 1;
+ result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+ return result;
}
/*
* Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
*
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
*
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
*/
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+ struct lock_list **target_entry)
{
- struct lock_list *entry;
- int ret;
+ int result;
- if (lockdep_dependency_visit(source, depth))
- return 1;
+ debug_atomic_inc(&nr_find_usage_backwards_checks);
- if (!__raw_spin_is_locked(&lockdep_lock))
- return DEBUG_LOCKS_WARN_ON(1);
+ result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
- if (depth > max_recursion_depth)
- max_recursion_depth = depth;
- if (depth >= RECURSION_LIMIT)
- return print_infinite_recursion_bug();
+ return result;
+}
- debug_atomic_inc(&nr_find_usage_backwards_checks);
- if (source->usage_mask & (1 << find_usage_bit)) {
- backwards_match = source;
- return 2;
- }
+static void print_lock_class_header(struct lock_class *class, int depth)
+{
+ int bit;
- if (!source && debug_locks_off_graph_unlock()) {
- WARN_ON(1);
- return 0;
- }
+ printk("%*s->", depth, "");
+ print_lock_name(class);
+ printk(" ops: %lu", class->ops);
+ printk(" {\n");
- /*
- * Check this lock's dependency list:
- */
- list_for_each_entry(entry, &source->locks_before, entry) {
- debug_atomic_inc(&nr_find_usage_backwards_recursions);
- ret = find_usage_backwards(entry->class, depth+1);
- if (ret == 2 || ret == 0)
- return ret;
+ for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+ if (class->usage_mask & (1 << bit)) {
+ int len = depth;
+
+ len += printk("%*s %s", depth, "", usage_str[bit]);
+ len += printk(" at:\n");
+ print_stack_trace(class->usage_traces + bit, len);
+ }
}
- return 1;
+ printk("%*s }\n", depth, "");
+
+ printk("%*s ... key at: ",depth,"");
+ print_ip_sym((unsigned long)class->key);
+}
+
+/*
+ * printk the shortest lock dependencies from @leaf back to @root, in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+ struct lock_list *root)
+{
+ struct lock_list *entry = leaf;
+ int depth;
+
+ /* compute the depth from the tree generated by BFS */
+ depth = get_lock_depth(leaf);
+
+ do {
+ print_lock_class_header(entry->class, depth);
+ printk("%*s ... acquired at:\n", depth, "");
+ print_stack_trace(&entry->trace, 2);
+ printk("\n");
+
+ if (depth == 0 && (entry != root)) {
+ printk("lockdep:%s bad BFS generated tree\n", __func__);
+ break;
+ }
+
+ entry = get_lock_parent(entry);
+ depth--;
+ } while (entry && (depth >= 0));
+
+ return;
}
static int
print_bad_irq_dependency(struct task_struct *curr,
+ struct lock_list *prev_root,
+ struct lock_list *next_root,
+ struct lock_list *backwards_entry,
+ struct lock_list *forwards_entry,
struct held_lock *prev,
struct held_lock *next,
enum lock_usage_bit bit1,
@@ -1215,26 +1358,32 @@ print_bad_irq_dependency(struct task_struct *curr,
printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
irqclass);
- print_lock_name(backwards_match);
+ print_lock_name(backwards_entry->class);
printk("\n... which became %s-irq-safe at:\n", irqclass);
- print_stack_trace(backwards_match->usage_traces + bit1, 1);
+ print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
printk("\nto a %s-irq-unsafe lock:\n", irqclass);
- print_lock_name(forwards_match);
+ print_lock_name(forwards_entry->class);
printk("\n... which became %s-irq-unsafe at:\n", irqclass);
printk("...");
- print_stack_trace(forwards_match->usage_traces + bit2, 1);
+ print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
printk("\nother info that might help us debug this:\n\n");
lockdep_print_held_locks(curr);
- printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
- print_lock_dependencies(backwards_match, 0);
+ printk("\nthe dependencies between %s-irq-safe lock", irqclass);
+ printk(" and the holding lock:\n");
+ if (!save_trace(&prev_root->trace))
+ return 0;
+ print_shortest_lock_dependencies(backwards_entry, prev_root);
- printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
- print_lock_dependencies(forwards_match, 0);
+ printk("\nthe dependencies between the lock to be acquired");
+ printk(" and %s-irq-unsafe lock:\n", irqclass);
+ if (!save_trace(&next_root->trace))
+ return 0;
+ print_shortest_lock_dependencies(forwards_entry, next_root);
printk("\nstack backtrace:\n");
dump_stack();
@@ -1248,19 +1397,30 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
enum lock_usage_bit bit_forwards, const char *irqclass)
{
int ret;
+ struct lock_list this, that;
+ struct lock_list *uninitialized_var(target_entry);
+ struct lock_list *uninitialized_var(target_entry1);
- find_usage_bit = bit_backwards;
- /* fills in <backwards_match> */
- ret = find_usage_backwards(hlock_class(prev), 0);
- if (!ret || ret == 1)
+ this.parent = NULL;
+
+ this.class = hlock_class(prev);
+ ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- find_usage_bit = bit_forwards;
- ret = find_usage_forwards(hlock_class(next), 0);
- if (!ret || ret == 1)
+ that.parent = NULL;
+ that.class = hlock_class(next);
+ ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- /* ret == 2 */
- return print_bad_irq_dependency(curr, prev, next,
+
+ return print_bad_irq_dependency(curr, &this, &that,
+ target_entry, target_entry1,
+ prev, next,
bit_backwards, bit_forwards, irqclass);
}
@@ -1472,6 +1632,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
{
struct lock_list *entry;
int ret;
+ struct lock_list this;
+ struct lock_list *uninitialized_var(target_entry);
/*
* Prove that the new <prev> -> <next> dependency would not
@@ -1482,10 +1644,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
- * We are using global variables to control the recursion, to
- * keep the stackframe size of the recursive functions low:
*/
- check_source = next;
- check_target = prev;
- if (!(check_noncircular(hlock_class(next), 0)))
- return print_circular_bug_tail();
+ this.class = hlock_class(next);
+ this.parent = NULL;
+ ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+ if (unlikely(!ret))
+ return print_circular_bug(&this, target_entry, next, prev);
+ else if (unlikely(ret < 0))
+ return print_bfs_bug(ret);
if (!check_prev_add_irq(curr, prev, next))
return 0;
@@ -1884,7 +2049,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
* print irq inversion bug:
*/
static int
-print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
+print_irq_inversion_bug(struct task_struct *curr,
+ struct lock_list *root, struct lock_list *other,
struct held_lock *this, int forwards,
const char *irqclass)
{
@@ -1902,17 +2068,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
else
printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
- print_lock_name(other);
+ print_lock_name(other->class);
printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
printk("\nother info that might help us debug this:\n");
lockdep_print_held_locks(curr);
- printk("\nthe first lock's dependencies:\n");
- print_lock_dependencies(hlock_class(this), 0);
-
- printk("\nthe second lock's dependencies:\n");
- print_lock_dependencies(other, 0);
+ printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+ if (!save_trace(&root->trace))
+ return 0;
+ print_shortest_lock_dependencies(other, root);
printk("\nstack backtrace:\n");
dump_stack();
@@ -1929,14 +2094,19 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit, const char *irqclass)
{
int ret;
-
- find_usage_bit = bit;
- /* fills in <forwards_match> */
- ret = find_usage_forwards(hlock_class(this), 0);
- if (!ret || ret == 1)
+ struct lock_list root;
+ struct lock_list *uninitialized_var(target_entry);
+
+ root.parent = NULL;
+ root.class = hlock_class(this);
+ ret = find_usage_forwards(&root, bit, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+ return print_irq_inversion_bug(curr, &root, target_entry,
+ this, 1, irqclass);
}
/*
@@ -1948,14 +2118,19 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit bit, const char *irqclass)
{
int ret;
-
- find_usage_bit = bit;
- /* fills in <backwards_match> */
- ret = find_usage_backwards(hlock_class(this), 0);
- if (!ret || ret == 1)
+ struct lock_list root;
+ struct lock_list *uninitialized_var(target_entry);
+
+ root.parent = NULL;
+ root.class = hlock_class(this);
+ ret = find_usage_backwards(&root, bit, &target_entry);
+ if (ret < 0)
+ return print_bfs_bug(ret);
+ if (ret == 1)
return ret;
- return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+ return print_irq_inversion_bug(curr, &root, target_entry,
+ this, 0, irqclass);
}
void print_irqtrace_events(struct task_struct *curr)
@@ -2530,13 +2705,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
*/
static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check, int hardirqs_off,
- struct lockdep_map *nest_lock, unsigned long ip)
+ struct lockdep_map *nest_lock, unsigned long ip,
+ int references)
{
struct task_struct *curr = current;
struct lock_class *class = NULL;
struct held_lock *hlock;
unsigned int depth, id;
int chain_head = 0;
+ int class_idx;
u64 chain_key;
if (!prove_locking)
@@ -2584,10 +2761,24 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
return 0;
+ class_idx = class - lock_classes + 1;
+
+ if (depth) {
+ hlock = curr->held_locks + depth - 1;
+ if (hlock->class_idx == class_idx && nest_lock) {
+ if (hlock->references)
+ hlock->references++;
+ else
+ hlock->references = 2;
+
+ return 1;
+ }
+ }
+
hlock = curr->held_locks + depth;
if (DEBUG_LOCKS_WARN_ON(!class))
return 0;
- hlock->class_idx = class - lock_classes + 1;
+ hlock->class_idx = class_idx;
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->nest_lock = nest_lock;
@@ -2595,6 +2786,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
hlock->read = read;
hlock->check = check;
hlock->hardirqs_off = !!hardirqs_off;
+ hlock->references = references;
#ifdef CONFIG_LOCK_STAT
hlock->waittime_stamp = 0;
hlock->holdtime_stamp = sched_clock();
@@ -2703,6 +2895,30 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
return 1;
}
+static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+{
+ if (hlock->instance == lock)
+ return 1;
+
+ if (hlock->references) {
+ struct lock_class *class = lock->class_cache;
+
+ if (!class)
+ class = look_up_lock_class(lock, 0);
+
+ if (DEBUG_LOCKS_WARN_ON(!class))
+ return 0;
+
+ if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
+ return 0;
+
+ if (hlock->class_idx == class - lock_classes + 1)
+ return 1;
+ }
+
+ return 0;
+}
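
match_held_lock() is the flip side of the reference counting added to __lock_acquire() above: when a lock of an already-held class is acquired under the same nest_lock, no new held_lock slot is consumed; the existing entry's reference count is bumped instead, so the release and contention paths must match by class, not only by instance. The effect, sketched as hypothetical caller code (spin_lock_nest_lock() is the existing nesting annotation; outer, i1..i3 are made-up locks):

	spin_lock(&outer);			/* held_locks[d]   = outer	      */
	spin_lock_nest_lock(&i1->lock, &outer);	/* held_locks[d+1] = i1, references 0 */
	spin_lock_nest_lock(&i2->lock, &outer);	/* same class under nest_lock:	      */
						/* held_locks[d+1].references = 2     */
	spin_lock_nest_lock(&i3->lock, &outer);	/* references = 3, depth unchanged    */
	/* ... */
	spin_unlock(&i3->lock);			/* references = 2, stack intact	      */

This keeps the held-lock stack bounded even when many same-class locks (e.g. every inode of a filesystem) are taken under one serializing lock.
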
+
static int
__lock_set_class(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, unsigned int subclass,
@@ -2726,7 +2942,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
@@ -2745,7 +2961,8 @@ found_it:
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->nest_lock, hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip,
+ hlock->references))
return 0;
}
@@ -2784,20 +3001,34 @@ lock_release_non_nested(struct task_struct *curr,
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
return print_unlock_inbalance_bug(curr, lock, ip);
found_it:
- lock_release_holdtime(hlock);
+ if (hlock->instance == lock)
+ lock_release_holdtime(hlock);
+
+ if (hlock->references) {
+ hlock->references--;
+ if (hlock->references) {
+ /*
+ * We had, and after removing one, still have
+ * references, the current lock stack is still
+ * valid. We're done!
+ */
+ return 1;
+ }
+ }
/*
* We have the right lock to unlock, 'hlock' points to it.
* Now we remove it from the stack, and add back the other
* entries (if any), recalculating the hash along the way:
*/
curr->lockdep_depth = i;
curr->curr_chain_key = hlock->prev_chain_key;
@@ -2806,7 +3037,8 @@ found_it:
if (!__lock_acquire(hlock->instance,
hlock_class(hlock)->subclass, hlock->trylock,
hlock->read, hlock->check, hlock->hardirqs_off,
- hlock->nest_lock, hlock->acquire_ip))
+ hlock->nest_lock, hlock->acquire_ip,
+ hlock->references))
return 0;
}
@@ -2836,7 +3068,7 @@ static int lock_release_nested(struct task_struct *curr,
/*
* Is the unlock non-nested:
*/
- if (hlock->instance != lock)
+ if (hlock->instance != lock || hlock->references)
return lock_release_non_nested(curr, lock, ip);
curr->lockdep_depth--;
@@ -2881,6 +3113,21 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
check_chain_key(curr);
}
+static int __lock_is_held(struct lockdep_map *lock)
+{
+ struct task_struct *curr = current;
+ int i;
+
+ for (i = 0; i < curr->lockdep_depth; i++) {
+ struct held_lock *hlock = curr->held_locks + i;
+
+ if (match_held_lock(hlock, lock))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* Check whether we follow the irq-flags state precisely:
*/
@@ -2957,7 +3204,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
current->lockdep_recursion = 1;
__lock_acquire(lock, subclass, trylock, read, check,
- irqs_disabled_flags(flags), nest_lock, ip);
+ irqs_disabled_flags(flags), nest_lock, ip, 0);
current->lockdep_recursion = 0;
raw_local_irq_restore(flags);
}
@@ -2982,6 +3229,26 @@ void lock_release(struct lockdep_map *lock, int nested,
}
EXPORT_SYMBOL_GPL(lock_release);
+int lock_is_held(struct lockdep_map *lock)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (unlikely(current->lockdep_recursion))
+ return ret;
+
+ raw_local_irq_save(flags);
+ check_flags(flags);
+
+ current->lockdep_recursion = 1;
+ ret = __lock_is_held(lock);
+ current->lockdep_recursion = 0;
+ raw_local_irq_restore(flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(lock_is_held);
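
The new lock_is_held() primitive lets subsystems assert their locking contract at runtime. A hedged sketch of an assertion wrapper built on it, for a lock type with an embedded dep_map (this macro itself is not part of the patch; a helper of this shape later became lockdep_assert_held()):

	/* Warn if the current task does not hold the lock. */
	#define assert_lock_held(l) \
		WARN_ON(debug_locks && !lock_is_held(&(l)->dep_map))
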
+
void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
{
current->lockdep_reclaim_gfp = gfp_mask;
@@ -3041,7 +3308,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
@@ -3049,6 +3316,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
return;
found_it:
+ if (hlock->instance != lock)
+ return;
+
hlock->waittime_stamp = sched_clock();
contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
@@ -3088,7 +3358,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
*/
if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
break;
- if (hlock->instance == lock)
+ if (match_held_lock(hlock, lock))
goto found_it;
prev_hlock = hlock;
}
@@ -3096,6 +3366,9 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
return;
found_it:
+ if (hlock->instance != lock)
+ return;
+
cpu = smp_processor_id();
if (hlock->waittime_stamp) {
now = sched_clock();
@@ -3326,7 +3599,12 @@ void __init lockdep_info(void)
sizeof(struct list_head) * CLASSHASH_SIZE +
sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
- sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
+ sizeof(struct list_head) * CHAINHASH_SIZE
+#ifdef CONFIG_PROVE_LOCKING
+ + sizeof(struct circular_queue)
+#endif
+ ) / 1024
+ );
printk(" per task-struct memory footprint: %lu bytes\n",
sizeof(struct held_lock) * MAX_LOCK_DEPTH);