Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--  kernel/locking/lockdep.c  117
1 file changed, 62 insertions(+), 55 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5fa1324a4f29..023386338269 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -49,6 +49,7 @@
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jhash.h>
+#include <linux/nmi.h>
#include <asm/sections.h>
@@ -555,9 +556,9 @@ static void print_lock(struct held_lock *hlock)
return;
}
+ printk(KERN_CONT "%p", hlock->instance);
print_lock_name(lock_classes + class_idx - 1);
- printk(KERN_CONT ", at: [<%p>] %pS\n",
- (void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
+ printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
}
static void lockdep_print_held_locks(struct task_struct *curr)
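The print_lock() hunk above removes a redundancy: %pS already renders acquire_ip as symbol+offset, so also printing it through [<%p>] conveyed nothing extra (and, as of the v4.15 pointer hashing, would print a hash rather than an address anyway). The new leading %p prints the lock instance. A minimal, hypothetical demo module contrasting the old and new format strings (the symbol in the output will vary):

        #include <linux/kernel.h>
        #include <linux/module.h>

        static int __init fmt_demo_init(void)
        {
                void *ip = (void *)_RET_IP_;    /* stand-in for hlock->acquire_ip */

                /* Old format: the pointer printed twice, raw and via %pS. */
                printk(KERN_INFO "old: at: [<%p>] %pS\n", ip, ip);
                /* New format: %pS alone, e.g. "at: do_init_module+0x5c/0x210". */
                printk(KERN_INFO "new: at: %pS\n", ip);
                return 0;
        }
        module_init(fmt_demo_init);
        MODULE_LICENSE("GPL");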
@@ -647,18 +648,12 @@ static int count_matching_names(struct lock_class *new_class)
return count + 1;
}
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
static inline struct lock_class *
-look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(const struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
struct hlist_head *hash_head;
struct lock_class *class;
- bool is_static = false;
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
debug_locks_off();
@@ -671,24 +666,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
}
/*
- * Static locks do not have their class-keys yet - for them the key
- * is the lock object itself. If the lock is in the per cpu area,
- * the canonical address of the lock (per cpu offset removed) is
- * used.
+ * If it is not initialised then it has never been locked,
+ * so it won't be present in the hash table.
*/
- if (unlikely(!lock->key)) {
- unsigned long can_addr, addr = (unsigned long)lock;
-
- if (__is_kernel_percpu_address(addr, &can_addr))
- lock->key = (void *)can_addr;
- else if (__is_module_percpu_address(addr, &can_addr))
- lock->key = (void *)can_addr;
- else if (static_obj(lock))
- lock->key = (void *)lock;
- else
- return ERR_PTR(-EINVAL);
- is_static = true;
- }
+ if (unlikely(!lock->key))
+ return NULL;
/*
* NOTE: the class-key must be unique. For dynamic locks, a static
@@ -720,7 +702,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
}
}
- return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+ return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+ unsigned long can_addr, addr = (unsigned long)lock;
+
+ if (__is_kernel_percpu_address(addr, &can_addr))
+ lock->key = (void *)can_addr;
+ else if (__is_module_percpu_address(addr, &can_addr))
+ lock->key = (void *)can_addr;
+ else if (static_obj(lock))
+ lock->key = (void *)lock;
+ else {
+ /* Debug-check: all keys must be persistent! */
+ debug_locks_off();
+ pr_err("INFO: trying to register non-static key.\n");
+ pr_err("the code is fine but needs lockdep annotation.\n");
+ pr_err("turning off the locking correctness validator.\n");
+ dump_stack();
+ return false;
+ }
+
+ return true;
}
/*
@@ -738,18 +748,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
class = look_up_lock_class(lock, subclass);
- if (likely(!IS_ERR_OR_NULL(class)))
+ if (likely(class))
goto out_set_class_cache;
- /*
- * Debug-check: all keys must be persistent!
- */
- if (IS_ERR(class)) {
- debug_locks_off();
- printk("INFO: trying to register non-static key.\n");
- printk("the code is fine but needs lockdep annotation.\n");
- printk("turning off the locking correctness validator.\n");
- dump_stack();
+ if (!lock->key) {
+ if (!assign_lock_key(lock))
+ return NULL;
+ } else if (!static_obj(lock->key)) {
return NULL;
}
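Taken together, the hunks above untangle an overloaded return convention: look_up_lock_class() used to return ERR_PTR(-EINVAL) for "non-static key", NULL for "not found", and a valid pointer otherwise, forcing every caller through IS_ERR_OR_NULL(). Now it only looks up, returning NULL on a miss, while register_lock_class() decides between assigning a key (assign_lock_key()) and rejecting a non-static one. For reference, a userspace sketch of the ERR_PTR encoding from linux/err.h that the old callers depended on:

        /* Userspace sketch of the kernel's ERR_PTR convention: small
         * negative errno values are folded into the top page of the
         * address space, which no valid pointer can occupy. */
        #include <stdio.h>

        #define MAX_ERRNO       4095

        static inline void *ERR_PTR(long error) { return (void *)error; }
        static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
        static inline int IS_ERR(const void *ptr)
        {
                return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
        }
        static inline int IS_ERR_OR_NULL(const void *ptr)
        {
                return !ptr || IS_ERR(ptr);
        }

        int main(void)
        {
                void *err = ERR_PTR(-22);       /* -EINVAL */

                /* Both the error encoding and NULL trip IS_ERR_OR_NULL(),
                 * which is why every caller had to use it before this patch. */
                printf("IS_ERR(err)=%d PTR_ERR(err)=%ld\n", IS_ERR(err), PTR_ERR(err));
                printf("IS_ERR_OR_NULL(NULL)=%d\n", IS_ERR_OR_NULL(NULL));
                return 0;
        }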
@@ -803,7 +808,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
if (verbose(class)) {
graph_unlock();
- printk("\nnew class %p: %s", class->key, class->name);
+ printk("\nnew class %px: %s", class->key, class->name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
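The %p -> %px conversions here and in the hunks below exist because, since v4.15, plain %p hashes pointer values before printing to avoid leaking kernel addresses; %px opts back into the raw value for debug output, such as these lockdep reports, where the real address is the point. A hypothetical demo module contrasting the two (addresses shown in the comments are illustrative):

        #include <linux/module.h>
        #include <linux/printk.h>

        static int lock_key;    /* stand-in for a lock_class key */

        static int __init px_demo_init(void)
        {
                pr_info("hashed: %p\n", &lock_key);     /* e.g. 00000000604cd3f9 */
                pr_info("raw:    %px\n", &lock_key);    /* e.g. ffffffffc0a8e010 */
                return 0;
        }
        module_init(px_demo_init);
        MODULE_LICENSE("GPL");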
@@ -1402,7 +1407,7 @@ static void print_lock_class_header(struct lock_class *class, int depth)
}
printk("%*s }\n", depth, "");
- printk("%*s ... key at: [<%p>] %pS\n",
+ printk("%*s ... key at: [<%px>] %pS\n",
depth, "", class->key, class->key);
}
@@ -2335,7 +2340,7 @@ cache_hit:
if (very_verbose(class)) {
printk("\nhash chain already cached, key: "
- "%016Lx tail class: [%p] %s\n",
+ "%016Lx tail class: [%px] %s\n",
(unsigned long long)chain_key,
class->key, class->name);
}
@@ -2344,7 +2349,7 @@ cache_hit:
}
if (very_verbose(class)) {
- printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
+ printk("\nnew hash chain, key: %016Lx tail class: [%px] %s\n",
(unsigned long long)chain_key, class->key, class->name);
}
@@ -2671,16 +2676,16 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
void print_irqtrace_events(struct task_struct *curr)
{
printk("irq event stamp: %u\n", curr->irq_events);
- printk("hardirqs last enabled at (%u): [<%p>] %pS\n",
+ printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
(void *)curr->hardirq_enable_ip);
- printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
+ printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
(void *)curr->hardirq_disable_ip);
- printk("softirqs last enabled at (%u): [<%p>] %pS\n",
+ printk("softirqs last enabled at (%u): [<%px>] %pS\n",
curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
(void *)curr->softirq_enable_ip);
- printk("softirqs last disabled at (%u): [<%p>] %pS\n",
+ printk("softirqs last disabled at (%u): [<%px>] %pS\n",
curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
(void *)curr->softirq_disable_ip);
}
@@ -3202,7 +3207,7 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
* Sanity check, the lock-class key must be persistent:
*/
if (!static_obj(key)) {
- printk("BUG: key %p not in .data!\n", key);
+ printk("BUG: key %px not in .data!\n", key);
/*
* What it says above ^^^^^, I suggest you read it.
*/
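This "not in .data!" check fires when a struct lock_class_key lives in non-static storage (stack or heap), since lockdep keys must outlive every lock ever classified by them. A sketch of the standard remedy for dynamically allocated locks; foo_init() and foo_lock_key are illustrative names:

        #include <linux/lockdep.h>
        #include <linux/spinlock.h>

        static struct lock_class_key foo_lock_key;      /* OK: static storage */

        static void foo_init(spinlock_t *lock)
        {
                spin_lock_init(lock);
                /* Give dynamically created locks of this type their own
                 * class, keyed by an object that is never freed. */
                lockdep_set_class(lock, &foo_lock_key);
        }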
@@ -3272,7 +3277,7 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
return 0;
}
-static int __lock_is_held(struct lockdep_map *lock, int read);
+static int __lock_is_held(const struct lockdep_map *lock, int read);
/*
* This gets called for every mutex_lock*()/spin_lock*() operation.
@@ -3317,7 +3322,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
}
atomic_inc((atomic_t *)&class->ops);
if (very_verbose(class)) {
- printk("\nacquire class [%p] %s", class->key, class->name);
+ printk("\nacquire class [%px] %s", class->key, class->name);
if (class->name_version > 1)
printk(KERN_CONT "#%d", class->name_version);
printk(KERN_CONT "\n");
@@ -3481,13 +3486,14 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
return 0;
}
-static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+static int match_held_lock(const struct held_lock *hlock,
+ const struct lockdep_map *lock)
{
if (hlock->instance == lock)
return 1;
if (hlock->references) {
- struct lock_class *class = lock->class_cache[0];
+ const struct lock_class *class = lock->class_cache[0];
if (!class)
class = look_up_lock_class(lock, 0);
@@ -3498,7 +3504,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
* Clearly if the lock hasn't been acquired _ever_, we're not
* holding it either, so report failure.
*/
- if (IS_ERR_OR_NULL(class))
+ if (!class)
return 0;
/*
@@ -3723,7 +3729,7 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
return 1;
}
-static int __lock_is_held(struct lockdep_map *lock, int read)
+static int __lock_is_held(const struct lockdep_map *lock, int read)
{
struct task_struct *curr = current;
int i;
@@ -3937,7 +3943,7 @@ void lock_release(struct lockdep_map *lock, int nested,
}
EXPORT_SYMBOL_GPL(lock_release);
-int lock_is_held_type(struct lockdep_map *lock, int read)
+int lock_is_held_type(const struct lockdep_map *lock, int read)
{
unsigned long flags;
int ret = 0;
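The const qualifiers threaded through these hunks (look_up_lock_class(), match_held_lock(), __lock_is_held(), lock_is_held_type()) constify the read-only query path, so lock assertions can be made on objects reachable only through const pointers. A hypothetical caller that this enables; struct foo and foo_value_locked() are illustrative:

        #include <linux/lockdep.h>
        #include <linux/spinlock.h>

        struct foo {
                spinlock_t lock;
                int value;
        };

        static int foo_value_locked(const struct foo *f)
        {
                /* lockdep_assert_held() ends up in lock_is_held(), which
                 * now accepts a const struct lockdep_map *, so a const
                 * object compiles cleanly here. */
                lockdep_assert_held(&f->lock);
                return f->value;
        }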
@@ -4294,7 +4300,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* If the class exists we look it up and zap it:
*/
class = look_up_lock_class(lock, j);
- if (!IS_ERR_OR_NULL(class))
+ if (class)
zap_class(class);
}
/*
@@ -4370,7 +4376,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
pr_warn("WARNING: held lock freed!\n");
print_kernel_ident();
pr_warn("-------------------------\n");
- pr_warn("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
+ pr_warn("%s/%d is freeing memory %px-%px, with a lock still held there!\n",
curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
print_lock(hlock);
lockdep_print_held_locks(curr);
@@ -4490,6 +4496,7 @@ retry:
if (!unlock)
if (read_trylock(&tasklist_lock))
unlock = 1;
+ touch_nmi_watchdog();
} while_each_thread(g, p);
pr_warn("\n");
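The new touch_nmi_watchdog() call (paired with the linux/nmi.h include at the top of the diff) keeps the hard-lockup detector quiet while debug_show_all_locks() walks every task in the system, which on large machines can exceed the watchdog threshold. The general pattern, sketched with hypothetical helpers:

        #include <linux/nmi.h>

        extern void dump_one_thing(int i);      /* placeholder for slow work */

        static void dump_many_things(int count)
        {
                int i;

                for (i = 0; i < count; i++) {
                        dump_one_thing(i);
                        /* Still making progress -- don't let the hard-lockup
                         * detector fire because of this loop itself. */
                        touch_nmi_watchdog();
                }
        }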