author     Ingo Molnar <mingo@kernel.org>  2012-10-24 10:20:57 +0200
committer  Ingo Molnar <mingo@kernel.org>  2012-10-24 10:20:57 +0200
commit     ef8c029fa793423439e67ef0416b220d3fa3321a (patch)
tree       4199cefa6e1dcad1783040755246a14371f029af /lib/spinlock_debug.c
parent     Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core (diff)
parent     perf, cpu hotplug: Use cached value of smp_processor_id() (diff)
Merge branch 'perf/urgent' into perf/core
Pick up v3.7-rc2 and fixes before applying more patches.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'lib/spinlock_debug.c')
-rw-r--r--  lib/spinlock_debug.c  32
1 file changed, 18 insertions(+), 14 deletions(-)
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index eb10578ae055..0374a596cffa 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -107,23 +107,27 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
 {
         u64 i;
         u64 loops = loops_per_jiffy * HZ;
-        int print_once = 1;
 
-        for (;;) {
-                for (i = 0; i < loops; i++) {
-                        if (arch_spin_trylock(&lock->raw_lock))
-                                return;
-                        __delay(1);
-                }
-                /* lockup suspected: */
-                if (print_once) {
-                        print_once = 0;
-                        spin_dump(lock, "lockup suspected");
+        for (i = 0; i < loops; i++) {
+                if (arch_spin_trylock(&lock->raw_lock))
+                        return;
+                __delay(1);
+        }
+        /* lockup suspected: */
+        spin_dump(lock, "lockup suspected");
 #ifdef CONFIG_SMP
-                        trigger_all_cpu_backtrace();
+        trigger_all_cpu_backtrace();
 #endif
-                }
-        }
+
+        /*
+         * The trylock above was causing a livelock. Give the lower level arch
+         * specific lock code a chance to acquire the lock. We have already
+         * printed a warning/backtrace at this point. The non-debug arch
+         * specific code might actually succeed in acquiring the lock. If it is
+         * not successful, the end-result is the same - there is no forward
+         * progress.
+         */
+        arch_spin_lock(&lock->raw_lock);
 }
 
 void do_raw_spin_lock(raw_spinlock_t *lock)
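
The effect of the change above: __spin_lock_debug() now bounds the trylock spin, reports the suspected lockup once, and then falls back to a plain arch_spin_lock() so the arch-level lock code gets a chance to acquire the lock instead of the debug wrapper livelocking on repeated trylocks. Below is a minimal user-space sketch of that same bounded-trylock-then-block pattern, assuming C11 atomics; struct debug_lock, debug_spin_lock(), debug_spin_unlock() and DEBUG_LOOPS are hypothetical names for illustration only, not part of the kernel patch, and the __delay(1) pacing between attempts is omitted.

/* debug_spinlock_sketch.c - hypothetical user-space illustration, not kernel code */
#include <stdatomic.h>
#include <stdio.h>

#define DEBUG_LOOPS (1u << 24)          /* stand-in for loops_per_jiffy * HZ */

struct debug_lock {
        atomic_flag raw;                /* stand-in for arch_spinlock_t */
};

static void debug_spin_lock(struct debug_lock *lock)
{
        unsigned int i;

        /* Bounded trylock loop, as in the patched __spin_lock_debug(). */
        for (i = 0; i < DEBUG_LOOPS; i++) {
                if (!atomic_flag_test_and_set_explicit(&lock->raw,
                                                       memory_order_acquire))
                        return;         /* lock acquired */
        }

        /* Lockup suspected: report once (spin_dump() analogue). */
        fprintf(stderr, "debug_spin_lock: lockup suspected on %p\n",
                (void *)lock);

        /*
         * Fall back to an unconditional acquisition, mirroring the new
         * arch_spin_lock() call: either it eventually succeeds, or there is
         * no forward progress - but the debug path stops retrying trylock.
         */
        while (atomic_flag_test_and_set_explicit(&lock->raw,
                                                 memory_order_acquire))
                ;                       /* keep spinning */
}

static void debug_spin_unlock(struct debug_lock *lock)
{
        atomic_flag_clear_explicit(&lock->raw, memory_order_release);
}

int main(void)
{
        struct debug_lock lock = { ATOMIC_FLAG_INIT };

        debug_spin_lock(&lock);
        /* critical section */
        debug_spin_unlock(&lock);
        return 0;
}

As the patch's comment notes, the final unconditional acquisition does not guarantee forward progress if the owner never releases the lock; it only ensures the diagnostic is emitted once and the debug wrapper then defers to the ordinary lock acquisition. The sketch cannot model the kernel-specific point that arch_spin_lock() may use a fairer mechanism (for example, ticket locks) than repeated trylocks; it only illustrates the control flow of the debug wrapper.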