author     Ingo Molnar <mingo@elte.hu>              2006-07-03 00:24:54 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-07-03 15:27:04 -0700
commit     8a25d5debff2daee280e83e09d8c25d67c26a972 (patch)
tree       3bccfef9acb66fc62863bfd6c16493c5e8c8e394 /include/asm-i386
parent     [PATCH] lockdep: prove rwsem locking correctness (diff)
[PATCH] lockdep: prove spinlock rwlock locking correctness
Use the lock validator framework to prove spinlock and rwlock locking
correctness.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/spinlock.h | 7 +++++++
1 file changed, 7 insertions(+), 0 deletions(-)
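
The comment added below refers to what the "flags" variant of the i386 spinlock acquire is for: while spinning on a contended lock taken via spin_lock_irqsave(), the caller's saved interrupt state can be briefly restored so pending interrupts are not held off for the whole wait. The following is a minimal, self-contained C sketch of that idea; the my_* names and the interrupt stubs are hypothetical, and it illustrates the concept rather than the kernel's actual implementation.

/*
 * Hypothetical sketch (not kernel code): contrast a plain spinlock
 * acquire with a "flags" acquire that briefly drops back to the
 * caller's saved IRQ state while waiting for a contended lock.
 */
#include <stdatomic.h>

struct my_spinlock {
        atomic_flag locked;
};

/* Stand-ins for local_irq_restore()/local_irq_disable(). */
static inline void my_irq_restore(unsigned long flags) { (void)flags; }
static inline void my_irq_disable(void) { }

/* Plain acquire: interrupts stay disabled for the entire spin. */
static void my_spin_lock(struct my_spinlock *l)
{
        while (atomic_flag_test_and_set_explicit(&l->locked, memory_order_acquire))
                ;       /* busy-wait with IRQs off */
}

/*
 * "flags" acquire: while the lock is contended, restore the caller's
 * saved IRQ state so pending interrupts can be serviced, then disable
 * IRQs again before retrying the atomic test-and-set.
 */
static void my_spin_lock_flags(struct my_spinlock *l, unsigned long flags)
{
        while (atomic_flag_test_and_set_explicit(&l->locked, memory_order_acquire)) {
                my_irq_restore(flags);  /* may re-enable interrupts */
                my_irq_disable();       /* IRQs must be off when the lock is taken */
        }
}

With CONFIG_PROVE_LOCKING enabled, the patch compiles the arch fast path out, so the acquire happens without that mid-acquire interrupt window; as the comment notes, only a performance optimization is lost, and the IRQ state seen by the lock validator during an acquire stays simple.
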
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 7e29b51bcaa0..87c40f830653 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -68,6 +68,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
"=m" (lock->slock) : : "memory");
}
+/*
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
+ */
+#ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	alternative_smp(
@@ -75,6 +81,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 		__raw_spin_lock_string_up,
 		"=m" (lock->slock) : "r" (flags) : "memory");
 }
+#endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
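
When __raw_spin_lock_flags() is compiled out like this, generic code that would normally call the flags-aware acquire has to fall back to the plain one. A hedged sketch of what such a fallback can look like, as an illustration only and not the exact mechanism this patch series uses:

/*
 * Illustrative fallback only: if the architecture does not provide a
 * flags-aware acquire, treat the _flags variant as a plain acquire
 * and ignore the saved flags.
 */
#ifdef CONFIG_PROVE_LOCKING
# define __raw_spin_lock_flags(lock, flags)	__raw_spin_lock(lock)
#endif

Either way the spin_lock_irqsave() callers keep building; only the interrupt re-enable optimization is given up in the lockdep configuration.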