author	Thomas Gleixner <tglx@linutronix.de>	2009-12-02 20:01:25 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 23:55:32 +0100
commit	0199c4e68d1f02894bdefe4b5d9e9ee4aedd8d62 (patch)
tree	e371d17bd73d64332349debbf45962ec67e7269d	/include/linux/spinlock.h
parent	locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED (diff)
locking: Convert __raw_spin* functions to arch_spin*
Name space cleanup. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--	include/linux/spinlock.h	22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 5ef7a4c060b5..de3a022489c6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -14,7 +14,7 @@
* linux/spinlock_types.h:
* defines the generic type and initializers
*
- * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
+ * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
* implementations, mostly inline assembly code
*
* (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
* defines the generic type and initializers
*
* linux/spinlock_up.h:
- * contains the __raw_spin_*()/etc. version of UP
+ * contains the arch_spin_*()/etc. version of UP
* builds. (which are NOPs on non-debug, non-preempt
* builds)
*
@@ -103,17 +103,17 @@ do { \
do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
-#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+#define spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
#ifdef CONFIG_GENERIC_LOCKBREAK
#define spin_is_contended(lock) ((lock)->break_lock)
#else
-#ifdef __raw_spin_is_contended
-#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#ifdef arch_spin_is_contended
+#define spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
#else
#define spin_is_contended(lock) (((void)(lock), 0))
-#endif /*__raw_spin_is_contended*/
+#endif /*arch_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
@@ -125,7 +125,7 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
* spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
*/
-#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+#define spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
#ifdef CONFIG_DEBUG_SPINLOCK
extern void _raw_spin_lock(spinlock_t *lock);
@@ -133,11 +133,11 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_unlock(spinlock_t *lock);
#else
-# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock(lock) arch_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
- __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
-# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
-# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
+ arch_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_spin_trylock(lock) arch_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_unlock(lock) arch_spin_unlock(&(lock)->raw_lock)
#endif
/*
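
The last hunk shows the non-debug mapping: the generic _raw_spin_* helpers now forward to the renamed arch_spin_*() primitives operating on the lock's embedded raw_lock. Below is a rough user-space sketch of that layering using C11 atomics; the arch_spinlock_t contents and the test-and-set loop are invented for illustration and are not the kernel's per-arch implementation.

/*
 * User-space sketch (C11 atomics), NOT kernel code: shows how spin_lock()
 * funnels through _raw_spin_lock() down to the arch_spin_*() layer.
 */
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_flag slock; } arch_spinlock_t;   /* stand-in for the asm/spinlock.h type */
typedef struct { arch_spinlock_t raw_lock; } spinlock_t; /* generic wrapper around the arch lock  */

/* arch layer: the renamed arch_spin_*() low-level primitives */
static inline void arch_spin_lock(arch_spinlock_t *l)
{
	while (atomic_flag_test_and_set_explicit(&l->slock, memory_order_acquire))
		;	/* busy-wait until the previous holder clears the flag */
}

static inline void arch_spin_unlock(arch_spinlock_t *l)
{
	atomic_flag_clear_explicit(&l->slock, memory_order_release);
}

/* generic layer: mirrors the non-debug #else branch in the hunk above */
#define _raw_spin_lock(lock)	arch_spin_lock(&(lock)->raw_lock)
#define _raw_spin_unlock(lock)	arch_spin_unlock(&(lock)->raw_lock)

#define spin_lock(lock)		_raw_spin_lock(lock)
#define spin_unlock(lock)	_raw_spin_unlock(lock)

int main(void)
{
	spinlock_t lock;

	atomic_flag_clear(&lock.raw_lock.slock);	/* start in the unlocked state */

	spin_lock(&lock);
	puts("inside critical section");
	spin_unlock(&lock);
	return 0;
}

The point of the sketch is the naming boundary the commit establishes: everything above the arch_spin_*() line is generic code, and only the bottom layer is architecture-specific, which is why the old __raw_spin_*() names were freed up for renaming.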