author		Will Deacon <will.deacon@arm.com>		2013-10-09 17:01:21 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-10-29 11:06:06 +0000
commit		2523c67bb6962f98193dce1c73b6efb65a6ea92c (patch)
tree		0c23d146be95ffcb82405c6b6770ae52421ba19e /arch/arm/include/asm/cmpxchg.h
parent		ARM: 7850/1: DEBUG_LL on efm32 SoCs (diff)
download	linux-dev-2523c67bb6962f98193dce1c73b6efb65a6ea92c.tar.xz
		linux-dev-2523c67bb6962f98193dce1c73b6efb65a6ea92c.zip
ARM: 7852/1: cmpxchg: implement barrier-less cmpxchg64_local
Our cmpxchg64 macros are wrappers around atomic64_cmpxchg. Whilst this is
great for code re-use, there is a case for barrier-less cmpxchg where it is
known to be safe (for example cmpxchg64_local and cmpxchg-based lockrefs).

This patch introduces a 64-bit cmpxchg implementation specifically for the
cmpxchg64_* macros, so that it can later be used by the lockref code.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
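To make the ordering distinction concrete, here is a minimal user-space sketch
of the two guarantees, using GCC's __atomic builtins rather than the kernel
primitives; cas64_ordered and cas64_relaxed are hypothetical names standing in
for what cmpxchg64 (fully ordered) and cmpxchg64_local (no ordering) provide,
not kernel APIs. It compiles anywhere GCC or Clang offers 64-bit __atomic
builtins.

#include <stdint.h>
#include <stdio.h>

/* Fully ordered 64-bit CAS: roughly the guarantee cmpxchg64 gives. */
static uint64_t cas64_ordered(uint64_t *ptr, uint64_t old, uint64_t new)
{
	__atomic_compare_exchange_n(ptr, &old, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;	/* on failure, the builtin stores the observed value here */
}

/* Relaxed 64-bit CAS: roughly the guarantee cmpxchg64_local gives. */
static uint64_t cas64_relaxed(uint64_t *ptr, uint64_t old, uint64_t new)
{
	__atomic_compare_exchange_n(ptr, &old, new, 0,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	return old;
}

int main(void)
{
	uint64_t v = 1;

	printf("%llu\n", (unsigned long long)cas64_ordered(&v, 1, 2)); /* 1: swap hit, v is now 2 */
	printf("%llu\n", (unsigned long long)cas64_relaxed(&v, 9, 7)); /* 2: swap missed, v stays 2 */
	return 0;
}

On ARMv7 the fully ordered form costs dmb barriers either side of the
exclusive-access loop; dropping them in the relaxed form is exactly the saving
this patch gives cmpxchg64_local.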
Diffstat (limited to 'arch/arm/include/asm/cmpxchg.h')
-rw-r--r--	arch/arm/include/asm/cmpxchg.h | 52
1 file changed, 42 insertions(+), 10 deletions(-)
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c10540d..fbd978fc248f 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	return ret;
 }
 
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	unsigned long long oldval;
+	unsigned long res;
+
+	__asm__ __volatile__(
+"1:	ldrexd	%1, %H1, [%3]\n"
+"	teq	%1, %4\n"
+"	teqeq	%H1, %H4\n"
+"	bne	2f\n"
+"	strexd	%0, %5, %H5, [%3]\n"
+"	teq	%0, #0\n"
+"	bne	1b\n"
+"2:"
+	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+	: "r" (ptr), "r" (old), "r" (new)
+	: "cc");
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
 #define cmpxchg_local(ptr,o,n)						\
 	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
 					     (unsigned long)(o),	\
@@ -230,18 +266,14 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 					     sizeof(*(ptr))))
 
 #define cmpxchg64(ptr, o, n)						\
-	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
-						atomic64_t,		\
-						counter),		\
-					      (unsigned long long)(o),	\
-					      (unsigned long long)(n)))
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					    (unsigned long long)(o),	\
+					    (unsigned long long)(n)))
 
 #define cmpxchg64_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
-						local64_t,		\
-						a),			\
-					     (unsigned long long)(o),	\
-					     (unsigned long long)(n)))
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
+					 (unsigned long long)(o),	\
+					 (unsigned long long)(n)))
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
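For experimenting with the new primitive outside the kernel, the patch's code
can be lifted into a standalone ARMv7 user-space program almost verbatim. This
is a sketch under stated assumptions, not kernel code: it assumes an ARMv7
toolchain (e.g. arm-linux-gnueabihf-gcc -march=armv7-a) and substitutes GCC's
__sync_synchronize() builtin for the kernel's smp_mb(); the two functions are
otherwise copied from the patch above, with explanatory comments added.

#include <stdio.h>

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	__asm__ __volatile__(
"1:	ldrexd	%1, %H1, [%3]\n"	/* load both halves exclusively */
"	teq	%1, %4\n"		/* compare low words */
"	teqeq	%H1, %H4\n"		/* compare high words if low matched */
"	bne	2f\n"			/* mismatch: return observed value */
"	strexd	%0, %5, %H5, [%3]\n"	/* attempt to store the new value */
"	teq	%0, #0\n"		/* res != 0 means we lost the race */
"	bne	1b\n"			/* ... so retry the whole sequence */
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	__sync_synchronize();		/* stands in for the kernel's smp_mb() */
	ret = __cmpxchg64(ptr, old, new);
	__sync_synchronize();		/* stands in for the kernel's smp_mb() */

	return ret;
}

int main(void)
{
	unsigned long long v = 0x100000002ULL;

	/* 'old' matches: the store happens, the prior value is returned. */
	printf("was %#llx\n", __cmpxchg64_mb(&v, 0x100000002ULL, 5));
	/* 'old' is stale: no store, the current value (5) is returned. */
	printf("was %#llx\n", __cmpxchg64(&v, 3, 7));
	printf("now %#llx\n", v);
	return 0;
}

The ldrexd/strexd pair forms the retry loop: teq/teqeq compares both 32-bit
halves of the expected value, and a failed store-exclusive (non-zero res)
branches back to reload and try again.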