author     Will Deacon <will.deacon@arm.com>    2015-02-03 12:39:03 +0000
committer  Will Deacon <will.deacon@arm.com>    2015-07-27 15:28:50 +0100
commit     c0385b24af15020a1e505f2c984db0d7c0d017e1 (patch)
tree       50cf977ef4ba5de29e1c9a931588bc340c9e103c /arch/arm64/include/asm/atomic_ll_sc.h
parent     arm64: alternatives: add cpu feature for lse atomics (diff)
arm64: introduce CONFIG_ARM64_LSE_ATOMICS as fallback to ll/sc atomics
In order to patch in the new atomic instructions at runtime, we need to generate wrappers around the out-of-line exclusive load/store atomics.

This patch adds a new Kconfig option, CONFIG_ARM64_LSE_ATOMICS, which causes our atomic functions to branch to the out-of-line ll/sc implementations. To avoid the register spill overhead of the PCS, the out-of-line functions are compiled with specific compiler flags to force out-of-line save/restore of any registers that are usually caller-saved.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
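
For context, the sketch below shows one plausible shape for the wrapper macros this header expects its includer to provide (the real definitions were added elsewhere in this series, likely in asm/atomic.h, and may differ in detail). It is an illustration of the mechanism, not code from this patch; the names and exact definitions under CONFIG_ARM64_LSE_ATOMICS are assumptions.

/*
 * Illustrative sketch only -- not part of this diff. One plausible way the
 * including header could define the wrapper macros, depending on whether
 * CONFIG_ARM64_LSE_ATOMICS is enabled.
 */
#ifdef CONFIG_ARM64_LSE_ATOMICS
/* Build the ll/sc routines out of line so patched-in LSE code can branch to them. */
#define __LL_SC_INLINE
#define __LL_SC_PREFIX(x)	__ll_sc_##x
#define __LL_SC_EXPORT(x)	EXPORT_SYMBOL(__LL_SC_PREFIX(x))
#else
/* No LSE support configured: the ll/sc routines are the atomics themselves, inlined. */
#define __LL_SC_INLINE		static inline
#define __LL_SC_PREFIX(x)	x
/* __LL_SC_EXPORT(x) then falls back to the empty default in atomic_ll_sc.h. */
#endif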
Diffstat (limited to 'arch/arm64/include/asm/atomic_ll_sc.h')
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h  19
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index 66e992a58f6b..c33fa2cd399e 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -21,6 +21,10 @@
#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H
+#ifndef __ARM64_IN_ATOMIC_IMPL
+#error "please don't include this file directly"
+#endif
+
/*
* AArch64 UP and SMP safe atomic ops. We use load exclusive and
* store exclusive to ensure that these are atomic. We may loop
@@ -41,6 +45,10 @@
#define __LL_SC_PREFIX(x) x
#endif
+#ifndef __LL_SC_EXPORT
+#define __LL_SC_EXPORT(x)
+#endif
+
#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
@@ -56,6 +64,7 @@ __LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
+__LL_SC_EXPORT(atomic_##op);
#define ATOMIC_OP_RETURN(op, asm_op) \
__LL_SC_INLINE int \
@@ -75,7 +84,8 @@ __LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \
\
smp_mb(); \
return result; \
-}
+} \
+__LL_SC_EXPORT(atomic_##op##_return);
#define ATOMIC_OPS(op, asm_op) \
ATOMIC_OP(op, asm_op) \
@@ -115,6 +125,7 @@ __LL_SC_PREFIX(atomic_cmpxchg(atomic_t *ptr, int old, int new))
smp_mb();
return oldval;
}
+__LL_SC_EXPORT(atomic_cmpxchg);
#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
@@ -131,6 +142,7 @@ __LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
: "Ir" (i)); \
} \
+__LL_SC_EXPORT(atomic64_##op);
#define ATOMIC64_OP_RETURN(op, asm_op) \
__LL_SC_INLINE long \
@@ -150,7 +162,8 @@ __LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \
\
smp_mb(); \
return result; \
-}
+} \
+__LL_SC_EXPORT(atomic64_##op##_return);
#define ATOMIC64_OPS(op, asm_op) \
ATOMIC64_OP(op, asm_op) \
@@ -190,6 +203,7 @@ __LL_SC_PREFIX(atomic64_cmpxchg(atomic64_t *ptr, long old, long new))
smp_mb();
return oldval;
}
+__LL_SC_EXPORT(atomic64_cmpxchg);
__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
@@ -211,5 +225,6 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
return result;
}
+__LL_SC_EXPORT(atomic64_dec_if_positive);
#endif /* __ASM_ATOMIC_LL_SC_H */
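
As an aid to reading the macro-generated functions above, here is a hedged sketch of roughly what one instantiation, for example ATOMIC_OP(add, add), expands to when the out-of-line (__ll_sc_-prefixed) wrappers are in effect. The asm body follows the conventional arm64 load-exclusive/store-exclusive retry loop and is reconstructed for illustration rather than copied from this hunk, so the exact source text may differ.

/*
 * Illustrative expansion only: approximately what ATOMIC_OP(add, add)
 * produces with the out-of-line wrappers applied.
 */
void __ll_sc_atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"		// load-exclusive v->counter into result
"	add	%w0, %w0, %w3\n"	// apply the operation with the caller's operand
"	stxr	%w1, %w0, %2\n"		// attempt the exclusive store, status in tmp
"	cbnz	%w1, 1b"		// non-zero status means the store failed, so retry
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}
EXPORT_SYMBOL(__ll_sc_atomic_add);	/* produced by __LL_SC_EXPORT(atomic_add) */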