Diffstat (limited to 'arch/powerpc/include/asm/atomic.h')
-rw-r--r--  arch/powerpc/include/asm/atomic.h  77
1 file changed, 21 insertions(+), 56 deletions(-)
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 853dc86864f4..d1ea554c33ed 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -11,6 +11,7 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
+#include <asm/asm-compat.h>
/*
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -27,14 +28,22 @@ static __inline__ int arch_atomic_read(const atomic_t *v)
{
int t;
- __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+ else
+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
return t;
}
static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
- __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+ else
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op, suffix, sign, ...) \
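As a minimal sketch of the pattern this hunk applies (the helper name example_atomic_read() is hypothetical and not part of the patch): with CONFIG_PPC_KERNEL_PREFIXED the kernel is built with -mprefixed, so the compiler may satisfy an "m<>" operand with an address whose displacement only fits a prefixed (34-bit) encoding; the plain lwz/stw spelled in the asm template cannot express that, so the address is instead forced into a base register via the "b" constraint (any GPR except r0) and accessed with a zero displacement.

/* Hypothetical illustration of the hunk above -- not kernel code. */
static inline int example_atomic_read(const atomic_t *v)
{
	int t;

	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED)) {
		/*
		 * Force the address into a base register ("b" = any GPR
		 * except r0) and use a zero displacement, which a plain
		 * D-form lwz can always encode.
		 */
		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	} else {
		/*
		 * "%U1" appends 'u' (update form) and "%X1" appends 'x'
		 * (indexed form) when the compiler picks such an address,
		 * e.g. lwz r9,0(r3), lwzx r9,r3,r4 or lwzu r9,4(r3).
		 */
		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
	}
	return t;
}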
@@ -118,46 +127,6 @@ ATOMIC_OPS(xor, xor, "", K)
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-#define arch_atomic_cmpxchg(v, o, n) \
- (arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic_cmpxchg_relaxed(v, o, n) \
- arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define arch_atomic_cmpxchg_acquire(v, o, n) \
- arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-
-#define arch_atomic_xchg(v, new) \
- (arch_xchg(&((v)->counter), new))
-#define arch_atomic_xchg_relaxed(v, new) \
- arch_xchg_relaxed(&((v)->counter), (new))
-
-/*
- * Don't want to override the generic atomic_try_cmpxchg_acquire, because
- * we add a lock hint to the lwarx, which may not be wanted for the
- * _acquire case (and is not used by the other _acquire variants so it
- * would be a surprise).
- */
-static __always_inline bool
-arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
-{
- int r, o = *old;
-
- __asm__ __volatile__ (
-"1: lwarx %0,0,%2,%5 # atomic_try_cmpxchg_acquire \n"
-" cmpw 0,%0,%3 \n"
-" bne- 2f \n"
-" stwcx. %4,0,%2 \n"
-" bne- 1b \n"
-"\t" PPC_ACQUIRE_BARRIER " \n"
-"2: \n"
- : "=&r" (r), "+m" (v->counter)
- : "r" (&v->counter), "r" (o), "r" (new), "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
- : "cr0", "memory");
-
- if (unlikely(r != o))
- *old = r;
- return likely(r == o);
-}
-
/**
* atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
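The cmpxchg/xchg wrappers removed here are redundant: when an architecture does not define them, the generic fallback layer (include/linux/atomic/atomic-arch-fallback.h) builds the atomic_t operations from arch_cmpxchg()/arch_xchg() and their _relaxed/_acquire variants applied to ->counter. The hunk also drops arch_atomic_try_cmpxchg_lock(), which open-coded an acquire cmpxchg with the larx EH hint bit set on PPC64. A simplified sketch of the fallback shape (the example_ prefix marks this as an illustration, not the literal generated code):

static __always_inline int
example_arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	/* roughly what the generic layer falls back to on ->counter */
	return arch_cmpxchg(&v->counter, old, new);
}

static __always_inline int
example_arch_atomic_xchg_relaxed(atomic_t *v, int new)
{
	return arch_xchg_relaxed(&v->counter, new);
}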
@@ -225,14 +194,22 @@ static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
s64 t;
- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
+ else
+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));
return t;
}
static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
+ /* -mprefixed can generate offsets beyond range, fall back hack */
+ if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
+ __asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
+ else
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
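The 64-bit hunk mirrors the 32-bit one, with one extra consideration: ld and std are DS-form instructions, whose 16-bit displacement field has its low two bits hard-wired to zero, so only offsets that are a multiple of 4 can be encoded. DS_FORM_CONSTRAINT (provided via asm/asm-compat.h, consistent with the include added in the first hunk) narrows the memory operand to such addresses, where a bare "m<>" could let the compiler choose one the instruction cannot encode. A minimal sketch mirroring the std template above (the helper name example_atomic64_set() is hypothetical):

/* Hypothetical illustration -- mirrors the std template added above. */
static inline void example_atomic64_set(atomic64_t *v, s64 i)
{
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		/* same prefixed-offset fallback as the 32-bit case */
		__asm__ __volatile__("std %1,0(%2)"
				     : "=m"(v->counter)
				     : "r"(i), "b"(&v->counter));
	else
		/*
		 * DS_FORM_CONSTRAINT restricts the operand to addresses a
		 * DS-form std can encode (displacement a multiple of 4).
		 */
		__asm__ __volatile__("std%U0%X0 %1,%0"
				     : "=" DS_FORM_CONSTRAINT (v->counter)
				     : "r"(i));
}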
@@ -408,18 +385,6 @@ static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define arch_atomic64_cmpxchg(v, o, n) \
- (arch_cmpxchg(&((v)->counter), (o), (n)))
-#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
- arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define arch_atomic64_cmpxchg_acquire(v, o, n) \
- arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-
-#define arch_atomic64_xchg(v, new) \
- (arch_xchg(&((v)->counter), new))
-#define arch_atomic64_xchg_relaxed(v, new) \
- arch_xchg_relaxed(&((v)->counter), (new))
-
/**
* atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
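The 64-bit removals follow the same reasoning as the 32-bit ones above: the generic fallback layer derives these operations from arch_cmpxchg()/arch_xchg() (and their _relaxed/_acquire variants) applied to the s64 counter, roughly as in this hypothetical rendering:

static __always_inline s64
example_arch_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg_acquire(&v->counter, old, new);
}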