path: root/arch/mips/include/asm/atomic.h
author     Maciej W. Rozycki <macro@codesourcery.com>    2014-11-15 22:08:48 +0000
committer  Ralf Baechle <ralf@linux-mips.org>    2014-11-24 07:45:36 +0100
commit     b0984c43702f0fe2dbb0c344843e36c8b2cd13f1 (patch)
tree       5c8543266b5a2549ef2e90e0c22e05e0a08d32f8 /arch/mips/include/asm/atomic.h
parent     MIPS: Kconfig: Only allow 32-bit microMIPS builds (diff)
MIPS: Fix microMIPS LL/SC immediate offsets
In the microMIPS encoding some memory access instructions have their immediate offset reduced to 12 bits only. That does not match the GCC `R' constraint we use in some places to satisfy the requirement, resulting in build failures like this:

{standard input}: Assembler messages:
{standard input}:720: Error: macro used $at after ".set noat"
{standard input}:720: Warning: macro instruction expanded into multiple instructions

Fix the problem by defining a macro, `GCC_OFF12_ASM', that expands to the right constraint depending on whether microMIPS or standard MIPS code is produced.

Also apply the fix where `m' is used: in the worst case this change does nothing, e.g. where the pointer was already in a register (such as a function argument) and no further offset was requested, and in the best case it avoids an extraneous sequence of up to two instructions to load the high 20 bits of the address in the LL/SC loop. This reduces the risk of lock contention, which grows with the number of instructions in the critical section between LL and SC.

Strictly speaking we could just bulk-replace `R' with `ZC', as the latter constraint adjusts automatically depending on the ISA selected. However `ZC' was only introduced with GCC 4.9 and we keep supporting older compilers for the standard MIPS configuration, hence the slightly more complicated approach chosen here.

A zero-argument function-like macro was chosen over an object-like one so that it does not look like a function call taking the C expression used for the constraint as an argument. This avoids confusing the reader or formatting checkers like `checkpatch.pl', and follows previous practice.

Signed-off-by: Maciej W. Rozycki <macro@codesourcery.com>
Signed-off-by: Steven J. Hill <Steven.Hill@imgtec.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/8482/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
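For reference, the constraint selection can be sketched as below. This is a minimal illustration, assuming the macro lives in <asm/compiler.h> and keys off CONFIG_CPU_MICROMIPS; the exact in-tree definition may differ:

	/*
	 * Sketch only: microMIPS LL/SC take a 12-bit offset, which GCC's
	 * `ZC' constraint models; standard MIPS keeps `R' so that
	 * compilers older than GCC 4.9 remain usable.
	 */
	#ifdef CONFIG_CPU_MICROMIPS
	#define GCC_OFF12_ASM()	"ZC"
	#else
	#define GCC_OFF12_ASM()	"R"
	#endif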
Diffstat (limited to 'arch/mips/include/asm/atomic.h')
-rw-r--r--    arch/mips/include/asm/atomic.h    39
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 6dd6bfc607e9..ec4b4d658bc4 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -17,6 +17,7 @@
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
+#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>
@@ -53,7 +54,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
" sc %0, %1 \n" \
" beqzl %0, 1b \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
int temp; \
@@ -65,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
@@ -95,7 +96,8 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
" beqzl %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF12_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
int temp; \
@@ -107,7 +109,8 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
" .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF12_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!result)); \
\
@@ -167,8 +170,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
- : "Ir" (i), "m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF12_ASM() (v->counter)
+ : "Ir" (i), GCC_OFF12_ASM() (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
int temp;
@@ -185,7 +189,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF12_ASM() (v->counter)
: "Ir" (i));
} else {
unsigned long flags;
@@ -328,7 +333,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
" scd %0, %1 \n" \
" beqzl %0, 1b \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
long temp; \
@@ -340,7 +345,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
" " #asm_op " %0, %2 \n" \
" scd %0, %1 \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
@@ -370,7 +375,8 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
" beqzl %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF12_ASM() (v->counter) \
: "Ir" (i)); \
} else if (kernel_uses_llsc) { \
long temp; \
@@ -382,8 +388,9 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
" .set mips0 \n" \
- : "=&r" (result), "=&r" (temp), "=m" (v->counter) \
- : "Ir" (i), "m" (v->counter) \
+ : "=&r" (result), "=&r" (temp), \
+ "=" GCC_OFF12_ASM() (v->counter) \
+ : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
: "memory"); \
} while (unlikely(!result)); \
\
@@ -443,8 +450,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "=" GCC_OFF12_ASM() (v->counter)
+ : "Ir" (i), GCC_OFF12_ASM() (v->counter)
: "memory");
} else if (kernel_uses_llsc) {
long temp;
@@ -461,7 +469,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" .set reorder \n"
"1: \n"
" .set mips0 \n"
- : "=&r" (result), "=&r" (temp), "+m" (v->counter)
+ : "=&r" (result), "=&r" (temp),
+ "+" GCC_OFF12_ASM() (v->counter)
: "Ir" (i));
} else {
unsigned long flags;
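
To illustrate the resulting pattern in isolation, here is a minimal, self-contained sketch of the LL/SC loop the hunks above modify. The function name is hypothetical; it assumes a MIPS target with GCC inline asm and the GCC_OFF12_ASM() sketch given earlier, and it omits the kernel's barriers and R10000_LLSC_WAR handling:

	/* Hypothetical example, not the kernel's exact code. */
	static inline void example_atomic_add(int i, int *counter)
	{
		int temp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		# load-linked counter	\n"
		"	addu	%0, %2		# add the increment	\n"
		"	sc	%0, %1		# store-conditional	\n"
		"	beqz	%0, 1b		# retry if SC failed	\n"
		: "=&r" (temp), "+" GCC_OFF12_ASM() (*counter)
		: "Ir" (i));
	}

With `+m', GCC may pick an addressing mode whose offset does not fit the 12-bit microMIPS encoding; with the selected `ZC'/`R' constraint the operand is guaranteed to be directly addressable by LL/SC, so no extra address-materialization instructions land between LL and SC.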