about summary refs log tree commit diff stats homepage
path: root/arch/s390/include/asm/cmpxchg.h
diff options
context:
space:
mode:
author    Heiko Carstens <hca@linux.ibm.com> 2022-11-02 15:16:43 +0100
committer Heiko Carstens <hca@linux.ibm.com> 2022-11-21 13:36:15 +0100
commit13f62e84385fa0241fc6a2178da50af02189121b (patch)
tree0ade33ba54f706c15b45e7289cd71a1ca0616fe6 /arch/s390/include/asm/cmpxchg.h
parentLinux 6.1-rc2 (diff)
download wireguard-linux-13f62e84385fa0241fc6a2178da50af02189121b.tar.xz
wireguard-linux-13f62e84385fa0241fc6a2178da50af02189121b.zip
s390/cmpxchg: use symbolic names for inline assembly operands
Make cmpxchg() inline assemblies more readable by using symbolic names for operands.

Link: https://lore.kernel.org/r/Y2J7yzQYt/bjLQXY@osiris
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Diffstat (limited to 'arch/s390/include/asm/cmpxchg.h')
-rw-r--r--  arch/s390/include/asm/cmpxchg.h | 76
1 file changed, 42 insertions(+), 34 deletions(-)
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 84c3f0d576c5..56fb8aa08945 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -96,56 +96,64 @@ static __always_inline unsigned long __cmpxchg(unsigned long address,
shift = (3 ^ (address & 3)) << 3;
address ^= address & 3;
asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
+ " l %[prev],%[address]\n"
+ "0: nr %[prev],%[mask]\n"
+ " lr %[tmp],%[prev]\n"
+ " or %[prev],%[old]\n"
+ " or %[tmp],%[new]\n"
+ " cs %[prev],%[tmp],%[address]\n"
+ " jnl 1f\n"
+ " xr %[tmp],%[prev]\n"
+ " nr %[tmp],%[mask]\n"
+ " jnz 0b\n"
"1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
- : "d" ((old & 0xff) << shift),
- "d" ((new & 0xff) << shift),
- "d" (~(0xff << shift))
+ : [prev] "=&d" (prev),
+ [tmp] "=&d" (tmp),
+ [address] "+Q" (*(int *)address)
+ : [old] "d" ((old & 0xff) << shift),
+ [new] "d" ((new & 0xff) << shift),
+ [mask] "d" (~(0xff << shift))
: "memory", "cc");
return prev >> shift;
case 2:
shift = (2 ^ (address & 2)) << 3;
address ^= address & 2;
asm volatile(
- " l %0,%2\n"
- "0: nr %0,%5\n"
- " lr %1,%0\n"
- " or %0,%3\n"
- " or %1,%4\n"
- " cs %0,%1,%2\n"
- " jnl 1f\n"
- " xr %1,%0\n"
- " nr %1,%5\n"
- " jnz 0b\n"
+ " l %[prev],%[address]\n"
+ "0: nr %[prev],%[mask]\n"
+ " lr %[tmp],%[prev]\n"
+ " or %[prev],%[old]\n"
+ " or %[tmp],%[new]\n"
+ " cs %[prev],%[tmp],%[address]\n"
+ " jnl 1f\n"
+ " xr %[tmp],%[prev]\n"
+ " nr %[tmp],%[mask]\n"
+ " jnz 0b\n"
"1:"
- : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
- : "d" ((old & 0xffff) << shift),
- "d" ((new & 0xffff) << shift),
- "d" (~(0xffff << shift))
+ : [prev] "=&d" (prev),
+ [tmp] "=&d" (tmp),
+ [address] "+Q" (*(int *)address)
+ : [old] "d" ((old & 0xffff) << shift),
+ [new] "d" ((new & 0xffff) << shift),
+ [mask] "d" (~(0xffff << shift))
: "memory", "cc");
return prev >> shift;
case 4:
asm volatile(
- " cs %0,%3,%1\n"
- : "=&d" (prev), "+Q" (*(int *) address)
- : "0" (old), "d" (new)
+ " cs %[prev],%[new],%[address]\n"
+ : [prev] "=&d" (prev),
+ [address] "+Q" (*(int *)address)
+ : "0" (old),
+ [new] "d" (new)
: "memory", "cc");
return prev;
case 8:
asm volatile(
- " csg %0,%3,%1\n"
- : "=&d" (prev), "+QS" (*(long *) address)
- : "0" (old), "d" (new)
+ " csg %[prev],%[new],%[address]\n"
+ : [prev] "=&d" (prev),
+ [address] "+QS" (*(long *)address)
+ : "0" (old),
+ [new] "d" (new)
: "memory", "cc");
return prev;
}