From c1d7cd228b4b46eca1dbd9bb2c6053f477a1a6ff Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Tue, 28 Jul 2015 14:48:00 +0100
Subject: arm64: spinlock: fix ll/sc unlock on big-endian systems

When unlocking a spinlock, we perform a read-modify-write on the owner
ticket in order to increment it and store it back with release
semantics.

In the LL/SC case, we load the 16-bit ticket using a 32-bit load and
therefore store back the wrong halfword on a big-endian system,
corrupting the lock after the first unlock and killing the system dead.

This patch fixes the unlock code to use 16-bit accessors consistently.

Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/spinlock.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 87ae7efa1211..c85e96d174a5 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -110,7 +110,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 	asm volatile(ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
-	"	ldr	%w1, %0\n"
+	"	ldrh	%w1, %0\n"
 	"	add	%w1, %w1, #1\n"
 	"	stlrh	%w1, %0",
 	/* LSE atomics */
-- 
cgit v1.2.3-59-g8ed1b
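
Note (not part of the patch): the sketch below is a minimal user-space
illustration, not kernel code, of why the 32-bit ldr is wrong here. It
assumes the owner ticket is the halfword at the address the unlock code
loads from, and uses made-up byte values for whatever follows it in
memory. After a 32-bit load on big-endian, that halfword ends up in the
upper 16 bits of the register, so the "add #1; stlrh" sequence increments
and stores back the wrong 16 bits; a 16-bit ldrh always returns the
addressed halfword, which is what the fix relies on.

#include <stdint.h>
#include <stdio.h>

/* Reassemble the 32-bit register value a wide load would produce from
 * four bytes of memory, for either byte order. */
static uint32_t load32(const uint8_t *p, int big_endian)
{
	if (big_endian)
		return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
		       (uint32_t)p[2] << 8  | (uint32_t)p[3];
	return (uint32_t)p[3] << 24 | (uint32_t)p[2] << 16 |
	       (uint32_t)p[1] << 8  | (uint32_t)p[0];
}

int main(void)
{
	/* Pretend the owner ticket is 0x0005 and is the halfword at the
	 * address being loaded; 0xaa/0xbb stand in for the two bytes that
	 * follow it in memory (illustrative values only). */
	uint8_t mem_le[4] = { 0x05, 0x00, 0xaa, 0xbb };	/* little-endian image */
	uint8_t mem_be[4] = { 0x00, 0x05, 0xaa, 0xbb };	/* big-endian image */

	/* The low 16 bits of the register after the 32-bit load are what
	 * "add %w1, %w1, #1; stlrh %w1, %0" increments and writes back. */
	printf("LE: low half = 0x%04x (the owner ticket, as intended)\n",
	       load32(mem_le, 0) & 0xffff);
	printf("BE: low half = 0x%04x (not the owner ticket)\n",
	       load32(mem_be, 1) & 0xffff);

	return 0;
}

Running this prints 0x0005 for the little-endian image but 0xaabb for the
big-endian one, which is the corruption the commit message describes.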