path: root/arch/powerpc/lib/locks.c
author	Michael Ellerman <mpe@ellerman.id.au>	2014-08-07 15:36:18 +1000
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2014-08-13 15:13:27 +1000
commit	78e05b1421fa41ae8457701140933baa5e7d9479 (patch)
tree	ddd3d45946163cdc5f5836d2e460b9b4f4dd24c2 /arch/powerpc/lib/locks.c
parent	powerpc: Add smp_mb() to arch_spin_is_locked() (diff)
download	linux-dev-78e05b1421fa41ae8457701140933baa5e7d9479.tar.xz
	linux-dev-78e05b1421fa41ae8457701140933baa5e7d9479.zip
powerpc: Add smp_mb()s to arch_spin_unlock_wait()
As with the previous commit, which described why we need to add a barrier to arch_spin_is_locked(), we have a similar problem with spin_unlock_wait(). We need a barrier on entry to ensure any spinlock we have previously taken is visibly locked prior to the load of lock->slock.

It's also not clear whether spin_unlock_wait() is intended to have ACQUIRE semantics. For now be conservative and add a barrier on exit to give it ACQUIRE semantics.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
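To make the entry-barrier problem concrete, here is a minimal sketch in kernel C of the store-buffering pattern the message alludes to. The locks and both functions (lock_a, lock_b, cpu0_path(), cpu1_path()) are hypothetical names for illustration only; they are not part of this patch.

#include <linux/spinlock.h>

/* Hypothetical locks, for illustration only. */
static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

/* CPU 0: take A, then wait for B to be free. */
static void cpu0_path(void)
{
	spin_lock(&lock_a);
	/*
	 * spin_unlock_wait() loads lock_b's word. Without the smp_mb()
	 * this patch adds on entry, the store that took lock_a may still
	 * be sitting in CPU 0's store buffer when lock_b is sampled.
	 */
	spin_unlock_wait(&lock_b);
	/* ... work that relies on CPU 1 observing lock_a as held ... */
	spin_unlock(&lock_a);
}

/* CPU 1: hold B and check A. */
static void cpu1_path(void)
{
	spin_lock(&lock_b);
	if (!spin_is_locked(&lock_a)) {
		/*
		 * Without the barrier, both CPUs can read the other's
		 * lock as free: the classic store-buffering outcome.
		 */
	}
	spin_unlock(&lock_b);
}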
Diffstat (limited to 'arch/powerpc/lib/locks.c')
-rw-r--r--	arch/powerpc/lib/locks.c | 4 ++++
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 0c9c8d7d0734..170a0346f756 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -70,12 +70,16 @@ void __rw_yield(arch_rwlock_t *rw)
 
 void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
+	smp_mb();
+
 	while (lock->slock) {
 		HMT_low();
 		if (SHARED_PROCESSOR)
 			__spin_yield(lock);
 	}
 	HMT_medium();
+
+	smp_mb();
 }
 
 EXPORT_SYMBOL(arch_spin_unlock_wait);
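And to illustrate why the exit barrier (the conservative ACQUIRE) can matter, a second hedged sketch of a caller; obj_lock, obj_data, writer() and waiter() are again hypothetical and not taken from the kernel tree.

#include <linux/spinlock.h>
#include <linux/printk.h>

static DEFINE_SPINLOCK(obj_lock);
static int obj_data;

static void writer(void)
{
	spin_lock(&obj_lock);
	obj_data = 1;
	spin_unlock(&obj_lock);
}

static void waiter(void)
{
	/* Returns once obj_lock has been observed unlocked. */
	spin_unlock_wait(&obj_lock);
	/*
	 * The smp_mb() on exit orders this read after the load that saw
	 * the lock free, so it cannot be satisfied with a stale value
	 * from before writer()'s critical section completed.
	 */
	pr_info("obj_data = %d\n", obj_data);
}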