author     Rohan McLure <rmclure@linux.ibm.com>  2023-05-10 13:31:07 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>  2023-06-21 15:13:57 +1000
commit     03d44ee80eac980a869ed3d5637ed85de6fb957f (patch)
tree       0891e2bd7c86989a5359ce1386b7430dfc7b897d
parent     powerpc/powernv/pci: Remove last IODA1 defines (diff)
download   wireguard-linux-03d44ee80eac980a869ed3d5637ed85de6fb957f.tar.xz
           wireguard-linux-03d44ee80eac980a869ed3d5637ed85de6fb957f.zip
powerpc: qspinlock: Mark accesses to qnode lock checks
The powerpc implementation of qspinlocks will both poll and spin on the
bitlock guarding a qnode. Mark these accesses with READ_ONCE to convey
to KCSAN that polling is intentional here.

Signed-off-by: Rohan McLure <rmclure@linux.ibm.com>
Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230510033117.1395895-2-rmclure@linux.ibm.com
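As a rough illustration of the pattern this change documents, the sketch below
is a minimal userspace C program, not kernel code: one thread polls a flag the
way the qnode's locked bit is polled, using a READ_ONCE()-style volatile cast
so the compiler cannot hoist the load out of the loop and so a race detector
such as KCSAN sees the access as deliberately marked. The qnode_sketch
structure, the macro definitions, and the pthread setup are illustrative
assumptions and are not taken from qspinlock.c.

    /*
     * Userspace sketch only: mimic the kernel's READ_ONCE()/WRITE_ONCE()
     * with volatile casts to mark an intentionally racy polled access.
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define READ_ONCE(x)        (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))

    struct qnode_sketch {
    	int locked;	/* handed off by the previous queue member */
    };

    static struct qnode_sketch node;

    static void *unlocker(void *arg)
    {
    	(void)arg;
    	usleep(1000);
    	WRITE_ONCE(node.locked, 1);	/* marked store pairing with the polled read */
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;

    	pthread_create(&t, NULL, unlocker, NULL);

    	/*
    	 * Polling loop: without the marked access the compiler may cache
    	 * the load, and KCSAN would report the plain read of a concurrently
    	 * written location as a data race rather than intentional polling.
    	 */
    	while (!READ_ONCE(node.locked))
    		;	/* spin until the flag is handed off */

    	pthread_join(t, NULL);
    	printf("lock handed off\n");
    	return 0;
    }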
-rw-r--r--  arch/powerpc/lib/qspinlock.c | 4
1 file changed, 2 insertions, 2 deletions
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index e4bd145255d0..b76c1f6acce5 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -435,7 +435,7 @@ yield_prev:
smp_rmb(); /* See __yield_to_locked_owner comment */
- if (!node->locked) {
+ if (!READ_ONCE(node->locked)) {
yield_to_preempted(prev_cpu, yield_count);
spin_begin();
return preempted;
@@ -584,7 +584,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
/* Wait for mcs node lock to be released */
spin_begin();
- while (!node->locked) {
+ while (!READ_ONCE(node->locked)) {
spec_barrier();
if (yield_to_prev(lock, node, old, paravirt))