author     guenther <guenther@openbsd.org>  2013-05-06 00:23:49 +0000
committer  guenther <guenther@openbsd.org>  2013-05-06 00:23:49 +0000
commit     9cea5a4d20b399784136c8ceaebbc1e93a61aec9 (patch)
tree       ff358f68dd60445e71629b314014000774c0aeea
parent     the use of modern intel performance counter msrs to measure the number of (diff)
If the lock is contended, such that the 'sc' fails, then we need to reset
the %2 register with _SPINLOCK_LOCKED before retrying.  tobiasu@ hit the
problem and figured out that ".set noreorder" is needed in the ASM.
-rw-r--r--  lib/librthread/arch/mips64/_atomic_lock.c  11
1 file changed, 7 insertions(+), 4 deletions(-)
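
The hunk below shows only the changed lines, so for reference here is a
self-contained sketch of the routine after the fix, with comments spelling
out the hazard: 'sc' overwrites %2 with its success flag (0 on failure), so
without the fix a retry would try to store 0, i.e. _SPINLOCK_UNLOCKED, into
the lock.  The typedef and constants stand in for <machine/spinlock.h> and
are assumptions for illustration, not copied from the tree.

	/*
	 * Illustrative stand-ins for the machine-dependent header;
	 * the real definitions live in <machine/spinlock.h>.
	 */
	typedef volatile unsigned int _spinlock_lock_t;
	#define _SPINLOCK_UNLOCKED	(0)
	#define _SPINLOCK_LOCKED	(1)

	int
	_atomic_lock(volatile _spinlock_lock_t *lock)
	{
		_spinlock_lock_t old;

		__asm__ __volatile__ (
			".set noreorder\n"		/* keep the delay slot exactly as written */
			"1:	ll	%0, 0(%1)\n"	/* old = *lock (load-linked) */
			"	sc	%2, 0(%1)\n"	/* try *lock = %2; %2 becomes 1 on success, 0 on failure */
			"	beqz	%2, 1b\n"	/* store failed: go around again ... */
			"	addi	%2, $0, %3\n"	/* ... with %2 reset to LOCKED (branch delay slot) */
			".set reorder\n"
			: "=&r"(old)
			: "r"(lock), "r"(_SPINLOCK_LOCKED), "i"(_SPINLOCK_LOCKED)
			: "memory");

		return (old != _SPINLOCK_UNLOCKED);
	}

The reload sits in the branch delay slot, so it runs whether or not the
branch back to 1 is taken; ".set noreorder" keeps the assembler from
rearranging or refilling that slot behind the programmer's back.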
diff --git a/lib/librthread/arch/mips64/_atomic_lock.c b/lib/librthread/arch/mips64/_atomic_lock.c
index 3e1c4a07a3f..3b000eb0914 100644
--- a/lib/librthread/arch/mips64/_atomic_lock.c
+++ b/lib/librthread/arch/mips64/_atomic_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: _atomic_lock.c,v 1.4 2009/06/01 23:17:53 miod Exp $ */
+/* $OpenBSD: _atomic_lock.c,v 1.5 2013/05/06 00:23:49 guenther Exp $ */
/*
* Atomic lock for mips
@@ -13,12 +13,15 @@ _atomic_lock(volatile _spinlock_lock_t *lock)
_spinlock_lock_t old;
__asm__ __volatile__ (
+ ".set noreorder\n"
"1: ll %0, 0(%1)\n"
" sc %2, 0(%1)\n"
" beqz %2, 1b\n"
- " nop\n" :
- "=r"(old) :
- "r"(lock), "r"(_SPINLOCK_LOCKED) : "memory");
+ " addi %2, $0, %3\n"
+ ".set reorder\n"
+ : "=&r"(old)
+ : "r"(lock), "r"(_SPINLOCK_LOCKED), "i"(_SPINLOCK_LOCKED)
+ : "memory");
return (old != _SPINLOCK_UNLOCKED);
}
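
For context, a hypothetical caller of this test-and-set primitive might look
like the following, reusing the declarations from the sketch above.  The
function names and the sched_yield() back-off are illustrative only, not
taken from librthread itself.

	#include <sched.h>

	static void
	example_spinlock(volatile _spinlock_lock_t *lock)
	{
		/* _atomic_lock() returns nonzero while someone else holds the lock. */
		while (_atomic_lock(lock))
			sched_yield();	/* give the current holder a chance to release it */
	}

	static void
	example_spinunlock(volatile _spinlock_lock_t *lock)
	{
		*lock = _SPINLOCK_UNLOCKED;
	}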