path: root/lib/libpthread/arch/mips64/_atomic_lock.c
author     pefo <pefo@openbsd.org>  2004-08-11 17:38:15 +0000
committer  pefo <pefo@openbsd.org>  2004-08-11 17:38:15 +0000
commit     1b5c0ae122494e594a4e217b37043a65d1fb6ada (patch)
tree       6a41bda4356a9bb6f3228e74c4eaa1b99c840a03 /lib/libpthread/arch/mips64/_atomic_lock.c
parent     bye (diff)
mips->mips64
Diffstat (limited to 'lib/libpthread/arch/mips64/_atomic_lock.c')
-rw-r--r--   lib/libpthread/arch/mips64/_atomic_lock.c   |   55 +
1 file changed, 55 insertions(+), 0 deletions(-)
diff --git a/lib/libpthread/arch/mips64/_atomic_lock.c b/lib/libpthread/arch/mips64/_atomic_lock.c
new file mode 100644
index 00000000000..5390c2c3e13
--- /dev/null
+++ b/lib/libpthread/arch/mips64/_atomic_lock.c
@@ -0,0 +1,55 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2004/08/11 17:41:34 pefo Exp $ */
+/*
+ * Atomic lock for mips
+ */
+
+#include "pthread.h"
+#include "pthread_private.h"
+#include "spinlock.h"
+#include <signal.h>
+
+/*
+ * uthread atomic lock:
+ * attempt to acquire a lock (by giving it a non-zero value).
+ * Return zero on success, or the lock's value on failure
+ */
+int
+_atomic_lock(volatile _spinlock_lock_t *lock)
+{
+#if __mips >= 2
+ _spinlock_lock_t old;
+ _spinlock_lock_t temp;
+
+ do {
+ /*
+ * On a mips2 machine and above, we can use ll/sc.
+ * Read the lock and tag the cache line with a 'load linked'
+ * instruction. (Register 17 (LLAddr) will hold the
+ * physical address of lock for diagnostic purposes);
+ * (Under pathologically heavy swapping, the physaddr may
+ * change! XXX)
+ */
+ __asm__("ll %0, %1" : "=r"(old) : "m"(*lock));
+ if (old != _SPINLOCK_UNLOCKED)
+ break; /* already locked */
+ /*
+ * Try and store a 1 at the tagged lock address. If
+ * anyone else has since written it, the tag on the cache
+ * line will have been wiped, and temp will be set to zero
+ * by the 'store conditional' instruction.
+ */
+ temp = _SPINLOCK_LOCKED;
+ __asm__("sc %0, %1" : "=r"(temp), "=m"(*lock)
+ : "0"(temp));
+ } while (temp == 0);
+
+ return (old != _SPINLOCK_UNLOCKED);
+#else
+ /*
+ * Older MIPS cpus have no way of doing an atomic lock
+ * without some kind of shift to supervisor mode.
+ */
+
+ return (_thread_slow_atomic_lock(lock));
+#endif
+}
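
For context, the acquire/release contract above (return zero when the lock was taken, non-zero when it was already held) is the same one every libpthread _atomic_lock.c backend provides. The sketch below is not part of this commit; it restates that contract with GCC/Clang __sync builtins so it can be compiled and exercised on any architecture instead of requiring MIPS ll/sc. The names my_spinlock_t, my_atomic_lock, my_spin_lock and my_spin_unlock are hypothetical.

	/*
	 * Illustrative sketch only -- a portable stand-in for the
	 * ll/sc loop in the diff above, using compiler builtins.
	 */
	#include <sched.h>

	#define MY_UNLOCKED	0
	#define MY_LOCKED	1

	typedef volatile int my_spinlock_t;

	/* Return zero if the lock was acquired, non-zero if already held. */
	static int
	my_atomic_lock(my_spinlock_t *lock)
	{
		/* Atomically swap in LOCKED; the previous value says who won. */
		return (__sync_lock_test_and_set(lock, MY_LOCKED) != MY_UNLOCKED);
	}

	/* Spin (yielding the CPU between attempts) until the lock is ours. */
	static void
	my_spin_lock(my_spinlock_t *lock)
	{
		while (my_atomic_lock(lock))
			sched_yield();
	}

	static void
	my_spin_unlock(my_spinlock_t *lock)
	{
		/* Release: store UNLOCKED with the matching release barrier. */
		__sync_lock_release(lock);
	}

Unlocking in the real uthread code is likewise a plain store of _SPINLOCK_UNLOCKED; only the acquire side needs the load-linked/store-conditional (or, pre-mips2, the _thread_slow_atomic_lock fallback) to make the read-modify-write atomic.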