path: root/lib/libpthread/sys
author    d <d@openbsd.org>	1998-11-20 11:15:35 +0000
committer d <d@openbsd.org>	1998-11-20 11:15:35 +0000
commit    ea03e63fbd0d2b427542c2481ae506ce9e278791 (patch)
tree      6a43235671adbd8289e0d0a14275441a0d653748 /lib/libpthread/sys
parent    fix strcat usage; deraadt (diff)
Move atomic_lock code from asm to C with inline asm;
Add m68k, mips and sparc. (needs more careful checking)
Add 'slow_atomic_lock' for crippled archs.
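
For context, a minimal sketch of what one of the new C-with-inline-asm
_atomic_lock routines might look like on i386, assuming a swap-based lock
word where 0 means unlocked; the type and function names here are
hypothetical and not taken from the per-arch files this commit adds
(those live under lib/libpthread/arch/${MACHINE_ARCH}/, per the .PATH
line in the Makefile below):

    typedef volatile int my_spinlock_t;     /* hypothetical lock word, 0 = unlocked */

    int
    my_atomic_lock(my_spinlock_t *lock)
    {
            int old = 1;                    /* the value that marks the lock as held */

            /*
             * Atomically swap 'old' with *lock.  On x86, xchg with a
             * memory operand is implicitly locked; the "memory" clobber
             * keeps the compiler from reordering accesses around it.
             */
            __asm__ volatile ("xchgl %0, %1"
                : "+r" (old), "+m" (*lock)
                :
                : "memory");

            /* zero means the lock was free and is now ours */
            return (old);
    }
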
Diffstat (limited to 'lib/libpthread/sys')
-rw-r--r--  lib/libpthread/sys/Makefile.inc       |  6
-rw-r--r--  lib/libpthread/sys/slow_atomic_lock.c | 33
2 files changed, 36 insertions(+), 3 deletions(-)
diff --git a/lib/libpthread/sys/Makefile.inc b/lib/libpthread/sys/Makefile.inc
index bf19b6dd111..be55ff3747b 100644
--- a/lib/libpthread/sys/Makefile.inc
+++ b/lib/libpthread/sys/Makefile.inc
@@ -1,9 +1,9 @@
-# $Id: Makefile.inc,v 1.1 1998/08/27 09:00:48 d Exp $
-# $OpenBSD: Makefile.inc,v 1.1 1998/08/27 09:00:48 d Exp $
+# $Id: Makefile.inc,v 1.2 1998/11/20 11:15:38 d Exp $
+# $OpenBSD: Makefile.inc,v 1.2 1998/11/20 11:15:38 d Exp $
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}
-SRCS+= uthread_error.c _atomic_lock.S _sys_aliases.S
+SRCS+= uthread_error.c _atomic_lock.c _sys_aliases.S slow_atomic_lock.c
_sys_aliases.S: ${.CURDIR}/Makefile ${LIBCSRCDIR}/sys/Makefile.inc
(echo '#include "SYS.h"'; \
diff --git a/lib/libpthread/sys/slow_atomic_lock.c b/lib/libpthread/sys/slow_atomic_lock.c
new file mode 100644
index 00000000000..96be0897dc9
--- /dev/null
+++ b/lib/libpthread/sys/slow_atomic_lock.c
@@ -0,0 +1,33 @@
+/* $OpenBSD: slow_atomic_lock.c,v 1.1 1998/11/20 11:15:38 d Exp $ */
+
+#include "pthread_private.h"
+#include "spinlock.h"
+#include <signal.h>
+
+/*
+ * uthread atomic lock:
+ * attempt to acquire a lock (by giving it a non-zero value);
+ * return zero on success, or the lock's previous value on failure.
+ * This uses signal masking to make sure that no other thread
+ * can modify the lock while processing, hence it is very slow.
+ */
+register_t
+_thread_slow_atomic_lock(volatile register_t *lock)
+{
+ register_t old;
+ sigset_t oldset, newset = (sigset_t)~0;
+
+ /* block signals - incurs a context switch */
+ if (_thread_sys_sigprocmask(SIG_SETMASK, &newset, &oldset) < 0)
+ PANIC("_atomic_lock block");
+
+ old = *lock;
+ if (old == 0)
+ *lock = 1;
+
+ /* restore signal mask to what it was */
+ if (_thread_sys_sigprocmask(SIG_SETMASK, &oldset, NULL) < 0)
+ PANIC("_atomic_lock restore");
+
+ return old;
+}
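
To show how such a primitive is meant to be used, here is a hypothetical
spin loop built on top of _thread_slow_atomic_lock; my_spin_lock() and
my_yield() are made-up names for this sketch and are not part of the
commit:

    #include <sys/types.h>          /* register_t, as in the file above */

    extern register_t _thread_slow_atomic_lock(volatile register_t *);
    extern void my_yield(void);     /* hypothetical "let another thread run" hook */

    static void
    my_spin_lock(volatile register_t *lock)
    {
            /* a non-zero return means someone else already held the lock */
            while (_thread_slow_atomic_lock(lock) != 0)
                    my_yield();     /* back off, then retry the swap */
    }

Either way the lock primitive itself only performs the single
test-and-set step; how to wait when it fails is left to the caller.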