author     guenther <guenther@openbsd.org>  2017-08-15 06:13:24 +0000
committer  guenther <guenther@openbsd.org>  2017-08-15 06:13:24 +0000
commit     7e321ac128fdcd388c62dfa54aca790ebbd73ce1 (patch)
tree       dcaaa56a773388005748dd5a23dadbd6c1338a21 /lib/libc/arch
parent     After we stopped processing router advertisements in the kernel (diff)
Copy files from ../librthread in preparation for moving functionality
from libpthread to libc. No changes to the build yet, just making it easier to review the substantive diffs. ok beck@ kettenis@ tedu@
Diffstat (limited to 'lib/libc/arch')
-rw-r--r--  lib/libc/arch/alpha/gen/_atomic_lock.S     19
-rw-r--r--  lib/libc/arch/amd64/gen/_atomic_lock.c     26
-rw-r--r--  lib/libc/arch/arm/gen/_atomic_lock.c       49
-rw-r--r--  lib/libc/arch/hppa/gen/_atomic_lock.c      41
-rw-r--r--  lib/libc/arch/i386/gen/_atomic_lock.c      25
-rw-r--r--  lib/libc/arch/m88k/gen/_atomic_lock.c      44
-rw-r--r--  lib/libc/arch/mips64/gen/_atomic_lock.c    27
-rw-r--r--  lib/libc/arch/powerpc/gen/_atomic_lock.c   53
-rw-r--r--  lib/libc/arch/sh/gen/_atomic_lock.c        46
-rw-r--r--  lib/libc/arch/sparc64/gen/_atomic_lock.c   41
10 files changed, 371 insertions, 0 deletions
diff --git a/lib/libc/arch/alpha/gen/_atomic_lock.S b/lib/libc/arch/alpha/gen/_atomic_lock.S
new file mode 100644
index 00000000000..98666faeb85
--- /dev/null
+++ b/lib/libc/arch/alpha/gen/_atomic_lock.S
@@ -0,0 +1,19 @@
+/* $OpenBSD: _atomic_lock.S,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+#include <machine/asm.h>
+
+LEAF(_atomic_lock,1)
+ LDGP(pv)
+
+ /* NOTE: using ldl_l/stl_c instead of
+ ldq_l and stq_c as machine/spinlock.h
+ defines _atomic_lock_t as int */
+0: ldl_l v0, 0(a0) /* read existing lock value */
+ mov 1, t0 /* locked value to store */
+ stl_c t0, 0(a0) /* attempt to store, status in t0 */
+ beq t0, 1f /* branch forward to optimise prediction */
+ mb /* sync with other processors */
+ RET /* return with v0==0 if lock obtained */
+1: br 0b /* loop to try again */
+END(_atomic_lock)
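Each of the files in this commit implements the same contract: _atomic_lock() attempts one test-and-set and returns zero only if the lock was obtained. A minimal sketch of how a caller would spin on it (the _spinlock name and the sched_yield() backoff are illustrative assumptions, not part of this commit):

#include <machine/spinlock.h>
#include <sched.h>

int	_atomic_lock(volatile _atomic_lock_t *);

static void
_spinlock(volatile _atomic_lock_t *lock)
{
	/* retry until the test-and-set reports the lock was free */
	while (_atomic_lock(lock))
		sched_yield();		/* back off; let the holder run */
}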
diff --git a/lib/libc/arch/amd64/gen/_atomic_lock.c b/lib/libc/arch/amd64/gen/_atomic_lock.c
new file mode 100644
index 00000000000..299c470b6cf
--- /dev/null
+++ b/lib/libc/arch/amd64/gen/_atomic_lock.c
@@ -0,0 +1,26 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+/*
+ * Atomic lock for amd64 -- taken from i386 code.
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ /*
+ * Use the eXCHanGe instruction to swap the lock value with
+ * a local variable containing the locked state.
+ */
+ old = _ATOMIC_LOCK_LOCKED;
+ __asm__("xchg %0,(%2)"
+ : "=r" (old)
+ : "0" (old), "r" (lock));
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
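The xchg above is an atomic exchange: it swaps the locked value into *lock and pulls the previous value out in one atomic operation. A sketch of the same routine using the GCC/Clang __atomic builtins instead of inline assembly (for comparison only; the function name is hypothetical and this is not how libc is built):

#include <machine/spinlock.h>

int
_atomic_lock_xchg_sketch(volatile _atomic_lock_t *lock)
{
	/* atomically store the locked value and fetch the old one */
	_atomic_lock_t old = __atomic_exchange_n(lock,
	    _ATOMIC_LOCK_LOCKED, __ATOMIC_ACQUIRE);

	return (old != _ATOMIC_LOCK_UNLOCKED);
}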
diff --git a/lib/libc/arch/arm/gen/_atomic_lock.c b/lib/libc/arch/arm/gen/_atomic_lock.c
new file mode 100644
index 00000000000..f93aae59ad3
--- /dev/null
+++ b/lib/libc/arch/arm/gen/_atomic_lock.c
@@ -0,0 +1,49 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/*
+ * Copyright (c) 2004 Dale Rahn. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Atomic lock for arm
+ */
+
+#include <sys/types.h>
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old = 0;
+ uint32_t scratch = 0;
+
+ __asm__("1: ldrex %0, [%1] \n"
+ " strex %2, %3, [%1] \n"
+ " cmp %2, #0 \n"
+ " bne 1b \n"
+ " dmb sy \n"
+ : "+r" (old), "+r" (lock), "+r" (scratch)
+ : "r" (_ATOMIC_LOCK_LOCKED));
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
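The ldrex/strex pair loops until the exclusive store succeeds, and the trailing dmb orders the lock acquisition before the critical section. C11's atomic_flag exposes the same operation; a sketch (the flag object and function name are assumptions for illustration):

#include <stdatomic.h>

static atomic_flag lock_flag = ATOMIC_FLAG_INIT;

static int
try_lock_c11(void)
{
	/* nonzero means the flag was already set, i.e. lock held;
	 * on ARM this typically compiles to an ldrex/strex loop */
	return (atomic_flag_test_and_set_explicit(&lock_flag,
	    memory_order_acquire));
}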
diff --git a/lib/libc/arch/hppa/gen/_atomic_lock.c b/lib/libc/arch/hppa/gen/_atomic_lock.c
new file mode 100644
index 00000000000..64f161d3052
--- /dev/null
+++ b/lib/libc/arch/hppa/gen/_atomic_lock.c
@@ -0,0 +1,41 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <machine/spinlock.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#include <stdlib.h>
+#endif
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ volatile _atomic_lock_t old;
+
+#ifdef DIAGNOSTIC
+ if ((unsigned long)lock & 0xf) {
+ printf("lock not 16 byte aligned\n");
+ abort();
+ }
+#endif
+
+ asm volatile ("ldcws 0(%2),%0"
+ : "=&r" (old), "+m" (lock)
+ : "r" (lock));
+
+ return (old == _ATOMIC_LOCK_LOCKED);
+}
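hppa inverts the usual sense: ldcws loads the word and clears it, so on this architecture the locked state is 0 and the lock word must be 16-byte aligned (hence the DIAGNOSTIC check). The semantics restated as plain, non-atomic C, purely for explanation (hypothetical helper, not part of the commit):

/* what ldcws does, minus the atomicity */
static unsigned int
ldcws_semantics(volatile unsigned int *p)
{
	unsigned int old = *p;	/* fetch the current word */

	*p = 0;			/* clear it: on hppa, 0 means locked */
	return (old);		/* old == 0 -> it was already held */
}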
diff --git a/lib/libc/arch/i386/gen/_atomic_lock.c b/lib/libc/arch/i386/gen/_atomic_lock.c
new file mode 100644
index 00000000000..1cb84505928
--- /dev/null
+++ b/lib/libc/arch/i386/gen/_atomic_lock.c
@@ -0,0 +1,25 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+/*
+ * Atomic lock for i386
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ /*
+ * Use the eXCHanGe instruction to swap the lock value with
+ * a local variable containing the locked state.
+ */
+ old = _ATOMIC_LOCK_LOCKED;
+ __asm__("xchg %0,(%2)"
+ : "=r" (old)
+ : "0" (old), "r" (lock));
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
diff --git a/lib/libc/arch/m88k/gen/_atomic_lock.c b/lib/libc/arch/m88k/gen/_atomic_lock.c
new file mode 100644
index 00000000000..ac058e10ce7
--- /dev/null
+++ b/lib/libc/arch/m88k/gen/_atomic_lock.c
@@ -0,0 +1,44 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/*
+ * Copyright (c) 2003, Miodrag Vallat.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Atomic lock for m88k
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ old = _ATOMIC_LOCK_LOCKED;
+ __asm__ volatile
+ ("xmem %0, %2, %%r0" : "=r" (old) : "0" (old), "r" (lock));
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
diff --git a/lib/libc/arch/mips64/gen/_atomic_lock.c b/lib/libc/arch/mips64/gen/_atomic_lock.c
new file mode 100644
index 00000000000..5ad4e1674b9
--- /dev/null
+++ b/lib/libc/arch/mips64/gen/_atomic_lock.c
@@ -0,0 +1,27 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/*
+ * Atomic lock for mips
+ * Written by Miodrag Vallat <miod@openbsd.org> - placed in the public domain.
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ __asm__ volatile (
+ ".set noreorder\n"
+ "1: ll %0, 0(%1)\n"
+ " sc %2, 0(%1)\n"
+ " beqz %2, 1b\n"
+ " addi %2, $0, %3\n"
+ ".set reorder\n"
+ : "=&r"(old)
+ : "r"(lock), "r"(_ATOMIC_LOCK_LOCKED), "i"(_ATOMIC_LOCK_LOCKED)
+ : "memory");
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
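Note the branch-delay-slot trick above: the addi after beqz executes before the branch takes effect, reloading the locked value that the failed sc consumed so the next attempt stores a fresh copy. The retry structure restated in C, with a compare-and-swap standing in for the reservation (a sketch; ll/sc is not literally CAS, but the loop shape is the same):

#include <machine/spinlock.h>

int
_atomic_lock_llsc_sketch(volatile _atomic_lock_t *lock)
{
	_atomic_lock_t old;

	do {
		old = *lock;		/* ll: load, take "reservation" */
	} while (!__atomic_compare_exchange_n(lock, &old,
	    _ATOMIC_LOCK_LOCKED,	/* sc: store if still intact */
	    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));

	return (old != _ATOMIC_LOCK_UNLOCKED);
}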
diff --git a/lib/libc/arch/powerpc/gen/_atomic_lock.c b/lib/libc/arch/powerpc/gen/_atomic_lock.c
new file mode 100644
index 00000000000..a90231c802c
--- /dev/null
+++ b/lib/libc/arch/powerpc/gen/_atomic_lock.c
@@ -0,0 +1,53 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 1998 Dale Rahn <drahn@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Atomic lock for powerpc
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ __asm__("1: lwarx 0,0,%1 \n"
+ " stwcx. %2,0,%1 \n"
+ " bne- 1b \n"
+ " mr %0, 0 \n"
+ : "=r" (old), "=r" (lock)
+ : "r" (_ATOMIC_LOCK_LOCKED), "1" (lock) : "0"
+ );
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+
+ /*
+ * Dale <drahn@openbsd.org> says:
+ * Side note. to prevent two processes from accessing
+ * the same address with the lwarx in one instruction
+ * and the stwcx in another process, the current powerpc
+ * kernel uses a stwcx instruction without the corresponding
+ * lwarx which causes any reservation of a process
+ * to be removed. if a context switch occurs
+ * between the two accesses the store will not occur
+ * and the condition code will cause it to loop. If on
+ * a dual processor machine, the reserve will cause
+ * appropriate bus cycle accesses to notify other
+ * processors.
+ */
+}
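The quoted note describes the kernel breaking reservations at context switch with a stwcx. that has no matching lwarx. A hypothetical fragment of what such a reservation-clearing store might look like (an illustration of the described trick, not code taken from any kernel):

static unsigned int scratch;

static inline void
clear_reservation(void)
{
	/* store-conditional with no prior lwarx: succeed or fail,
	 * any reservation held by this CPU is dropped */
	__asm volatile("stwcx. %0, 0, %1"
	    : /* no outputs */
	    : "r" (0), "r" (&scratch)
	    : "cr0", "memory");
}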
diff --git a/lib/libc/arch/sh/gen/_atomic_lock.c b/lib/libc/arch/sh/gen/_atomic_lock.c
new file mode 100644
index 00000000000..36b7c8c6d34
--- /dev/null
+++ b/lib/libc/arch/sh/gen/_atomic_lock.c
@@ -0,0 +1,46 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/*-
+ * Copyright (c) 2002 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Gregory McGarry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ __asm volatile(
+ " tas.b %0 \n"
+ " mov #0, %1 \n"
+ " rotcl %1 \n"
+ : "=m" (*lock), "=r" (old));
+
+ return (old == 0);
+}
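On sh, tas.b sets the T bit when the tested byte was zero and then sets the byte's most significant bit; the mov #0/rotcl pair rotates T into bit 0 of old, so old == 1 means the lock was free and is now ours. The same logic minus the atomicity, purely as explanation (hypothetical helper):

/* tas.b semantics in plain C (not atomic, illustration only) */
static int
tas_b_semantics(volatile unsigned char *p)
{
	int t = (*p == 0);	/* T bit: was the byte zero (free)? */

	*p |= 0x80;		/* set the MSB: mark the lock taken */
	return (t == 0);	/* 0 = lock obtained, as in the asm */
}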
diff --git a/lib/libc/arch/sparc64/gen/_atomic_lock.c b/lib/libc/arch/sparc64/gen/_atomic_lock.c
new file mode 100644
index 00000000000..88f0f354bcb
--- /dev/null
+++ b/lib/libc/arch/sparc64/gen/_atomic_lock.c
@@ -0,0 +1,41 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+/*
+ * Atomic lock for sparc64
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t * lock)
+{
+ _atomic_lock_t old;
+
+ /*
+ * " ldstub [address], reg_rd
+ *
+ * The atomic load-store instructions copy a byte from memory
+ * into r[rd], then rewrite the addressed byte in memory to all
+ * ones [_ATOMIC_LOCK_LOCKED]. The operation is performed
+ * atomically, that is, without allowing intervening interrupts
+ * or deferred traps. In a multiprocessor system, two or more
+ * processors executing atomic load-store unsigned byte [...]
+ * addressing the same byte [...] simultaneously are guaranteed
+ * to execute them in an undefined, but serial order."
+ * - p101, The SPARC Architecture Manual (version 8) Prentice-Hall
+ *
+ * "LDSTUB loads a byte value from memory to a register and writes
+ * the value FF_16 into the addressed byte atomically. LDSTUB
+ * is the classic test-and-set instruction. Like SWAP, it has
+ * a consensus number of two and so cannot resolve more than
+ * two contending processes in a wait-free fashion."
+ * - p129, The SPARC Architecture Manual (version 9) Prentice-Hall
+ * (See also section J.6 (spinlocks))
+ *
+ * (No change to the condition codes is documented.)
+ */
+ __asm__("ldstub [%1], %0" : "=&r" (old) : "r" (lock) : "memory");
+
+ return (old == _ATOMIC_LOCK_LOCKED);
+}
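The byte-wide test-and-set the manuals describe is what the GCC/Clang __atomic_test_and_set builtin exposes; a minimal sketch against a hypothetical byte lock (illustration only; libc uses the ldstub instruction directly):

static volatile unsigned char lockbyte;	/* hypothetical byte lock */

static int
try_lock_byte(void)
{
	/* atomically sets the byte to a nonzero value and returns
	 * nonzero if it was already set -- ldstub in portable form */
	return (__atomic_test_and_set((void *)&lockbyte,
	    __ATOMIC_ACQUIRE));
}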