path: root/lib/libc
author     guenther <guenther@openbsd.org>  2017-08-15 06:13:24 +0000
committer  guenther <guenther@openbsd.org>  2017-08-15 06:13:24 +0000
commit     7e321ac128fdcd388c62dfa54aca790ebbd73ce1 (patch)
tree       dcaaa56a773388005748dd5a23dadbd6c1338a21 /lib/libc
parent     After we stopped processing router advertisements in the kernel (diff)
download   wireguard-openbsd-7e321ac128fdcd388c62dfa54aca790ebbd73ce1.tar.xz
           wireguard-openbsd-7e321ac128fdcd388c62dfa54aca790ebbd73ce1.zip
Copy files from ../librthread in preparation for moving functionality
from libpthread to libc. No changes to the build yet, just making it easier to review the substantive diffs. ok beck@ kettenis@ tedu@
Diffstat (limited to 'lib/libc')
-rw-r--r--  lib/libc/arch/alpha/gen/_atomic_lock.S       19
-rw-r--r--  lib/libc/arch/amd64/gen/_atomic_lock.c       26
-rw-r--r--  lib/libc/arch/arm/gen/_atomic_lock.c         49
-rw-r--r--  lib/libc/arch/hppa/gen/_atomic_lock.c        41
-rw-r--r--  lib/libc/arch/i386/gen/_atomic_lock.c        25
-rw-r--r--  lib/libc/arch/m88k/gen/_atomic_lock.c        44
-rw-r--r--  lib/libc/arch/mips64/gen/_atomic_lock.c      27
-rw-r--r--  lib/libc/arch/powerpc/gen/_atomic_lock.c     53
-rw-r--r--  lib/libc/arch/sh/gen/_atomic_lock.c          46
-rw-r--r--  lib/libc/arch/sparc64/gen/_atomic_lock.c     41
-rw-r--r--  lib/libc/thread/rthread.c                   692
-rw-r--r--  lib/libc/thread/rthread.h                   236
-rw-r--r--  lib/libc/thread/rthread_cb.h                 41
-rw-r--r--  lib/libc/thread/rthread_cond.c              216
-rw-r--r--  lib/libc/thread/rthread_condattr.c           71
-rw-r--r--  lib/libc/thread/rthread_debug.c              76
-rw-r--r--  lib/libc/thread/rthread_file.c              303
-rw-r--r--  lib/libc/thread/rthread_libc.c              262
-rw-r--r--  lib/libc/thread/rthread_mutex.c             288
-rw-r--r--  lib/libc/thread/rthread_once.c               32
-rw-r--r--  lib/libc/thread/rthread_sync.c              693
-rw-r--r--  lib/libc/thread/rthread_tls.c               185
-rw-r--r--  lib/libc/thread/synch.h                      63
23 files changed, 3529 insertions, 0 deletions
diff --git a/lib/libc/arch/alpha/gen/_atomic_lock.S b/lib/libc/arch/alpha/gen/_atomic_lock.S
new file mode 100644
index 00000000000..98666faeb85
--- /dev/null
+++ b/lib/libc/arch/alpha/gen/_atomic_lock.S
@@ -0,0 +1,19 @@
+/* $OpenBSD: _atomic_lock.S,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+#include <machine/asm.h>
+
+LEAF(_atomic_lock,1)
+ LDGP(pv)
+
+ /* NOTE: using ldl_l/stl_c instead of
+ ldq_l and stq_c as machine/spinlock.h
+ defines _atomic_lock_t as int */
+0: ldl_l v0, 0(a0) /* read existing lock value */
+ mov 1, t0 /* locked value to store */
+ stl_c t0, 0(a0) /* attempt to store, status in t0 */
+ beq t0, 1f /* branch forward to optimise prediction */
+ mb /* sync with other processors */
+ RET /* return with v0==0 if lock obtained */
+1: br 0b /* loop to try again */
+END(_atomic_lock)
diff --git a/lib/libc/arch/amd64/gen/_atomic_lock.c b/lib/libc/arch/amd64/gen/_atomic_lock.c
new file mode 100644
index 00000000000..299c470b6cf
--- /dev/null
+++ b/lib/libc/arch/amd64/gen/_atomic_lock.c
@@ -0,0 +1,26 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+/*
+ * Atomic lock for amd64 -- taken from i386 code.
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ /*
+ * Use the eXCHanGe instruction to swap the lock value with
+ * a local variable containing the locked state.
+ */
+ old = _ATOMIC_LOCK_LOCKED;
+ __asm__("xchg %0,(%2)"
+ : "=r" (old)
+ : "0" (old), "r" (lock));
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
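
The alpha and amd64 implementations above, like every other arch file in this commit, share one contract: _atomic_lock() atomically stores the "locked" value into *lock and returns nonzero exactly when the lock was already held. As a rough portable illustration of that contract (a minimal C11 sketch, not part of this commit; the type and constant names are invented stand-ins for _atomic_lock_t and its macros):

	#include <stdatomic.h>

	typedef atomic_int portable_lock_t;		/* stand-in for _atomic_lock_t */
	#define PORTABLE_LOCK_UNLOCKED	0
	#define PORTABLE_LOCK_LOCKED	1

	/* Returns nonzero iff the lock was already held, like _atomic_lock(). */
	static int
	portable_atomic_lock(volatile portable_lock_t *lock)
	{
		return (atomic_exchange(lock, PORTABLE_LOCK_LOCKED) !=
		    PORTABLE_LOCK_UNLOCKED);
	}
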
diff --git a/lib/libc/arch/arm/gen/_atomic_lock.c b/lib/libc/arch/arm/gen/_atomic_lock.c
new file mode 100644
index 00000000000..f93aae59ad3
--- /dev/null
+++ b/lib/libc/arch/arm/gen/_atomic_lock.c
@@ -0,0 +1,49 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/*
+ * Copyright (c) 2004 Dale Rahn. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Atomic lock for arm
+ */
+
+#include <sys/types.h>
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old = 0;
+ uint32_t scratch = 0;
+
+ __asm__("1: ldrex %0, [%1] \n"
+ " strex %2, %3, [%1] \n"
+ " cmp %2, #0 \n"
+ " bne 1b \n"
+ " dmb sy \n"
+ : "+r" (old), "+r" (lock), "+r" (scratch)
+ : "r" (_ATOMIC_LOCK_LOCKED));
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
diff --git a/lib/libc/arch/hppa/gen/_atomic_lock.c b/lib/libc/arch/hppa/gen/_atomic_lock.c
new file mode 100644
index 00000000000..64f161d3052
--- /dev/null
+++ b/lib/libc/arch/hppa/gen/_atomic_lock.c
@@ -0,0 +1,41 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2005 Marco Peereboom <marco@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <machine/spinlock.h>
+#ifdef DIAGNOSTIC
+#include <stdio.h>
+#include <stdlib.h>
+#endif
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ volatile _atomic_lock_t old;
+
+#ifdef DIAGNOSTIC
+ if ((unsigned long)lock & 0xf) {
+ printf("lock not 16 byte aligned\n");
+ abort();
+ }
+#endif
+
+ asm volatile ("ldcws 0(%2),%0"
+ : "=&r" (old), "+m" (lock)
+ : "r" (lock));
+
+ return (old == _ATOMIC_LOCK_LOCKED);
+}
diff --git a/lib/libc/arch/i386/gen/_atomic_lock.c b/lib/libc/arch/i386/gen/_atomic_lock.c
new file mode 100644
index 00000000000..1cb84505928
--- /dev/null
+++ b/lib/libc/arch/i386/gen/_atomic_lock.c
@@ -0,0 +1,25 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+/*
+ * Atomic lock for i386
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ /*
+ * Use the eXCHanGe instruction to swap the lock value with
+ * a local variable containing the locked state.
+ */
+ old = _ATOMIC_LOCK_LOCKED;
+ __asm__("xchg %0,(%2)"
+ : "=r" (old)
+ : "0" (old), "r" (lock));
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
diff --git a/lib/libc/arch/m88k/gen/_atomic_lock.c b/lib/libc/arch/m88k/gen/_atomic_lock.c
new file mode 100644
index 00000000000..ac058e10ce7
--- /dev/null
+++ b/lib/libc/arch/m88k/gen/_atomic_lock.c
@@ -0,0 +1,44 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/*
+ * Copyright (c) 2003, Miodrag Vallat.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Atomic lock for m88k
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ old = _ATOMIC_LOCK_LOCKED;
+ __asm__ volatile
+ ("xmem %0, %2, %%r0" : "=r" (old) : "0" (old), "r" (lock));
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
diff --git a/lib/libc/arch/mips64/gen/_atomic_lock.c b/lib/libc/arch/mips64/gen/_atomic_lock.c
new file mode 100644
index 00000000000..5ad4e1674b9
--- /dev/null
+++ b/lib/libc/arch/mips64/gen/_atomic_lock.c
@@ -0,0 +1,27 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/*
+ * Atomic lock for mips
+ * Written by Miodrag Vallat <miod@openbsd.org> - placed in the public domain.
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ __asm__ volatile (
+ ".set noreorder\n"
+ "1: ll %0, 0(%1)\n"
+ " sc %2, 0(%1)\n"
+ " beqz %2, 1b\n"
+ " addi %2, $0, %3\n"
+ ".set reorder\n"
+ : "=&r"(old)
+ : "r"(lock), "r"(_ATOMIC_LOCK_LOCKED), "i"(_ATOMIC_LOCK_LOCKED)
+ : "memory");
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+}
diff --git a/lib/libc/arch/powerpc/gen/_atomic_lock.c b/lib/libc/arch/powerpc/gen/_atomic_lock.c
new file mode 100644
index 00000000000..a90231c802c
--- /dev/null
+++ b/lib/libc/arch/powerpc/gen/_atomic_lock.c
@@ -0,0 +1,53 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 1998 Dale Rahn <drahn@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Atomic lock for powerpc
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ __asm__("1: lwarx 0,0,%1 \n"
+ " stwcx. %2,0,%1 \n"
+ " bne- 1b \n"
+ " mr %0, 0 \n"
+ : "=r" (old), "=r" (lock)
+ : "r" (_ATOMIC_LOCK_LOCKED), "1" (lock) : "0"
+ );
+
+ return (old != _ATOMIC_LOCK_UNLOCKED);
+
+ /*
+ * Dale <drahn@openbsd.org> says:
+ * Side note. to prevent two processes from accessing
+ * the same address with the lwarx in one instruction
+ * and the stwcx in another process, the current powerpc
+ * kernel uses a stwcx instruction without the corresponding
+ * lwarx which causes any reservation of a process
+ * to be removed. if a context switch occurs
+ * between the two accesses the store will not occur
+ * and the condition code will cause it to loop. If on
+ * a dual processor machine, the reserve will cause
+ * appropriate bus cycle accesses to notify other
+ * processors.
+ */
+}
diff --git a/lib/libc/arch/sh/gen/_atomic_lock.c b/lib/libc/arch/sh/gen/_atomic_lock.c
new file mode 100644
index 00000000000..36b7c8c6d34
--- /dev/null
+++ b/lib/libc/arch/sh/gen/_atomic_lock.c
@@ -0,0 +1,46 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/*-
+ * Copyright (c) 2002 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Gregory McGarry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t *lock)
+{
+ _atomic_lock_t old;
+
+ __asm volatile(
+ " tas.b %0 \n"
+ " mov #0, %1 \n"
+ " rotcl %1 \n"
+ : "=m" (*lock), "=r" (old));
+
+ return (old == 0);
+}
diff --git a/lib/libc/arch/sparc64/gen/_atomic_lock.c b/lib/libc/arch/sparc64/gen/_atomic_lock.c
new file mode 100644
index 00000000000..88f0f354bcb
--- /dev/null
+++ b/lib/libc/arch/sparc64/gen/_atomic_lock.c
@@ -0,0 +1,41 @@
+/* $OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/* David Leonard, <d@csee.uq.edu.au>. Public domain. */
+
+/*
+ * Atomic lock for sparc64
+ */
+
+#include <machine/spinlock.h>
+
+int
+_atomic_lock(volatile _atomic_lock_t * lock)
+{
+ _atomic_lock_t old;
+
+ /*
+ * " ldstub [address], reg_rd
+ *
+ * The atomic load-store instructions copy a byte from memory
+ * into r[rd], then rewrite the addressed byte in memory to all
+ * ones [_ATOMIC_LOCK_LOCKED]. The operation is performed
+ * atomically, that is, without allowing intervening interrupts
+ * or deferred traps. In a multiprocessor system, two or more
+ * processors executing atomic load-store unsigned byte [...]
+ * addressing the same byte [...] simultaneously are guaranteed
+ * to execute them in an undefined, but serial order."
+ * - p101, The SPARC Architecture Manual (version 8) Prentice-Hall
+ *
+ * "LDSTUB loads a byte value from memory to a register and writes
+ * the value FF_16 into the addressed byte atomically. LDSTUB
+ * is the classic test-and-set instruction. Like SWAP, it has
+ * a consensus number of two and so cannot resolve more than
+ * two contending processes in a wait-free fashion."
+ * - p129, The SPARC Architecture Manual (version 9) Prentice-Hall
+ * (See also section J.6 (spinlocks))
+ *
+ * (No changes to the condition codes are documented.)
+ */
+ __asm__("ldstub [%1], %0" : "=&r" (old) : "r" (lock) : "memory");
+
+ return (old == _ATOMIC_LOCK_LOCKED);
+}
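
Each _atomic_lock() above is only the test-and-set primitive; the yielding spinlock built on top of it appears in rthread.c below (_spinlock/_spinunlock). A minimal sketch of how that internal interface is consumed, assuming code inside libc where the hidden declarations from rthread.h are visible:

	#include <machine/spinlock.h>

	void _spinlock(volatile _atomic_lock_t *);	/* hidden decls from rthread.h */
	void _spinunlock(volatile _atomic_lock_t *);

	static _atomic_lock_t counter_lock = _ATOMIC_LOCK_UNLOCKED;
	static int counter;

	static void
	bump_counter(void)
	{
		_spinlock(&counter_lock);	/* spins, yielding, until acquired */
		counter++;
		_spinunlock(&counter_lock);	/* plain store of the unlocked value */
	}
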
diff --git a/lib/libc/thread/rthread.c b/lib/libc/thread/rthread.c
new file mode 100644
index 00000000000..5daa0e65f07
--- /dev/null
+++ b/lib/libc/thread/rthread.c
@@ -0,0 +1,692 @@
+/* $OpenBSD: rthread.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * The heart of rthreads. Basic functions like creating and joining
+ * threads.
+ */
+
+#include <sys/types.h>
+#ifndef NO_PIC
+#include <sys/exec_elf.h>
+#pragma weak _DYNAMIC
+#endif
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <dlfcn.h>
+#include <tib.h>
+
+#include <pthread.h>
+
+#include "cancel.h" /* in libc/include */
+#include "thread_private.h"
+#include "rthread.h"
+#include "rthread_cb.h"
+
+/*
+ * Call nonstandard functions via names in the reserved namespace:
+ * dlctl() -> _dlctl()
+ * getthrid -> _thread_sys_getthrid
+ */
+typeof(dlctl) dlctl asm("_dlctl") __attribute__((weak));
+REDIRECT_SYSCALL(getthrid);
+
+/* weak stub to be overridden by ld.so */
+int dlctl(void *handle, int cmd, void *data) { return 0; }
+
+/*
+ * libc's signal wrappers hide SIGTHR; we need to call the real syscall
+ * stubs _thread_sys_* directly.
+ */
+REDIRECT_SYSCALL(sigaction);
+REDIRECT_SYSCALL(sigprocmask);
+REDIRECT_SYSCALL(thrkill);
+
+static int concurrency_level; /* not used */
+
+int _threads_ready;
+int _post_threaded;
+size_t _thread_pagesize;
+struct listhead _thread_list = LIST_HEAD_INITIALIZER(_thread_list);
+_atomic_lock_t _thread_lock = _SPINLOCK_UNLOCKED;
+static struct pthread_queue _thread_gc_list
+ = TAILQ_HEAD_INITIALIZER(_thread_gc_list);
+static _atomic_lock_t _thread_gc_lock = _SPINLOCK_UNLOCKED;
+static struct pthread _initial_thread;
+
+struct pthread_attr _rthread_attr_default = {
+ .stack_addr = NULL,
+ .stack_size = RTHREAD_STACK_SIZE_DEF,
+/* .guard_size set in _rthread_init */
+ .detach_state = PTHREAD_CREATE_JOINABLE,
+ .contention_scope = PTHREAD_SCOPE_SYSTEM,
+ .sched_policy = SCHED_OTHER,
+ .sched_param = { .sched_priority = 0 },
+ .sched_inherit = PTHREAD_INHERIT_SCHED,
+};
+
+/*
+ * internal support functions
+ */
+void
+_spinlock(volatile _atomic_lock_t *lock)
+{
+ while (_atomic_lock(lock))
+ sched_yield();
+}
+
+int
+_spinlocktry(volatile _atomic_lock_t *lock)
+{
+ return 0 == _atomic_lock(lock);
+}
+
+void
+_spinunlock(volatile _atomic_lock_t *lock)
+{
+ *lock = _ATOMIC_LOCK_UNLOCKED;
+}
+
+static void
+_rthread_start(void *v)
+{
+ pthread_t thread = v;
+ void *retval;
+
+ retval = thread->fn(thread->arg);
+ pthread_exit(retval);
+}
+
+static void
+sigthr_handler(__unused int sig)
+{
+ struct tib *tib = TIB_GET();
+ pthread_t self = tib->tib_thread;
+
+ /*
+ * Do nothing unless
+ * 1) pthread_cancel() has been called on this thread,
+ * 2) cancelation is enabled for it, and
+ * 3) we're not already in cancelation processing
+ */
+ if (!tib->tib_canceled || tib->tib_cantcancel)
+ return;
+
+ /*
+ * If delaying cancels inside complex ops (pthread_cond_wait,
+ * pthread_join, etc), just mark that this has happened to
+ * prevent a race with going to sleep
+ */
+ if (tib->tib_cancel_point & CANCEL_POINT_DELAYED) {
+ self->delayed_cancel = 1;
+ return;
+ }
+
+ /*
+ * otherwise, if in a cancel point or async cancels are
+ * enabled, then exit
+ */
+ if (tib->tib_cancel_point ||
+ (tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL))
+ pthread_exit(PTHREAD_CANCELED);
+}
+
+
+/*
+ * A few basic callbacks for libc. The first couple are only used
+ * on archs where there isn't a fast TCB_GET()
+ */
+#ifndef TCB_HAVE_MD_GET
+static int *
+multi_threaded_errnoptr(void)
+{
+ return (&TIB_GET()->tib_errno);
+}
+
+static void *
+multi_threaded_tcb(void)
+{
+ return (TCB_GET());
+}
+#endif /* TCB_HAVE_MD_GET */
+
+void
+_thread_canceled(void)
+{
+ pthread_exit(PTHREAD_CANCELED);
+}
+
+void
+_rthread_init(void)
+{
+ pthread_t thread = &_initial_thread;
+ struct tib *tib;
+ struct sigaction sa;
+
+ tib = TIB_GET();
+ tib->tib_thread = thread;
+ thread->tib = tib;
+
+ thread->donesem.lock = _SPINLOCK_UNLOCKED;
+ tib->tib_thread_flags = TIB_THREAD_INITIAL_STACK;
+ thread->flags_lock = _SPINLOCK_UNLOCKED;
+ strlcpy(thread->name, "Main process", sizeof(thread->name));
+ LIST_INSERT_HEAD(&_thread_list, thread, threads);
+ _rthread_debug_init();
+
+ _thread_pagesize = (size_t)sysconf(_SC_PAGESIZE);
+ _rthread_attr_default.guard_size = _thread_pagesize;
+ thread->attr = _rthread_attr_default;
+
+ /* get libc to start using our callbacks */
+ {
+ struct thread_callbacks cb = { 0 };
+
+#ifndef TCB_HAVE_MD_GET
+ cb.tc_errnoptr = multi_threaded_errnoptr;
+ cb.tc_tcb = multi_threaded_tcb;
+#endif
+ cb.tc_canceled = _thread_canceled;
+ cb.tc_flockfile = _thread_flockfile;
+ cb.tc_ftrylockfile = _thread_ftrylockfile;
+ cb.tc_funlockfile = _thread_funlockfile;
+ cb.tc_malloc_lock = _thread_malloc_lock;
+ cb.tc_malloc_unlock = _thread_malloc_unlock;
+ cb.tc_atexit_lock = _thread_atexit_lock;
+ cb.tc_atexit_unlock = _thread_atexit_unlock;
+ cb.tc_atfork_lock = _thread_atfork_lock;
+ cb.tc_atfork_unlock = _thread_atfork_unlock;
+ cb.tc_arc4_lock = _thread_arc4_lock;
+ cb.tc_arc4_unlock = _thread_arc4_unlock;
+ cb.tc_mutex_lock = _thread_mutex_lock;
+ cb.tc_mutex_unlock = _thread_mutex_unlock;
+ cb.tc_mutex_destroy = _thread_mutex_destroy;
+ cb.tc_tag_lock = _thread_tag_lock;
+ cb.tc_tag_unlock = _thread_tag_unlock;
+ cb.tc_tag_storage = _thread_tag_storage;
+ cb.tc_fork = _thread_fork;
+ cb.tc_vfork = _thread_vfork;
+ _thread_set_callbacks(&cb, sizeof(cb));
+ }
+
+#ifndef NO_PIC
+ if (_DYNAMIC) {
+ dlctl(NULL, DL_SETTHREADLCK, _rthread_dl_lock);
+ }
+#endif
+
+ /*
+ * Set the handler on the signal used for cancelation and
+ * suspension, and make sure it's unblocked
+ */
+ memset(&sa, 0, sizeof(sa));
+ sigemptyset(&sa.sa_mask);
+ sa.sa_handler = sigthr_handler;
+ sigaction(SIGTHR, &sa, NULL);
+ sigaddset(&sa.sa_mask, SIGTHR);
+ sigprocmask(SIG_UNBLOCK, &sa.sa_mask, NULL);
+
+ _threads_ready = 1;
+
+ _malloc_init(1);
+
+ _rthread_debug(1, "rthread init\n");
+}
+
+static void
+_rthread_free(pthread_t thread)
+{
+ _spinlock(&_thread_gc_lock);
+ TAILQ_INSERT_TAIL(&_thread_gc_list, thread, waiting);
+ _spinunlock(&_thread_gc_lock);
+}
+
+/*
+ * real pthread functions
+ */
+pthread_t
+pthread_self(void)
+{
+ if (!_threads_ready)
+ _rthread_init();
+
+ return (TIB_GET()->tib_thread);
+}
+DEF_STD(pthread_self);
+
+static void
+_rthread_reaper(void)
+{
+ pthread_t thread;
+
+restart:
+ _spinlock(&_thread_gc_lock);
+ TAILQ_FOREACH(thread, &_thread_gc_list, waiting) {
+ if (thread->tib->tib_tid != 0)
+ continue;
+ TAILQ_REMOVE(&_thread_gc_list, thread, waiting);
+ _spinunlock(&_thread_gc_lock);
+ if (thread != &_initial_thread) {
+ _rthread_debug(3, "rthread reaping %p stack %p\n",
+ (void *)thread, (void *)thread->stack);
+ _rthread_free_stack(thread->stack);
+ _dl_free_tib(thread->tib, sizeof(*thread));
+ } else {
+ /* initial thread isn't part of TIB allocation */
+ _rthread_debug(3, "rthread reaping %p (initial)\n",
+ (void *)thread);
+ _dl_free_tib(thread->tib, 0);
+ }
+ goto restart;
+ }
+ _spinunlock(&_thread_gc_lock);
+}
+
+void
+pthread_exit(void *retval)
+{
+ struct rthread_cleanup_fn *clfn;
+ struct tib *tib = TIB_GET();
+ pthread_t thread;
+
+ if (!_threads_ready)
+ _rthread_init();
+ thread = tib->tib_thread;
+
+ if (tib->tib_cantcancel & CANCEL_DYING) {
+ /*
+ * Called pthread_exit() from destructor or cancelation
+ * handler: blow up. XXX write something to stderr?
+ */
+ abort();
+ //_exit(42);
+ }
+
+ tib->tib_cantcancel |= CANCEL_DYING;
+
+ thread->retval = retval;
+
+ for (clfn = thread->cleanup_fns; clfn; ) {
+ struct rthread_cleanup_fn *oclfn = clfn;
+ clfn = clfn->next;
+ oclfn->fn(oclfn->arg);
+ free(oclfn);
+ }
+ _rthread_tls_destructors(thread);
+ _spinlock(&_thread_lock);
+ LIST_REMOVE(thread, threads);
+ _spinunlock(&_thread_lock);
+
+ _spinlock(&thread->flags_lock);
+ if (thread->flags & THREAD_DETACHED) {
+ _spinunlock(&thread->flags_lock);
+ _rthread_free(thread);
+ } else {
+ thread->flags |= THREAD_DONE;
+ _spinunlock(&thread->flags_lock);
+ _sem_post(&thread->donesem);
+ }
+
+ __threxit(&tib->tib_tid);
+ for(;;);
+}
+DEF_STD(pthread_exit);
+
+int
+pthread_join(pthread_t thread, void **retval)
+{
+ int e;
+ struct tib *tib = TIB_GET();
+ pthread_t self;
+ PREP_CANCEL_POINT(tib);
+
+ if (_post_threaded) {
+#define GREATSCOTT "great scott! serious repercussions on future events!\n"
+ write(2, GREATSCOTT, sizeof(GREATSCOTT) - 1);
+ abort();
+ }
+ if (!_threads_ready)
+ _rthread_init();
+ self = tib->tib_thread;
+
+ e = 0;
+ ENTER_DELAYED_CANCEL_POINT(tib, self);
+ if (thread == NULL)
+ e = EINVAL;
+ else if (thread == self)
+ e = EDEADLK;
+ else if (thread->flags & THREAD_DETACHED)
+ e = EINVAL;
+ else if ((e = _sem_wait(&thread->donesem, 0, NULL,
+ &self->delayed_cancel)) == 0) {
+ if (retval)
+ *retval = thread->retval;
+
+ /*
+ * We should be the last having a ref to this thread,
+	 * but someone stupid or evil might have detached it;
+ * in that case the thread will clean up itself
+ */
+ if ((thread->flags & THREAD_DETACHED) == 0)
+ _rthread_free(thread);
+ }
+
+ LEAVE_CANCEL_POINT_INNER(tib, e);
+ _rthread_reaper();
+ return (e);
+}
+
+int
+pthread_detach(pthread_t thread)
+{
+ int rc = 0;
+
+ _spinlock(&thread->flags_lock);
+ if (thread->flags & THREAD_DETACHED) {
+ rc = EINVAL;
+ _spinunlock(&thread->flags_lock);
+ } else if (thread->flags & THREAD_DONE) {
+ _spinunlock(&thread->flags_lock);
+ _rthread_free(thread);
+ } else {
+ thread->flags |= THREAD_DETACHED;
+ _spinunlock(&thread->flags_lock);
+ }
+ _rthread_reaper();
+ return (rc);
+}
+
+int
+pthread_create(pthread_t *threadp, const pthread_attr_t *attr,
+ void *(*start_routine)(void *), void *arg)
+{
+ extern int __isthreaded;
+ struct tib *tib;
+ pthread_t thread;
+ struct __tfork param;
+ int rc;
+
+ if (!_threads_ready)
+ _rthread_init();
+
+ _rthread_reaper();
+
+ tib = _dl_allocate_tib(sizeof(*thread));
+ if (tib == NULL)
+ return (ENOMEM);
+ thread = tib->tib_thread;
+ memset(thread, 0, sizeof(*thread));
+ thread->tib = tib;
+ thread->donesem.lock = _SPINLOCK_UNLOCKED;
+ thread->flags_lock = _SPINLOCK_UNLOCKED;
+ thread->fn = start_routine;
+ thread->arg = arg;
+ tib->tib_tid = -1;
+
+ thread->attr = attr != NULL ? *(*attr) : _rthread_attr_default;
+ if (thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
+ pthread_t self = pthread_self();
+
+ thread->attr.sched_policy = self->attr.sched_policy;
+ thread->attr.sched_param = self->attr.sched_param;
+ }
+ if (thread->attr.detach_state == PTHREAD_CREATE_DETACHED)
+ thread->flags |= THREAD_DETACHED;
+
+ thread->stack = _rthread_alloc_stack(thread);
+ if (!thread->stack) {
+ rc = errno;
+ goto fail1;
+ }
+
+ param.tf_tcb = TIB_TO_TCB(tib);
+ param.tf_tid = &tib->tib_tid;
+ param.tf_stack = thread->stack->sp;
+
+ _spinlock(&_thread_lock);
+ LIST_INSERT_HEAD(&_thread_list, thread, threads);
+ _spinunlock(&_thread_lock);
+
+ /* we're going to be multi-threaded real soon now */
+ __isthreaded = 1;
+ rc = __tfork_thread(&param, sizeof(param), _rthread_start, thread);
+ if (rc != -1) {
+ /* success */
+ *threadp = thread;
+ return (0);
+ }
+
+ rc = errno;
+
+ _spinlock(&_thread_lock);
+ LIST_REMOVE(thread, threads);
+ _spinunlock(&_thread_lock);
+ _rthread_free_stack(thread->stack);
+fail1:
+ _dl_free_tib(tib, sizeof(*thread));
+
+ return (rc);
+}
+
+int
+pthread_kill(pthread_t thread, int sig)
+{
+ struct tib *tib = thread->tib;
+
+ if (sig == SIGTHR)
+ return (EINVAL);
+ if (thrkill(tib->tib_tid, sig, TIB_TO_TCB(tib)))
+ return (errno);
+ return (0);
+}
+
+int
+pthread_equal(pthread_t t1, pthread_t t2)
+{
+ return (t1 == t2);
+}
+
+int
+pthread_cancel(pthread_t thread)
+{
+ struct tib *tib = thread->tib;
+ pid_t tid = tib->tib_tid;
+
+ if (tib->tib_canceled == 0 && tid != 0 &&
+ (tib->tib_cantcancel & CANCEL_DYING) == 0) {
+ tib->tib_canceled = 1;
+
+ if ((tib->tib_cantcancel & CANCEL_DISABLED) == 0) {
+ thrkill(tid, SIGTHR, TIB_TO_TCB(tib));
+ return (0);
+ }
+ }
+ return (0);
+}
+
+void
+pthread_testcancel(void)
+{
+ struct tib *tib = TIB_GET();
+
+ if (tib->tib_canceled && (tib->tib_cantcancel & CANCEL_DISABLED) == 0)
+ pthread_exit(PTHREAD_CANCELED);
+}
+
+int
+pthread_setcancelstate(int state, int *oldstatep)
+{
+ struct tib *tib = TIB_GET();
+ int oldstate;
+
+ oldstate = tib->tib_cantcancel & CANCEL_DISABLED ?
+ PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE;
+ if (state == PTHREAD_CANCEL_ENABLE) {
+ tib->tib_cantcancel &= ~CANCEL_DISABLED;
+ } else if (state == PTHREAD_CANCEL_DISABLE) {
+ tib->tib_cantcancel |= CANCEL_DISABLED;
+ } else {
+ return (EINVAL);
+ }
+ if (oldstatep)
+ *oldstatep = oldstate;
+
+ return (0);
+}
+DEF_STD(pthread_setcancelstate);
+
+int
+pthread_setcanceltype(int type, int *oldtypep)
+{
+ struct tib *tib = TIB_GET();
+ int oldtype;
+
+ oldtype = tib->tib_thread_flags & TIB_THREAD_ASYNC_CANCEL ?
+ PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED;
+ if (type == PTHREAD_CANCEL_DEFERRED) {
+ tib->tib_thread_flags &=~ TIB_THREAD_ASYNC_CANCEL;
+ } else if (type == PTHREAD_CANCEL_ASYNCHRONOUS) {
+ tib->tib_thread_flags |= TIB_THREAD_ASYNC_CANCEL;
+ } else {
+ return (EINVAL);
+ }
+ if (oldtypep)
+ *oldtypep = oldtype;
+
+ return (0);
+}
+
+void
+pthread_cleanup_push(void (*fn)(void *), void *arg)
+{
+ struct rthread_cleanup_fn *clfn;
+ pthread_t self = pthread_self();
+
+ clfn = calloc(1, sizeof(*clfn));
+ if (!clfn)
+ return;
+ clfn->fn = fn;
+ clfn->arg = arg;
+ clfn->next = self->cleanup_fns;
+ self->cleanup_fns = clfn;
+}
+
+void
+pthread_cleanup_pop(int execute)
+{
+ struct rthread_cleanup_fn *clfn;
+ pthread_t self = pthread_self();
+
+ clfn = self->cleanup_fns;
+ if (clfn) {
+ self->cleanup_fns = clfn->next;
+ if (execute)
+ clfn->fn(clfn->arg);
+ free(clfn);
+ }
+}
+
+int
+pthread_getconcurrency(void)
+{
+ return (concurrency_level);
+}
+
+int
+pthread_setconcurrency(int new_level)
+{
+ if (new_level < 0)
+ return (EINVAL);
+ concurrency_level = new_level;
+ return (0);
+}
+
+/*
+ * compat debug stuff
+ */
+void
+_thread_dump_info(void)
+{
+ pthread_t thread;
+
+ _spinlock(&_thread_lock);
+ LIST_FOREACH(thread, &_thread_list, threads)
+ printf("thread %d flags 0x%x name %s\n", thread->tib->tib_tid,
+ thread->tib->tib_thread_flags, thread->name);
+ _spinunlock(&_thread_lock);
+}
+
+#ifndef NO_PIC
+/*
+ * _rthread_dl_lock() provides the locking for dlopen(), dlclose(), and
+ * the function called via atexit() to invoke all destructors. The latter
+ * two call shared-object destructors, which may need to call dlclose(),
+ * so this lock needs to permit recursive locking.
+ * The specific code here was extracted from _rthread_mutex_lock() and
+ * pthread_mutex_unlock() and simplified to use the static variables.
+ */
+void
+_rthread_dl_lock(int what)
+{
+ static _atomic_lock_t lock = _SPINLOCK_UNLOCKED;
+ static pthread_t owner = NULL;
+ static struct pthread_queue lockers = TAILQ_HEAD_INITIALIZER(lockers);
+ static int count = 0;
+
+ if (what == 0) {
+ pthread_t self = pthread_self();
+
+ /* lock, possibly recursive */
+ _spinlock(&lock);
+ if (owner == NULL) {
+ owner = self;
+ } else if (owner != self) {
+ TAILQ_INSERT_TAIL(&lockers, self, waiting);
+ while (owner != self) {
+ __thrsleep(self, 0, NULL, &lock, NULL);
+ _spinlock(&lock);
+ }
+ }
+ count++;
+ _spinunlock(&lock);
+ } else if (what == 1) {
+ /* unlock, possibly recursive */
+ if (--count == 0) {
+ pthread_t next;
+
+ _spinlock(&lock);
+ owner = next = TAILQ_FIRST(&lockers);
+ if (next != NULL)
+ TAILQ_REMOVE(&lockers, next, waiting);
+ _spinunlock(&lock);
+ if (next != NULL)
+ __thrwakeup(next, 1);
+ }
+ } else {
+ /* reinit: used in child after fork to clear the queue */
+ lock = _SPINLOCK_UNLOCKED;
+ if (--count == 0)
+ owner = NULL;
+ TAILQ_INIT(&lockers);
+ }
+}
+#endif
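
Because pthread_cleanup_push() and pthread_cleanup_pop() are provided above as real functions (each handler heap-allocated and chained off the thread) rather than the usual macro pair, cancellation cleanup handlers work in the ordinary POSIX style. A small usage sketch, not part of this commit:

	#include <pthread.h>
	#include <unistd.h>

	static void
	unlock_on_exit(void *arg)
	{
		pthread_mutex_unlock(arg);	/* releases the mutex when popped or on cancellation */
	}

	static void *
	worker(void *arg)
	{
		pthread_mutex_t *m = arg;

		pthread_mutex_lock(m);
		pthread_cleanup_push(unlock_on_exit, m);
		sleep(10);			/* cancellation point while holding m */
		pthread_cleanup_pop(1);		/* pop and run handler: unlocks m */
		return (NULL);
	}
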
diff --git a/lib/libc/thread/rthread.h b/lib/libc/thread/rthread.h
new file mode 100644
index 00000000000..4bae5fd4e1d
--- /dev/null
+++ b/lib/libc/thread/rthread.h
@@ -0,0 +1,236 @@
+/* $OpenBSD: rthread.h,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Private data structures that back up the typedefs in pthread.h.
+ * Since only the thread library cares about their size or arrangement,
+ * it should be possible to switch libraries without relinking.
+ *
+ * Do not reorder _atomic_lock_t and sem_t variables in the structs.
+ * This is due to alignment requirements of certain arches like hppa.
+ * The current requirement is 16 bytes.
+ *
+ * THE MACHINE DEPENDENT CERROR CODE HAS HARD CODED OFFSETS INTO PTHREAD_T!
+ */
+
+#include <sys/queue.h>
+#include <semaphore.h>
+#include <machine/spinlock.h>
+
+#ifdef __LP64__
+#define RTHREAD_STACK_SIZE_DEF (512 * 1024)
+#else
+#define RTHREAD_STACK_SIZE_DEF (256 * 1024)
+#endif
+
+#define _SPINLOCK_UNLOCKED _ATOMIC_LOCK_UNLOCKED
+
+struct stack {
+ SLIST_ENTRY(stack) link; /* link for free default stacks */
+ void *sp; /* machine stack pointer */
+ void *base; /* bottom of allocated area */
+ size_t guardsize; /* size of PROT_NONE zone or */
+ /* ==1 if application alloced */
+ size_t len; /* total size of allocated stack */
+};
+
+struct __sem {
+ _atomic_lock_t lock;
+ volatile int waitcount;
+ volatile int value;
+ int shared;
+};
+
+TAILQ_HEAD(pthread_queue, pthread);
+
+#ifdef FUTEX
+
+struct pthread_mutex {
+ volatile unsigned int lock;
+ int type;
+ pthread_t owner;
+ int count;
+ int prioceiling;
+};
+
+struct pthread_cond {
+ volatile unsigned int seq;
+ clockid_t clock;
+ struct pthread_mutex *mutex;
+};
+
+#else
+
+struct pthread_mutex {
+ _atomic_lock_t lock;
+ struct pthread_queue lockers;
+ int type;
+ pthread_t owner;
+ int count;
+ int prioceiling;
+};
+
+struct pthread_cond {
+ _atomic_lock_t lock;
+ struct pthread_queue waiters;
+ struct pthread_mutex *mutex;
+ clockid_t clock;
+};
+#endif /* FUTEX */
+
+struct pthread_mutex_attr {
+ int ma_type;
+ int ma_protocol;
+ int ma_prioceiling;
+};
+
+struct pthread_cond_attr {
+ clockid_t ca_clock;
+};
+
+struct pthread_rwlock {
+ _atomic_lock_t lock;
+ pthread_t owner;
+ struct pthread_queue writers;
+ int readers;
+};
+
+struct pthread_rwlockattr {
+ int pshared;
+};
+
+struct pthread_attr {
+ void *stack_addr;
+ size_t stack_size;
+ size_t guard_size;
+ int detach_state;
+ int contention_scope;
+ int sched_policy;
+ struct sched_param sched_param;
+ int sched_inherit;
+};
+
+#define PTHREAD_MIN_PRIORITY 0
+#define PTHREAD_MAX_PRIORITY 31
+
+struct rthread_key {
+ int used;
+ void (*destructor)(void *);
+};
+
+struct rthread_storage {
+ int keyid;
+ struct rthread_storage *next;
+ void *data;
+};
+
+struct rthread_cleanup_fn {
+ void (*fn)(void *);
+ void *arg;
+ struct rthread_cleanup_fn *next;
+};
+
+struct pthread_barrier {
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ int threshold;
+ int in;
+ int out;
+ int generation;
+};
+
+struct pthread_barrierattr {
+ int pshared;
+};
+
+struct pthread_spinlock {
+ _atomic_lock_t lock;
+ pthread_t owner;
+};
+
+struct tib;
+struct pthread {
+ struct __sem donesem;
+ unsigned int flags;
+ _atomic_lock_t flags_lock;
+ struct tib *tib;
+ void *retval;
+ void *(*fn)(void *);
+ void *arg;
+ char name[32];
+ struct stack *stack;
+ LIST_ENTRY(pthread) threads;
+ TAILQ_ENTRY(pthread) waiting;
+ pthread_cond_t blocking_cond;
+ struct pthread_attr attr;
+ struct rthread_storage *local_storage;
+ struct rthread_cleanup_fn *cleanup_fns;
+ int myerrno;
+
+ /* cancel received in a delayed cancel block? */
+ int delayed_cancel;
+};
+/* flags in pthread->flags */
+#define THREAD_DONE 0x001
+#define THREAD_DETACHED 0x002
+
+/* flags in tib->tib_thread_flags */
+#define TIB_THREAD_ASYNC_CANCEL 0x001
+#define TIB_THREAD_INITIAL_STACK 0x002 /* has stack from exec */
+
+#define ENTER_DELAYED_CANCEL_POINT(tib, self) \
+ (self)->delayed_cancel = 0; \
+ ENTER_CANCEL_POINT_INNER(tib, 1, 1)
+
+#define ROUND_TO_PAGE(size) \
+ (((size) + (_thread_pagesize - 1)) & ~(_thread_pagesize - 1))
+
+__BEGIN_HIDDEN_DECLS
+void _spinlock(volatile _atomic_lock_t *);
+int _spinlocktry(volatile _atomic_lock_t *);
+void _spinunlock(volatile _atomic_lock_t *);
+int _sem_wait(sem_t, int, const struct timespec *, int *);
+int _sem_post(sem_t);
+
+void _rthread_init(void);
+struct stack *_rthread_alloc_stack(pthread_t);
+void _rthread_free_stack(struct stack *);
+void _rthread_tls_destructors(pthread_t);
+void _rthread_debug(int, const char *, ...)
+ __attribute__((__format__ (printf, 2, 3)));
+void _rthread_debug_init(void);
+#ifndef NO_PIC
+void _rthread_dl_lock(int what);
+#endif
+void _thread_malloc_reinit(void);
+
+extern int _threads_ready;
+extern size_t _thread_pagesize;
+extern LIST_HEAD(listhead, pthread) _thread_list;
+extern _atomic_lock_t _thread_lock;
+extern struct pthread_attr _rthread_attr_default;
+__END_HIDDEN_DECLS
+
+void _thread_dump_info(void);
+
+/* syscalls not declared in system headers */
+#define REDIRECT_SYSCALL(x) typeof(x) x asm("_thread_sys_"#x)
+void __threxit(pid_t *);
+int __thrsleep(const volatile void *, clockid_t, const struct timespec *,
+ volatile void *, const int *);
+int __thrwakeup(const volatile void *, int n);
+int __thrsigdivert(sigset_t, siginfo_t *, const struct timespec *);
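
The REDIRECT_SYSCALL() macro defined above is how this code reaches the raw _thread_sys_* stubs instead of libc's wrapped versions of the same calls. As a concrete illustration (GNU C, matching how rthread.c uses the macro), REDIRECT_SYSCALL(sigaction) expands after string pasting to roughly:

	#include <signal.h>

	/* Redeclare sigaction() with an asm label so that calls in this
	 * file bind to the _thread_sys_sigaction stub, bypassing libc's
	 * signal-hiding wrapper. */
	typeof(sigaction) sigaction asm("_thread_sys_sigaction");
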
diff --git a/lib/libc/thread/rthread_cb.h b/lib/libc/thread/rthread_cb.h
new file mode 100644
index 00000000000..4865e2ec4a2
--- /dev/null
+++ b/lib/libc/thread/rthread_cb.h
@@ -0,0 +1,41 @@
+/* $OpenBSD: rthread_cb.h,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2016 Philip Guenther <guenther@openbsd.org>
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdio.h>
+
+__BEGIN_HIDDEN_DECLS
+pid_t _thread_fork(void);
+pid_t _thread_vfork(void);
+void _thread_flockfile(FILE *);
+int _thread_ftrylockfile(FILE *);
+void _thread_funlockfile(FILE *);
+void _thread_malloc_lock(int);
+void _thread_malloc_unlock(int);
+void _thread_atexit_lock(void);
+void _thread_atexit_unlock(void);
+void _thread_atfork_lock(void);
+void _thread_atfork_unlock(void);
+void _thread_arc4_lock(void);
+void _thread_arc4_unlock(void);
+void _thread_mutex_lock(void **);
+void _thread_mutex_unlock(void **);
+void _thread_mutex_destroy(void **);
+void _thread_tag_lock(void **);
+void _thread_tag_unlock(void **);
+void *_thread_tag_storage(void **, void *, size_t, void *);
+__END_HIDDEN_DECLS
diff --git a/lib/libc/thread/rthread_cond.c b/lib/libc/thread/rthread_cond.c
new file mode 100644
index 00000000000..28c4be3fdc3
--- /dev/null
+++ b/lib/libc/thread/rthread_cond.c
@@ -0,0 +1,216 @@
+/* $OpenBSD: rthread_cond.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2017 Martin Pieuchot <mpi@openbsd.org>
+ * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <pthread.h>
+
+#include "rthread.h"
+#include "cancel.h"
+#include "synch.h"
+
+int
+pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attr)
+{
+ pthread_cond_t cond;
+
+ cond = calloc(1, sizeof(*cond));
+ if (cond == NULL)
+ return (ENOMEM);
+
+ if (attr == NULL)
+ cond->clock = CLOCK_REALTIME;
+ else
+ cond->clock = (*attr)->ca_clock;
+ *condp = cond;
+
+ return (0);
+}
+DEF_STD(pthread_cond_init);
+
+int
+pthread_cond_destroy(pthread_cond_t *condp)
+{
+ pthread_cond_t cond;
+
+ assert(condp != NULL);
+ cond = *condp;
+
+ if (cond != NULL) {
+ if (cond->mutex != NULL) {
+#define MSG "pthread_cond_destroy on condvar with waiters!\n"
+ write(2, MSG, sizeof(MSG) - 1);
+#undef MSG
+ return (EBUSY);
+ }
+ free(cond);
+ }
+ *condp = NULL;
+
+ return (0);
+}
+DEF_STD(pthread_cond_destroy);
+
+int
+_rthread_cond_timedwait(pthread_cond_t cond, pthread_mutex_t *mutexp,
+ const struct timespec *abs)
+{
+ struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
+ struct tib *tib = TIB_GET();
+ pthread_t self = tib->tib_thread;
+ int error, rv = 0, canceled = 0, mutex_count = 0;
+ clockid_t clock = cond->clock;
+ int seq = cond->seq;
+ PREP_CANCEL_POINT(tib);
+
+ _rthread_debug(5, "%p: cond_timed %p,%p (%p)\n", self,
+ (void *)cond, (void *)mutex, (void *)mutex->owner);
+
+ ENTER_DELAYED_CANCEL_POINT(tib, self);
+
+#if notyet
+ /* mark the condvar as being associated with this mutex */
+ if (cond->mutex == NULL)
+ atomic_cas_ptr(&cond->mutex, NULL, mutex);
+
+ if (cond->mutex != mutex) {
+ LEAVE_CANCEL_POINT_INNER(tib, 1);
+ return (EINVAL);
+ }
+#endif
+
+ /* snag the count in case this is a recursive mutex */
+ if (mutex->type == PTHREAD_MUTEX_RECURSIVE)
+ mutex_count = mutex->count;
+
+ pthread_mutex_unlock(mutexp);
+
+ do {
+ /* If ``seq'' wraps you deserve to lose a signal. */
+ error = _twait(&cond->seq, seq, clock, abs);
+ /*
+ * If we took a normal signal (not from cancellation) then
+ * we should just go back to sleep without changing state
+ * (timeouts, etc).
+ */
+ } while ((error == EINTR) &&
+ (tib->tib_canceled == 0 || (tib->tib_cantcancel & CANCEL_DISABLED)));
+
+ /* if timeout or canceled, make note of that */
+ if (error == ETIMEDOUT)
+ rv = ETIMEDOUT;
+ else if (error == EINTR)
+ canceled = 1;
+
+ pthread_mutex_lock(mutexp);
+
+ /* restore the mutex's count */
+ if (mutex->type == PTHREAD_MUTEX_RECURSIVE)
+ mutex->count = mutex_count;
+
+ LEAVE_CANCEL_POINT_INNER(tib, canceled);
+
+ return rv;
+}
+
+int
+pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
+ const struct timespec *abs)
+{
+ pthread_cond_t cond;
+ int error;
+
+ if (*condp == NULL) {
+ if ((error = pthread_cond_init(condp, NULL)))
+ return (error);
+ }
+
+ cond = *condp;
+ if (abs == NULL || abs->tv_sec < 0 || abs->tv_nsec < 0 ||
+ abs->tv_nsec >= 1000000000)
+ return (EINVAL);
+
+ return (_rthread_cond_timedwait(cond, mutexp, abs));
+}
+
+int
+pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
+{
+ pthread_cond_t cond;
+ int error;
+
+ if (*condp == NULL) {
+ if ((error = pthread_cond_init(condp, NULL)))
+ return (error);
+ }
+
+ cond = *condp;
+ return (_rthread_cond_timedwait(cond, mutexp, NULL));
+}
+DEF_STD(pthread_cond_wait);
+
+int
+pthread_cond_signal(pthread_cond_t *condp)
+{
+ pthread_cond_t cond;
+ int count;
+
+ if (*condp == NULL)
+ return (0);
+
+ cond = *condp;
+
+ atomic_inc_int(&cond->seq);
+ count = _wake(&cond->seq, 1);
+
+ _rthread_debug(5, "%p: cond_signal %p, %d awaken\n", pthread_self(),
+ (void *)cond, count);
+
+ return (0);
+}
+DEF_STD(pthread_cond_signal);
+
+int
+pthread_cond_broadcast(pthread_cond_t *condp)
+{
+ pthread_cond_t cond;
+ int count;
+
+ if (*condp == NULL)
+ return (0);
+
+ cond = *condp;
+
+ atomic_inc_int(&cond->seq);
+#if notyet
+ count = _requeue(&cond->seq, 1, INT_MAX, &cond->mutex->lock);
+#else
+ count = _wake(&cond->seq, INT_MAX);
+#endif
+
+ _rthread_debug(5, "%p: cond_broadcast %p, %d awaken\n", pthread_self(),
+ (void *)cond, count);
+
+ return (0);
+}
+DEF_STD(pthread_cond_broadcast);
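
The EINTR retry loop inside _rthread_cond_timedwait() only absorbs wakeups caused by ordinary signals; callers still need the usual predicate loop, because a broadcast or stolen wakeup can return control with the condition still false. A conventional usage sketch, not part of this commit:

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t ready_cond = PTHREAD_COND_INITIALIZER;
	static int ready;

	void
	wait_until_ready(void)
	{
		pthread_mutex_lock(&lock);
		while (!ready)			/* re-check: wakeups may be spurious */
			pthread_cond_wait(&ready_cond, &lock);
		pthread_mutex_unlock(&lock);
	}

	void
	mark_ready(void)
	{
		pthread_mutex_lock(&lock);
		ready = 1;
		pthread_mutex_unlock(&lock);
		pthread_cond_signal(&ready_cond);
	}
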
diff --git a/lib/libc/thread/rthread_condattr.c b/lib/libc/thread/rthread_condattr.c
new file mode 100644
index 00000000000..d856a3cf313
--- /dev/null
+++ b/lib/libc/thread/rthread_condattr.c
@@ -0,0 +1,71 @@
+/* $OpenBSD: rthread_condattr.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
+ * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Condition Variable Attributes
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <pthread.h>
+
+#include "rthread.h"
+
+int
+pthread_condattr_init(pthread_condattr_t *attrp)
+{
+ pthread_condattr_t attr;
+
+ attr = calloc(1, sizeof(*attr));
+ if (!attr)
+ return (errno);
+ attr->ca_clock = CLOCK_REALTIME;
+ *attrp = attr;
+
+ return (0);
+}
+
+int
+pthread_condattr_destroy(pthread_condattr_t *attrp)
+{
+ free(*attrp);
+ *attrp = NULL;
+
+ return (0);
+}
+
+int
+pthread_condattr_getclock(const pthread_condattr_t *attr, clockid_t *clock_id)
+{
+ *clock_id = (*attr)->ca_clock;
+ return (0);
+}
+
+int
+pthread_condattr_setclock(pthread_condattr_t *attr, clockid_t clock_id)
+{
+ if (clock_id != CLOCK_REALTIME && clock_id != CLOCK_MONOTONIC)
+ return (EINVAL);
+ (*attr)->ca_clock = clock_id;
+ return (0);
+}
+
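
The ca_clock value stored by these attribute functions is what _rthread_cond_timedwait() later hands to _twait(), so a condition variable can measure its timeout against CLOCK_MONOTONIC instead of the default CLOCK_REALTIME. A usage sketch, not part of this commit:

	#include <pthread.h>
	#include <time.h>

	/* Create a condvar whose pthread_cond_timedwait() deadlines are
	 * interpreted on the monotonic clock, immune to wall-clock steps. */
	int
	make_monotonic_cond(pthread_cond_t *cv)
	{
		pthread_condattr_t attr;
		int error;

		if ((error = pthread_condattr_init(&attr)) != 0)
			return (error);
		error = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
		if (error == 0)
			error = pthread_cond_init(cv, &attr);
		pthread_condattr_destroy(&attr);
		return (error);
	}
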
diff --git a/lib/libc/thread/rthread_debug.c b/lib/libc/thread/rthread_debug.c
new file mode 100644
index 00000000000..ce7d347f83e
--- /dev/null
+++ b/lib/libc/thread/rthread_debug.c
@@ -0,0 +1,76 @@
+/* $OpenBSD: rthread_debug.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
+
+#include <pthread.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "rthread.h"
+
+REDIRECT_SYSCALL(issetugid);
+
+int _rthread_debug_level;
+
+/*
+ * Note: messages truncated at 255 characters. Could use vasprintf,
+ * but don't want to use malloc here so the function can be used
+ * in signal handlers.
+ */
+#define MAX_MSG_LEN 256
+#define RTHREAD_ENV_DEBUG "RTHREAD_DEBUG"
+
+/*
+ * format and send output to stderr if the given "level" is less than or
+ * equal to the current debug level. Messages with a level <= 0 will
+ * always be printed.
+ */
+void
+_rthread_debug(int level, const char *fmt, ...)
+{
+ char msg[MAX_MSG_LEN];
+ char *p;
+ int cnt;
+ ssize_t c;
+
+ if (_rthread_debug_level >= level) {
+ va_list ap;
+ va_start(ap, fmt);
+ cnt = vsnprintf(msg, MAX_MSG_LEN, fmt, ap);
+ va_end(ap);
+ if (cnt > MAX_MSG_LEN - 1)
+ cnt = MAX_MSG_LEN - 1;
+ p = msg;
+ do {
+ c = write(STDERR_FILENO, p, cnt);
+ if (c == -1)
+ break;
+ if (c != cnt)
+ sched_yield();
+ p += c;
+ cnt -= c;
+ } while (cnt > 0);
+ }
+}
+
+/*
+ * set the debug level from an environment string. Bogus values are
+ * silently ignored.
+ */
+void
+_rthread_debug_init(void)
+{
+ char *envp;
+ char *rem;
+
+ if (issetugid())
+ return;
+ envp = getenv(RTHREAD_ENV_DEBUG);
+ if (envp) {
+ _rthread_debug_level = (int) strtol(envp, &rem, 0);
+ if (*rem || _rthread_debug_level < 0)
+ _rthread_debug_level = 0;
+ }
+}
diff --git a/lib/libc/thread/rthread_file.c b/lib/libc/thread/rthread_file.c
new file mode 100644
index 00000000000..48cd5bda5bd
--- /dev/null
+++ b/lib/libc/thread/rthread_file.c
@@ -0,0 +1,303 @@
+/* $OpenBSD: rthread_file.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by John Birrell.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: uthread_file.c,v 1.9 1999/08/28 00:03:32 peter Exp $
+ *
+ * POSIX stdio FILE locking functions. These assume that the locking
+ * is only required at FILE structure level, not at file descriptor
+ * level too.
+ *
+ */
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/queue.h>
+#include <pthread.h>
+#include "rthread.h"
+#include "rthread_cb.h"
+
+/*
+ * The FILE lock structure. The FILE *fp is locked if the owner is
+ * not NULL. If not locked, the file lock structure can be
+ * reassigned to a different file by setting fp.
+ */
+struct file_lock {
+	LIST_ENTRY(file_lock)	entry;	/* Entry in file list. */
+ FILE *fp; /* The target file. */
+ struct pthread_queue lockers;
+ pthread_t owner;
+ int count;
+};
+
+/*
+ * The number of file lock lists into which the file pointer is
+ * hashed. Ideally, the FILE structure size would have been increased,
+ * but this causes incompatibility, so separate data structures are
+ * required.
+ */
+#define NUM_HEADS 128
+
+/*
+ * This macro casts a file pointer to a long integer and right
+ * shifts this by the number of bytes in a pointer. The shifted
+ * value is then remaindered using the maximum number of hash
+ * entries to produce an index into the array of static lock
+ * structures. If there is a collision, a linear search of the
+ * dynamic list of locks linked to each static lock is performed.
+ */
+#define file_idx(_p) ((int)((((uintptr_t) _p) >> sizeof(void *)) % NUM_HEADS))
+
+/*
+ * Global array of file locks. The first lock for each hash bucket is
+ * allocated statically in the hope that there won't be too many
+ * collisions that require a malloc and an element added to the list.
+ */
+static struct static_file_lock {
+ LIST_HEAD(file_list_head, file_lock) head;
+ struct file_lock fl;
+} flh[NUM_HEADS];
+
+/* Lock for accesses to the hash table: */
+static _atomic_lock_t hash_lock = _SPINLOCK_UNLOCKED;
+
+/*
+ * Find a lock structure for a FILE, return NULL if the file is
+ * not locked:
+ */
+static
+struct file_lock *
+find_lock(int idx, FILE *fp)
+{
+ struct file_lock *p;
+
+ /* Check if the file is locked using the static structure: */
+ if (flh[idx].fl.fp == fp && flh[idx].fl.owner != NULL)
+ /* Return a pointer to the static lock: */
+ p = &flh[idx].fl;
+ else {
+ /* Point to the first dynamic lock: */
+ p = LIST_FIRST(&flh[idx].head);
+
+ /*
+ * Loop through the dynamic locks looking for the
+ * target file:
+ */
+ while (p != NULL && (p->fp != fp || p->owner == NULL))
+ /* Not this file, try the next: */
+ p = LIST_NEXT(p, entry);
+ }
+ return(p);
+}
+
+/*
+ * Lock a file, assuming that there is no lock structure currently
+ * assigned to it.
+ */
+static
+struct file_lock *
+do_lock(int idx, FILE *fp)
+{
+ struct file_lock *p;
+
+ /* Check if the static structure is not being used: */
+ if (flh[idx].fl.owner == NULL) {
+ /* Return a pointer to the static lock: */
+ p = &flh[idx].fl;
+ }
+ else {
+ /* Point to the first dynamic lock: */
+ p = LIST_FIRST(&flh[idx].head);
+
+ /*
+ * Loop through the dynamic locks looking for a
+ * lock structure that is not being used:
+ */
+ while (p != NULL && p->owner != NULL)
+ /* This one is used, try the next: */
+ p = LIST_NEXT(p, entry);
+ }
+
+ /*
+ * If an existing lock structure has not been found,
+ * allocate memory for a new one:
+ */
+ if (p == NULL && (p = (struct file_lock *)
+ malloc(sizeof(struct file_lock))) != NULL) {
+ /* Add the new element to the list: */
+ LIST_INSERT_HEAD(&flh[idx].head, p, entry);
+ }
+
+ /* Check if there is a lock structure to acquire: */
+ if (p != NULL) {
+ /* Acquire the lock for the running thread: */
+ p->fp = fp;
+ p->owner = pthread_self();
+ p->count = 1;
+ TAILQ_INIT(&p->lockers);
+ }
+ return(p);
+}
+
+void
+_thread_flockfile(FILE * fp)
+{
+ int idx = file_idx(fp);
+ struct file_lock *p;
+ pthread_t self = pthread_self();
+
+ /* Lock the hash table: */
+ _spinlock(&hash_lock);
+
+ /* Get a pointer to any existing lock for the file: */
+ if ((p = find_lock(idx, fp)) == NULL) {
+ /*
+ * The file is not locked, so this thread can
+ * grab the lock:
+ */
+ do_lock(idx, fp);
+
+ /*
+ * The file is already locked, so check if the
+ * running thread is the owner:
+ */
+ } else if (p->owner == self) {
+ /*
+ * The running thread is already the
+ * owner, so increment the count of
+ * the number of times it has locked
+ * the file:
+ */
+ p->count++;
+ } else {
+ /*
+ * The file is locked for another thread.
+ * Append this thread to the queue of
+ * threads waiting on the lock.
+ */
+ TAILQ_INSERT_TAIL(&p->lockers,self,waiting);
+ while (p->owner != self) {
+ __thrsleep(self, 0, NULL, &hash_lock, NULL);
+ _spinlock(&hash_lock);
+ }
+ }
+
+ /* Unlock the hash table: */
+ _spinunlock(&hash_lock);
+}
+
+int
+_thread_ftrylockfile(FILE * fp)
+{
+ int ret = -1;
+ int idx = file_idx(fp);
+ struct file_lock *p;
+
+ /* Lock the hash table: */
+ _spinlock(&hash_lock);
+
+ /* Get a pointer to any existing lock for the file: */
+ if ((p = find_lock(idx, fp)) == NULL) {
+ /*
+ * The file is not locked, so this thread can
+ * grab the lock:
+ */
+ p = do_lock(idx, fp);
+
+ /*
+ * The file is already locked, so check if the
+ * running thread is the owner:
+ */
+ } else if (p->owner == pthread_self()) {
+ /*
+ * The running thread is already the
+ * owner, so increment the count of
+ * the number of times it has locked
+ * the file:
+ */
+ p->count++;
+ } else {
+ /*
+ * The file is locked for another thread,
+ * so this try fails.
+ */
+ p = NULL;
+ }
+
+ /* Unlock the hash table: */
+ _spinunlock(&hash_lock);
+
+ /* Check if the lock was obtained: */
+ if (p != NULL)
+ /* Return success: */
+ ret = 0;
+
+ return (ret);
+}
+
+void
+_thread_funlockfile(FILE * fp)
+{
+ int idx = file_idx(fp);
+ struct file_lock *p;
+
+ /* Lock the hash table: */
+ _spinlock(&hash_lock);
+
+ /*
+ * Get a pointer to the lock for the file and check that
+ * the running thread is the one with the lock:
+ */
+ if ((p = find_lock(idx, fp)) != NULL && p->owner == pthread_self()) {
+ /*
+ * Check if this thread has locked the FILE
+ * more than once:
+ */
+ if (--p->count == 0) {
+ /* Get the new owner of the lock: */
+ if ((p->owner = TAILQ_FIRST(&p->lockers)) != NULL) {
+ /* Pop the thread off the queue: */
+ TAILQ_REMOVE(&p->lockers,p->owner,waiting);
+
+ /*
+ * This is the first lock for the new
+ * owner:
+ */
+ p->count = 1;
+
+ __thrwakeup(p->owner, 1);
+ }
+ }
+ }
+
+ /* Unlock the hash table: */
+ _spinunlock(&hash_lock);
+}
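+
+/*
+ * Usage sketch (illustrative, not part of this file): the three
+ * functions above back flockfile(3)/ftrylockfile(3)/funlockfile(3)
+ * in a threaded process, and the lock is recursive per thread:
+ *
+ *	flockfile(fp);
+ *	flockfile(fp);		-- same owner, count goes to 2
+ *	fprintf(fp, "atomic with respect to other threads\n");
+ *	funlockfile(fp);	-- count back to 1
+ *	funlockfile(fp);	-- count 0: first waiter, if any, is woken
+ */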
diff --git a/lib/libc/thread/rthread_libc.c b/lib/libc/thread/rthread_libc.c
new file mode 100644
index 00000000000..406fc9f939e
--- /dev/null
+++ b/lib/libc/thread/rthread_libc.c
@@ -0,0 +1,262 @@
+/* $OpenBSD: rthread_libc.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+
+/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
+
+#include <sys/time.h>
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "thread_private.h" /* in libc/include */
+
+#include "rthread.h"
+#include "rthread_cb.h"
+
+/*
+ * A thread tag is a pointer to a structure of this type. An opaque
+ * tag is used to decouple libc from the thread library.
+ */
+struct _thread_tag {
+ pthread_mutex_t m; /* the tag's mutex */
+ pthread_key_t k; /* a key for private data */
+};
+
+/*
+ * local mutex to protect against tag creation races.
+ */
+static pthread_mutex_t _thread_tag_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Initialize a thread tag structure once. This function is called
+ * if the tag is null. Allocation and initialization are controlled
+ * by a mutex. If the tag is not null when the mutex is obtained,
+ * the caller lost a race -- some other thread initialized the tag.
+ * This function will never return NULL.
+ */
+static void
+_thread_tag_init(void **tag)
+{
+ struct _thread_tag *tt;
+ int result;
+
+ result = pthread_mutex_lock(&_thread_tag_mutex);
+ if (result == 0) {
+ if (*tag == NULL) {
+ tt = malloc(sizeof *tt);
+ if (tt != NULL) {
+ result = pthread_mutex_init(&tt->m, NULL);
+ result |= pthread_key_create(&tt->k, free);
+ *tag = tt;
+ }
+ }
+ result |= pthread_mutex_unlock(&_thread_tag_mutex);
+ }
+ if (result != 0)
+ _rthread_debug(1, "tag init failure");
+}
+
+/*
+ * lock the mutex associated with the given tag
+ */
+void
+_thread_tag_lock(void **tag)
+{
+ struct _thread_tag *tt;
+
+ if (__isthreaded) {
+ if (*tag == NULL)
+ _thread_tag_init(tag);
+ tt = *tag;
+ if (pthread_mutex_lock(&tt->m) != 0)
+ _rthread_debug(1, "tag mutex lock failure");
+ }
+}
+
+/*
+ * unlock the mutex associated with the given tag
+ */
+void
+_thread_tag_unlock(void **tag)
+{
+ struct _thread_tag *tt;
+
+ if (__isthreaded) {
+ if (*tag == NULL)
+ _thread_tag_init(tag);
+ tt = *tag;
+ if (pthread_mutex_unlock(&tt->m) != 0)
+ _rthread_debug(1, "tag mutex unlock failure");
+ }
+}
+
+/*
+ * return the thread specific data for the given tag. If there
+ * is no data for this thread, initialize it from 'storage'.
+ * On any error, return 'err'.
+ */
+void *
+_thread_tag_storage(void **tag, void *storage, size_t sz, void *err)
+{
+ struct _thread_tag *tt;
+ void *ret;
+
+ if (*tag == NULL)
+ _thread_tag_init(tag);
+ tt = *tag;
+
+ ret = pthread_getspecific(tt->k);
+ if (ret == NULL) {
+ ret = malloc(sz);
+ if (ret == NULL)
+ ret = err;
+ else {
+ if (pthread_setspecific(tt->k, ret) == 0)
+ memcpy(ret, storage, sz);
+ else {
+ free(ret);
+ ret = err;
+ }
+ }
+ }
+ return ret;
+}
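+
+/*
+ * Illustrative use (hypothetical caller; 'serial_tag', 'serial_data'
+ * and 'struct state' are made-up names): a libc routine that keeps
+ * per-process static state can request a per-thread copy of it,
+ * falling back to the static copy if allocation fails:
+ *
+ *	static void *serial_tag;		-- opaque, starts out NULL
+ *	static struct state serial_data;	-- the single-threaded copy
+ *
+ *	struct state *s = _thread_tag_storage(&serial_tag, &serial_data,
+ *	    sizeof(serial_data), &serial_data);
+ */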
+
+void
+_thread_mutex_lock(void **mutex)
+{
+ pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;
+
+ if (pthread_mutex_lock(pmutex) != 0)
+ _rthread_debug(1, "mutex lock failure");
+}
+
+void
+_thread_mutex_unlock(void **mutex)
+{
+ pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;
+
+ if (pthread_mutex_unlock(pmutex) != 0)
+ _rthread_debug(1, "mutex unlock failure");
+}
+
+void
+_thread_mutex_destroy(void **mutex)
+{
+ pthread_mutex_t *pmutex = (pthread_mutex_t *)mutex;
+
+ if (pthread_mutex_destroy(pmutex) != 0)
+ _rthread_debug(1, "mutex destroy failure");
+}
+
+/*
+ * the malloc locks
+ */
+#ifndef FUTEX
+#define MALLOC_LOCK_INITIALIZER(n) { \
+ _SPINLOCK_UNLOCKED, \
+ TAILQ_HEAD_INITIALIZER(malloc_lock[n].lockers), \
+ PTHREAD_MUTEX_DEFAULT, \
+ NULL, \
+ 0, \
+ -1 }
+#else
+#define MALLOC_LOCK_INITIALIZER(n) { \
+ _SPINLOCK_UNLOCKED, \
+ PTHREAD_MUTEX_DEFAULT, \
+ NULL, \
+ 0, \
+ -1 }
+#endif
+
+static struct pthread_mutex malloc_lock[_MALLOC_MUTEXES] = {
+ MALLOC_LOCK_INITIALIZER(0),
+ MALLOC_LOCK_INITIALIZER(1),
+ MALLOC_LOCK_INITIALIZER(2),
+ MALLOC_LOCK_INITIALIZER(3)
+};
+
+static pthread_mutex_t malloc_mutex[_MALLOC_MUTEXES] = {
+ &malloc_lock[0],
+ &malloc_lock[1],
+ &malloc_lock[2],
+ &malloc_lock[3]
+};
+
+void
+_thread_malloc_lock(int i)
+{
+ pthread_mutex_lock(&malloc_mutex[i]);
+}
+
+void
+_thread_malloc_unlock(int i)
+{
+ pthread_mutex_unlock(&malloc_mutex[i]);
+}
+
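+/*
+ * Reset the malloc locks to their unlocked state, e.g. in the child
+ * of fork(2), where a lock may still appear held by a thread that
+ * only exists in the parent.
+ */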
+void
+_thread_malloc_reinit(void)
+{
+ int i;
+
+ for (i = 0; i < _MALLOC_MUTEXES; i++) {
+ malloc_lock[i].lock = _SPINLOCK_UNLOCKED;
+#ifndef FUTEX
+ TAILQ_INIT(&malloc_lock[i].lockers);
+#endif
+ malloc_lock[i].owner = NULL;
+ malloc_lock[i].count = 0;
+ }
+}
+
+/*
+ * atexit lock
+ */
+static _atomic_lock_t atexit_lock = _SPINLOCK_UNLOCKED;
+
+void
+_thread_atexit_lock(void)
+{
+ _spinlock(&atexit_lock);
+}
+
+void
+_thread_atexit_unlock(void)
+{
+ _spinunlock(&atexit_lock);
+}
+
+/*
+ * atfork lock
+ */
+static _atomic_lock_t atfork_lock = _SPINLOCK_UNLOCKED;
+
+void
+_thread_atfork_lock(void)
+{
+ _spinlock(&atfork_lock);
+}
+
+void
+_thread_atfork_unlock(void)
+{
+ _spinunlock(&atfork_lock);
+}
+
+/*
+ * arc4random lock
+ */
+static _atomic_lock_t arc4_lock = _SPINLOCK_UNLOCKED;
+
+void
+_thread_arc4_lock(void)
+{
+ _spinlock(&arc4_lock);
+}
+
+void
+_thread_arc4_unlock(void)
+{
+ _spinunlock(&arc4_lock);
+}
diff --git a/lib/libc/thread/rthread_mutex.c b/lib/libc/thread/rthread_mutex.c
new file mode 100644
index 00000000000..73a87180086
--- /dev/null
+++ b/lib/libc/thread/rthread_mutex.c
@@ -0,0 +1,288 @@
+/* $OpenBSD: rthread_mutex.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2017 Martin Pieuchot <mpi@openbsd.org>
+ * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <pthread.h>
+
+#include "rthread.h"
+#include "cancel.h"
+#include "synch.h"
+
+/*
+ * States defined in "Futexes Are Tricky" 5.2
+ */
+enum {
+ UNLOCKED = 0,
+ LOCKED = 1, /* locked without waiter */
+ CONTENDED = 2, /* threads waiting for this mutex */
+};
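+
+/*
+ * Rough summary of the transitions implemented below:
+ *
+ *	lock:	UNLOCKED -> LOCKED	fast path, no syscall
+ *		else mark CONTENDED	then futex wait until UNLOCKED
+ *	unlock:	LOCKED -> UNLOCKED	fast path, no syscall
+ *		CONTENDED -> UNLOCKED	then futex wake one waiter
+ */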
+
+#define SPIN_COUNT 128
+#if defined(__i386__) || defined(__amd64__)
+#define SPIN_WAIT() asm volatile("pause": : : "memory")
+#else
+#define SPIN_WAIT() do { } while (0)
+#endif
+
+static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
+
+int
+pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
+{
+ pthread_mutex_t mutex;
+
+ mutex = calloc(1, sizeof(*mutex));
+ if (mutex == NULL)
+ return (ENOMEM);
+
+ if (attr == NULL) {
+ mutex->type = PTHREAD_MUTEX_DEFAULT;
+ mutex->prioceiling = -1;
+ } else {
+ mutex->type = (*attr)->ma_type;
+ mutex->prioceiling = (*attr)->ma_protocol ==
+ PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
+ }
+ *mutexp = mutex;
+
+ return (0);
+}
+DEF_STD(pthread_mutex_init);
+
+int
+pthread_mutex_destroy(pthread_mutex_t *mutexp)
+{
+ pthread_mutex_t mutex;
+
+ if (mutexp == NULL || *mutexp == NULL)
+ return (EINVAL);
+
+ mutex = *mutexp;
+ if (mutex) {
+ if (mutex->lock != UNLOCKED) {
+#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
+ write(2, MSG, sizeof(MSG) - 1);
+#undef MSG
+ return (EBUSY);
+ }
+ free((void *)mutex);
+ *mutexp = NULL;
+ }
+
+ return (0);
+}
+DEF_STD(pthread_mutex_destroy);
+
+static int
+_rthread_mutex_trylock(pthread_mutex_t mutex, int trywait,
+ const struct timespec *abs)
+{
+ pthread_t self = pthread_self();
+
+ if (atomic_cas_uint(&mutex->lock, UNLOCKED, LOCKED) == UNLOCKED) {
+ membar_enter_after_atomic();
+ mutex->owner = self;
+ return (0);
+ }
+
+ if (mutex->owner == self) {
+ int type = mutex->type;
+
+ /* already owner? handle recursive behavior */
+ if (type != PTHREAD_MUTEX_RECURSIVE) {
+ if (trywait || type == PTHREAD_MUTEX_ERRORCHECK)
+ return (trywait ? EBUSY : EDEADLK);
+
+ /* self-deadlock is disallowed by strict */
+ if (type == PTHREAD_MUTEX_STRICT_NP && abs == NULL)
+ abort();
+
+ /* self-deadlock, possibly until timeout */
+ while (_twait(&mutex->type, type, CLOCK_REALTIME,
+ abs) != ETIMEDOUT)
+ ;
+ return (ETIMEDOUT);
+ } else {
+ if (mutex->count == INT_MAX)
+ return (EAGAIN);
+ mutex->count++;
+ return (0);
+ }
+ }
+
+ return (EBUSY);
+}
+
+static int
+_rthread_mutex_timedlock(pthread_mutex_t *mutexp, int trywait,
+ const struct timespec *abs, int timed)
+{
+ pthread_t self = pthread_self();
+ pthread_mutex_t mutex;
+ unsigned int i, lock;
+ int error = 0;
+
+ if (mutexp == NULL)
+ return (EINVAL);
+
+ /*
+ * If the mutex is statically initialized, perform the dynamic
+ * initialization. Note: _thread_mutex_lock() in libc requires
+ * pthread_mutex_lock() to perform the mutex init when *mutexp
+ * is NULL.
+ */
+ if (*mutexp == NULL) {
+ _spinlock(&static_init_lock);
+ if (*mutexp == NULL)
+ error = pthread_mutex_init(mutexp, NULL);
+ _spinunlock(&static_init_lock);
+ if (error != 0)
+ return (EINVAL);
+ }
+
+ mutex = *mutexp;
+ _rthread_debug(5, "%p: mutex_%slock %p (%p)\n", self,
+ (timed ? "timed" : (trywait ? "try" : "")), (void *)mutex,
+ (void *)mutex->owner);
+
+ error = _rthread_mutex_trylock(mutex, trywait, abs);
+ if (error != EBUSY || trywait)
+ return (error);
+
+ /* Try hard to not enter the kernel. */
+ for (i = 0; i < SPIN_COUNT; i ++) {
+ if (mutex->lock == UNLOCKED)
+ break;
+
+ SPIN_WAIT();
+ }
+
+ lock = atomic_cas_uint(&mutex->lock, UNLOCKED, LOCKED);
+ if (lock == UNLOCKED) {
+ membar_enter_after_atomic();
+ mutex->owner = self;
+ return (0);
+ }
+
+ if (lock != CONTENDED) {
+ /* Indicate that we're waiting on this mutex. */
+ lock = atomic_swap_uint(&mutex->lock, CONTENDED);
+ }
+
+ while (lock != UNLOCKED) {
+ error = _twait(&mutex->lock, CONTENDED, CLOCK_REALTIME, abs);
+ if (error == ETIMEDOUT)
+ return (error);
+ /*
+ * We cannot know if there's another waiter, so in
+ * doubt set the state to CONTENDED.
+ */
+ lock = atomic_swap_uint(&mutex->lock, CONTENDED);
+	}
+
+ membar_enter_after_atomic();
+ mutex->owner = self;
+ return (0);
+}
+
+int
+pthread_mutex_trylock(pthread_mutex_t *mutexp)
+{
+ return (_rthread_mutex_timedlock(mutexp, 1, NULL, 0));
+}
+
+int
+pthread_mutex_timedlock(pthread_mutex_t *mutexp, const struct timespec *abs)
+{
+ return (_rthread_mutex_timedlock(mutexp, 0, abs, 1));
+}
+
+int
+pthread_mutex_lock(pthread_mutex_t *mutexp)
+{
+ return (_rthread_mutex_timedlock(mutexp, 0, NULL, 0));
+}
+DEF_STD(pthread_mutex_lock);
+
+int
+pthread_mutex_unlock(pthread_mutex_t *mutexp)
+{
+ pthread_t self = pthread_self();
+ pthread_mutex_t mutex;
+
+ if (mutexp == NULL)
+ return (EINVAL);
+
+ if (*mutexp == NULL)
+#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
+ return (EPERM);
+#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
+		return (0);
+#else
+ abort();
+#endif
+
+ mutex = *mutexp;
+ _rthread_debug(5, "%p: mutex_unlock %p (%p)\n", self, (void *)mutex,
+ (void *)mutex->owner);
+
+ if (mutex->owner != self) {
+ _rthread_debug(5, "%p: different owner %p (%p)\n", self, (void *)mutex,
+ (void *)mutex->owner);
+ if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
+ mutex->type == PTHREAD_MUTEX_RECURSIVE) {
+ return (EPERM);
+ } else {
+ /*
+ * For mutex type NORMAL our undefined behavior for
+ * unlocking an unlocked mutex is to succeed without
+ * error. All other undefined behaviors are to
+ * abort() immediately.
+ */
+ if (mutex->owner == NULL &&
+ mutex->type == PTHREAD_MUTEX_NORMAL)
+ return (0);
+ else
+ abort();
+
+ }
+ }
+
+ if (mutex->type == PTHREAD_MUTEX_RECURSIVE) {
+ if (mutex->count > 0) {
+ mutex->count--;
+ return (0);
+ }
+ }
+
+ mutex->owner = NULL;
+ membar_exit_before_atomic();
+ if (atomic_dec_int_nv(&mutex->lock) != UNLOCKED) {
+ mutex->lock = UNLOCKED;
+ _wake(&mutex->lock, 1);
+ }
+
+ return (0);
+}
+DEF_STD(pthread_mutex_unlock);
diff --git a/lib/libc/thread/rthread_once.c b/lib/libc/thread/rthread_once.c
new file mode 100644
index 00000000000..b8c6ba79161
--- /dev/null
+++ b/lib/libc/thread/rthread_once.c
@@ -0,0 +1,32 @@
+/* $OpenBSD: rthread_once.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <pthread.h>
+
+int
+pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
+{
+ pthread_mutex_lock(&once_control->mutex);
+ if (once_control->state == PTHREAD_NEEDS_INIT) {
+ init_routine();
+ once_control->state = PTHREAD_DONE_INIT;
+ }
+ pthread_mutex_unlock(&once_control->mutex);
+
+ return (0);
+}
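+
+/*
+ * Reminder of the standard calling pattern:
+ *
+ *	static pthread_once_t once = PTHREAD_ONCE_INIT;
+ *	static void init(void) { ... }
+ *	...
+ *	pthread_once(&once, init);	-- init() runs exactly once
+ */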
diff --git a/lib/libc/thread/rthread_sync.c b/lib/libc/thread/rthread_sync.c
new file mode 100644
index 00000000000..42f953ffd10
--- /dev/null
+++ b/lib/libc/thread/rthread_sync.c
@@ -0,0 +1,693 @@
+/* $OpenBSD: rthread_sync.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
+ * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Mutexes and conditions - synchronization functions.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <pthread.h>
+
+#include "rthread.h"
+#include "cancel.h" /* in libc/include */
+
+static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
+
+/*
+ * mutexen
+ */
+int
+pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
+{
+ struct pthread_mutex *mutex;
+
+ mutex = calloc(1, sizeof(*mutex));
+ if (!mutex)
+ return (errno);
+ mutex->lock = _SPINLOCK_UNLOCKED;
+ TAILQ_INIT(&mutex->lockers);
+ if (attr == NULL) {
+ mutex->type = PTHREAD_MUTEX_DEFAULT;
+ mutex->prioceiling = -1;
+ } else {
+ mutex->type = (*attr)->ma_type;
+ mutex->prioceiling = (*attr)->ma_protocol ==
+ PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
+ }
+ *mutexp = mutex;
+
+ return (0);
+}
+DEF_STD(pthread_mutex_init);
+
+int
+pthread_mutex_destroy(pthread_mutex_t *mutexp)
+{
+ struct pthread_mutex *mutex;
+
+ assert(mutexp);
+ mutex = (struct pthread_mutex *)*mutexp;
+ if (mutex) {
+ if (mutex->count || mutex->owner != NULL ||
+ !TAILQ_EMPTY(&mutex->lockers)) {
+#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
+ write(2, MSG, sizeof(MSG) - 1);
+#undef MSG
+ return (EBUSY);
+ }
+ free(mutex);
+ *mutexp = NULL;
+ }
+ return (0);
+}
+DEF_STD(pthread_mutex_destroy);
+
+static int
+_rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait,
+ const struct timespec *abstime)
+{
+ struct pthread_mutex *mutex;
+ pthread_t self = pthread_self();
+ int ret = 0;
+
+ /*
+ * If the mutex is statically initialized, perform the dynamic
+ * initialization. Note: _thread_mutex_lock() in libc requires
+ * _rthread_mutex_lock() to perform the mutex init when *mutexp
+ * is NULL.
+ */
+ if (*mutexp == NULL) {
+ _spinlock(&static_init_lock);
+ if (*mutexp == NULL)
+ ret = pthread_mutex_init(mutexp, NULL);
+ _spinunlock(&static_init_lock);
+ if (ret != 0)
+ return (EINVAL);
+ }
+ mutex = (struct pthread_mutex *)*mutexp;
+
+ _rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex);
+ _spinlock(&mutex->lock);
+ if (mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers)) {
+ assert(mutex->count == 0);
+ mutex->owner = self;
+ } else if (mutex->owner == self) {
+ assert(mutex->count > 0);
+
+ /* already owner? handle recursive behavior */
+ if (mutex->type != PTHREAD_MUTEX_RECURSIVE)
+ {
+ if (trywait ||
+ mutex->type == PTHREAD_MUTEX_ERRORCHECK) {
+ _spinunlock(&mutex->lock);
+ return (trywait ? EBUSY : EDEADLK);
+ }
+
+ /* self-deadlock is disallowed by strict */
+ if (mutex->type == PTHREAD_MUTEX_STRICT_NP &&
+ abstime == NULL)
+ abort();
+
+ /* self-deadlock, possibly until timeout */
+ while (__thrsleep(self, CLOCK_REALTIME, abstime,
+ &mutex->lock, NULL) != EWOULDBLOCK)
+ _spinlock(&mutex->lock);
+ return (ETIMEDOUT);
+ }
+ if (mutex->count == INT_MAX) {
+ _spinunlock(&mutex->lock);
+ return (EAGAIN);
+ }
+ } else if (trywait) {
+ /* try failed */
+ _spinunlock(&mutex->lock);
+ return (EBUSY);
+ } else {
+ /* add to the wait queue and block until at the head */
+ TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
+ while (mutex->owner != self) {
+ ret = __thrsleep(self, CLOCK_REALTIME, abstime,
+ &mutex->lock, NULL);
+ _spinlock(&mutex->lock);
+ assert(mutex->owner != NULL);
+ if (ret == EWOULDBLOCK) {
+ if (mutex->owner == self)
+ break;
+ TAILQ_REMOVE(&mutex->lockers, self, waiting);
+ _spinunlock(&mutex->lock);
+ return (ETIMEDOUT);
+ }
+ }
+ }
+
+ mutex->count++;
+ _spinunlock(&mutex->lock);
+
+ return (0);
+}
+
+int
+pthread_mutex_lock(pthread_mutex_t *p)
+{
+ return (_rthread_mutex_lock(p, 0, NULL));
+}
+DEF_STD(pthread_mutex_lock);
+
+int
+pthread_mutex_trylock(pthread_mutex_t *p)
+{
+ return (_rthread_mutex_lock(p, 1, NULL));
+}
+
+int
+pthread_mutex_timedlock(pthread_mutex_t *p, const struct timespec *abstime)
+{
+ return (_rthread_mutex_lock(p, 0, abstime));
+}
+
+int
+pthread_mutex_unlock(pthread_mutex_t *mutexp)
+{
+ pthread_t self = pthread_self();
+ struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
+
+ _rthread_debug(5, "%p: mutex_unlock %p\n", (void *)self,
+ (void *)mutex);
+
+ if (mutex == NULL)
+#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
+ return (EPERM);
+#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
+		return (0);
+#else
+ abort();
+#endif
+
+ if (mutex->owner != self) {
+ if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
+ mutex->type == PTHREAD_MUTEX_RECURSIVE)
+ return (EPERM);
+ else {
+ /*
+ * For mutex type NORMAL our undefined behavior for
+ * unlocking an unlocked mutex is to succeed without
+ * error. All other undefined behaviors are to
+ * abort() immediately.
+ */
+ if (mutex->owner == NULL &&
+ mutex->type == PTHREAD_MUTEX_NORMAL)
+ return (0);
+ else
+ abort();
+ }
+ }
+
+ if (--mutex->count == 0) {
+ pthread_t next;
+
+ _spinlock(&mutex->lock);
+ mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
+ if (next != NULL)
+ TAILQ_REMOVE(&mutex->lockers, next, waiting);
+ _spinunlock(&mutex->lock);
+ if (next != NULL)
+ __thrwakeup(next, 1);
+ }
+
+ return (0);
+}
+DEF_STD(pthread_mutex_unlock);
+
+/*
+ * condition variables
+ */
+int
+pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attr)
+{
+ pthread_cond_t cond;
+
+ cond = calloc(1, sizeof(*cond));
+ if (!cond)
+ return (errno);
+ cond->lock = _SPINLOCK_UNLOCKED;
+ TAILQ_INIT(&cond->waiters);
+ if (attr == NULL)
+ cond->clock = CLOCK_REALTIME;
+ else
+ cond->clock = (*attr)->ca_clock;
+ *condp = cond;
+
+ return (0);
+}
+DEF_STD(pthread_cond_init);
+
+int
+pthread_cond_destroy(pthread_cond_t *condp)
+{
+ pthread_cond_t cond;
+
+ assert(condp);
+ cond = *condp;
+ if (cond) {
+ if (!TAILQ_EMPTY(&cond->waiters)) {
+#define MSG "pthread_cond_destroy on condvar with waiters!\n"
+ write(2, MSG, sizeof(MSG) - 1);
+#undef MSG
+ return (EBUSY);
+ }
+ free(cond);
+ }
+ *condp = NULL;
+
+ return (0);
+}
+DEF_STD(pthread_cond_destroy);
+
+int
+pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
+ const struct timespec *abstime)
+{
+ pthread_cond_t cond;
+ struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
+ struct tib *tib = TIB_GET();
+ pthread_t self = tib->tib_thread;
+ pthread_t next;
+ int mutex_count;
+ int canceled = 0;
+ int rv = 0;
+ int error;
+ PREP_CANCEL_POINT(tib);
+
+ if (!*condp)
+ if ((error = pthread_cond_init(condp, NULL)))
+ return (error);
+ cond = *condp;
+ _rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self,
+ (void *)cond, (void *)mutex);
+
+ if (mutex == NULL)
+#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
+ return (EPERM);
+#else
+ abort();
+#endif
+
+ if (mutex->owner != self) {
+ if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
+ return (EPERM);
+ else
+ abort();
+ }
+
+ if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000)
+ return (EINVAL);
+
+ ENTER_DELAYED_CANCEL_POINT(tib, self);
+
+ _spinlock(&cond->lock);
+
+ /* mark the condvar as being associated with this mutex */
+ if (cond->mutex == NULL) {
+ cond->mutex = mutex;
+ assert(TAILQ_EMPTY(&cond->waiters));
+ } else if (cond->mutex != mutex) {
+ assert(cond->mutex == mutex);
+ _spinunlock(&cond->lock);
+ LEAVE_CANCEL_POINT_INNER(tib, 1);
+ return (EINVAL);
+ } else
+ assert(! TAILQ_EMPTY(&cond->waiters));
+
+ /* snag the count in case this is a recursive mutex */
+ mutex_count = mutex->count;
+
+ /* transfer from the mutex queue to the condvar queue */
+ _spinlock(&mutex->lock);
+ self->blocking_cond = cond;
+ TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
+ _spinunlock(&cond->lock);
+
+ /* wake the next guy blocked on the mutex */
+ mutex->count = 0;
+ mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
+ if (next != NULL) {
+ TAILQ_REMOVE(&mutex->lockers, next, waiting);
+ __thrwakeup(next, 1);
+ }
+
+ /* wait until we're the owner of the mutex again */
+ while (mutex->owner != self) {
+ error = __thrsleep(self, cond->clock, abstime,
+ &mutex->lock, &self->delayed_cancel);
+
+ /*
+ * If abstime == NULL, then we're definitely waiting
+ * on the mutex instead of the condvar, and are
+ * just waiting for mutex ownership, regardless of
+ * why we woke up.
+ */
+ if (abstime == NULL) {
+ _spinlock(&mutex->lock);
+ continue;
+ }
+
+ /*
+ * If we took a normal signal (not from
+ * cancellation) then we should just go back to
+ * sleep without changing state (timeouts, etc).
+ */
+ if (error == EINTR && (tib->tib_canceled == 0 ||
+ (tib->tib_cantcancel & CANCEL_DISABLED))) {
+ _spinlock(&mutex->lock);
+ continue;
+ }
+
+ /*
+ * The remaining reasons for waking up (normal
+ * wakeup, timeout, and cancellation) all mean that
+ * we won't be staying in the condvar queue and
+ * we'll no longer time out or be cancelable.
+ */
+ abstime = NULL;
+ LEAVE_CANCEL_POINT_INNER(tib, 0);
+
+ /*
+ * If we're no longer in the condvar's queue then
+ * we're just waiting for mutex ownership. Need
+ * cond->lock here to prevent race with cond_signal().
+ */
+ _spinlock(&cond->lock);
+ if (self->blocking_cond == NULL) {
+ _spinunlock(&cond->lock);
+ _spinlock(&mutex->lock);
+ continue;
+ }
+ assert(self->blocking_cond == cond);
+
+ /* if timeout or canceled, make note of that */
+ if (error == EWOULDBLOCK)
+ rv = ETIMEDOUT;
+ else if (error == EINTR)
+ canceled = 1;
+
+ /* transfer between the queues */
+ TAILQ_REMOVE(&cond->waiters, self, waiting);
+ assert(mutex == cond->mutex);
+ if (TAILQ_EMPTY(&cond->waiters))
+ cond->mutex = NULL;
+ self->blocking_cond = NULL;
+ _spinunlock(&cond->lock);
+ _spinlock(&mutex->lock);
+
+ /* mutex unlocked right now? */
+ if (mutex->owner == NULL &&
+ TAILQ_EMPTY(&mutex->lockers)) {
+ assert(mutex->count == 0);
+ mutex->owner = self;
+ break;
+ }
+ TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
+ }
+
+ /* restore the mutex's count */
+ mutex->count = mutex_count;
+ _spinunlock(&mutex->lock);
+
+ LEAVE_CANCEL_POINT_INNER(tib, canceled);
+
+ return (rv);
+}
+
+int
+pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
+{
+ pthread_cond_t cond;
+ struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
+ struct tib *tib = TIB_GET();
+ pthread_t self = tib->tib_thread;
+ pthread_t next;
+ int mutex_count;
+ int canceled = 0;
+ int error;
+ PREP_CANCEL_POINT(tib);
+
+ if (!*condp)
+ if ((error = pthread_cond_init(condp, NULL)))
+ return (error);
+ cond = *condp;
+ _rthread_debug(5, "%p: cond_wait %p,%p\n", (void *)self,
+ (void *)cond, (void *)mutex);
+
+ if (mutex == NULL)
+#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
+ return (EPERM);
+#else
+ abort();
+#endif
+
+ if (mutex->owner != self) {
+ if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
+ return (EPERM);
+ else
+ abort();
+ }
+
+ ENTER_DELAYED_CANCEL_POINT(tib, self);
+
+ _spinlock(&cond->lock);
+
+ /* mark the condvar as being associated with this mutex */
+ if (cond->mutex == NULL) {
+ cond->mutex = mutex;
+ assert(TAILQ_EMPTY(&cond->waiters));
+ } else if (cond->mutex != mutex) {
+ assert(cond->mutex == mutex);
+ _spinunlock(&cond->lock);
+ LEAVE_CANCEL_POINT_INNER(tib, 1);
+ return (EINVAL);
+ } else
+ assert(! TAILQ_EMPTY(&cond->waiters));
+
+ /* snag the count in case this is a recursive mutex */
+ mutex_count = mutex->count;
+
+ /* transfer from the mutex queue to the condvar queue */
+ _spinlock(&mutex->lock);
+ self->blocking_cond = cond;
+ TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
+ _spinunlock(&cond->lock);
+
+ /* wake the next guy blocked on the mutex */
+ mutex->count = 0;
+ mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
+ if (next != NULL) {
+ TAILQ_REMOVE(&mutex->lockers, next, waiting);
+ __thrwakeup(next, 1);
+ }
+
+ /* wait until we're the owner of the mutex again */
+ while (mutex->owner != self) {
+ error = __thrsleep(self, 0, NULL, &mutex->lock,
+ &self->delayed_cancel);
+
+ /*
+ * If we took a normal signal (not from
+ * cancellation) then we should just go back to
+ * sleep without changing state (timeouts, etc).
+ */
+ if (error == EINTR && (tib->tib_canceled == 0 ||
+ (tib->tib_cantcancel & CANCEL_DISABLED))) {
+ _spinlock(&mutex->lock);
+ continue;
+ }
+
+ /*
+ * The remaining reasons for waking up (normal
+ * wakeup and cancellation) all mean that we won't
+ * be staying in the condvar queue and we'll no
+ * longer be cancelable.
+ */
+ LEAVE_CANCEL_POINT_INNER(tib, 0);
+
+ /*
+ * If we're no longer in the condvar's queue then
+ * we're just waiting for mutex ownership. Need
+ * cond->lock here to prevent race with cond_signal().
+ */
+ _spinlock(&cond->lock);
+ if (self->blocking_cond == NULL) {
+ _spinunlock(&cond->lock);
+ _spinlock(&mutex->lock);
+ continue;
+ }
+ assert(self->blocking_cond == cond);
+
+ /* if canceled, make note of that */
+ if (error == EINTR)
+ canceled = 1;
+
+ /* transfer between the queues */
+ TAILQ_REMOVE(&cond->waiters, self, waiting);
+ assert(mutex == cond->mutex);
+ if (TAILQ_EMPTY(&cond->waiters))
+ cond->mutex = NULL;
+ self->blocking_cond = NULL;
+ _spinunlock(&cond->lock);
+ _spinlock(&mutex->lock);
+
+ /* mutex unlocked right now? */
+ if (mutex->owner == NULL &&
+ TAILQ_EMPTY(&mutex->lockers)) {
+ assert(mutex->count == 0);
+ mutex->owner = self;
+ break;
+ }
+ TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
+ }
+
+ /* restore the mutex's count */
+ mutex->count = mutex_count;
+ _spinunlock(&mutex->lock);
+
+ LEAVE_CANCEL_POINT_INNER(tib, canceled);
+
+ return (0);
+}
+DEF_STD(pthread_cond_wait);
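+
+/*
+ * For reference, the caller-side pattern the wait functions above
+ * assume (the predicate is rechecked because wakeups may be spurious):
+ *
+ *	pthread_mutex_lock(&m);
+ *	while (!predicate)
+ *		pthread_cond_wait(&cv, &m);
+ *	-- predicate holds and the mutex is held here
+ *	pthread_mutex_unlock(&m);
+ */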
+
+
+int
+pthread_cond_signal(pthread_cond_t *condp)
+{
+ pthread_cond_t cond;
+ struct pthread_mutex *mutex;
+ pthread_t thread;
+ int wakeup;
+
+ /* uninitialized? Then there's obviously no one waiting! */
+ if (!*condp)
+ return 0;
+
+ cond = *condp;
+ _rthread_debug(5, "%p: cond_signal %p,%p\n", (void *)pthread_self(),
+ (void *)cond, (void *)cond->mutex);
+ _spinlock(&cond->lock);
+ thread = TAILQ_FIRST(&cond->waiters);
+ if (thread == NULL) {
+ assert(cond->mutex == NULL);
+ _spinunlock(&cond->lock);
+ return (0);
+ }
+
+ assert(thread->blocking_cond == cond);
+ TAILQ_REMOVE(&cond->waiters, thread, waiting);
+ thread->blocking_cond = NULL;
+
+ mutex = cond->mutex;
+ assert(mutex != NULL);
+ if (TAILQ_EMPTY(&cond->waiters))
+ cond->mutex = NULL;
+
+ /* link locks to prevent race with timedwait */
+ _spinlock(&mutex->lock);
+ _spinunlock(&cond->lock);
+
+ wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
+ if (wakeup)
+ mutex->owner = thread;
+ else
+ TAILQ_INSERT_TAIL(&mutex->lockers, thread, waiting);
+ _spinunlock(&mutex->lock);
+ if (wakeup)
+ __thrwakeup(thread, 1);
+
+ return (0);
+}
+DEF_STD(pthread_cond_signal);
+
+int
+pthread_cond_broadcast(pthread_cond_t *condp)
+{
+ pthread_cond_t cond;
+ struct pthread_mutex *mutex;
+ pthread_t thread;
+ pthread_t p;
+ int wakeup;
+
+ /* uninitialized? Then there's obviously no one waiting! */
+ if (!*condp)
+ return 0;
+
+ cond = *condp;
+ _rthread_debug(5, "%p: cond_broadcast %p,%p\n", (void *)pthread_self(),
+ (void *)cond, (void *)cond->mutex);
+ _spinlock(&cond->lock);
+ thread = TAILQ_FIRST(&cond->waiters);
+ if (thread == NULL) {
+ assert(cond->mutex == NULL);
+ _spinunlock(&cond->lock);
+ return (0);
+ }
+
+ mutex = cond->mutex;
+ assert(mutex != NULL);
+
+ /* walk the list, clearing the "blocked on condvar" pointer */
+ p = thread;
+ do
+ p->blocking_cond = NULL;
+ while ((p = TAILQ_NEXT(p, waiting)) != NULL);
+
+ /*
+ * We want to transfer all the threads from the condvar's list
+ * to the mutex's list. The TAILQ_* macros don't let us do that
+ * efficiently, so this is direct list surgery. Pay attention!
+ */
+
+ /* 1) attach the first thread to the end of the mutex's list */
+ _spinlock(&mutex->lock);
+ wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
+ thread->waiting.tqe_prev = mutex->lockers.tqh_last;
+ *(mutex->lockers.tqh_last) = thread;
+
+ /* 2) fix up the end pointer for the mutex's list */
+ mutex->lockers.tqh_last = cond->waiters.tqh_last;
+
+ if (wakeup) {
+ TAILQ_REMOVE(&mutex->lockers, thread, waiting);
+ mutex->owner = thread;
+ _spinunlock(&mutex->lock);
+ __thrwakeup(thread, 1);
+ } else
+ _spinunlock(&mutex->lock);
+
+ /* 3) reset the condvar's list and mutex pointer */
+ TAILQ_INIT(&cond->waiters);
+ assert(cond->mutex != NULL);
+ cond->mutex = NULL;
+ _spinunlock(&cond->lock);
+
+ return (0);
+}
+DEF_STD(pthread_cond_broadcast);
diff --git a/lib/libc/thread/rthread_tls.c b/lib/libc/thread/rthread_tls.c
new file mode 100644
index 00000000000..53e348e4dbd
--- /dev/null
+++ b/lib/libc/thread/rthread_tls.c
@@ -0,0 +1,185 @@
+/* $OpenBSD: rthread_tls.c,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * thread specific storage
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+
+#include <pthread.h>
+
+#include "rthread.h"
+
+static struct rthread_key rkeys[PTHREAD_KEYS_MAX];
+static _atomic_lock_t rkeyslock = _SPINLOCK_UNLOCKED;
+
+int
+pthread_key_create(pthread_key_t *key, void (*destructor)(void*))
+{
+ static int hint;
+ int i;
+
+ _spinlock(&rkeyslock);
+ if (rkeys[hint].used) {
+ for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
+ if (!rkeys[i].used)
+ break;
+ }
+ if (i == PTHREAD_KEYS_MAX) {
+ _spinunlock(&rkeyslock);
+ return (EAGAIN);
+ }
+ hint = i;
+ }
+ rkeys[hint].used = 1;
+ rkeys[hint].destructor = destructor;
+
+ *key = hint++;
+ if (hint >= PTHREAD_KEYS_MAX)
+ hint = 0;
+ _spinunlock(&rkeyslock);
+
+ return (0);
+}
+DEF_STD(pthread_key_create);
+
+int
+pthread_key_delete(pthread_key_t key)
+{
+ pthread_t thread;
+ struct rthread_storage *rs;
+ int rv = 0;
+
+ if (key < 0 || key >= PTHREAD_KEYS_MAX)
+ return (EINVAL);
+
+ _spinlock(&rkeyslock);
+ if (!rkeys[key].used) {
+ rv = EINVAL;
+ goto out;
+ }
+
+ rkeys[key].used = 0;
+ rkeys[key].destructor = NULL;
+ _spinlock(&_thread_lock);
+ LIST_FOREACH(thread, &_thread_list, threads) {
+ for (rs = thread->local_storage; rs; rs = rs->next) {
+ if (rs->keyid == key)
+ rs->data = NULL;
+ }
+ }
+ _spinunlock(&_thread_lock);
+
+out:
+ _spinunlock(&rkeyslock);
+ return (rv);
+}
+
+static struct rthread_storage *
+_rthread_findstorage(pthread_key_t key)
+{
+ struct rthread_storage *rs;
+ pthread_t self;
+
+ if (!rkeys[key].used) {
+ rs = NULL;
+ goto out;
+ }
+
+ self = pthread_self();
+
+ for (rs = self->local_storage; rs; rs = rs->next) {
+ if (rs->keyid == key)
+ break;
+ }
+ if (!rs) {
+ rs = calloc(1, sizeof(*rs));
+ if (!rs)
+ goto out;
+ rs->keyid = key;
+ rs->data = NULL;
+ rs->next = self->local_storage;
+ self->local_storage = rs;
+ }
+
+out:
+ return (rs);
+}
+
+void *
+pthread_getspecific(pthread_key_t key)
+{
+ struct rthread_storage *rs;
+
+ if (key < 0 || key >= PTHREAD_KEYS_MAX)
+ return (NULL);
+
+ rs = _rthread_findstorage(key);
+ if (!rs)
+ return (NULL);
+
+ return (rs->data);
+}
+DEF_STD(pthread_getspecific);
+
+int
+pthread_setspecific(pthread_key_t key, const void *data)
+{
+ struct rthread_storage *rs;
+
+ if (key < 0 || key >= PTHREAD_KEYS_MAX)
+ return (EINVAL);
+
+ rs = _rthread_findstorage(key);
+ if (!rs)
+ return (ENOMEM);
+ rs->data = (void *)data;
+
+ return (0);
+}
+DEF_STD(pthread_setspecific);
+
+void
+_rthread_tls_destructors(pthread_t thread)
+{
+ struct rthread_storage *rs;
+ int i;
+
+ _spinlock(&rkeyslock);
+ for (i = 0; i < PTHREAD_DESTRUCTOR_ITERATIONS; i++) {
+ for (rs = thread->local_storage; rs; rs = rs->next) {
+ if (!rs->data)
+ continue;
+ if (rkeys[rs->keyid].destructor) {
+ void (*destructor)(void *) =
+ rkeys[rs->keyid].destructor;
+ void *data = rs->data;
+ rs->data = NULL;
+ _spinunlock(&rkeyslock);
+ destructor(data);
+ _spinlock(&rkeyslock);
+ }
+ }
+ }
+ for (rs = thread->local_storage; rs; rs = thread->local_storage) {
+ thread->local_storage = rs->next;
+ free(rs);
+ }
+ _spinunlock(&rkeyslock);
+}
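+
+/*
+ * Usage sketch (illustrative only):
+ *
+ *	static pthread_key_t key;
+ *
+ *	pthread_key_create(&key, free);			-- once per process
+ *	pthread_setspecific(key, strdup("per-thread"));
+ *	char *s = pthread_getspecific(key);		-- this thread's copy
+ *
+ * The destructor registered here (free) is run on each thread's
+ * non-NULL value at thread exit via _rthread_tls_destructors() above.
+ */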
diff --git a/lib/libc/thread/synch.h b/lib/libc/thread/synch.h
new file mode 100644
index 00000000000..0ac8b3419b1
--- /dev/null
+++ b/lib/libc/thread/synch.h
@@ -0,0 +1,63 @@
+/* $OpenBSD: synch.h,v 1.1 2017/08/15 06:13:24 guenther Exp $ */
+/*
+ * Copyright (c) 2017 Martin Pieuchot
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/atomic.h>
+#include <sys/time.h>
+#include <sys/futex.h>
+
+REDIRECT_SYSCALL(futex);
+
+static inline int
+_wake(volatile uint32_t *p, int n)
+{
+ return futex(p, FUTEX_WAKE, n, NULL, NULL);
+}
+
+static inline void
+_wait(volatile uint32_t *p, int val)
+{
+ while (*p != (uint32_t)val)
+ futex(p, FUTEX_WAIT, val, NULL, NULL);
+}
+
+static inline int
+_twait(volatile uint32_t *p, int val, clockid_t clockid, const struct timespec *abs)
+{
+ struct timespec rel;
+
+ if (abs == NULL)
+ return futex(p, FUTEX_WAIT, val, NULL, NULL);
+
+ if (abs->tv_nsec >= 1000000000 || clock_gettime(clockid, &rel))
+ return (EINVAL);
+
+ rel.tv_sec = abs->tv_sec - rel.tv_sec;
+ if ((rel.tv_nsec = abs->tv_nsec - rel.tv_nsec) < 0) {
+ rel.tv_sec--;
+ rel.tv_nsec += 1000000000;
+ }
+ if (rel.tv_sec < 0)
+ return (ETIMEDOUT);
+
+ return futex(p, FUTEX_WAIT, val, &rel, NULL);
+}
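+
+/*
+ * e.g. if abs lies about 1.5s after the current time on clockid, the
+ * code above derives roughly rel = { 1, 500000000 } and the futex
+ * sleep is bounded by that; an abs already in the past returns
+ * ETIMEDOUT without calling futex(2) at all.
+ */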
+
+static inline int
+_requeue(volatile uint32_t *p, int n, int m, volatile uint32_t *q)
+{
+ return futex(p, FUTEX_REQUEUE, n, (void *)(long)m, q);
+}