author     mpi <mpi@openbsd.org>  2017-05-27 14:20:39 +0000
committer  mpi <mpi@openbsd.org>  2017-05-27 14:20:39 +0000
commit     9d7be9d9a98ff5bc44a9a52f4935b562fd019ee6 (patch)
tree       018644354f28d4ae847693b5ac2c9db349b8e818
parent     remove #ifdef HIBERNATE section that declares stuff that lives in ahci.c (diff)
New mutex and condvar implementations based on futex(2).
Not enabled yet, it needs some SPINLOCK_SPIN_HOOK love and some bumps.

Tested by many including sthen@ in a bulk.

ok visa@, sthen@, kettenis@, tedu@
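For readers new to futex(2): both new implementations sleep and wake on a plain
32-bit word in userland memory and only enter the kernel under contention. A
minimal illustration (not part of the commit) written against the _wait()/_wake()
wrappers added in synch.h below; flag, waiter() and poster() are made-up names:

static volatile uint32_t flag = 0;

void
waiter(void)
{
	/* FUTEX_WAIT: sleep only while ``flag'' still reads 0. */
	_wait(&flag, 0);
}

void
poster(void)
{
	flag = 1;		/* publish the new value first... */
	_wake(&flag, 1);	/* ...then wake one sleeping thread */
}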
-rw-r--r--   lib/librthread/rthread.h        |  31
-rw-r--r--   lib/librthread/rthread_cond.c   | 216
-rw-r--r--   lib/librthread/rthread_libc.c   |  16
-rw-r--r--   lib/librthread/rthread_mutex.c  | 281
-rw-r--r--   lib/librthread/synch.h          |  66
5 files changed, 602 insertions, 8 deletions
diff --git a/lib/librthread/rthread.h b/lib/librthread/rthread.h
index 65217653f24..92ccf459a33 100644
--- a/lib/librthread/rthread.h
+++ b/lib/librthread/rthread.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread.h,v 1.60 2016/09/04 10:13:35 akfaew Exp $ */
+/* $OpenBSD: rthread.h,v 1.61 2017/05/27 14:20:39 mpi Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -57,19 +57,31 @@ struct __sem {
TAILQ_HEAD(pthread_queue, pthread);
+#ifdef FUTEX
+
struct pthread_mutex {
_atomic_lock_t lock;
- struct pthread_queue lockers;
int type;
pthread_t owner;
int count;
int prioceiling;
};
-struct pthread_mutex_attr {
- int ma_type;
- int ma_protocol;
- int ma_prioceiling;
+struct pthread_cond {
+ _atomic_lock_t seq;
+ clockid_t clock;
+ struct pthread_mutex *mutex;
+};
+
+#else
+
+struct pthread_mutex {
+ _atomic_lock_t lock;
+ struct pthread_queue lockers;
+ int type;
+ pthread_t owner;
+ int count;
+ int prioceiling;
};
struct pthread_cond {
@@ -78,6 +90,13 @@ struct pthread_cond {
struct pthread_mutex *mutex;
clockid_t clock;
};
+#endif /* FUTEX */
+
+struct pthread_mutex_attr {
+ int ma_type;
+ int ma_protocol;
+ int ma_prioceiling;
+};
struct pthread_cond_attr {
clockid_t ca_clock;
diff --git a/lib/librthread/rthread_cond.c b/lib/librthread/rthread_cond.c
new file mode 100644
index 00000000000..3a484b04d06
--- /dev/null
+++ b/lib/librthread/rthread_cond.c
@@ -0,0 +1,216 @@
+/* $OpenBSD: rthread_cond.c,v 1.1 2017/05/27 14:20:39 mpi Exp $ */
+/*
+ * Copyright (c) 2017 Martin Pieuchot <mpi@openbsd.org>
+ * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <pthread.h>
+
+#include "rthread.h"
+#include "cancel.h"
+#include "synch.h"
+
+int
+pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attr)
+{
+ pthread_cond_t cond;
+
+ cond = calloc(1, sizeof(*cond));
+ if (cond == NULL)
+ return (ENOMEM);
+
+ if (attr == NULL)
+ cond->clock = CLOCK_REALTIME;
+ else
+ cond->clock = (*attr)->ca_clock;
+ *condp = cond;
+
+ return (0);
+}
+DEF_STD(pthread_cond_init);
+
+int
+pthread_cond_destroy(pthread_cond_t *condp)
+{
+ pthread_cond_t cond;
+
+ assert(condp != NULL);
+ cond = *condp;
+
+ if (cond != NULL) {
+ if (cond->mutex != NULL) {
+#define MSG "pthread_cond_destroy on condvar with waiters!\n"
+ write(2, MSG, sizeof(MSG) - 1);
+#undef MSG
+ return (EBUSY);
+ }
+ free(cond);
+ }
+ *condp = NULL;
+
+ return (0);
+}
+DEF_STD(pthread_cond_destroy);
+
+int
+_rthread_cond_timedwait(pthread_cond_t cond, pthread_mutex_t *mutexp,
+ const struct timespec *abs)
+{
+ struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
+ struct tib *tib = TIB_GET();
+ pthread_t self = tib->tib_thread;
+ int error, rv = 0, canceled = 0, mutex_count = 0;
+ clockid_t clock = cond->clock;
+ int seq = cond->seq;
+ PREP_CANCEL_POINT(tib);
+
+ _rthread_debug(5, "%p: cond_timed %p,%p (%p)\n", self,
+ (void *)cond, (void *)mutex, (void *)mutex->owner);
+
+ ENTER_DELAYED_CANCEL_POINT(tib, self);
+
+#if notyet
+ /* mark the condvar as being associated with this mutex */
+ if (cond->mutex == NULL)
+ atomic_cas_ptr(&cond->mutex, NULL, mutex);
+
+ if (cond->mutex != mutex) {
+ LEAVE_CANCEL_POINT_INNER(tib, 1);
+ return (EINVAL);
+ }
+#endif
+
+ /* snag the count in case this is a recursive mutex */
+ if (mutex->type == PTHREAD_MUTEX_RECURSIVE)
+ mutex_count = mutex->count;
+
+ pthread_mutex_unlock(mutexp);
+
+ do {
+ /* If ``seq'' wraps you deserve to lose a signal. */
+ error = _twait(&cond->seq, seq, clock, abs);
+ /*
+ * If we took a normal signal (not from cancellation) then
+ * we should just go back to sleep without changing state
+ * (timeouts, etc).
+ */
+ } while ((error == EINTR) &&
+ (tib->tib_canceled == 0 || (tib->tib_cantcancel & CANCEL_DISABLED)));
+
+ /* if timeout or canceled, make note of that */
+ if (error == ETIMEDOUT)
+ rv = ETIMEDOUT;
+ else if (error == EINTR)
+ canceled = 1;
+
+ pthread_mutex_lock(mutexp);
+
+ /* restore the mutex's count */
+ if (mutex->type == PTHREAD_MUTEX_RECURSIVE)
+ mutex->count = mutex_count;
+
+ LEAVE_CANCEL_POINT_INNER(tib, canceled);
+
+ return rv;
+}
+
+int
+pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
+ const struct timespec *abs)
+{
+ pthread_cond_t cond;
+ int error;
+
+ if (*condp == NULL) {
+ if ((error = pthread_cond_init(condp, NULL)))
+ return (error);
+ }
+
+ cond = *condp;
+ if (abs == NULL || abs->tv_sec < 0 || abs->tv_nsec < 0 ||
+ abs->tv_nsec >= 1000000000)
+ return (EINVAL);
+
+ return (_rthread_cond_timedwait(cond, mutexp, abs));
+}
+
+int
+pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
+{
+ pthread_cond_t cond;
+ int error;
+
+ if (*condp == NULL) {
+ if ((error = pthread_cond_init(condp, NULL)))
+ return (error);
+ }
+
+ cond = *condp;
+ return (_rthread_cond_timedwait(cond, mutexp, NULL));
+}
+DEF_STD(pthread_cond_wait);
+
+int
+pthread_cond_signal(pthread_cond_t *condp)
+{
+ pthread_cond_t cond;
+ int count;
+
+ if (*condp == NULL)
+ return (0);
+
+ cond = *condp;
+
+ atomic_inc_int(&cond->seq);
+ count = _wake(&cond->seq, 1);
+
+ _rthread_debug(5, "%p: cond_signal %p, %d awaken\n", pthread_self(),
+ (void *)cond, count);
+
+ return (0);
+}
+DEF_STD(pthread_cond_signal);
+
+int
+pthread_cond_broadcast(pthread_cond_t *condp)
+{
+ pthread_cond_t cond;
+ int count;
+
+ if (*condp == NULL)
+ return (0);
+
+ cond = *condp;
+
+ atomic_inc_int(&cond->seq);
+#if notyet
+ count = _requeue(&cond->seq, 1, INT_MAX, &cond->mutex->lock);
+#else
+ count = _wake(&cond->seq, INT_MAX);
+#endif
+
+ _rthread_debug(5, "%p: cond_broadcast %p, %d awaken\n", pthread_self(),
+ (void *)cond, count);
+
+ return (0);
+}
+DEF_STD(pthread_cond_broadcast);
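Caller-side sketch (not part of the commit) of how the functions above are meant
to be used: since cond->seq is bumped by any signal or broadcast and _twait() can
return early, waiters must re-check their predicate in a loop; ready, producer()
and consumer() are made-up names:

#include <pthread.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t c = PTHREAD_COND_INITIALIZER;
static int ready;

void *
consumer(void *arg)
{
	pthread_mutex_lock(&m);
	while (!ready)			/* re-check after every wakeup */
		pthread_cond_wait(&c, &m);
	pthread_mutex_unlock(&m);
	return (NULL);
}

void *
producer(void *arg)
{
	pthread_mutex_lock(&m);
	ready = 1;
	pthread_mutex_unlock(&m);
	pthread_cond_signal(&c);	/* bumps ``seq'' and wakes one waiter */
	return (NULL);
}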
diff --git a/lib/librthread/rthread_libc.c b/lib/librthread/rthread_libc.c
index 4209b244a7b..645c5605fd1 100644
--- a/lib/librthread/rthread_libc.c
+++ b/lib/librthread/rthread_libc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_libc.c,v 1.17 2016/09/04 10:13:35 akfaew Exp $ */
+/* $OpenBSD: rthread_libc.c,v 1.18 2017/05/27 14:20:39 mpi Exp $ */
/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
@@ -152,13 +152,22 @@ _thread_mutex_destroy(void **mutex)
/*
* the malloc lock
*/
+#ifndef FUTEX
#define MALLOC_LOCK_INITIALIZER(n) { \
_SPINLOCK_UNLOCKED, \
TAILQ_HEAD_INITIALIZER(malloc_lock[n].lockers), \
PTHREAD_MUTEX_DEFAULT, \
NULL, \
0, \
- -1 } \
+ -1 }
+#else
+#define MALLOC_LOCK_INITIALIZER(n) { \
+ _SPINLOCK_UNLOCKED, \
+ PTHREAD_MUTEX_DEFAULT, \
+ NULL, \
+ 0, \
+ -1 }
+#endif
static struct pthread_mutex malloc_lock[_MALLOC_MUTEXES] = {
MALLOC_LOCK_INITIALIZER(0),
@@ -166,6 +175,7 @@ static struct pthread_mutex malloc_lock[_MALLOC_MUTEXES] = {
MALLOC_LOCK_INITIALIZER(2),
MALLOC_LOCK_INITIALIZER(3)
};
+
static pthread_mutex_t malloc_mutex[_MALLOC_MUTEXES] = {
&malloc_lock[0],
&malloc_lock[1],
@@ -192,7 +202,9 @@ _thread_malloc_reinit(void)
for (i = 0; i < _MALLOC_MUTEXES; i++) {
malloc_lock[i].lock = _SPINLOCK_UNLOCKED;
+#ifndef FUTEX
TAILQ_INIT(&malloc_lock[i].lockers);
+#endif
malloc_lock[i].owner = NULL;
malloc_lock[i].count = 0;
}
diff --git a/lib/librthread/rthread_mutex.c b/lib/librthread/rthread_mutex.c
new file mode 100644
index 00000000000..d596332e417
--- /dev/null
+++ b/lib/librthread/rthread_mutex.c
@@ -0,0 +1,281 @@
+/* $OpenBSD: rthread_mutex.c,v 1.1 2017/05/27 14:20:39 mpi Exp $ */
+/*
+ * Copyright (c) 2017 Martin Pieuchot <mpi@openbsd.org>
+ * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <pthread.h>
+
+#include "rthread.h"
+#include "cancel.h"
+#include "synch.h"
+
+/*
+ * States defined in "Futexes Are Tricky" 5.2
+ */
+enum {
+ UNLOCKED = 0,
+ LOCKED = 1, /* locked without waiter */
+ CONTENDED = 2, /* threads waiting for this mutex */
+};
+
+static _atomic_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
+
+int
+pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
+{
+ pthread_mutex_t mutex;
+
+ mutex = calloc(1, sizeof(*mutex));
+ if (mutex == NULL)
+ return (ENOMEM);
+
+ if (attr == NULL) {
+ mutex->type = PTHREAD_MUTEX_DEFAULT;
+ mutex->prioceiling = -1;
+ } else {
+ mutex->type = (*attr)->ma_type;
+ mutex->prioceiling = (*attr)->ma_protocol ==
+ PTHREAD_PRIO_PROTECT ? (*attr)->ma_prioceiling : -1;
+ }
+ *mutexp = mutex;
+
+ return (0);
+}
+DEF_STD(pthread_mutex_init);
+
+int
+pthread_mutex_destroy(pthread_mutex_t *mutexp)
+{
+ pthread_mutex_t mutex;
+
+ if (mutexp == NULL || *mutexp == NULL)
+ return (EINVAL);
+
+ mutex = *mutexp;
+ if (mutex) {
+ if (mutex->lock != UNLOCKED) {
+#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
+ write(2, MSG, sizeof(MSG) - 1);
+#undef MSG
+ return (EBUSY);
+ }
+ free((void *)mutex);
+ *mutexp = NULL;
+ }
+
+ return (0);
+}
+DEF_STD(pthread_mutex_destroy);
+
+static int
+_rthread_mutex_trylock(pthread_mutex_t mutex, int trywait,
+ const struct timespec *abs)
+{
+ pthread_t self = pthread_self();
+
+ if (atomic_cas_uint(&mutex->lock, UNLOCKED, LOCKED) == UNLOCKED) {
+ membar_enter();
+ mutex->owner = self;
+ return (0);
+ }
+
+ if (mutex->owner == self) {
+ int type = mutex->type;
+
+ /* already owner? handle recursive behavior */
+ if (type != PTHREAD_MUTEX_RECURSIVE) {
+ if (trywait || type == PTHREAD_MUTEX_ERRORCHECK)
+ return (trywait ? EBUSY : EDEADLK);
+
+ /* self-deadlock is disallowed by strict */
+ if (type == PTHREAD_MUTEX_STRICT_NP && abs == NULL)
+ abort();
+
+ /* self-deadlock, possibly until timeout */
+ while (_twait(&mutex->type, type, CLOCK_REALTIME,
+ abs) != ETIMEDOUT)
+ ;
+ return (ETIMEDOUT);
+ } else {
+ if (mutex->count == INT_MAX)
+ return (EAGAIN);
+ mutex->count++;
+ return (0);
+ }
+ }
+
+ return (EBUSY);
+}
+
+static int
+_rthread_mutex_timedlock(pthread_mutex_t *mutexp, int trywait,
+ const struct timespec *abs, int timed)
+{
+ pthread_t self = pthread_self();
+ pthread_mutex_t mutex;
+ unsigned int i, lock;
+ int error = 0;
+
+ if (mutexp == NULL)
+ return (EINVAL);
+
+ /*
+ * If the mutex is statically initialized, perform the dynamic
+ * initialization. Note: _thread_mutex_lock() in libc requires
+ * pthread_mutex_lock() to perform the mutex init when *mutexp
+ * is NULL.
+ */
+ if (*mutexp == NULL) {
+ _spinlock(&static_init_lock);
+ if (*mutexp == NULL)
+ error = pthread_mutex_init(mutexp, NULL);
+ _spinunlock(&static_init_lock);
+ if (error != 0)
+ return (EINVAL);
+ }
+
+ mutex = *mutexp;
+ _rthread_debug(5, "%p: mutex_%slock %p (%p)\n", self,
+ (timed ? "timed" : (trywait ? "try" : "")), (void *)mutex,
+ (void *)mutex->owner);
+
+ error = _rthread_mutex_trylock(mutex, trywait, abs);
+ if (error != EBUSY || trywait)
+ return (error);
+
+ /* Try hard to not enter the kernel. */
+ for (i = 0; i < SPINLOCK_SPIN_COUNT; i ++) {
+ if (mutex->lock == UNLOCKED)
+ break;
+
+ SPINLOCK_SPIN_HOOK;
+ }
+
+ lock = atomic_cas_uint(&mutex->lock, UNLOCKED, LOCKED);
+ if (lock == UNLOCKED) {
+ membar_enter();
+ mutex->owner = self;
+ return (0);
+ }
+
+ if (lock != CONTENDED) {
+ /* Indicate that we're waiting on this mutex. */
+ lock = atomic_swap_uint(&mutex->lock, CONTENDED);
+ }
+
+ while (lock != UNLOCKED) {
+ error = _twait(&mutex->lock, CONTENDED, CLOCK_REALTIME, abs);
+ if (error == ETIMEDOUT)
+ return (error);
+ /*
+ * We cannot know if there's another waiter, so in
+ * doubt set the state to CONTENDED.
+ */
+ lock = atomic_swap_uint(&mutex->lock, CONTENDED);
+ };
+
+ membar_enter();
+ mutex->owner = self;
+ return (0);
+}
+
+int
+pthread_mutex_trylock(pthread_mutex_t *mutexp)
+{
+ return (_rthread_mutex_timedlock(mutexp, 1, NULL, 0));
+}
+
+int
+pthread_mutex_timedlock(pthread_mutex_t *mutexp, const struct timespec *abs)
+{
+ return (_rthread_mutex_timedlock(mutexp, 0, abs, 1));
+}
+
+int
+pthread_mutex_lock(pthread_mutex_t *mutexp)
+{
+ return (_rthread_mutex_timedlock(mutexp, 0, NULL, 0));
+}
+DEF_STD(pthread_mutex_lock);
+
+int
+pthread_mutex_unlock(pthread_mutex_t *mutexp)
+{
+ pthread_t self = pthread_self();
+ pthread_mutex_t mutex;
+
+ if (mutexp == NULL)
+ return (EINVAL);
+
+ if (*mutexp == NULL)
+#if PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_ERRORCHECK
+ return (EPERM);
+#elif PTHREAD_MUTEX_DEFAULT == PTHREAD_MUTEX_NORMAL
+ return(0);
+#else
+ abort();
+#endif
+
+ mutex = *mutexp;
+ _rthread_debug(5, "%p: mutex_unlock %p (%p)\n", self, (void *)mutex,
+ (void *)mutex->owner);
+
+ if (mutex->owner != self) {
+ _rthread_debug(5, "%p: different owner %p (%p)\n", self, (void *)mutex,
+ (void *)mutex->owner);
+ if (mutex->type == PTHREAD_MUTEX_ERRORCHECK ||
+ mutex->type == PTHREAD_MUTEX_RECURSIVE) {
+ return (EPERM);
+ } else {
+ /*
+ * For mutex type NORMAL our undefined behavior for
+ * unlocking an unlocked mutex is to succeed without
+ * error. All other undefined behaviors are to
+ * abort() immediately.
+ */
+ if (mutex->owner == NULL &&
+ mutex->type == PTHREAD_MUTEX_NORMAL)
+ return (0);
+ else
+ abort();
+
+ }
+ }
+
+ if (mutex->type == PTHREAD_MUTEX_RECURSIVE) {
+ if (mutex->count > 0) {
+ mutex->count--;
+ return (0);
+ }
+ }
+
+ membar_exit();
+ mutex->owner = NULL;
+ if (atomic_dec_int_nv(&mutex->lock) != UNLOCKED) {
+ mutex->lock = UNLOCKED;
+ _wake(&mutex->lock, 1);
+ }
+
+ return (0);
+}
+DEF_STD(pthread_mutex_unlock);
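The lock word above follows the three-state protocol from "Futexes Are Tricky"
section 5.2. A stripped-down sketch of the same state machine (not part of the
commit; no recursion, error checks, spinning, timeouts or memory barriers), using
the UNLOCKED/LOCKED/CONTENDED values and the _twait()/_wake() wrappers from
synch.h; sketch_lock() and sketch_unlock() are made-up names:

static void
sketch_lock(volatile unsigned int *lock)
{
	/* Fast path: UNLOCKED -> LOCKED without entering the kernel. */
	if (atomic_cas_uint(lock, UNLOCKED, LOCKED) == UNLOCKED)
		return;

	/* Slow path: advertise a waiter, sleep until the word clears. */
	while (atomic_swap_uint(lock, CONTENDED) != UNLOCKED)
		_twait(lock, CONTENDED, CLOCK_REALTIME, NULL);
}

static void
sketch_unlock(volatile unsigned int *lock)
{
	/* If LOCKED drops straight to UNLOCKED there is nobody to wake. */
	if (atomic_dec_int_nv(lock) != UNLOCKED) {
		/* Old value was CONTENDED: clear it and wake one waiter. */
		*lock = UNLOCKED;
		_wake(lock, 1);
	}
}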
diff --git a/lib/librthread/synch.h b/lib/librthread/synch.h
new file mode 100644
index 00000000000..8f3f241c2f7
--- /dev/null
+++ b/lib/librthread/synch.h
@@ -0,0 +1,66 @@
+/* $OpenBSD: synch.h,v 1.1 2017/05/27 14:20:39 mpi Exp $ */
+/*
+ * Copyright (c) 2017 Martin Pieuchot
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/atomic.h>
+#include <sys/time.h>
+#include <sys/futex.h>
+
+REDIRECT_SYSCALL(futex);
+
+#include <machine/lock.h> /* for SPINLOCK_SPIN_HOOK */
+#define SPINLOCK_SPIN_COUNT 128
+
+static inline int
+_wake(volatile uint32_t *p, int n)
+{
+ return futex(p, FUTEX_WAKE, n, NULL, NULL);
+}
+
+static inline void
+_wait(volatile uint32_t *p, int val)
+{
+ while (*p != (uint32_t)val)
+ futex(p, FUTEX_WAIT, val, NULL, NULL);
+}
+
+static inline int
+_twait(volatile uint32_t *p, int val, clockid_t clockid, const struct timespec *abs)
+{
+ struct timespec rel;
+
+ if (abs == NULL)
+ return futex(p, FUTEX_WAIT, val, NULL, NULL);
+
+ if (abs->tv_nsec >= 1000000000 || clock_gettime(clockid, &rel))
+ return (EINVAL);
+
+ rel.tv_sec = abs->tv_sec - rel.tv_sec;
+ if ((rel.tv_nsec = abs->tv_nsec - rel.tv_nsec) < 0) {
+ rel.tv_sec--;
+ rel.tv_nsec += 1000000000;
+ }
+ if (rel.tv_sec < 0)
+ return (ETIMEDOUT);
+
+ return futex(p, FUTEX_WAIT, val, &rel, NULL);
+}
+
+static inline int
+_requeue(volatile uint32_t *p, int n, int m, volatile uint32_t *q)
+{
+ return futex(p, FUTEX_REQUEUE, n, (void *)(long)m, q);
+}
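_twait() takes an absolute deadline and converts it to the relative timeout that
FUTEX_WAIT expects. A usage sketch (not part of the commit) of building such a
deadline for pthread_mutex_timedlock(); lock_with_timeout() is a made-up name:

#include <time.h>
#include <pthread.h>

int
lock_with_timeout(pthread_mutex_t *m, time_t seconds)
{
	struct timespec abs;

	clock_gettime(CLOCK_REALTIME, &abs);	/* now... */
	abs.tv_sec += seconds;			/* ...plus the timeout */

	/* Returns 0 on success, ETIMEDOUT once the deadline passes. */
	return (pthread_mutex_timedlock(m, &abs));
}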