summaryrefslogtreecommitdiffstats
path: root/lib/librthread
diff options
context:
space:
mode:
authorguenther <guenther@openbsd.org>2012-01-17 02:34:18 +0000
committerguenther <guenther@openbsd.org>2012-01-17 02:34:18 +0000
commit2aa8ea07027e21f7e3a39dbbc13d2a3af24af722 (patch)
tree5169bf71eaf80e7fda22d364a1fdfb1d5decc13b /lib/librthread
parentnet_addrcmp() dies. found out to be a horrific function by eric (diff)
downloadwireguard-openbsd-2aa8ea07027e21f7e3a39dbbc13d2a3af24af722.tar.xz
wireguard-openbsd-2aa8ea07027e21f7e3a39dbbc13d2a3af24af722.zip
Reimplement mutexes, condvars, and rwlocks to eliminate bugs,
particularly the "consume the signal you just sent" hang, and putting the wait queues in userspace. Do cancellation handling in pthread_cond_*wait(), pthread_join(), and sem_wait(). Add __ prefix to thr{sleep,wakeup,exit,sigdivert}() syscalls; add 'abort' argument to thrsleep to close cancellation race; make thr{sleep,wakeup} return errno values via *retval to avoid touching userspace errno.
Diffstat (limited to 'lib/librthread')
-rw-r--r--lib/librthread/arch/alpha/rfork_thread.S4
-rw-r--r--lib/librthread/arch/amd64/rfork_thread.S6
-rw-r--r--lib/librthread/arch/arm/rfork_thread.S4
-rw-r--r--lib/librthread/arch/hppa/rfork_thread.S4
-rw-r--r--lib/librthread/arch/i386/rfork_thread.S4
-rw-r--r--lib/librthread/arch/m68k/rfork_thread.S4
-rw-r--r--lib/librthread/arch/m88k/rfork_thread.S4
-rw-r--r--lib/librthread/arch/mips64/rfork_thread.S4
-rw-r--r--lib/librthread/arch/powerpc/rfork_thread.S4
-rw-r--r--lib/librthread/arch/sh/rfork_thread.S6
-rw-r--r--lib/librthread/arch/sparc/rfork_thread.S4
-rw-r--r--lib/librthread/arch/sparc64/rfork_thread.S4
-rw-r--r--lib/librthread/arch/vax/rfork_thread.S4
-rw-r--r--lib/librthread/rthread.c59
-rw-r--r--lib/librthread/rthread.h53
-rw-r--r--lib/librthread/rthread_cancel.c32
-rw-r--r--lib/librthread/rthread_file.c6
-rw-r--r--lib/librthread/rthread_rwlock.c244
-rw-r--r--lib/librthread/rthread_sem.c97
-rw-r--r--lib/librthread/rthread_sig.c4
-rw-r--r--lib/librthread/rthread_sync.c472
-rw-r--r--lib/librthread/shlib_version4
22 files changed, 720 insertions, 307 deletions
diff --git a/lib/librthread/arch/alpha/rfork_thread.S b/lib/librthread/arch/alpha/rfork_thread.S
index 3205958f6a9..3cd2ab68106 100644
--- a/lib/librthread/arch/alpha/rfork_thread.S
+++ b/lib/librthread/arch/alpha/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.3 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.4 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
@@ -52,6 +52,6 @@ LEAF(__tfork_thread,0)
jsr ra, (pv)
mov zero, a0
- CALLSYS_NOERROR(threxit)
+ CALLSYS_NOERROR(__threxit)
END(__tfork_thread)
diff --git a/lib/librthread/arch/amd64/rfork_thread.S b/lib/librthread/arch/amd64/rfork_thread.S
index 77b62ef8005..d5bd9a428eb 100644
--- a/lib/librthread/arch/amd64/rfork_thread.S
+++ b/lib/librthread/arch/amd64/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.5 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.6 2012/01/17 02:34:18 guenther Exp $ */
/*-
* Copyright (c) 2000 Peter Wemm <peter@FreeBSD.org>
* Copyright (c) 2003 Alan L. Cox <alc@cs.rice.edu>
@@ -68,7 +68,7 @@ ENTRY(__tfork_thread)
/*
* If we are in the child (new thread), then
* set-up the call to the internal subroutine. If it
- * returns, then call threxit.
+ * returns, then call __threxit.
*/
1:
movq %rsi, %rsp
@@ -78,7 +78,7 @@ ENTRY(__tfork_thread)
/*
* Thread exit system call
*/
- movl $SYS_threxit, %eax
+ movl $SYS___threxit, %eax
xorl %edi, %edi
syscall
diff --git a/lib/librthread/arch/arm/rfork_thread.S b/lib/librthread/arch/arm/rfork_thread.S
index a631b1b8ce3..749ba0490fa 100644
--- a/lib/librthread/arch/arm/rfork_thread.S
+++ b/lib/librthread/arch/arm/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.3 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.4 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005 Dale Rahn <drahn@openbsd.org>
*
@@ -42,7 +42,7 @@ ENTRY(__tfork_thread)
mov lr, pc
mov pc, r2
nop
- SYSTRAP(threxit)
+ SYSTRAP(__threxit)
1:
ldmia sp!, {r4}
b PIC_SYM(CERROR, PLT)
diff --git a/lib/librthread/arch/hppa/rfork_thread.S b/lib/librthread/arch/hppa/rfork_thread.S
index c98d4ee3bac..e29488db4d1 100644
--- a/lib/librthread/arch/hppa/rfork_thread.S
+++ b/lib/librthread/arch/hppa/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
@@ -56,7 +56,7 @@ ENTRY(__tfork_thread, 0)
copy r31, rp
copy r0, arg0
- SYSCALL(threxit)
+ SYSCALL(__threxit)
1:
bv r0(rp)
diff --git a/lib/librthread/arch/i386/rfork_thread.S b/lib/librthread/arch/i386/rfork_thread.S
index 1bc1d3e9e32..9fe8da9310c 100644
--- a/lib/librthread/arch/i386/rfork_thread.S
+++ b/lib/librthread/arch/i386/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.4 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.5 2012/01/17 02:34:18 guenther Exp $ */
/*-
* Copyright (c) 2000 Peter Wemm <peter@FreeBSD.org>
* All rights reserved.
@@ -101,7 +101,7 @@ ENTRY(__tfork_thread)
*/
pushl %eax
pushl $0
- movl $SYS_threxit, %eax
+ movl $SYS___threxit, %eax
int $0x80
/*
diff --git a/lib/librthread/arch/m68k/rfork_thread.S b/lib/librthread/arch/m68k/rfork_thread.S
index 80c682baf50..3931a4a8eb1 100644
--- a/lib/librthread/arch/m68k/rfork_thread.S
+++ b/lib/librthread/arch/m68k/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
@@ -61,7 +61,7 @@ ENTRY(__tfork_thread)
jsr a1@ /* func */
addq #4, sp
- __DO_SYSCALL(threxit)
+ __DO_SYSCALL(__threxit)
9:
/*
diff --git a/lib/librthread/arch/m88k/rfork_thread.S b/lib/librthread/arch/m88k/rfork_thread.S
index 2c820232a40..d8f7657bd30 100644
--- a/lib/librthread/arch/m88k/rfork_thread.S
+++ b/lib/librthread/arch/m88k/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
@@ -51,5 +51,5 @@ ENTRY(__tfork_thread)
jsr.n r4 /* func */
or r2, r5, r0 /* arg */
- or r13, r0, __SYSCALLNAME(SYS_,threxit)
+ or r13, r0, __SYSCALLNAME(SYS_,__threxit)
tb0 0, r0, 128
diff --git a/lib/librthread/arch/mips64/rfork_thread.S b/lib/librthread/arch/mips64/rfork_thread.S
index 32ee5307bee..adb73dbc24d 100644
--- a/lib/librthread/arch/mips64/rfork_thread.S
+++ b/lib/librthread/arch/mips64/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
@@ -64,7 +64,7 @@ LEAF(__tfork_thread, 32)
move v0, zero
move a0, zero
- __DO_SYSCALL(threxit)
+ __DO_SYSCALL(__threxit)
9:
/*
diff --git a/lib/librthread/arch/powerpc/rfork_thread.S b/lib/librthread/arch/powerpc/rfork_thread.S
index 5731553fbe7..103ff9f10a0 100644
--- a/lib/librthread/arch/powerpc/rfork_thread.S
+++ b/lib/librthread/arch/powerpc/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.4 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.5 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005 Tim Wiess <tim@nop.cx>
@@ -45,7 +45,7 @@ ENTRY(__tfork_thread)
blrl
/* child returned, call _exit */
- li %r0, SYS_threxit
+ li %r0, SYS___threxit
sc
1:
li %r3, -1
diff --git a/lib/librthread/arch/sh/rfork_thread.S b/lib/librthread/arch/sh/rfork_thread.S
index fb6002477fc..51100162285 100644
--- a/lib/librthread/arch/sh/rfork_thread.S
+++ b/lib/librthread/arch/sh/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2007 Miodrag Vallat.
@@ -45,7 +45,7 @@ ENTRY(__tfork_thread)
jsr @r6
mov r7, r4
- mov.l .LSYS_threxit, r0
+ mov.l .LSYS___threxit, r0
.word 0xc380 /* trapa #0x80 */
9:
@@ -56,6 +56,6 @@ ENTRY(__tfork_thread)
.align 2
.LSYS___tfork: .long SYS___tfork
-.LSYS_threxit: .long SYS_threxit
+.LSYS___threxit: .long SYS___threxit
SET_ENTRY_SIZE(__tfork_thread)
diff --git a/lib/librthread/arch/sparc/rfork_thread.S b/lib/librthread/arch/sparc/rfork_thread.S
index fc4b4f998c7..2feac006077 100644
--- a/lib/librthread/arch/sparc/rfork_thread.S
+++ b/lib/librthread/arch/sparc/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.2 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.3 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
@@ -63,7 +63,7 @@ ENTRY(__tfork_thread)
call %o2 /* func */
mov %o3, %o0 /* arg */
- mov SYS_threxit, %g1
+ mov SYS___threxit, %g1
clr %o0
t ST_SYSCALL /* will not return */
diff --git a/lib/librthread/arch/sparc64/rfork_thread.S b/lib/librthread/arch/sparc64/rfork_thread.S
index d35f6ba1d74..86b876db714 100644
--- a/lib/librthread/arch/sparc64/rfork_thread.S
+++ b/lib/librthread/arch/sparc64/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.3 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.4 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
@@ -62,7 +62,7 @@ ENTRY(__tfork_thread)
call %o2 /* func */
mov %o3, %o0 /* arg */
- mov SYS_threxit, %g1
+ mov SYS___threxit, %g1
clr %o0
t ST_SYSCALL /* will not return */
diff --git a/lib/librthread/arch/vax/rfork_thread.S b/lib/librthread/arch/vax/rfork_thread.S
index a7e6259b9e0..f5cc4231c58 100644
--- a/lib/librthread/arch/vax/rfork_thread.S
+++ b/lib/librthread/arch/vax/rfork_thread.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: rfork_thread.S,v 1.3 2011/10/17 06:39:20 guenther Exp $ */
+/* $OpenBSD: rfork_thread.S,v 1.4 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005, Miodrag Vallat
@@ -60,7 +60,7 @@ ENTRY(__tfork_thread, R2|R3|R4)
pushl r4 /* arg */
calls $1, *4(sp) /* func */
- __DO_SYSCALL(threxit)
+ __DO_SYSCALL(__threxit)
9:
/*
diff --git a/lib/librthread/rthread.c b/lib/librthread/rthread.c
index b4016f656d5..8f761c3896d 100644
--- a/lib/librthread/rthread.c
+++ b/lib/librthread/rthread.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread.c,v 1.49 2011/12/28 04:59:31 guenther Exp $ */
+/* $OpenBSD: rthread.c,v 1.50 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -103,9 +103,31 @@ sigthr_handler(__unused int sig)
{
pthread_t self = pthread_self();
- if ((self->flags & (THREAD_CANCELED | THREAD_CANCEL_COND)) ==
- THREAD_CANCELED && (self->cancel_point ||
- (self->flags & THREAD_CANCEL_DEFERRED) == 0))
+ /*
+ * Do nothing unless
+ * 1) pthread_cancel() has been called on this thread,
+ * 2) cancelation is enabled for it, and
+ * 3) we're not already in cancelation processing
+ */
+ if ((self->flags & (THREAD_CANCELED|THREAD_CANCEL_ENABLE|THREAD_DYING))
+ != (THREAD_CANCELED|THREAD_CANCEL_ENABLE))
+ return;
+
+ /*
+ * If delaying cancels inside complex ops (pthread_cond_wait,
+ * pthread_join, etc), just mark that this has happened to
+ * prevent a race with going to sleep
+ */
+ if (self->flags & THREAD_CANCEL_DELAY) {
+ self->delayed_cancel = 1;
+ return;
+ }
+
+ /*
+ * otherwise, if in a cancel point or async cancels are
+ * enabled, then exit
+ */
+ if (self->cancel_point || (self->flags & THREAD_CANCEL_DEFERRED) == 0)
pthread_exit(PTHREAD_CANCELED);
}
@@ -123,10 +145,12 @@ _rthread_init(void)
strlcpy(thread->name, "Main process", sizeof(thread->name));
LIST_INSERT_HEAD(&_thread_list, thread, threads);
_rthread_debug_init();
- _rthread_debug(1, "rthread init\n");
+
_threads_ready = 1;
__isthreaded = 1;
+ _rthread_debug(1, "rthread init\n");
+
#if defined(__ELF__) && defined(PIC)
/*
* To avoid recursion problems in ld.so, we need to trigger the
@@ -177,7 +201,7 @@ _rthread_free(pthread_t thread)
}
}
-static void
+void
_rthread_setflag(pthread_t thread, int flag)
{
_spinlock(&thread->flags_lock);
@@ -185,7 +209,7 @@ _rthread_setflag(pthread_t thread, int flag)
_spinunlock(&thread->flags_lock);
}
-static void
+void
_rthread_clearflag(pthread_t thread, int flag)
{
_spinlock(&thread->flags_lock);
@@ -269,34 +293,38 @@ pthread_exit(void *retval)
_sem_post(&thread->donesem);
}
- threxit(&thread->tid);
+ __threxit(&thread->tid);
for(;;);
}
int
pthread_join(pthread_t thread, void **retval)
{
- int e;
+ int e, r;
pthread_t self = pthread_self();
+ e = r = 0;
+ _enter_delayed_cancel(self);
if (thread == NULL)
e = EINVAL;
else if (thread == self)
e = EDEADLK;
else if (thread->flags & THREAD_DETACHED)
e = EINVAL;
- else {
- _sem_wait(&thread->donesem, 0);
+ else if ((r = _sem_wait(&thread->donesem, 0, &self->delayed_cancel))) {
if (retval)
*retval = thread->retval;
- e = 0;
- /* We should be the last having a ref to this thread, but
- * someone stupid or evil might haved detached it;
- * in that case the thread will cleanup itself */
+
+ /*
+ * We should be the last having a ref to this thread,
+ * but someone stupid or evil might haved detached it;
+ * in that case the thread will clean up itself
+ */
if ((thread->flags & THREAD_DETACHED) == 0)
_rthread_free(thread);
}
+ _leave_delayed_cancel(self, !r);
_rthread_reaper();
return (e);
}
@@ -439,7 +467,6 @@ pthread_setcancelstate(int state, int *oldstatep)
PTHREAD_CANCEL_ENABLE : PTHREAD_CANCEL_DISABLE;
if (state == PTHREAD_CANCEL_ENABLE) {
_rthread_setflag(self, THREAD_CANCEL_ENABLE);
- pthread_testcancel();
} else if (state == PTHREAD_CANCEL_DISABLE) {
_rthread_clearflag(self, THREAD_CANCEL_ENABLE);
} else {
diff --git a/lib/librthread/rthread.h b/lib/librthread/rthread.h
index e20bf11d734..391f0746699 100644
--- a/lib/librthread/rthread.h
+++ b/lib/librthread/rthread.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread.h,v 1.30 2011/12/21 00:49:47 guenther Exp $ */
+/* $OpenBSD: rthread.h,v 1.31 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -54,7 +54,8 @@ struct sem {
TAILQ_HEAD(pthread_queue, pthread);
struct pthread_mutex {
- struct sem sem;
+ _spinlock_lock_t lock;
+ struct pthread_queue lockers;
int type;
pthread_t owner;
int count;
@@ -68,7 +69,9 @@ struct pthread_mutex_attr {
};
struct pthread_cond {
- struct sem sem;
+ _spinlock_lock_t lock;
+ struct pthread_queue waiters;
+ struct pthread_mutex *mutex;
};
struct pthread_cond_attr {
@@ -76,10 +79,10 @@ struct pthread_cond_attr {
};
struct pthread_rwlock {
- struct sem sem;
_spinlock_lock_t lock;
+ pthread_t owner;
+ struct pthread_queue writers;
int readers;
- int writer;
};
struct pthread_rwlockattr {
@@ -133,22 +136,32 @@ struct pthread {
struct stack *stack;
LIST_ENTRY(pthread) threads;
TAILQ_ENTRY(pthread) waiting;
+ pthread_cond_t blocking_cond;
int sched_policy;
struct pthread_attr attr;
struct sched_param sched_param;
struct rthread_storage *local_storage;
struct rthread_cleanup_fn *cleanup_fns;
int myerrno;
+
+ /* currently in a cancel point? */
int cancel_point;
+
+ /* cancel received in a delayed cancel block? */
+ int delayed_cancel;
};
#define THREAD_DONE 0x001
#define THREAD_DETACHED 0x002
#define THREAD_CANCELED 0x004
#define THREAD_CANCEL_ENABLE 0x008
#define THREAD_CANCEL_DEFERRED 0x010
-#define THREAD_CANCEL_COND 0x020
+#define THREAD_CANCEL_DELAY 0x020
#define THREAD_DYING 0x040
+#define IS_CANCELED(thread) \
+ (((thread)->flags & (THREAD_CANCELED|THREAD_DYING)) == THREAD_CANCELED)
+
+
extern int _threads_ready;
extern LIST_HEAD(listhead, pthread) _thread_list;
extern struct pthread _initial_thread;
@@ -156,12 +169,11 @@ extern _spinlock_lock_t _thread_lock;
void _spinlock(_spinlock_lock_t *);
void _spinunlock(_spinlock_lock_t *);
-int _sem_wait(sem_t, int);
-int _sem_waitl(sem_t, int, clockid_t, const struct timespec *);
+int _sem_wait(sem_t, int, int *);
int _sem_post(sem_t);
-int _sem_wakeup(sem_t);
-int _sem_wakeall(sem_t);
+void _rthread_setflag(pthread_t, int);
+void _rthread_clearflag(pthread_t, int);
struct stack *_rthread_alloc_stack(pthread_t);
void _rthread_free_stack(struct stack *);
void _rthread_tls_destructors(pthread_t);
@@ -174,19 +186,22 @@ void _rthread_bind_lock(int);
#endif
/* rthread_cancel.c */
-void _leave_cancel(pthread_t);
void _enter_cancel(pthread_t);
+void _leave_cancel(pthread_t);
+void _enter_delayed_cancel(pthread_t);
+void _leave_delayed_cancel(pthread_t, int);
void _thread_dump_info(void);
int _atomic_lock(register volatile _spinlock_lock_t *);
/* syscalls */
-int getthrid(void);
-void threxit(pid_t *);
-int thrsleep(const volatile void *, clockid_t, const struct timespec *,
- volatile void *);
-int thrwakeup(void *, int n);
-int sched_yield(void);
-int thrsigdivert(sigset_t, siginfo_t *, const struct timespec *);
-int _thread_sys_sigaction(int, const struct sigaction *, struct sigaction *);
+int getthrid(void);
+void __threxit(pid_t *);
+int __thrsleep(const volatile void *, clockid_t, const struct timespec *,
+ void *, const int *);
+int __thrwakeup(const volatile void *, int n);
+int __thrsigdivert(sigset_t, siginfo_t *, const struct timespec *);
+int sched_yield(void);
+int _thread_sys_sigaction(int, const struct sigaction *,
+ struct sigaction *);
diff --git a/lib/librthread/rthread_cancel.c b/lib/librthread/rthread_cancel.c
index 4d57c8ee530..1db785a9a9e 100644
--- a/lib/librthread/rthread_cancel.c
+++ b/lib/librthread/rthread_cancel.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_cancel.c,v 1.3 2012/01/04 05:46:38 guenther Exp $ */
+/* $OpenBSD: rthread_cancel.c,v 1.4 2012/01/17 02:34:18 guenther Exp $ */
/* $snafu: libc_tag.c,v 1.4 2004/11/30 07:00:06 marc Exp $ */
/* PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org> */
@@ -66,8 +66,7 @@ _enter_cancel(pthread_t self)
{
if (self->flags & THREAD_CANCEL_ENABLE) {
self->cancel_point++;
- if ((self->flags & (THREAD_CANCELED | THREAD_DYING)) ==
- THREAD_CANCELED)
+ if (IS_CANCELED(self))
pthread_exit(PTHREAD_CANCELED);
}
}
@@ -79,6 +78,31 @@ _leave_cancel(pthread_t self)
self->cancel_point--;
}
+void
+_enter_delayed_cancel(pthread_t self)
+{
+ if (self->flags & THREAD_CANCEL_ENABLE) {
+ self->delayed_cancel = 0;
+ self->cancel_point++;
+ if (IS_CANCELED(self))
+ pthread_exit(PTHREAD_CANCELED);
+ _rthread_setflag(self, THREAD_CANCEL_DELAY);
+ }
+}
+
+void
+_leave_delayed_cancel(pthread_t self, int can_cancel)
+{
+ if (self->flags & THREAD_CANCEL_ENABLE) {
+ if (self->flags & THREAD_CANCEL_DELAY) {
+ self->cancel_point--;
+ _rthread_clearflag(self, THREAD_CANCEL_DELAY);
+ }
+ if (IS_CANCELED(self) && can_cancel)
+ pthread_exit(PTHREAD_CANCELED);
+ self->delayed_cancel = 0;
+ }
+}
int
accept(int fd, struct sockaddr *addr, socklen_t *addrlen)
@@ -419,7 +443,7 @@ select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
#if 0
sem_timedwait() /* don't have yet */
-sem_wait() /* don't have yet */
+sem_wait() /* in rthread_sem.c */
send() /* built on sendto() */
#endif
diff --git a/lib/librthread/rthread_file.c b/lib/librthread/rthread_file.c
index 14190662885..28bad52d0c2 100644
--- a/lib/librthread/rthread_file.c
+++ b/lib/librthread/rthread_file.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_file.c,v 1.3 2011/11/06 11:48:59 guenther Exp $ */
+/* $OpenBSD: rthread_file.c,v 1.4 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
@@ -204,7 +204,7 @@ void
*/
TAILQ_INSERT_TAIL(&p->lockers,self,waiting);
while (p->owner != self) {
- thrsleep(self, 0, NULL, &hash_lock);
+ __thrsleep(self, 0, NULL, &hash_lock, NULL);
_spinlock(&hash_lock);
}
}
@@ -292,7 +292,7 @@ void
*/
p->count = 1;
- thrwakeup(p->owner, 1);
+ __thrwakeup(p->owner, 1);
}
}
}
diff --git a/lib/librthread/rthread_rwlock.c b/lib/librthread/rthread_rwlock.c
index 0d391fe9afd..c148089d996 100644
--- a/lib/librthread/rthread_rwlock.c
+++ b/lib/librthread/rthread_rwlock.c
@@ -1,6 +1,7 @@
-/* $OpenBSD: rthread_rwlock.c,v 1.1 2011/12/21 23:59:03 guenther Exp $ */
+/* $OpenBSD: rthread_rwlock.c,v 1.2 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
+ * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
* All Rights Reserved.
*
* Permission to use, copy, modify, and distribute this software for any
@@ -20,6 +21,7 @@
*/
+#include <assert.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
@@ -31,8 +33,10 @@
static _spinlock_lock_t rwlock_init_lock = _SPINLOCK_UNLOCKED;
+/* ARGSUSED1 */
int
-pthread_rwlock_init(pthread_rwlock_t *lockp, const pthread_rwlockattr_t *attrp)
+pthread_rwlock_init(pthread_rwlock_t *lockp,
+ const pthread_rwlockattr_t *attrp __unused)
{
pthread_rwlock_t lock;
@@ -40,7 +44,8 @@ pthread_rwlock_init(pthread_rwlock_t *lockp, const pthread_rwlockattr_t *attrp)
if (!lock)
return (errno);
lock->lock = _SPINLOCK_UNLOCKED;
- lock->sem.lock = _SPINLOCK_UNLOCKED;
+ TAILQ_INIT(&lock->writers);
+
*lockp = lock;
return (0);
@@ -49,13 +54,19 @@ pthread_rwlock_init(pthread_rwlock_t *lockp, const pthread_rwlockattr_t *attrp)
int
pthread_rwlock_destroy(pthread_rwlock_t *lockp)
{
- if ((*lockp) && ((*lockp)->readers || (*lockp)->writer)) {
+ pthread_rwlock_t lock;
+
+ assert(lockp);
+ lock = *lockp;
+ if (lock) {
+ if (lock->readers || !TAILQ_EMPTY(&lock->writers)) {
#define MSG "pthread_rwlock_destroy on rwlock with waiters!\n"
- write(2, MSG, sizeof(MSG) - 1);
+ write(2, MSG, sizeof(MSG) - 1);
#undef MSG
- return (EBUSY);
+ return (EBUSY);
+ }
+ free(lock);
}
- free(*lockp);
*lockp = NULL;
return (0);
@@ -81,86 +92,72 @@ _rthread_rwlock_ensure_init(pthread_rwlock_t *lockp)
}
-int
-pthread_rwlock_rdlock(pthread_rwlock_t *lockp)
+static int
+_rthread_rwlock_rdlock(pthread_rwlock_t *lockp, const struct timespec *abstime,
+ int try)
{
pthread_rwlock_t lock;
+ pthread_t thread = pthread_self();
int error;
if ((error = _rthread_rwlock_ensure_init(lockp)))
return (error);
lock = *lockp;
-again:
+ _rthread_debug(5, "%p: rwlock_rdlock %p\n", (void *)thread,
+ (void *)lock);
_spinlock(&lock->lock);
- if (lock->writer) {
- _spinlock(&lock->sem.lock);
- _spinunlock(&lock->lock);
- _sem_waitl(&lock->sem, 0, 0, NULL);
- goto again;
+
+ /* writers have precedence */
+ if (lock->owner == NULL && TAILQ_EMPTY(&lock->writers))
+ lock->readers++;
+ else if (try)
+ error = EBUSY;
+ else if (lock->owner == thread)
+ error = EDEADLK;
+ else {
+ do {
+ if (__thrsleep(lock, CLOCK_REALTIME, abstime,
+ &lock->lock, NULL) == EWOULDBLOCK)
+ return (ETIMEDOUT);
+ _spinlock(&lock->lock);
+ } while (lock->owner != NULL || !TAILQ_EMPTY(&lock->writers));
+ lock->readers++;
}
- lock->readers++;
_spinunlock(&lock->lock);
- return (0);
+ return (error);
}
int
-pthread_rwlock_timedrdlock(pthread_rwlock_t *lockp,
- const struct timespec *abstime)
+pthread_rwlock_rdlock(pthread_rwlock_t *lockp)
{
- pthread_rwlock_t lock;
- int do_wait = 1;
- int error;
-
- if ((error = _rthread_rwlock_ensure_init(lockp)))
- return (error);
-
- lock = *lockp;
- _spinlock(&lock->lock);
- while (lock->writer && do_wait) {
- _spinlock(&lock->sem.lock);
- _spinunlock(&lock->lock);
- do_wait = _sem_waitl(&lock->sem, 0, CLOCK_REALTIME, abstime);
- _spinlock(&lock->lock);
- }
- if (lock->writer) {
- /* do_wait must be 0, so timed out */
- _spinunlock(&lock->lock);
- return (ETIMEDOUT);
- }
- lock->readers++;
- _spinunlock(&lock->lock);
-
- return (0);
+ return (_rthread_rwlock_rdlock(lockp, NULL, 0));
}
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *lockp)
{
- pthread_rwlock_t lock;
- int error;
-
- if ((error = _rthread_rwlock_ensure_init(lockp)))
- return (error);
-
- lock = *lockp;
-
- _spinlock(&lock->lock);
- if (lock->writer) {
- _spinunlock(&lock->lock);
- return (EBUSY);
- }
- lock->readers++;
- _spinunlock(&lock->lock);
-
- return (0);
+ return (_rthread_rwlock_rdlock(lockp, NULL, 1));
}
int
-pthread_rwlock_wrlock(pthread_rwlock_t *lockp)
+pthread_rwlock_timedrdlock(pthread_rwlock_t *lockp,
+ const struct timespec *abstime)
+{
+ if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec > 1000000000)
+ return (EINVAL);
+ return (_rthread_rwlock_rdlock(lockp, abstime, 0));
+}
+
+
+static int
+_rthread_rwlock_wrlock(pthread_rwlock_t *lockp, const struct timespec *abstime,
+ int try)
{
pthread_rwlock_t lock;
+ pthread_t thread = pthread_self();
int error;
if ((error = _rthread_rwlock_ensure_init(lockp)))
@@ -168,95 +165,98 @@ pthread_rwlock_wrlock(pthread_rwlock_t *lockp)
lock = *lockp;
+ _rthread_debug(5, "%p: rwlock_timedwrlock %p\n", (void *)thread,
+ (void *)lock);
_spinlock(&lock->lock);
- lock->writer++;
- while (lock->readers) {
- _spinlock(&lock->sem.lock);
- _spinunlock(&lock->lock);
- _sem_waitl(&lock->sem, 0, 0, NULL);
- _spinlock(&lock->lock);
+ if (lock->readers == 0 && lock->owner == NULL)
+ lock->owner = thread;
+ else if (try)
+ error = EBUSY;
+ else if (lock->owner == thread)
+ error = EDEADLK;
+ else {
+ int do_wait;
+
+ /* gotta block */
+ TAILQ_INSERT_TAIL(&lock->writers, thread, waiting);
+ do {
+ do_wait = __thrsleep(thread, CLOCK_REALTIME, abstime,
+ &lock->lock, NULL) != EWOULDBLOCK;
+ _spinlock(&lock->lock);
+ } while (lock->owner != thread && do_wait);
+
+ if (lock->owner != thread) {
+ /* timed out, sigh */
+ TAILQ_REMOVE(&lock->writers, thread, waiting);
+ error = ETIMEDOUT;
+ }
}
- lock->readers = -pthread_self()->tid;
_spinunlock(&lock->lock);
- return (0);
+ return (error);
}
int
-pthread_rwlock_timedwrlock(pthread_rwlock_t *lockp,
- const struct timespec *abstime)
+pthread_rwlock_wrlock(pthread_rwlock_t *lockp)
{
- pthread_rwlock_t lock;
- int do_wait = 1;
- int error;
-
- if ((error = _rthread_rwlock_ensure_init(lockp)))
- return (error);
-
- lock = *lockp;
-
- _spinlock(&lock->lock);
- lock->writer++;
- while (lock->readers && do_wait) {
- _spinlock(&lock->sem.lock);
- _spinunlock(&lock->lock);
- do_wait = _sem_waitl(&lock->sem, 0, CLOCK_REALTIME, abstime);
- _spinlock(&lock->lock);
- }
- if (lock->readers) {
- /* do_wait must be 0, so timed out */
- lock->writer--;
- _spinunlock(&lock->lock);
- return (ETIMEDOUT);
- }
- lock->readers = -pthread_self()->tid;
- _spinunlock(&lock->lock);
-
- return (0);
+ return (_rthread_rwlock_wrlock(lockp, NULL, 0));
}
int
pthread_rwlock_trywrlock(pthread_rwlock_t *lockp)
{
- pthread_rwlock_t lock;
- int error;
-
- if ((error = _rthread_rwlock_ensure_init(lockp)))
- return (error);
-
- lock = *lockp;
-
- _spinlock(&lock->lock);
- if (lock->readers || lock->writer) {
- _spinunlock(&lock->lock);
- return (EBUSY);
- }
- lock->writer = 1;
- lock->readers = -pthread_self()->tid;
- _spinunlock(&lock->lock);
+ return (_rthread_rwlock_wrlock(lockp, NULL, 1));
+}
- return (0);
+int
+pthread_rwlock_timedwrlock(pthread_rwlock_t *lockp,
+ const struct timespec *abstime)
+{
+ if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec > 1000000000)
+ return (EINVAL);
+ return (_rthread_rwlock_wrlock(lockp, abstime, 0));
}
+
int
pthread_rwlock_unlock(pthread_rwlock_t *lockp)
{
pthread_rwlock_t lock;
+ pthread_t thread = pthread_self();
+ pthread_t next;
+ int was_writer;
lock = *lockp;
+ _rthread_debug(5, "%p: rwlock_unlock %p\n", (void *)thread,
+ (void *)lock);
_spinlock(&lock->lock);
- if (lock->readers == -pthread_self()->tid) {
- lock->readers = 0;
- lock->writer--;
- } else if (lock->readers > 0) {
- lock->readers--;
+ if (lock->owner != NULL) {
+ assert(lock->owner == thread);
+ was_writer = 1;
} else {
+ assert(lock->readers > 0);
+ lock->readers--;
+ if (lock->readers > 0)
+ goto out;
+ was_writer = 0;
+ }
+
+ lock->owner = next = TAILQ_FIRST(&lock->writers);
+ if (next != NULL) {
+ /* dequeue and wake first writer */
+ TAILQ_REMOVE(&lock->writers, next, waiting);
_spinunlock(&lock->lock);
- return (EPERM);
+ __thrwakeup(next, 1);
+ return (0);
}
+
+ /* could there have been blocked readers? wake them all */
+ if (was_writer)
+ __thrwakeup(lock, 0);
+out:
_spinunlock(&lock->lock);
- _sem_wakeall(&lock->sem);
return (0);
}
diff --git a/lib/librthread/rthread_sem.c b/lib/librthread/rthread_sem.c
index 6c065cc4b78..66c22af4b9a 100644
--- a/lib/librthread/rthread_sem.c
+++ b/lib/librthread/rthread_sem.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_sem.c,v 1.3 2012/01/04 21:01:25 guenther Exp $ */
+/* $OpenBSD: rthread_sem.c,v 1.4 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -28,42 +28,29 @@
* Internal implementation of semaphores
*/
int
-_sem_wait(sem_t sem, int tryonly)
+_sem_wait(sem_t sem, int tryonly, int *delayed_cancel)
{
+ int r;
_spinlock(&sem->lock);
- return (_sem_waitl(sem, tryonly, 0, NULL));
-}
-
-int
-_sem_waitl(sem_t sem, int tryonly, clockid_t clock_id,
- const struct timespec *abstime)
-{
- int do_sleep;
-
-again:
- if (sem->value == 0) {
- if (tryonly) {
- _spinunlock(&sem->lock);
- return (0);
- }
- sem->waitcount++;
- do_sleep = 1;
- } else {
+ if (sem->value) {
sem->value--;
- do_sleep = 0;
- }
-
- if (do_sleep) {
- if (thrsleep(sem, clock_id, abstime, &sem->lock) == -1 &&
- errno == EWOULDBLOCK)
- return (0);
- _spinlock(&sem->lock);
+ r = 1;
+ } else if (tryonly) {
+ r = 0;
+ } else {
+ sem->waitcount++;
+ do {
+ r = __thrsleep(&sem->waitcount, 0, NULL, &sem->lock,
+ delayed_cancel) == 0;
+ _spinlock(&sem->lock);
+ } while (r && sem->value == 0);
sem->waitcount--;
- goto again;
+ if (r)
+ sem->value--;
}
_spinunlock(&sem->lock);
- return (1);
+ return (r);
}
/* always increment count */
@@ -75,44 +62,13 @@ _sem_post(sem_t sem)
_spinlock(&sem->lock);
sem->value++;
if (sem->waitcount) {
- thrwakeup(sem, 1);
+ __thrwakeup(&sem->waitcount, 1);
rv = 1;
}
_spinunlock(&sem->lock);
return (rv);
}
-/* only increment count if a waiter */
-int
-_sem_wakeup(sem_t sem)
-{
- int rv = 0;
-
- _spinlock(&sem->lock);
- if (sem->waitcount) {
- sem->value++;
- thrwakeup(sem, 1);
- rv = 1;
- }
- _spinunlock(&sem->lock);
- return (rv);
-}
-
-
-int
-_sem_wakeall(sem_t sem)
-{
- int rv;
-
- _spinlock(&sem->lock);
- rv = sem->waitcount;
- sem->value += rv;
- thrwakeup(sem, 0);
- _spinunlock(&sem->lock);
-
- return (rv);
-}
-
/*
* exported semaphores
*/
@@ -199,13 +155,17 @@ int
sem_wait(sem_t *semp)
{
sem_t sem = *semp;
+ pthread_t self = pthread_self();
+ int r;
if (!semp || !*semp) {
errno = EINVAL;
return (-1);
}
- _sem_wait(sem, 0);
+ _enter_delayed_cancel(self);
+ r = _sem_wait(sem, 0, &self->delayed_cancel);
+ _leave_delayed_cancel(self, !r);
return (0);
}
@@ -221,7 +181,7 @@ sem_trywait(sem_t *semp)
return (-1);
}
- rv = _sem_wait(sem, 1);
+ rv = _sem_wait(sem, 1, NULL);
if (!rv) {
errno = EAGAIN;
@@ -231,22 +191,25 @@ sem_trywait(sem_t *semp)
return (0);
}
+/* ARGSUSED */
sem_t *
-sem_open(const char *name, int oflag, ...)
+sem_open(const char *name __unused, int oflag __unused, ...)
{
errno = ENOSYS;
return (SEM_FAILED);
}
+/* ARGSUSED */
int
-sem_close(sem_t *sem)
+sem_close(sem_t *sem __unused)
{
errno = ENOSYS;
return (-1);
}
+/* ARGSUSED */
int
-sem_unlink(const char *name)
+sem_unlink(const char *name __unused)
{
errno = ENOSYS;
return (-1);
diff --git a/lib/librthread/rthread_sig.c b/lib/librthread/rthread_sig.c
index 11351ddafc3..c38fed3998b 100644
--- a/lib/librthread/rthread_sig.c
+++ b/lib/librthread/rthread_sig.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_sig.c,v 1.11 2011/12/27 17:36:59 guenther Exp $ */
+/* $OpenBSD: rthread_sig.c,v 1.12 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2005 Ted Unangst <tedu@openbsd.org>
* All Rights Reserved.
@@ -56,7 +56,7 @@ sigwait(const sigset_t *set, int *sig)
sigdelset(&s, SIGTHR);
_enter_cancel(self);
- ret = thrsigdivert(s, NULL, NULL);
+ ret = __thrsigdivert(s, NULL, NULL);
_leave_cancel(self);
if (ret == -1)
return (errno);
diff --git a/lib/librthread/rthread_sync.c b/lib/librthread/rthread_sync.c
index cbe529a83aa..ae3bdbb40d9 100644
--- a/lib/librthread/rthread_sync.c
+++ b/lib/librthread/rthread_sync.c
@@ -1,6 +1,7 @@
-/* $OpenBSD: rthread_sync.c,v 1.28 2012/01/04 17:43:34 mpi Exp $ */
+/* $OpenBSD: rthread_sync.c,v 1.29 2012/01/17 02:34:18 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
+ * Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
* All Rights Reserved.
*
* Permission to use, copy, modify, and distribute this software for any
@@ -20,7 +21,9 @@
*/
+#include <assert.h>
#include <stdlib.h>
+#include <string.h>
#include <unistd.h>
#include <errno.h>
@@ -36,16 +39,16 @@ static _spinlock_lock_t static_init_lock = _SPINLOCK_UNLOCKED;
int
pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
{
- pthread_mutex_t mutex;
+ struct pthread_mutex *mutex;
mutex = calloc(1, sizeof(*mutex));
if (!mutex)
return (errno);
- mutex->sem.lock = _SPINLOCK_UNLOCKED;
- mutex->sem.value = 1; /* unlocked */
+ mutex->lock = _SPINLOCK_UNLOCKED;
+ TAILQ_INIT(&mutex->lockers);
if (attr == NULL) {
mutex->type = PTHREAD_MUTEX_ERRORCHECK;
- mutex->prioceiling = PTHREAD_PRIO_NONE;
+ mutex->prioceiling = -1;
} else {
mutex->type = (*attr)->ma_type;
mutex->prioceiling = (*attr)->ma_protocol ==
@@ -59,23 +62,29 @@ pthread_mutex_init(pthread_mutex_t *mutexp, const pthread_mutexattr_t *attr)
int
pthread_mutex_destroy(pthread_mutex_t *mutexp)
{
+ struct pthread_mutex *mutex;
- if ((*mutexp) && (*mutexp)->count) {
+ assert(mutexp);
+ mutex = (struct pthread_mutex *)*mutexp;
+ if (mutex) {
+ if (mutex->count || mutex->owner != NULL ||
+ !TAILQ_EMPTY(&mutex->lockers)) {
#define MSG "pthread_mutex_destroy on mutex with waiters!\n"
- write(2, MSG, sizeof(MSG) - 1);
+ write(2, MSG, sizeof(MSG) - 1);
#undef MSG
- return (EBUSY);
+ return (EBUSY);
+ }
+ free(mutex);
+ *mutexp = NULL;
}
- free((void *)*mutexp);
- *mutexp = NULL;
return (0);
}
static int
_rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait)
{
- pthread_mutex_t mutex;
- pthread_t thread = pthread_self();
+ struct pthread_mutex *mutex;
+ pthread_t self = pthread_self();
int ret = 0;
/*
@@ -92,19 +101,42 @@ _rthread_mutex_lock(pthread_mutex_t *mutexp, int trywait)
if (ret != 0)
return (EINVAL);
}
- mutex = *mutexp;
- if (mutex->owner == thread) {
- if (mutex->type == PTHREAD_MUTEX_RECURSIVE) {
- mutex->count++;
- return (0);
+ mutex = (struct pthread_mutex *)*mutexp;
+
+ _rthread_debug(5, "%p: mutex_lock %p\n", (void *)self, (void *)mutex);
+ _spinlock(&mutex->lock);
+ if (mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers)) {
+ assert(mutex->count == 0);
+ mutex->owner = self;
+ } else if (mutex->owner == self) {
+ assert(mutex->count > 0);
+
+ /* already owner? handle recursive behavior */
+ if (mutex->type != PTHREAD_MUTEX_RECURSIVE)
+ {
+ if (trywait ||
+ mutex->type == PTHREAD_MUTEX_ERRORCHECK) {
+ _spinunlock(&mutex->lock);
+ return (trywait ? EBUSY : EDEADLK);
+ }
+ abort();
}
- if (mutex->type == PTHREAD_MUTEX_ERRORCHECK)
- return (trywait ? EBUSY : EDEADLK);
- }
- if (!_sem_wait((void *)&mutex->sem, trywait))
+ } else if (trywait) {
+ /* try failed */
+ _spinunlock(&mutex->lock);
return (EBUSY);
- mutex->owner = thread;
- mutex->count = 1;
+ } else {
+ /* add to the wait queue and block until at the head */
+ TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
+ while (mutex->owner != self) {
+ __thrsleep(self, 0, NULL, &mutex->lock, NULL);
+ _spinlock(&mutex->lock);
+ assert(mutex->owner != NULL);
+ }
+ }
+
+ mutex->count++;
+ _spinunlock(&mutex->lock);
return (0);
}
@@ -124,15 +156,25 @@ pthread_mutex_trylock(pthread_mutex_t *p)
int
pthread_mutex_unlock(pthread_mutex_t *mutexp)
{
- pthread_t thread = pthread_self();
- pthread_mutex_t mutex = *mutexp;
+ pthread_t self = pthread_self();
+ struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
+
+ _rthread_debug(5, "%p: mutex_unlock %p\n", (void *)self,
+ (void *)mutex);
- if (mutex->owner != thread)
+ if (mutex->owner != self)
return (EPERM);
if (--mutex->count == 0) {
- mutex->owner = NULL;
- _sem_post((void *)&mutex->sem);
+ pthread_t next;
+
+ _spinlock(&mutex->lock);
+ mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
+ if (next != NULL)
+ TAILQ_REMOVE(&mutex->lockers, next, waiting);
+ _spinunlock(&mutex->lock);
+ if (next != NULL)
+ __thrwakeup(next, 1);
}
return (0);
@@ -141,15 +183,18 @@ pthread_mutex_unlock(pthread_mutex_t *mutexp)
/*
* condition variables
*/
+/* ARGSUSED1 */
int
-pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attrp)
+pthread_cond_init(pthread_cond_t *condp,
+ const pthread_condattr_t *attrp __unused)
{
pthread_cond_t cond;
cond = calloc(1, sizeof(*cond));
if (!cond)
return (errno);
- cond->sem.lock = _SPINLOCK_UNLOCKED;
+ cond->lock = _SPINLOCK_UNLOCKED;
+ TAILQ_INIT(&cond->waiters);
*condp = cond;
@@ -159,8 +204,19 @@ pthread_cond_init(pthread_cond_t *condp, const pthread_condattr_t *attrp)
int
pthread_cond_destroy(pthread_cond_t *condp)
{
+ pthread_cond_t cond;
- free(*condp);
+ assert(condp);
+ cond = *condp;
+ if (cond) {
+ if (!TAILQ_EMPTY(&cond->waiters)) {
+#define MSG "pthread_cond_destroy on condvar with waiters!\n"
+ write(2, MSG, sizeof(MSG) - 1);
+#undef MSG
+ return (EBUSY);
+ }
+ free(cond);
+ }
*condp = NULL;
return (0);
@@ -170,37 +226,310 @@ int
pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
const struct timespec *abstime)
{
+ pthread_cond_t cond;
+ struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
+ pthread_t self = pthread_self();
+ pthread_t next;
+ int mutex_count;
+ int canceled = 0;
+ int rv = 0;
int error;
- int rv;
if (!*condp)
if ((error = pthread_cond_init(condp, NULL)))
return (error);
+ cond = *condp;
+ _rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self,
+ (void *)cond, (void *)mutex);
- _spinlock(&(*condp)->sem.lock);
- pthread_mutex_unlock(mutexp);
- rv = _sem_waitl(&(*condp)->sem, 0, CLOCK_REALTIME, abstime);
- error = pthread_mutex_lock(mutexp);
+ if (mutex->owner != self)
+ return (EPERM);
+ if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
+ abstime->tv_nsec >= 1000000000)
+ return (EINVAL);
+
+ _enter_delayed_cancel(self);
+
+ _spinlock(&cond->lock);
+
+ /* mark the condvar as being associated with this mutex */
+ if (cond->mutex == NULL) {
+ cond->mutex = mutex;
+ assert(TAILQ_EMPTY(&cond->waiters));
+ } else if (cond->mutex != mutex) {
+ assert(cond->mutex == mutex);
+ _spinunlock(&cond->lock);
+ _leave_delayed_cancel(self, 1);
+ return (EINVAL);
+ } else
+ assert(! TAILQ_EMPTY(&cond->waiters));
+
+ /* snag the count in case this is a recursive mutex */
+ mutex_count = mutex->count;
+
+ /* transfer from the mutex queue to the condvar queue */
+ _spinlock(&mutex->lock);
+ self->blocking_cond = cond;
+ TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
+ _spinunlock(&cond->lock);
+
+ /* wake the next guy blocked on the mutex */
+ mutex->count = 0;
+ mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
+ if (next != NULL) {
+ TAILQ_REMOVE(&mutex->lockers, next, waiting);
+ __thrwakeup(next, 1);
+ }
- return (error ? error : rv ? 0 : ETIMEDOUT);
+ /* wait until we're the owner of the mutex again */
+ while (mutex->owner != self) {
+ error = __thrsleep(self, CLOCK_REALTIME, abstime, &mutex->lock,
+ &self->delayed_cancel);
+
+ /*
+ * If abstime == NULL, then we're definitely waiting
+ * on the mutex instead of the condvar, and are
+ * just waiting for mutex ownership, regardless of
+ * why we woke up.
+ */
+ if (abstime == NULL) {
+ _spinlock(&mutex->lock);
+ continue;
+ }
+
+ /*
+ * If we took a normal signal (not from
+ * cancellation) then we should just go back to
+ * sleep without changing state (timeouts, etc).
+ */
+ if (error == EINTR && !IS_CANCELED(self)) {
+ _spinlock(&mutex->lock);
+ continue;
+ }
+
+ /*
+ * The remaining reasons for waking up (normal
+ * wakeup, timeout, and cancellation) all mean that
+ * we won't be staying in the condvar queue and
+ * we'll no longer time out or be cancelable.
+ */
+ abstime = NULL;
+ _leave_delayed_cancel(self, 0);
+
+ /*
+ * If we're no longer in the condvar's queue then
+ * we're just waiting for mutex ownership. Need
+ * cond->lock here to prevent race with cond_signal().
+ */
+ _spinlock(&cond->lock);
+ if (self->blocking_cond == NULL) {
+ _spinunlock(&cond->lock);
+ _spinlock(&mutex->lock);
+ continue;
+ }
+ assert(self->blocking_cond == cond);
+
+ /* if timeout or canceled, make note of that */
+ if (error == EWOULDBLOCK)
+ rv = ETIMEDOUT;
+ else if (error == EINTR)
+ canceled = 1;
+
+ /* transfer between the queues */
+ TAILQ_REMOVE(&cond->waiters, self, waiting);
+ assert(mutex == cond->mutex);
+ if (TAILQ_EMPTY(&cond->waiters))
+ cond->mutex = NULL;
+ self->blocking_cond = NULL;
+ _spinunlock(&cond->lock);
+ _spinlock(&mutex->lock);
+
+ /* mutex unlocked right now? */
+ if (mutex->owner == NULL &&
+ TAILQ_EMPTY(&mutex->lockers)) {
+ assert(mutex->count == 0);
+ mutex->owner = self;
+ break;
+ }
+ TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
+ }
+
+ /* restore the mutex's count */
+ mutex->count = mutex_count;
+ _spinunlock(&mutex->lock);
+
+ _leave_delayed_cancel(self, canceled);
+
+ return (rv);
}
int
pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
- return (pthread_cond_timedwait(condp, mutexp, NULL));
+ pthread_cond_t cond;
+ struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
+ pthread_t self = pthread_self();
+ pthread_t next;
+ int mutex_count;
+ int canceled = 0;
+ int error;
+
+ if (!*condp)
+ if ((error = pthread_cond_init(condp, NULL)))
+ return (error);
+ cond = *condp;
+ _rthread_debug(5, "%p: cond_timed %p,%p\n", (void *)self,
+ (void *)cond, (void *)mutex);
+
+ if (mutex->owner != self)
+ return (EPERM);
+
+ _enter_delayed_cancel(self);
+
+ _spinlock(&cond->lock);
+
+ /* mark the condvar as being associated with this mutex */
+ if (cond->mutex == NULL) {
+ cond->mutex = mutex;
+ assert(TAILQ_EMPTY(&cond->waiters));
+ } else if (cond->mutex != mutex) {
+ assert(cond->mutex == mutex);
+ _spinunlock(&cond->lock);
+ _leave_delayed_cancel(self, 1);
+ return (EINVAL);
+ } else
+ assert(! TAILQ_EMPTY(&cond->waiters));
+
+ /* snag the count in case this is a recursive mutex */
+ mutex_count = mutex->count;
+
+ /* transfer from the mutex queue to the condvar queue */
+ _spinlock(&mutex->lock);
+ self->blocking_cond = cond;
+ TAILQ_INSERT_TAIL(&cond->waiters, self, waiting);
+ _spinunlock(&cond->lock);
+
+ /* wake the next guy blocked on the mutex */
+ mutex->count = 0;
+ mutex->owner = next = TAILQ_FIRST(&mutex->lockers);
+ if (next != NULL) {
+ TAILQ_REMOVE(&mutex->lockers, next, waiting);
+ __thrwakeup(next, 1);
+ }
+
+ /* wait until we're the owner of the mutex again */
+ while (mutex->owner != self) {
+ error = __thrsleep(self, 0, NULL, &mutex->lock,
+ &self->delayed_cancel);
+
+ /*
+ * If we took a normal signal (not from
+ * cancellation) then we should just go back to
+ * sleep without changing state (timeouts, etc).
+ */
+ if (error == EINTR && !IS_CANCELED(self)) {
+ _spinlock(&mutex->lock);
+ continue;
+ }
+
+ /*
+ * The remaining reasons for waking up (normal
+ * wakeup and cancellation) all mean that we won't
+ * be staying in the condvar queue and we'll no
+ * longer be cancelable.
+ */
+ _leave_delayed_cancel(self, 0);
+
+ /*
+ * If we're no longer in the condvar's queue then
+ * we're just waiting for mutex ownership. Need
+ * cond->lock here to prevent race with cond_signal().
+ */
+ _spinlock(&cond->lock);
+ if (self->blocking_cond == NULL) {
+ _spinunlock(&cond->lock);
+ _spinlock(&mutex->lock);
+ continue;
+ }
+ assert(self->blocking_cond == cond);
+
+ /* if canceled, make note of that */
+ if (error == EINTR)
+ canceled = 1;
+
+ /* transfer between the queues */
+ TAILQ_REMOVE(&cond->waiters, self, waiting);
+ assert(mutex == cond->mutex);
+ if (TAILQ_EMPTY(&cond->waiters))
+ cond->mutex = NULL;
+ self->blocking_cond = NULL;
+ _spinunlock(&cond->lock);
+ _spinlock(&mutex->lock);
+
+ /* mutex unlocked right now? */
+ if (mutex->owner == NULL &&
+ TAILQ_EMPTY(&mutex->lockers)) {
+ assert(mutex->count == 0);
+ mutex->owner = self;
+ break;
+ }
+ TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting);
+ }
+
+ /* restore the mutex's count */
+ mutex->count = mutex_count;
+ _spinunlock(&mutex->lock);
+
+ _leave_delayed_cancel(self, canceled);
+
+ return (0);
}
+
int
pthread_cond_signal(pthread_cond_t *condp)
{
- int error;
+ pthread_cond_t cond;
+ struct pthread_mutex *mutex;
+ pthread_t thread;
+ int wakeup;
+ /* uninitialized? Then there's obviously no one waiting! */
if (!*condp)
- if ((error = pthread_cond_init(condp, NULL)))
- return (error);
+ return 0;
+
+ cond = *condp;
+ _rthread_debug(5, "%p: cond_signal %p,%p\n", (void *)pthread_self(),
+ (void *)cond, (void *)cond->mutex);
+ _spinlock(&cond->lock);
+ thread = TAILQ_FIRST(&cond->waiters);
+ if (thread == NULL) {
+ assert(cond->mutex == NULL);
+ _spinunlock(&cond->lock);
+ return (0);
+ }
- _sem_wakeup(&(*condp)->sem);
+ assert(thread->blocking_cond == cond);
+ TAILQ_REMOVE(&cond->waiters, thread, waiting);
+ thread->blocking_cond = NULL;
+
+ mutex = cond->mutex;
+ assert(mutex != NULL);
+ if (TAILQ_EMPTY(&cond->waiters))
+ cond->mutex = NULL;
+
+ /* link locks to prevent race with timedwait */
+ _spinlock(&mutex->lock);
+ _spinunlock(&cond->lock);
+
+ wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
+ if (wakeup)
+ mutex->owner = thread;
+ else
+ TAILQ_INSERT_TAIL(&mutex->lockers, thread, waiting);
+ _spinunlock(&mutex->lock);
+ if (wakeup)
+ __thrwakeup(thread, 1);
return (0);
}
@@ -208,10 +537,65 @@ pthread_cond_signal(pthread_cond_t *condp)
int
pthread_cond_broadcast(pthread_cond_t *condp)
{
+ pthread_cond_t cond;
+ struct pthread_mutex *mutex;
+ pthread_t thread;
+ pthread_t p;
+ int wakeup;
+
+ /* uninitialized? Then there's obviously no one waiting! */
if (!*condp)
- pthread_cond_init(condp, NULL);
+ return 0;
+
+ cond = *condp;
+ _rthread_debug(5, "%p: cond_broadcast %p,%p\n", (void *)pthread_self(),
+ (void *)cond, (void *)cond->mutex);
+ _spinlock(&cond->lock);
+ thread = TAILQ_FIRST(&cond->waiters);
+ if (thread == NULL) {
+ assert(cond->mutex == NULL);
+ _spinunlock(&cond->lock);
+ return (0);
+ }
+
+ mutex = cond->mutex;
+ assert(mutex != NULL);
+
+ /* walk the list, clearing the "blocked on condvar" pointer */
+ p = thread;
+ do
+ p->blocking_cond = NULL;
+ while ((p = TAILQ_NEXT(p, waiting)) != NULL);
+
+ /*
+ * We want to transfer all the threads from the condvar's list
+ * to the mutex's list. The TAILQ_* macros don't let us do that
+ * efficiently, so this is direct list surgery. Pay attention!
+ */
- _sem_wakeall(&(*condp)->sem);
+ /* 1) attach the first thread to the end of the mutex's list */
+ _spinlock(&mutex->lock);
+ wakeup = mutex->owner == NULL && TAILQ_EMPTY(&mutex->lockers);
+ thread->waiting.tqe_prev = mutex->lockers.tqh_last;
+ *(mutex->lockers.tqh_last) = thread;
+
+ /* 2) fix up the end pointer for the mutex's list */
+	mutex->lockers.tqh_last = cond->waiters.tqh_last;
+
+ if (wakeup) {
+ TAILQ_REMOVE(&mutex->lockers, thread, waiting);
+ mutex->owner = thread;
+ _spinunlock(&mutex->lock);
+ __thrwakeup(thread, 1);
+ } else
+ _spinunlock(&mutex->lock);
+
+ /* 3) reset the condvar's list and mutex pointer */
+ TAILQ_INIT(&cond->waiters);
+ assert(cond->mutex != NULL);
+ cond->mutex = NULL;
+ _spinunlock(&cond->lock);
return (0);
}
diff --git a/lib/librthread/shlib_version b/lib/librthread/shlib_version
index 890c57389b5..3066b9771e7 100644
--- a/lib/librthread/shlib_version
+++ b/lib/librthread/shlib_version
@@ -1,2 +1,2 @@
-major=4
-minor=1
+major=5
+minor=0