Diffstat (limited to 'lib/librthread/rthread_sync.c')
-rw-r--r--   lib/librthread/rthread_sync.c   33
1 file changed, 20 insertions(+), 13 deletions(-)
diff --git a/lib/librthread/rthread_sync.c b/lib/librthread/rthread_sync.c
index 32331952906..7deb9fbea1a 100644
--- a/lib/librthread/rthread_sync.c
+++ b/lib/librthread/rthread_sync.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: rthread_sync.c,v 1.41 2016/04/15 21:04:04 guenther Exp $ */
+/* $OpenBSD: rthread_sync.c,v 1.42 2016/05/07 19:05:22 guenther Exp $ */
/*
* Copyright (c) 2004,2005 Ted Unangst <tedu@openbsd.org>
* Copyright (c) 2012 Philip Guenther <guenther@openbsd.org>
@@ -30,6 +30,7 @@
#include <pthread.h>
#include "rthread.h"
+#include "cancel.h" /* in libc/include */
static struct _spinlock static_init_lock = _SPINLOCK_UNLOCKED;
@@ -290,12 +291,14 @@ pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
{
pthread_cond_t cond;
struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
- pthread_t self = pthread_self();
+ struct tib *tib = TIB_GET();
+ pthread_t self = tib->tib_thread;
pthread_t next;
int mutex_count;
int canceled = 0;
int rv = 0;
int error;
+ PREP_CANCEL_POINT(tib);
if (!*condp)
if ((error = pthread_cond_init(condp, NULL)))
@@ -322,7 +325,7 @@ pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
abstime->tv_nsec >= 1000000000)
return (EINVAL);
- _enter_delayed_cancel(self);
+ ENTER_DELAYED_CANCEL_POINT(tib, self);
_spinlock(&cond->lock);
@@ -333,7 +336,7 @@ pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
} else if (cond->mutex != mutex) {
assert(cond->mutex == mutex);
_spinunlock(&cond->lock);
- _leave_delayed_cancel(self, 1);
+ LEAVE_CANCEL_POINT_INNER(tib, 1);
return (EINVAL);
} else
assert(! TAILQ_EMPTY(&cond->waiters));
@@ -376,7 +379,8 @@ pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
* cancellation) then we should just go back to
* sleep without changing state (timeouts, etc).
*/
- if (error == EINTR && !IS_CANCELED(self)) {
+ if (error == EINTR && (tib->tib_canceled == 0 ||
+ (tib->tib_cantcancel & CANCEL_DISABLED))) {
_spinlock(&mutex->lock);
continue;
}
@@ -388,7 +392,7 @@ pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
* we'll no longer time out or be cancelable.
*/
abstime = NULL;
- _leave_delayed_cancel(self, 0);
+ LEAVE_CANCEL_POINT_INNER(tib, 0);
/*
* If we're no longer in the condvar's queue then
@@ -432,7 +436,7 @@ pthread_cond_timedwait(pthread_cond_t *condp, pthread_mutex_t *mutexp,
mutex->count = mutex_count;
_spinunlock(&mutex->lock);
- _leave_delayed_cancel(self, canceled);
+ LEAVE_CANCEL_POINT_INNER(tib, canceled);
return (rv);
}
@@ -442,11 +446,13 @@ pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
{
pthread_cond_t cond;
struct pthread_mutex *mutex = (struct pthread_mutex *)*mutexp;
- pthread_t self = pthread_self();
+ struct tib *tib = TIB_GET();
+ pthread_t self = tib->tib_thread;
pthread_t next;
int mutex_count;
int canceled = 0;
int error;
+ PREP_CANCEL_POINT(tib);
if (!*condp)
if ((error = pthread_cond_init(condp, NULL)))
@@ -469,7 +475,7 @@ pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
abort();
}
- _enter_delayed_cancel(self);
+ ENTER_DELAYED_CANCEL_POINT(tib, self);
_spinlock(&cond->lock);
@@ -480,7 +486,7 @@ pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
} else if (cond->mutex != mutex) {
assert(cond->mutex == mutex);
_spinunlock(&cond->lock);
- _leave_delayed_cancel(self, 1);
+ LEAVE_CANCEL_POINT_INNER(tib, 1);
return (EINVAL);
} else
assert(! TAILQ_EMPTY(&cond->waiters));
@@ -512,7 +518,8 @@ pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
* cancellation) then we should just go back to
* sleep without changing state (timeouts, etc).
*/
- if (error == EINTR && !IS_CANCELED(self)) {
+ if (error == EINTR && (tib->tib_canceled == 0 ||
+ (tib->tib_cantcancel & CANCEL_DISABLED))) {
_spinlock(&mutex->lock);
continue;
}
@@ -523,7 +530,7 @@ pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
* be staying in the condvar queue and we'll no
* longer be cancelable.
*/
- _leave_delayed_cancel(self, 0);
+ LEAVE_CANCEL_POINT_INNER(tib, 0);
/*
* If we're no longer in the condvar's queue then
@@ -565,7 +572,7 @@ pthread_cond_wait(pthread_cond_t *condp, pthread_mutex_t *mutexp)
mutex->count = mutex_count;
_spinunlock(&mutex->lock);
- _leave_delayed_cancel(self, canceled);
+ LEAVE_CANCEL_POINT_INNER(tib, canceled);
return (0);
}
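
The pattern this commit installs is the same in both pthread_cond_wait() and pthread_cond_timedwait(): fetch the TIB once and derive `self` from it instead of calling pthread_self(), prepare a cancel point, enter a delayed cancel point around the sleep, treat EINTR as a spurious wakeup unless cancellation is both requested and enabled, and leave the cancel point with a flag that says whether a pending cancellation should be acted on. The sketch below shows only that shape; it assumes OpenBSD's internal <tib.h>, "rthread.h" and "cancel.h" headers, the macro semantics are inferred from their use in the diff above, and block_on_condvar() is a hypothetical stand-in for the real __thrsleep()-based wait, which also handles the mutex and the condvar queue.

    /*
     * Sketch only: the cancellation-point shape of the converted condvar
     * wait functions.  Locking, queueing and timeouts are elided;
     * block_on_condvar() is a hypothetical placeholder for the real sleep,
     * and the macro expansions from "cancel.h" are not reproduced here.
     */
    #include <errno.h>
    #include <pthread.h>
    #include <tib.h>          /* TIB_GET(), struct tib (OpenBSD-internal) */

    #include "rthread.h"      /* pthread internals (OpenBSD-internal) */
    #include "cancel.h"       /* cancel-point macros, in libc/include */

    int block_on_condvar(pthread_cond_t *, pthread_mutex_t *);  /* hypothetical */

    int
    cond_wait_sketch(pthread_cond_t *condp, pthread_mutex_t *mutexp)
    {
    	struct tib *tib = TIB_GET();       /* one TIB fetch ...            */
    	pthread_t self = tib->tib_thread;  /* ... replaces pthread_self()  */
    	int canceled = 0;
    	int error;
    	PREP_CANCEL_POINT(tib);

    	/* Cancellation is noted while we sleep but acted on later. */
    	ENTER_DELAYED_CANCEL_POINT(tib, self);

    	for (;;) {
    		error = block_on_condvar(condp, mutexp);

    		/*
    		 * EINTR with cancellation unrequested or disabled is a
    		 * spurious wakeup: go back to sleep without changing state.
    		 */
    		if (error == EINTR && (tib->tib_canceled == 0 ||
    		    (tib->tib_cantcancel & CANCEL_DISABLED)))
    			continue;
    		if (error == EINTR)
    			canceled = 1;
    		break;
    	}

    	/* A non-zero flag means act on the pending cancellation on exit. */
    	LEAVE_CANCEL_POINT_INNER(tib, canceled);
    	return (0);
    }

Keeping the TIB pointer in a local and passing it to the macros avoids repeated thread-pointer lookups inside the wait loop, which is the practical difference from the old _enter_delayed_cancel()/_leave_delayed_cancel() helpers replaced above.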