author	art <art@openbsd.org>	2007-04-04 18:01:57 +0000
committer	art <art@openbsd.org>	2007-04-04 18:01:57 +0000
commit	4e4e01ffb7937d2d5d0a04824c5a94363a89ab63 (patch)
tree	100739617f5c2826d7301ca9d7f6a6efb8bae83e /sys/kern/kern_rwlock.c
parent	Mechanically rename the "flags" and "version" fields in struct vm_page (diff)
Implement RW_DOWNGRADE, which downgrades an exclusive lock to a shared lock
without letting any other exclusive lock in between. As opposed to upgrading
locks, this is easy and solves real problems. deraadt@ ok
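
For context, a minimal usage sketch (not part of the commit; cache_lock and
the two helpers are hypothetical, while rw_init()/rw_enter()/rw_exit() and the
RW_* flags are the rwlock(9) interface this file implements): take the lock
exclusively to mutate, then downgrade so readers can proceed while we keep a
consistent view.

	#include <sys/rwlock.h>

	struct rwlock cache_lock;		/* hypothetical lock; set up
						 * elsewhere with
						 * rw_init(&cache_lock, "cachelk") */

	extern void build_cache(void);		/* hypothetical helpers */
	extern void consume_cache(void);

	void
	cache_update(void)
	{
		rw_enter(&cache_lock, RW_WRITE);	/* exclusive while mutating */
		build_cache();
		rw_enter(&cache_lock, RW_DOWNGRADE);	/* atomically become a reader */
		consume_cache();			/* other readers may run too */
		rw_exit(&cache_lock);			/* release the shared lock */
	}

No other writer can slip in between build_cache() and consume_cache(); that
atomic handoff is the whole point of the new operation.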
Diffstat (limited to 'sys/kern/kern_rwlock.c')
-rw-r--r--	sys/kern/kern_rwlock.c	38
1 file changed, 31 insertions(+), 7 deletions(-)
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 1d5bb954787..85f1e6ace92 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_rwlock.c,v 1.9 2006/11/30 20:08:22 mk Exp $ */
+/* $OpenBSD: kern_rwlock.c,v 1.10 2007/04/04 18:01:57 art Exp $ */
/*
* Copyright (c) 2002, 2003 Artur Grabowski <art@openbsd.org>
@@ -67,6 +67,13 @@ static const struct rwlock_op {
 		0,
 		PLOCK
 	},
+	{	/* RW_DOWNGRADE */
+		RWLOCK_READ_INCR - RWLOCK_WRLOCK,
+		0,
+		0,
+		-1,
+		PLOCK
+	},
 };
 
 #ifndef __HAVE_MD_RWLOCK
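
A single table entry suffices because rw_enter() turns it into pure owner-word
arithmetic: inc = op->inc + RW_PROC(curproc) * op->proc_mult, and with
proc_mult = -1 the delta RWLOCK_READ_INCR - RWLOCK_WRLOCK - RW_PROC(curproc)
rewrites "this proc holds it exclusively" into "one anonymous reader" in a
single compare-and-swap. A worked sketch of that arithmetic, with made-up flag
values standing in for the real constants in sys/rwlock.h:

	#include <assert.h>

	/* Made-up stand-ins for the real sys/rwlock.h constants. */
	#define RWLOCK_WRLOCK		0x01UL
	#define RWLOCK_WAIT		0x02UL
	#define RWLOCK_READ_INCR	0x04UL	/* one reader per increment */

	int
	main(void)
	{
		unsigned long proc = 0x1000;	/* stand-in for RW_PROC(curproc) */
		unsigned long owner = proc | RWLOCK_WRLOCK;	/* write-locked */

		/* RW_DOWNGRADE: op->inc = READ_INCR - WRLOCK, op->proc_mult = -1 */
		owner += RWLOCK_READ_INCR - RWLOCK_WRLOCK - proc;

		/* the word now encodes exactly one shared holder */
		assert(owner == RWLOCK_READ_INCR);
		return (0);
	}

And because the entry's check mask is 0, the while loop in rw_enter() never
blocks a downgrade; the CAS applies the delta directly.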
@@ -138,6 +145,16 @@ rw_enter_diag(struct rwlock *rwl, int flags)
 		if (RW_PROC(curproc) == RW_PROC(rwl->rwl_owner))
 			panic("rw_enter: locking against myself");
 		break;
+	case RW_DOWNGRADE:
+		/*
+		 * If we're downgrading, we must hold the write lock.
+		 */
+		if ((rwl->rwl_owner & RWLOCK_WRLOCK) == 0)
+			panic("rw_enter: downgrade of non-write lock");
+		if (RW_PROC(curproc) != RW_PROC(rwl->rwl_owner))
+			panic("rw_enter: downgrade, not holder");
+		break;
+
 	default:
 		panic("rw_enter: unknown op 0x%x", flags);
 	}
@@ -154,9 +171,6 @@ rw_init(struct rwlock *rwl, const char *name)
 	rwl->rwl_name = name;
 }
 
-/*
- * You are supposed to understand this.
- */
 int
 rw_enter(struct rwlock *rwl, int flags)
 {
@@ -167,14 +181,15 @@ rw_enter(struct rwlock *rwl, int flags)
 	op = &rw_ops[flags & RW_OPMASK];
 
 	inc = op->inc + RW_PROC(curproc) * op->proc_mult;
-	prio = op->wait_prio;
-	if (flags & RW_INTR)
-		prio |= PCATCH;
retry:
 	while (__predict_false(((o = rwl->rwl_owner) & op->check) != 0)) {
 		if (rw_test_and_set(&rwl->rwl_owner, o, o | op->wait_set))
 			continue;
 
+		prio = op->wait_prio;
+		if (flags & RW_INTR)
+			prio |= PCATCH;
+
 		rw_enter_diag(rwl, flags);
 
 		if (flags & RW_NOSLEEP)
@@ -188,6 +203,15 @@ retry:
 	if (__predict_false(rw_test_and_set(&rwl->rwl_owner, o, o + inc)))
 		goto retry;
 
+	/*
+	 * If the old lock had RWLOCK_WAIT and RWLOCK_WRLOCK set, it means
+	 * we downgraded a write lock and there may be read waiters; wake
+	 * them up so they can retry the lock.
+	 */
+	if (__predict_false((o & (RWLOCK_WRLOCK|RWLOCK_WAIT)) ==
+	    (RWLOCK_WRLOCK|RWLOCK_WAIT)))
+		wakeup(rwl);
+
 	return (0);
 }
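
The wakeup test looks at the pre-CAS value o: only RW_DOWNGRADE reaches the
CAS with RWLOCK_WRLOCK still set (every other op's check mask forbids it), so
WRLOCK and WAIT together in o can only mean a downgrade over sleeping waiters.
Extending the earlier sketch (same made-up constants) to show the WAIT bit
riding through the arithmetic:

	#include <assert.h>

	#define RWLOCK_WRLOCK		0x01UL	/* made-up values, as before */
	#define RWLOCK_WAIT		0x02UL
	#define RWLOCK_READ_INCR	0x04UL

	int
	main(void)
	{
		unsigned long proc = 0x1000;	/* stand-in for RW_PROC(writer) */
		unsigned long o, owner = proc | RWLOCK_WRLOCK;

		/* a blocked reader sets op->wait_set before sleeping */
		owner |= RWLOCK_WAIT;

		/* the writer downgrades; o is the old word the CAS saw */
		o = owner;
		owner += RWLOCK_READ_INCR - RWLOCK_WRLOCK - proc;

		/* old word shows a downgrade over waiters: wakeup() is due */
		assert((o & (RWLOCK_WRLOCK | RWLOCK_WAIT)) ==
		    (RWLOCK_WRLOCK | RWLOCK_WAIT));

		/* one shared holder; the WAIT bit survives unchanged */
		assert(owner == (RWLOCK_READ_INCR | RWLOCK_WAIT));
		return (0);
	}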