summaryrefslogtreecommitdiffstats
path: root/sys/kern
diff options
context:
space:
mode:
authormiod <miod@openbsd.org>2007-04-11 12:06:34 +0000
committermiod <miod@openbsd.org>2007-04-11 12:06:34 +0000
commit3c16e060c35c1b0aeffd3da8f70ece23a65d1419 (patch)
tree0c38180e6042579aaf57f50a37522ecbb8b24af8 /sys/kern
parentUse a separate function to remove the buffers in sddetach. (diff)
downloadwireguard-openbsd-3c16e060c35c1b0aeffd3da8f70ece23a65d1419.tar.xz
wireguard-openbsd-3c16e060c35c1b0aeffd3da8f70ece23a65d1419.zip
lockmgr keeps losing code, call 911!
ok pedro@ art@
Diffstat (limited to 'sys/kern')
-rw-r--r--sys/kern/kern_lock.c74
1 file changed, 8 insertions, 66 deletions
diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index d0b418a7b1f..6d51847a770 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: kern_lock.c,v 1.25 2007/03/15 10:22:30 art Exp $ */
+/* $OpenBSD: kern_lock.c,v 1.26 2007/04/11 12:06:37 miod Exp $ */
/*
* Copyright (c) 1995
@@ -287,7 +287,7 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
* If just polling, check to see if we will block.
*/
if ((extflags & LK_NOWAIT) && (lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE))) {
+ (LK_HAVE_EXCL | LK_WANT_EXCL))) {
error = EBUSY;
break;
}
@@ -295,7 +295,7 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
* Wait for exclusive locks and upgrades to clear.
*/
ACQUIRE(lkp, error, extflags, 0, lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE));
+ (LK_HAVE_EXCL | LK_WANT_EXCL));
if (error)
break;
lkp->lk_sharecount++;
@@ -308,9 +308,7 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
*/
lkp->lk_sharecount++;
COUNT(lkp, p, cpu_id, 1);
- /* FALLTHROUGH */
- case LK_DOWNGRADE:
if (WEHOLDIT(lkp, pid, cpu_id) == 0 ||
lkp->lk_exclusivecount == 0)
panic("lockmgr: not holding exclusive lock");
@@ -326,61 +324,6 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
WAKEUP_WAITER(lkp);
break;
- case LK_UPGRADE:
- /*
- * Upgrade a shared lock to an exclusive one. If another
- * shared lock has already requested an upgrade to an
- * exclusive lock, our shared lock is released and an
- * exclusive lock is requested (which will be granted
- * after the upgrade). If we return an error, the file
- * will always be unlocked.
- */
- if (WEHOLDIT(lkp, pid, cpu_id) || lkp->lk_sharecount <= 0)
- panic("lockmgr: upgrade exclusive lock");
- lkp->lk_sharecount--;
- COUNT(lkp, p, cpu_id, -1);
- /*
- * If we are just polling, check to see if we will block.
- */
- if ((extflags & LK_NOWAIT) &&
- ((lkp->lk_flags & LK_WANT_UPGRADE) ||
- lkp->lk_sharecount > 1)) {
- error = EBUSY;
- break;
- }
- if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) {
- /*
- * We are first shared lock to request an upgrade, so
- * request upgrade and wait for the shared count to
- * drop to zero, then take exclusive lock.
- */
- lkp->lk_flags |= LK_WANT_UPGRADE;
- ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount);
- lkp->lk_flags &= ~LK_WANT_UPGRADE;
- if (error)
- break;
- lkp->lk_flags |= LK_HAVE_EXCL;
- SETHOLDER(lkp, pid, cpu_id);
-#if defined(LOCKDEBUG)
- lkp->lk_lock_file = file;
- lkp->lk_lock_line = line;
-#endif
- HAVEIT(lkp);
- if (lkp->lk_exclusivecount != 0)
- panic("lockmgr: non-zero exclusive count");
- lkp->lk_exclusivecount = 1;
- COUNT(lkp, p, cpu_id, 1);
- break;
- }
- /*
- * Someone else has requested upgrade. Release our shared
- * lock, awaken upgrade requestor if we are the last shared
- * lock, then request an exclusive lock.
- */
- if (lkp->lk_sharecount == 0)
- WAKEUP_WAITER(lkp);
- /* FALLTHROUGH */
-
case LK_EXCLUSIVE:
if (WEHOLDIT(lkp, pid, cpu_id)) {
/*
@@ -401,7 +344,7 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
* If we are just polling, check to see if we will sleep.
*/
if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+ (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
lkp->lk_sharecount != 0)) {
error = EBUSY;
break;
@@ -417,8 +360,7 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
/*
* Wait for shared locks and upgrades to finish.
*/
- ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0 ||
- (lkp->lk_flags & LK_WANT_UPGRADE));
+ ACQUIRE(lkp, error, extflags, 0, lkp->lk_sharecount != 0);
lkp->lk_flags &= ~LK_WANT_EXCL;
if (error)
break;
@@ -477,14 +419,14 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
* If we are just polling, check to see if we will sleep.
*/
if ((extflags & LK_NOWAIT) && ((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+ (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
lkp->lk_sharecount != 0 || lkp->lk_waitcount != 0)) {
error = EBUSY;
break;
}
ACQUIRE(lkp, error, extflags, 1,
((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) ||
+ (LK_HAVE_EXCL | LK_WANT_EXCL)) ||
lkp->lk_sharecount != 0 ||
lkp->lk_waitcount != 0));
if (error)
@@ -508,7 +450,7 @@ lockmgr(__volatile struct lock *lkp, u_int flags, struct simplelock *interlkp)
}
if ((lkp->lk_flags & LK_WAITDRAIN) != 0 &&
((lkp->lk_flags &
- (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE)) == 0 &&
+ (LK_HAVE_EXCL | LK_WANT_EXCL)) == 0 &&
lkp->lk_sharecount == 0 && lkp->lk_waitcount == 0)) {
lkp->lk_flags &= ~LK_WAITDRAIN;
wakeup((void *)&lkp->lk_flags);