author     oga <oga@openbsd.org>  2010-04-30 20:50:53 +0000
committer  oga <oga@openbsd.org>  2010-04-30 20:50:53 +0000
commit     69ba976b1c944845eea8d56df00167f3285f565e (patch)
tree       2750af6ce50d0a4caea406533997a6cae03ead1a
parent     Add __unused and __used macros that expand to appropriate __attribute__ (diff)
Prevent a possible case of lock recursion in swapoff.
If, when we have successfully swapped an aobj back in, we release our
reference count, and that reference is the last reference, we will free
the aobj and recursively lock the list lock. Fix this by keeping track
of the last object we had a reference on, and releasing the refcount
the next time we unlock the list lock.

Put a couple of comments in explaining lock ordering in this file.

noticed by, discussed with and ok guenther@.
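For context, here is a minimal userland sketch of the deferred-release
pattern the commit introduces. All names (struct obj, obj_release(),
walk_all(), list_lock) are hypothetical stand-ins for the aobj machinery,
with pthread mutexes in place of the kernel's mutexes and simple locks,
and the refcount left unsynchronized for brevity:

#include <pthread.h>
#include <stdlib.h>
#include <sys/queue.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

struct obj {
	int refcnt;			/* unsynchronized here, for brevity */
	LIST_ENTRY(obj) entry;		/* linkage, protected by list_lock */
};
static LIST_HEAD(, obj) obj_list = LIST_HEAD_INITIALIZER(obj_list);

/*
 * Dropping the last reference frees the object, which means taking
 * list_lock to unlink it: calling this with list_lock held recurses.
 */
static void
obj_release(struct obj *o)
{
	if (--o->refcnt == 0) {
		pthread_mutex_lock(&list_lock);
		LIST_REMOVE(o, entry);
		pthread_mutex_unlock(&list_lock);
		free(o);
	}
}

static void
walk_all(void)
{
	struct obj *o, *next, *prev = NULL;

	pthread_mutex_lock(&list_lock);
	for (o = LIST_FIRST(&obj_list); o != NULL; o = next) {
		o->refcnt++;		/* keep o alive across the unlock */
		pthread_mutex_unlock(&list_lock);

		/* The list lock is free again: a deferred release is
		 * safe even if it turns out to be the last reference. */
		if (prev != NULL) {
			obj_release(prev);
			prev = NULL;
		}

		/* ... operate on o with list_lock dropped ... */

		pthread_mutex_lock(&list_lock);
		next = LIST_NEXT(o, entry);
		/*
		 * Releasing o here could recurse on list_lock if refcnt
		 * hit zero, so remember it and release after unlocking.
		 */
		prev = o;
	}
	pthread_mutex_unlock(&list_lock);
	if (prev != NULL)
		obj_release(prev);
}

The point is that obj_release() on the last reference re-enters
list_lock, so the walker may only call it in the windows where list_lock
is dropped; prevaobj in the actual patch plays the role of prev here.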
-rw-r--r--  sys/uvm/uvm_aobj.c | 30
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/sys/uvm/uvm_aobj.c b/sys/uvm/uvm_aobj.c
index e5dce0a947d..f9caf2a7bbe 100644
--- a/sys/uvm/uvm_aobj.c
+++ b/sys/uvm/uvm_aobj.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_aobj.c,v 1.48 2010/04/25 23:02:22 oga Exp $ */
+/* $OpenBSD: uvm_aobj.c,v 1.49 2010/04/30 20:50:53 oga Exp $ */
/* $NetBSD: uvm_aobj.c,v 1.39 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -196,6 +196,11 @@ struct uvm_pagerops aobj_pager = {
/*
* uao_list: global list of active aobjs, locked by uao_list_lock
+ *
+ * Lock ordering: generally the locking order is object lock, then list lock.
+ * In the case of swap off we have to iterate over the list, and thus the
+ * ordering is reversed. In that case we must use trylocking to prevent
+ * deadlock.
*/
static LIST_HEAD(aobjlist, uvm_aobj) uao_list = LIST_HEAD_INITIALIZER(uao_list);
@@ -1157,7 +1162,7 @@ uao_dropswap(struct uvm_object *uobj, int pageidx)
boolean_t
uao_swap_off(int startslot, int endslot)
{
- struct uvm_aobj *aobj, *nextaobj;
+ struct uvm_aobj *aobj, *nextaobj, *prevaobj = NULL;
/*
* walk the list of all aobjs.
@@ -1179,6 +1184,10 @@ restart:
*/
if (!simple_lock_try(&aobj->u_obj.vmobjlock)) {
mtx_leave(&uao_list_lock);
+ if (prevaobj) {
+ uao_detach_locked(&prevaobj->u_obj);
+ prevaobj = NULL;
+ }
goto restart;
}
@@ -1194,6 +1203,11 @@ restart:
*/
mtx_leave(&uao_list_lock);
+ if (prevaobj) {
+ uao_detach_locked(&prevaobj->u_obj);
+ prevaobj = NULL;
+ }
+
/*
* page in any pages in the swslot range.
* if there's an error, abort and return the error.
@@ -1210,13 +1224,23 @@ restart:
*/
mtx_enter(&uao_list_lock);
nextaobj = LIST_NEXT(aobj, u_list);
- uao_detach_locked(&aobj->u_obj);
+ /*
+ * prevaobj means that we have an object that we need
+ * to drop a reference for. We can't just drop it now with
+ * the list locked since that could cause lock recursion in
+ * the case where we reduce the refcount to 0. It will be
+ * released the next time we drop the list lock.
+ */
+ prevaobj = aobj;
}
/*
* done with traversal, unlock the list
*/
mtx_leave(&uao_list_lock);
+ if (prevaobj) {
+ uao_detach_locked(&prevaobj->u_obj);
+ }
return FALSE;
}
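The lock-ordering comment added above is the other half of the fix. Here
is a companion sketch, again with hypothetical names plus a per-object
lock, of the reversed-order traversal: the walker already holds the list
lock, so it may only trylock each object, and on failure it backs off,
drops any deferred reference, and restarts, as uao_swap_off() does:

#include <pthread.h>
#include <sys/queue.h>

struct obj {
	pthread_mutex_t lock;	/* normal order: object lock, then list lock */
	int refcnt;
	LIST_ENTRY(obj) entry;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static LIST_HEAD(, obj) obj_list = LIST_HEAD_INITIALIZER(obj_list);

/* Hypothetical: drops one reference; may take list_lock to unlink/free. */
void obj_release(struct obj *);

static void
walk_reversed(void)
{
	struct obj *o, *prev = NULL;

restart:
	pthread_mutex_lock(&list_lock);
	LIST_FOREACH(o, &obj_list, entry) {
		/*
		 * We hold list_lock, the reverse of the normal order, so
		 * blocking on the object lock could deadlock against a
		 * thread that holds it and is waiting for list_lock.
		 */
		if (pthread_mutex_trylock(&o->lock) != 0) {
			pthread_mutex_unlock(&list_lock);
			/* list_lock is free: safe to drop a deferred ref. */
			if (prev != NULL) {
				obj_release(prev);
				prev = NULL;
			}
			goto restart;
		}
		/* ... take a reference, work on o, defer its release ... */
		pthread_mutex_unlock(&o->lock);
	}
	pthread_mutex_unlock(&list_lock);
	if (prev != NULL)
		obj_release(prev);
}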