path: root/sys/uvm/uvm_vnode.c
author     oga <oga@openbsd.org>    2009-06-16 00:11:29 +0000
committer  oga <oga@openbsd.org>    2009-06-16 00:11:29 +0000
commit     29336ad765788b37f9946f6e2934311a865503f8 (patch)
tree       51dfa65ef4c1dd2ec822dbbd531f6ef39037fdbd /sys/uvm/uvm_vnode.c
parent     bring back miod@'s "real functions" patch (rev. 1.2) (diff)
download   wireguard-openbsd-29336ad765788b37f9946f6e2934311a865503f8.tar.xz
           wireguard-openbsd-29336ad765788b37f9946f6e2934311a865503f8.zip
Backout all changes to uvm after pmemrange (which will be backed out
separately).

a change at or just before the hackathon has either exposed or added a
very very nasty memory corruption bug that is giving us hell right now.
So in the interest of kernel stability these diffs are being backed out
until such a time as that corruption bug has been found and squashed,
then the ones that are proven good may slowly return.

a quick hitlist of the main commits this backs out:

mine:
	uvm_objwire
	the lock change in uvm_swap.c
	using trees for uvm objects instead of the hash
	removing the pgo_releasepg callback.

art@'s:
	putting pmap_page_protect(VM_PROT_NONE) in uvm_pagedeactivate()
	since all callers called that just prior anyway.

ok beck@, ariane@.  prompted by deraadt@.
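The pgo_releasepg callback that this backout reinstates is worth a closer
look, since the restored uvn_releasepg() in the diff below documents a
slightly subtle contract: when the pagedaemon passes a non-NULL nextpgp,
the callback must hand the successor page back through it before freeing
the current one, so the daemon's queue walk can continue safely. What
follows is a minimal user-space model of just that contract, not kernel
code; struct page, releasepg() and the PG_RELEASED flag here are
simplified stand-ins for the kernel's vm_page, pgo_releasepg and page
flags.

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define PG_RELEASED	0x0020		/* stand-in flag, value arbitrary */

struct page {
	int flags;
	TAILQ_ENTRY(page) pageq;
};
TAILQ_HEAD(pglist, page);

static struct pglist pageq_head = TAILQ_HEAD_INITIALIZER(pageq_head);

/*
 * model of pgo_releasepg: dispose of a released page.  if the caller is
 * iterating the queue (nextpgp != NULL), store the successor through
 * nextpgp *before* the page goes away, so the walk survives the free.
 * returns 1 ("TRUE") while the owning object is still alive, which for
 * the vnode pager in this diff is always the case.
 */
static int
releasepg(struct page *pg, struct page **nextpgp)
{
	if (nextpgp != NULL)
		*nextpgp = TAILQ_NEXT(pg, pageq);  /* next page for daemon */
	TAILQ_REMOVE(&pageq_head, pg, pageq);
	free(pg);
	return (1);
}

int
main(void)
{
	struct page *pg, *nextpg;
	int i, n;

	for (i = 0; i < 4; i++) {
		pg = calloc(1, sizeof(*pg));
		if (pg == NULL)
			return (1);
		if (i & 1)
			pg->flags |= PG_RELEASED;  /* mark pages 1 and 3 */
		TAILQ_INSERT_TAIL(&pageq_head, pg, pageq);
	}

	/* pagedaemon-style walk: the callback supplies the next page */
	for (pg = TAILQ_FIRST(&pageq_head); pg != NULL; pg = nextpg) {
		if (pg->flags & PG_RELEASED)
			releasepg(pg, &nextpg);
		else
			nextpg = TAILQ_NEXT(pg, pageq);
	}

	n = 0;
	TAILQ_FOREACH(pg, &pageq_head, pageq)
		n++;
	printf("%d of 4 pages remain\n", n);	/* prints "2 of 4" */
	return (0);
}

The real uvn_releasepg() additionally revokes mappings with
pmap_page_protect() and takes or releases the page-queue lock depending
on the nextpgp case; see the restored function in the diff below.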
Diffstat (limited to 'sys/uvm/uvm_vnode.c')
-rw-r--r--   sys/uvm/uvm_vnode.c   57
1 file changed, 51 insertions, 6 deletions
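Of the changes listed in the hitlist, two surface directly in the hunks
below. The tree-to-list revert swaps RB_INIT/RB_EMPTY/RB_FOREACH on memt
back to their TAILQ equivalents on memq, and the uvm_pagedeactivate()
revert re-adds an explicit pmap_page_protect() at each deactivation
site. A sketch of the restored caller-side idiom, for illustration only
(kernel-only calls; with art@'s backed-out change the protect call lived
inside uvm_pagedeactivate() itself):

	if ((pp->pg_flags & PQ_INACTIVE) == 0 && pp->wire_count == 0) {
		/* revoke all mappings, then move pp to the inactive queue */
		pmap_page_protect(pp, VM_PROT_NONE);
		uvm_pagedeactivate(pp);
	}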
diff --git a/sys/uvm/uvm_vnode.c b/sys/uvm/uvm_vnode.c
index c15ebbf70a0..e85e2c24e38 100644
--- a/sys/uvm/uvm_vnode.c
+++ b/sys/uvm/uvm_vnode.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_vnode.c,v 1.62 2009/06/06 17:46:44 art Exp $ */
+/* $OpenBSD: uvm_vnode.c,v 1.63 2009/06/16 00:11:29 oga Exp $ */
/* $NetBSD: uvm_vnode.c,v 1.36 2000/11/24 20:34:01 chs Exp $ */
/*
@@ -93,6 +93,7 @@ void uvn_init(void);
int uvn_io(struct uvm_vnode *, vm_page_t *, int, int, int);
int uvn_put(struct uvm_object *, vm_page_t *, int, boolean_t);
void uvn_reference(struct uvm_object *);
+boolean_t uvn_releasepg(struct vm_page *, struct vm_page **);
/*
* master pager structure
@@ -108,6 +109,7 @@ struct uvm_pagerops uvm_vnodeops = {
uvn_put,
uvn_cluster,
uvm_mk_pcluster, /* use generic version of this: see uvm_pager.c */
+ uvn_releasepg,
};
/*
@@ -271,7 +273,7 @@ uvn_attach(void *arg, vm_prot_t accessprot)
* now set up the uvn.
*/
uvn->u_obj.pgops = &uvm_vnodeops;
- RB_INIT(&uvn->u_obj.memt);
+ TAILQ_INIT(&uvn->u_obj.memq);
uvn->u_obj.uo_npages = 0;
uvn->u_obj.uo_refs = 1; /* just us... */
oldflags = uvn->u_flags;
@@ -438,7 +440,11 @@ uvn_detach(struct uvm_object *uobj)
if (uvn->u_flags & UVM_VNODE_WRITEABLE) {
LIST_REMOVE(uvn, u_wlist);
}
- KASSERT(RB_EMPTY(&uobj->memt));
+#ifdef DIAGNOSTIC
+ if (!TAILQ_EMPTY(&uobj->memq))
+ panic("uvn_deref: vnode VM object still has pages afer "
+ "syncio/free flush");
+#endif
oldflags = uvn->u_flags;
uvn->u_flags = 0;
simple_unlock(&uobj->vmobjlock);
@@ -520,8 +526,8 @@ uvm_vnp_terminate(struct vnode *vp)
/*
* it is possible that the uvn was detached and is in the relkill
- * state [i.e. waiting for async i/o to finish].
- * we take over the vnode now and cancel the relkill.
+ * state [i.e. waiting for async i/o to finish so that releasepg can
+ * kill object]. we take over the vnode now and cancel the relkill.
* we want to know when the i/o is done so we can recycle right
* away. note that a uvn can only be in the RELKILL state if it
* has a zero reference count.
@@ -555,7 +561,7 @@ uvm_vnp_terminate(struct vnode *vp)
while (uvn->u_obj.uo_npages) {
#ifdef DEBUG
struct vm_page *pp;
- RB_FOREACH(pp, uobj_pgs, &uvn->u_obj.memt) {
+ TAILQ_FOREACH(pp, &uvn->u_obj.memq, fq.queues.listq) {
if ((pp->pg_flags & PG_BUSY) == 0)
panic("uvm_vnp_terminate: detected unbusy pg");
}
@@ -615,6 +621,41 @@ uvm_vnp_terminate(struct vnode *vp)
}
/*
+ * uvn_releasepg: handle a released page in a uvn
+ *
+ * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need
+ * to dispose of.
+ * => caller must handle PG_WANTED case
+ * => called with page's object locked, pageq's unlocked
+ * => returns TRUE if page's object is still alive, FALSE if we
+ * killed the page's object. if we return TRUE, then we
+ * return with the object locked.
+ * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return
+ * with the page queues locked [for pagedaemon]
+ * => if (nextpgp == NULL) => we return with page queues unlocked [normal case]
+ * => we kill the uvn if it is not referenced and we are supposed to
+ * kill it ("relkill").
+ */
+
+boolean_t
+uvn_releasepg(struct vm_page *pg, struct vm_page **nextpgp /* OUT */)
+{
+ KASSERT(pg->pg_flags & PG_RELEASED);
+
+ /*
+ * dispose of the page [caller handles PG_WANTED]
+ */
+ pmap_page_protect(pg, VM_PROT_NONE);
+ uvm_lock_pageq();
+ if (nextpgp)
+ *nextpgp = TAILQ_NEXT(pg, pageq); /* next page for daemon */
+ uvm_pagefree(pg);
+ if (!nextpgp)
+ uvm_unlock_pageq();
+ return (TRUE);
+}
+
+/*
* NOTE: currently we have to use VOP_READ/VOP_WRITE because they go
* through the buffer cache and allow I/O in any size. These VOPs use
* synchronous i/o. [vs. VOP_STRATEGY which can be async, but doesn't
@@ -648,6 +689,8 @@ uvm_vnp_terminate(struct vnode *vp)
* - if (object->iosync && u_naio == 0) { wakeup &uvn->u_naio }
* - get "page" structures (atop?).
* - handle "wanted" pages
+ * - handle "released" pages [using pgo_releasepg]
+ * >>> pgo_releasepg may kill the object
don't forget to look at "object" wanted flag in all cases.
*/
@@ -802,6 +845,7 @@ uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
if (flags & PGO_DEACTIVATE) {
if ((pp->pg_flags & PQ_INACTIVE) == 0 &&
pp->wire_count == 0) {
+ pmap_page_protect(pp, VM_PROT_NONE);
uvm_pagedeactivate(pp);
}
} else if (flags & PGO_FREE) {
@@ -945,6 +989,7 @@ ReTry:
if (flags & PGO_DEACTIVATE) {
if ((pp->pg_flags & PQ_INACTIVE) == 0 &&
pp->wire_count == 0) {
+ pmap_page_protect(ptmp, VM_PROT_NONE);
uvm_pagedeactivate(ptmp);
}
} else if (flags & PGO_FREE &&