Diffstat (limited to 'sys/uvm/uvm_pdaemon.c')
-rw-r--r--  sys/uvm/uvm_pdaemon.c  88
1 file changed, 37 insertions, 51 deletions
diff --git a/sys/uvm/uvm_pdaemon.c b/sys/uvm/uvm_pdaemon.c
index 42fa8b0809d..6d8d8719d6b 100644
--- a/sys/uvm/uvm_pdaemon.c
+++ b/sys/uvm/uvm_pdaemon.c
@@ -1,5 +1,5 @@
-/* $OpenBSD: uvm_pdaemon.c,v 1.21 2001/12/19 08:58:07 art Exp $ */
-/* $NetBSD: uvm_pdaemon.c,v 1.30 2001/03/09 01:02:12 chs Exp $ */
+/* $OpenBSD: uvm_pdaemon.c,v 1.22 2002/01/02 22:23:25 miod Exp $ */
+/* $NetBSD: uvm_pdaemon.c,v 1.23 2000/08/20 10:24:14 bjh21 Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -164,12 +164,12 @@ uvmpd_tune()
{
UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);
- uvmexp.freemin = uvmexp.npages / 20;
+ uvmexp.freemin = uvmexp.npages / 30;
- /* between 16k and 256k */
+ /* between 16k and 512k */
/* XXX: what are these values good for? */
uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
- uvmexp.freemin = min(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);
+ uvmexp.freemin = min(uvmexp.freemin, (512*1024) >> PAGE_SHIFT);
/* Make sure there's always a user page free. */
if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
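The tuned freemin is now one-thirtieth of managed pages, clamped between the
page counts corresponding to 16KB and 512KB. A minimal standalone sketch of
that computation, assuming 4KB pages and local clamp helpers (illustrative
only, not the kernel's interface):

#include <stdio.h>

#define PAGE_SHIFT 12			/* assume 4KB pages */

static int imax(int a, int b) { return a > b ? a : b; }
static int imin(int a, int b) { return a < b ? a : b; }

/* compute the free-page low-water mark as uvmpd_tune() now does */
static int
tune_freemin(int npages)
{
	int freemin = npages / 30;

	/* keep the mark between 16KB and 512KB worth of pages */
	freemin = imax(freemin, (16 * 1024) >> PAGE_SHIFT);
	freemin = imin(freemin, (512 * 1024) >> PAGE_SHIFT);
	return freemin;
}

int
main(void)
{
	/* 32768 pages (128MB): 32768/30 = 1092, clamped down to 128 */
	printf("freemin = %d pages\n", tune_freemin(32768));
	return 0;
}

Assuming 4KB pages, both the old divisor of 20 and the new divisor of 30 hit
the cap on machines beyond roughly 15MB, so the practical effect there is
raising the cap from 64 pages (256KB) to 128 pages (512KB).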
@@ -248,8 +248,16 @@ uvm_pageout(void *arg)
* scan if needed
*/
+#ifdef UBC
if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
+ uvmexp.inactive < uvmexp.inactarg ||
+ uvm_pgcnt_vnode >
+ (uvmexp.active + uvmexp.inactive + uvmexp.wired +
+ uvmexp.free) * 13 / 16) {
+#else
+ if (uvmexp.free < uvmexp.freetarg ||
uvmexp.inactive < uvmexp.inactarg) {
+#endif
uvmpd_scan();
}
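Under UBC the scan trigger gains a third condition: start scanning once
vnode-cached pages exceed 13/16 (about 81%) of all managed pages. A
hypothetical standalone predicate spelling out the same arithmetic (the
parameters mirror the uvmexp fields and uvm_pgcnt_vnode; this wrapper is
not a kernel interface):

/*
 * Decide whether the pagedaemon should scan, as in the UBC branch
 * of uvm_pageout() above.
 */
static int
need_scan(int free, int paging, int freetarg,
    int inactive, int inactarg,
    int active, int wired, int vnode_pages)
{
	int managed = active + inactive + wired + free;

	return (free + paging < freetarg ||	/* free memory short */
	    inactive < inactarg ||		/* inactive list short */
	    vnode_pages > managed * 13 / 16);	/* page cache too large */
}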
@@ -369,7 +377,7 @@ uvmpd_scan_inactive(pglst)
struct vm_anon *anon;
boolean_t swap_backed;
vaddr_t start;
- int dirtyreacts, t;
+ int dirtyreacts;
UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
/*
@@ -438,7 +446,9 @@ uvmpd_scan_inactive(pglst)
/*
* move referenced pages back to active queue and
- * skip to next page.
+ * skip to next page (unlikely to happen since
+ * inactive pages shouldn't have any valid mappings
+ * and we cleared reference before deactivating).
*/
if (pmap_is_referenced(p)) {
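The reactivation path this comment guards is short; a simplified sketch of
its shape (the pdreact counter name is assumed from the UVM statistics, not
confirmed by this hunk):

if (pmap_is_referenced(p)) {
	uvm_pageactivate(p);	/* back to the active queue */
	uvmexp.pdreact++;	/* assumed statistics counter */
	continue;		/* on to the next inactive page */
}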
@@ -448,35 +458,6 @@ uvmpd_scan_inactive(pglst)
}
/*
- * enforce the minimum thresholds on different
- * types of memory usage. if reusing the current
- * page would reduce that type of usage below its
- * minimum, reactivate the page instead and move
- * on to the next page.
- */
-
- t = uvmexp.active + uvmexp.inactive + uvmexp.free;
- if (p->uanon &&
- uvmexp.anonpages <= (t * uvmexp.anonmin) >> 8) {
- uvm_pageactivate(p);
- uvmexp.pdreanon++;
- continue;
- }
- if (p->uobject && UVM_OBJ_IS_VTEXT(p->uobject) &&
- uvmexp.vtextpages <= (t * uvmexp.vtextmin) >> 8) {
- uvm_pageactivate(p);
- uvmexp.pdrevtext++;
- continue;
- }
- if (p->uobject && UVM_OBJ_IS_VNODE(p->uobject) &&
- !UVM_OBJ_IS_VTEXT(p->uobject) &&
- uvmexp.vnodepages <= (t * uvmexp.vnodemin) >> 8) {
- uvm_pageactivate(p);
- uvmexp.pdrevnode++;
- continue;
- }
-
- /*
* first we attempt to lock the object that this page
* belongs to. if our attempt fails we skip on to
* the next page (no harm done). it is important to
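The block deleted above enforced per-type memory floors with 8-bit
fixed-point fractions: each *min knob (anonmin, vtextmin, vnodemin) is a
fraction of memory expressed in 1/256 units, so (t * min) >> 8 computes the
floor without a division. A minimal sketch of that arithmetic, with
illustrative names:

/*
 * Was this page type already at its guaranteed minimum share?
 * min_frac256 is a fraction in 1/256 units, e.g. 26 =~ 10%.
 */
static int
at_type_minimum(int type_pages, int total_pages, int min_frac256)
{
	return type_pages <= (total_pages * min_frac256) >> 8;
}

When the answer was yes, the removed code reactivated the page (counting it
in pdreanon, pdrevtext or pdrevnode) instead of reclaiming it.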
@@ -537,17 +518,10 @@ uvmpd_scan_inactive(pglst)
/*
* we now have the object and the page queues locked.
- * the page is not busy. remove all the permissions
- * from the page so we can sync the modified info
- * without any race conditions. if the page is clean
- * we can free it now and continue.
+ * the page is not busy. if the page is clean we
+ * can free it now and continue.
*/
- pmap_page_protect(p, VM_PROT_NONE);
- if ((p->flags & PG_CLEAN) != 0 && pmap_is_modified(p)) {
- p->flags &= ~PG_CLEAN;
- }
-
if (p->flags & PG_CLEAN) {
if (p->pqflags & PQ_SWAPBACKED) {
/* this page now lives only in swap */
@@ -556,6 +530,8 @@ uvmpd_scan_inactive(pglst)
simple_unlock(&uvm.swap_data_lock);
}
+ /* zap all mappings with pmap_page_protect... */
+ pmap_page_protect(p, VM_PROT_NONE);
uvm_pagefree(p);
uvmexp.pdfreed++;
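The reordering above means mappings are removed only on the actual free
path, instead of stripping permissions from every page examined. The
simplified shape of the new clean-page path (locking and the swap
bookkeeping are elided in this sketch):

if (p->flags & PG_CLEAN) {
	/* zap all mappings, then free; nothing can redirty it now */
	pmap_page_protect(p, VM_PROT_NONE);
	uvm_pagefree(p);
	uvmexp.pdfreed++;
}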
@@ -638,12 +614,15 @@ uvmpd_scan_inactive(pglst)
* the page we are looking at is dirty. we must
* clean it before it can be freed. to do this we
* first mark the page busy so that no one else will
- * touch the page.
+ * touch the page. we write protect all the mappings
+ * of the page so that no one touches it while it is
+ * in I/O.
*/
swap_backed = ((p->pqflags & PQ_SWAPBACKED) != 0);
p->flags |= PG_BUSY; /* now we own it */
UVM_PAGE_OWN(p, "scan_inactive");
+ pmap_page_protect(p, VM_PROT_READ);
uvmexp.pgswapout++;
/*
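The busy-then-write-protect pairing added above is the invariant for the
write-back: once PG_BUSY is set and every mapping is downgraded to
read-only, the page can neither be redirtied nor freed while the I/O is in
flight. A hypothetical helper showing the pattern in isolation (not kernel
code):

static void
prepare_for_pageout(struct vm_page *p)
{
	p->flags |= PG_BUSY;			/* we own the page now */
	UVM_PAGE_OWN(p, "scan_inactive");
	pmap_page_protect(p, VM_PROT_READ);	/* no writers during I/O */
}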
@@ -816,6 +795,7 @@ uvmpd_scan_inactive(pglst)
continue;
}
+#ifdef UBC
if (result == VM_PAGER_ERROR &&
curproc == uvm.pagedaemon_proc) {
uvm_lock_pageq();
@@ -823,6 +803,7 @@ uvmpd_scan_inactive(pglst)
uvm_pageactivate(p);
continue;
}
+#endif
/*
* clean up "p" if we have one
@@ -859,6 +840,12 @@ uvmpd_scan_inactive(pglst)
simple_lock(&uobj->vmobjlock);
}
+#ifdef DIAGNOSTIC
+ if (result == VM_PAGER_UNLOCK)
+ panic("pagedaemon: pageout returned "
+ "invalid 'unlock' code");
+#endif
+
/* handle PG_WANTED now */
if (p->flags & PG_WANTED)
/* still holding object lock */
@@ -1108,13 +1095,12 @@ uvmpd_scan()
}
/*
- * If the page has not been referenced since the
- * last scan, deactivate the page if there is a
- * shortage of inactive pages.
+ * deactivate this page if there's a shortage of
+ * inactive pages.
*/
- if (inactive_shortage > 0 &&
- pmap_clear_reference(p) == FALSE) {
+ if (inactive_shortage > 0) {
+ pmap_page_protect(p, VM_PROT_NONE);
/* no need to check wire_count as pg is "active" */
uvm_pagedeactivate(p);
uvmexp.pddeact++;
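This changes the deactivation policy: rather than deactivating only pages
whose reference bit was already clear, every candidate now loses its
mappings and moves to the inactive queue; a page still in use will fault
back in and be reactivated through the normal fault path. The new step in
isolation (a simplified sketch):

if (inactive_shortage > 0) {
	pmap_page_protect(p, VM_PROT_NONE);	/* drop all mappings */
	/* no wire_count check needed: the page came off the active list */
	uvm_pagedeactivate(p);
	uvmexp.pddeact++;
}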