summaryrefslogtreecommitdiffstats
path: root/sys/uvm/uvm_pmemrange.c
diff options
context:
space:
mode:
authoroga <oga@openbsd.org>2011-07-07 20:52:50 +0000
committeroga <oga@openbsd.org>2011-07-07 20:52:50 +0000
commit0bec8dbe4017410cf1ddf990002aabeccd653f82 (patch)
tree5aec429a62d78dac765f4a08a89e26bb14b7cab9 /sys/uvm/uvm_pmemrange.c
parentThere were two loops in pf_setup_pdesc() and pf_normalize_ip6() (diff)
downloadwireguard-openbsd-0bec8dbe4017410cf1ddf990002aabeccd653f82.tar.xz
wireguard-openbsd-0bec8dbe4017410cf1ddf990002aabeccd653f82.zip
Move the uvm reserve enforcement from uvm_pagealloc to pmemrange.
More and more things are allocating outside of uvm_pagealloc these days, making it easy for something like the buffer cache to eat your last page with no repercussions (other than a hung machine, of course). ok ariane@; also ok ariane@ again after I spotted and fixed a possible underflow problem in the calculation.
Diffstat (limited to 'sys/uvm/uvm_pmemrange.c')
-rw-r--r--sys/uvm/uvm_pmemrange.c38
1 file changed, 37 insertions, 1 deletion
diff --git a/sys/uvm/uvm_pmemrange.c b/sys/uvm/uvm_pmemrange.c
index 114709b6ac4..20b68bd8152 100644
--- a/sys/uvm/uvm_pmemrange.c
+++ b/sys/uvm/uvm_pmemrange.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pmemrange.c,v 1.27 2011/07/06 19:50:38 beck Exp $ */
+/* $OpenBSD: uvm_pmemrange.c,v 1.28 2011/07/07 20:52:50 oga Exp $ */
/*
* Copyright (c) 2009, 2010 Ariane van der Steldt <ariane@stack.nl>
@@ -20,6 +20,7 @@
#include <sys/systm.h>
#include <uvm/uvm.h>
#include <sys/malloc.h>
+#include <sys/mount.h> /* for BUFPAGES defines */
#include <sys/proc.h> /* XXX for atomic */
#include <sys/kernel.h>
@@ -747,6 +748,7 @@ uvm_pmr_getpages(psize_t count, paddr_t start, paddr_t end, paddr_t align,
int memtype; /* Requested memtype. */
int memtype_init; /* Best memtype. */
int desperate; /* True if allocation failed. */
+ int is_pdaemon;
#ifdef DIAGNOSTIC
struct vm_page *diag_prev; /* Used during validation. */
#endif /* DIAGNOSTIC */
@@ -762,6 +764,26 @@ uvm_pmr_getpages(psize_t count, paddr_t start, paddr_t end, paddr_t align,
(boundary == 0 || maxseg * boundary >= count) &&
TAILQ_EMPTY(result));
+ is_pdaemon = ((curproc == uvm.pagedaemon_proc) ||
+ (curproc == syncerproc));
+
+ /*
+ * All allocations by the pagedaemon automatically get access to
+ * the kernel reserve of pages so swapping can catch up with memory
+ * exhaustion
+ */
+ if (is_pdaemon)
+ flags |= UVM_PLA_USERESERVE;
+
+ /*
+ * check to see if we need to generate some free pages by waking
+ * the pagedaemon.
+ */
+ if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
+ ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
+ (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
+ wakeup(&uvm.pagedaemon);
+
/*
* TRYCONTIG is a noop if you only want a single segment.
* Remove it if that's the case: otherwise it'll deny the fast
@@ -836,6 +858,20 @@ retry: /* Return point after sleeping. */
fcount = 0;
fnsegs = 0;
+ /*
+ * fail if any of these conditions are true:
+ * [1] there really are no free pages, or
+ * [2] only kernel "reserved" pages remain and
+ * we are not allowed to use them.
+ * [3] only pagedaemon "reserved" pages remain and
+ * the requestor isn't the pagedaemon.
+ */
+ if (((uvmexp.free < uvmexp.reserve_kernel + ptoa(count)) &&
+ (flags & UVM_PLA_USERESERVE) == 0) ||
+ ((uvmexp.free < uvmexp.reserve_pagedaemon + ptoa(count)) &&
+ !is_pdaemon))
+ goto fail;
+
retry_desperate:
/*
* If we just want any page(s), go for the really fast option.