author    kettenis <kettenis@openbsd.org>    2014-10-03 18:06:46 +0000
committer kettenis <kettenis@openbsd.org>    2014-10-03 18:06:46 +0000
commit    ef440d082d19b8982f069e03e4b224578a151385 (patch)
tree      139a9aef402a510bc2c3c18a9c0bd2a139633a50 /sys/uvm/uvm_pmemrange.c
parent    Introduce __MAP_NOFAULT, a mmap(2) flag that makes sure a mapping will not (diff)
Introduce a thread for zeroing pages without holding the kernel lock. This
way we can do some useful work in parallel with other things and create a reservoir of zeroed pages ready for use elsewhere. This should reduce latency. The thread runs at the absolute lowest priority so that we don't keep other kernel threads or userland from doing useful work. It can easily be disabled by removing the kthread_create(9) call in main(), which perhaps we should do for non-MP kernels.

ok deraadt@, tedu@
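For context, the hookup happens outside this file. A minimal sketch of how main() might start the thread with kthread_create(9) follows; the "zerothread" name and the panic message are assumptions, not taken from this diff:

	/*
	 * Sketch only: start the page-zeroing thread once the VM
	 * system is up.  Deleting this call disables the thread
	 * entirely, as the commit message suggests.
	 */
	if (kthread_create(uvm_pagezero_thread, NULL, NULL, "zerothread"))
		panic("could not create zerothread");

kthread_create(9) returns non-zero on failure, so the usual idiom is to panic during boot if the thread cannot be forked.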
Diffstat (limited to 'sys/uvm/uvm_pmemrange.c')
-rw-r--r--	sys/uvm/uvm_pmemrange.c	54
1 file changed, 50 insertions(+), 4 deletions(-)
diff --git a/sys/uvm/uvm_pmemrange.c b/sys/uvm/uvm_pmemrange.c
index 00e6cbad804..b31c66fb7c1 100644
--- a/sys/uvm/uvm_pmemrange.c
+++ b/sys/uvm/uvm_pmemrange.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: uvm_pmemrange.c,v 1.41 2014/09/14 14:17:27 jsg Exp $ */
+/* $OpenBSD: uvm_pmemrange.c,v 1.42 2014/10/03 18:06:47 kettenis Exp $ */
/*
* Copyright (c) 2009, 2010 Ariane van der Steldt <ariane@stack.nl>
@@ -21,6 +21,7 @@
#include <uvm/uvm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
+#include <sys/kthread.h>
#include <sys/mount.h>
/*
@@ -107,7 +108,7 @@ void uvm_pmr_assertvalid(struct uvm_pmemrange *pmr);
#endif
int uvm_pmr_get1page(psize_t, int, struct pglist *,
- paddr_t, paddr_t);
+ paddr_t, paddr_t, int);
struct uvm_pmemrange *uvm_pmr_allocpmr(void);
struct vm_page *uvm_pmr_nfindsz(struct uvm_pmemrange *, psize_t, int);
@@ -824,7 +825,7 @@ retry_desperate:
if (count <= maxseg && align == 1 && boundary == 0 &&
(flags & UVM_PLA_TRYCONTIG) == 0) {
fcount += uvm_pmr_get1page(count - fcount, memtype_init,
- result, start, end);
+ result, start, end, 0);
/*
* If we found sufficient pages, go to the succes exit code.
@@ -1036,6 +1037,8 @@ out:
if (found->pg_flags & PG_ZERO) {
uvmexp.zeropages--;
+ if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
+ wakeup(&uvmexp.zeropages);
}
if (flags & UVM_PLA_ZERO) {
if (found->pg_flags & PG_ZERO)
@@ -1130,6 +1133,8 @@ uvm_pmr_freepages(struct vm_page *pg, psize_t count)
pg += pmr_count;
}
wakeup(&uvmexp.free);
+ if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
+ wakeup(&uvmexp.zeropages);
uvm_wakeup_pla(VM_PAGE_TO_PHYS(firstpg), ptoa(count));
@@ -1167,6 +1172,8 @@ uvm_pmr_freepageq(struct pglist *pgl)
uvm_wakeup_pla(pstart, ptoa(plen));
}
wakeup(&uvmexp.free);
+ if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
+ wakeup(&uvmexp.zeropages);
uvm_unlock_fpageq();
return;
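All three hunks above that consume or release pages follow one pattern: whenever the pool of pre-zeroed pages has been drawn down below UVM_PAGEZERO_TARGET, the zeroing thread is prodded awake. A condensed sketch of the two sides of the handshake, using only names that appear in this diff (the get1page half of the thread's sleep condition is elided here):

	/*
	 * Producer side (page-freeing paths, fpageq lock held):
	 * kick the thread when the reservoir runs low.
	 */
	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		wakeup(&uvmexp.zeropages);

	/*
	 * Consumer side (uvm_pagezero_thread, fpageq lock held):
	 * sleep while the reservoir is full; msleep(9) drops and
	 * reacquires uvm.fpageqlock around the sleep.
	 */
	while (uvmexp.zeropages >= UVM_PAGEZERO_TARGET)
		msleep(&uvmexp.zeropages, &uvm.fpageqlock, MAXPRI,
		    "pgzero", 0);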
@@ -1663,7 +1670,7 @@ uvm_pmr_rootupdate(struct uvm_pmemrange *pmr, struct vm_page *init_root,
*/
int
uvm_pmr_get1page(psize_t count, int memtype_init, struct pglist *result,
- paddr_t start, paddr_t end)
+ paddr_t start, paddr_t end, int memtype_only)
{
struct uvm_pmemrange *pmr;
struct vm_page *found, *splitpg;
@@ -1779,6 +1786,8 @@ uvm_pmr_get1page(psize_t count, int memtype_init, struct pglist *result,
uvm_pmr_remove_addr(pmr, found);
uvm_pmr_assertvalid(pmr);
} else {
+ if (memtype_only)
+ break;
/*
* Skip to the next memtype.
*/
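The new memtype_only argument changes the search policy of uvm_pmr_get1page(): when it is clear (all pre-existing callers pass 0), a failed search falls through to the next memory type as before; when it is set, the loop gives up instead. The zeroing thread depends on this so that a request for UVM_PMR_MEMTYPE_DIRTY pages can never fall through to another memtype and pointlessly re-zero pages that are already zeroed. The two call styles from this diff, side by side:

	/* Allocator path: old behaviour, may fall through memtypes. */
	fcount += uvm_pmr_get1page(count - fcount, memtype_init,
	    result, start, end, 0);

	/* Zeroing thread: dirty pages only, in batches of up to 16. */
	count = uvm_pmr_get1page(16, UVM_PMR_MEMTYPE_DIRTY,
	    &pgl, 0, 0, 1);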
@@ -1943,3 +1952,40 @@ uvm_wakeup_pla(paddr_t low, psize_t len)
}
}
}
+
+void
+uvm_pagezero_thread(void *arg)
+{
+ struct pglist pgl;
+ struct vm_page *pg;
+ int count;
+
+ /* Run at the lowest possible priority. */
+ curproc->p_p->ps_nice = NZERO + PRIO_MAX;
+
+ KERNEL_UNLOCK();
+
+ for (;;) {
+ uvm_lock_fpageq();
+ while (uvmexp.zeropages >= UVM_PAGEZERO_TARGET ||
+ (count = uvm_pmr_get1page(16, UVM_PMR_MEMTYPE_DIRTY,
+ &pgl, 0, 0, 1)) == 0) {
+ msleep(&uvmexp.zeropages, &uvm.fpageqlock, MAXPRI,
+ "pgzero", 0);
+ }
+ uvm_unlock_fpageq();
+
+ TAILQ_FOREACH(pg, &pgl, pageq) {
+ uvm_pagezero(pg);
+ atomic_setbits_int(&pg->pg_flags, PG_ZERO);
+ }
+
+ uvm_lock_fpageq();
+ while (!TAILQ_EMPTY(&pgl))
+ uvm_pmr_remove_1strange(&pgl, 0, NULL, 0);
+ uvmexp.zeropages += count;
+ uvm_unlock_fpageq();
+
+ yield();
+ }
+}
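The payoff shows up in the UVM_PLA_ZERO branch in the hunk at @@ -1036 above, where an allocation that asks for zeroed memory can skip the zeroing work if the page already carries PG_ZERO. That hunk is truncated after the inner test, so the following else arm is an assumption based on the surrounding code, not quoted from it:

	/* Sketch: zero on demand only when the page is not pre-zeroed. */
	if (flags & UVM_PLA_ZERO) {
		if ((found->pg_flags & PG_ZERO) == 0)
			uvm_pagezero(found);
		/* else: page came zeroed from the reservoir. */
	}

Note also the yield() at the bottom of the thread loop: together with the NZERO + PRIO_MAX nice value set at thread start, it keeps the thread from starving other work even though it never exits.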