diff options
author | 2009-05-05 05:27:53 +0000 | |
---|---|---|
committer | 2009-05-05 05:27:53 +0000 | |
commit | 1dbff99df66ae128d95de654c72a42b4d4b90e03 (patch) | |
tree | add6bcb17c0a6e5b0afdb0ebe378d1c13ed9a4c9 /sys/uvm/uvm_km.c | |
parent | The first step in cleaning up the use of PG_RELEASED for uvm objects. (diff) | |
download | wireguard-openbsd-1dbff99df66ae128d95de654c72a42b4d4b90e03.tar.xz wireguard-openbsd-1dbff99df66ae128d95de654c72a42b4d4b90e03.zip |
Second step of PG_RELEASED cleanup.
uvm_km deals with kernel memory, which is either part of one of the
kernel maps or the main kernel object (a uao). If km_pgremove hits
a busy page, just sleep on it; a busy page here means some async I/O
is in progress (and that is unlikely). We can remove the released-page
check in uvm_km_alloc1(), since now we will never end up with a removed
but released page in the kernel map (due to the other chunk and the last diff).
ok ariane@. The diff survived several make builds on amd64 and sparc64,
and also forced paging with ariane's evil program.
Diffstat (limited to 'sys/uvm/uvm_km.c')
-rw-r--r-- | sys/uvm/uvm_km.c | 25 |
1 file changed, 7 insertions, 18 deletions
diff --git a/sys/uvm/uvm_km.c b/sys/uvm/uvm_km.c index 1eb9bf2e57d..895a9593173 100644 --- a/sys/uvm/uvm_km.c +++ b/sys/uvm/uvm_km.c @@ -1,4 +1,4 @@ -/* $OpenBSD: uvm_km.c,v 1.70 2009/02/22 19:59:01 miod Exp $ */ +/* $OpenBSD: uvm_km.c,v 1.71 2009/05/05 05:27:53 oga Exp $ */ /* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */ /* @@ -276,8 +276,12 @@ uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end) pp->pg_flags & PG_BUSY, 0, 0); if (pp->pg_flags & PG_BUSY) { - /* owner must check for this when done */ - atomic_setbits_int(&pp->pg_flags, PG_RELEASED); + atomic_setbits_int(&pp->pg_flags, PG_WANTED); + UVM_UNLOCK_AND_WAIT(pp, &uobj->vmobjlock, 0, + "km_pgrm", 0); + simple_lock(&uobj->vmobjlock); + curoff -= PAGE_SIZE; /* loop back to us */ + continue; } else { /* free the swap slot... */ uao_dropswap(uobj, curoff >> PAGE_SHIFT); @@ -511,21 +515,6 @@ uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit) loopva = kva; while (size) { simple_lock(&uvm.kernel_object->vmobjlock); - pg = uvm_pagelookup(uvm.kernel_object, offset); - - /* - * if we found a page in an unallocated region, it must be - * released - */ - if (pg) { - if ((pg->pg_flags & PG_RELEASED) == 0) - panic("uvm_km_alloc1: non-released page"); - atomic_setbits_int(&pg->pg_flags, PG_WANTED); - UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock, - FALSE, "km_alloc", 0); - continue; /* retry */ - } - /* allocate ram */ pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0); if (pg) { |