author		2014-04-03 21:40:10 +0000
committer	2014-04-03 21:40:10 +0000
commit		a371dcf1189081b32571db85591a9e2b9b3b3b35 (patch)
tree		0e837d9ba94e20304dd6605f718da2c331d0b39d
parent		if it's ok to wait, it must also be ok to give the kernel lock. do so. (diff)
add a uvm_pause function and use it in the reaper path to prevent the
reaper from hogging the cpu. it does the kernel lock twiddle trick
(release and reacquire) to give other CPUs a chance to run, and also
checks whether the reaper has been running for an entire timeslice and
should be preempted.
ok deraadt
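The mechanism is small enough to read in isolation: uvm_pause() briefly gives up the big kernel lock so another CPU can take it, then yields outright if the scheduler has flagged the current thread as having used a full timeslice. The annotated sketch below mirrors the code added in the diff; reap_deadq() is a hypothetical stand-in for the reaper-path loops the commit modifies, not code from this commit.

/*
 * Annotated sketch of the pattern this commit introduces (the real
 * code is in the diff below; reap_deadq() is illustrative only).
 */
void
uvm_pause(void)
{
	/* Drop and retake the kernel lock so other CPUs get a turn at it. */
	KERNEL_UNLOCK();
	KERNEL_LOCK();
	/* If we have run for a full timeslice, let something else run. */
	if (curcpu()->ci_schedstate.spc_schedflags & SPCF_SHOULDYIELD)
		preempt(NULL);
}

/* Hypothetical long-running loop, used only where sleeping is allowed. */
void
reap_deadq(struct uvm_map_deadq *deadq, int waitable)
{
	struct vm_map_entry *entry;

	while ((entry = TAILQ_FIRST(deadq)) != NULL) {
		if (waitable)
			uvm_pause();	/* don't hog the cpu between entries */
		/* ... tear down and free the entry ... */
	}
}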
-rw-r--r--	sys/uvm/uvm_extern.h	 3
-rw-r--r--	sys/uvm/uvm_glue.c	13
-rw-r--r--	sys/uvm/uvm_map.c	20
3 files changed, 27 insertions, 9 deletions
diff --git a/sys/uvm/uvm_extern.h b/sys/uvm/uvm_extern.h
index 9a49d2d3043..06919d70979 100644
--- a/sys/uvm/uvm_extern.h
+++ b/sys/uvm/uvm_extern.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_extern.h,v 1.111 2014/03/28 17:57:11 mpi Exp $	*/
+/*	$OpenBSD: uvm_extern.h,v 1.112 2014/04/03 21:40:10 tedu Exp $	*/
 /*	$NetBSD: uvm_extern.h,v 1.57 2001/03/09 01:02:12 chs Exp $	*/
 
 /*
@@ -527,6 +527,7 @@ int		uvm_vslock_device(struct proc *, void *, size_t,
 			    vm_prot_t, void **);
 void			uvm_vsunlock_device(struct proc *, void *, size_t,
 			    void *);
+void			uvm_pause(void);
 
 /* uvm_init.c */
diff --git a/sys/uvm/uvm_glue.c b/sys/uvm/uvm_glue.c
index 54aabfec330..62c7d152527 100644
--- a/sys/uvm/uvm_glue.c
+++ b/sys/uvm/uvm_glue.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_glue.c,v 1.60 2013/03/31 17:06:34 deraadt Exp $	*/
+/*	$OpenBSD: uvm_glue.c,v 1.61 2014/04/03 21:40:10 tedu Exp $	*/
 /*	$NetBSD: uvm_glue.c,v 1.44 2001/02/06 19:54:44 eeh Exp $	*/
 
 /*
@@ -475,4 +475,13 @@ uvm_atopg(vaddr_t kva)
 	pg = PHYS_TO_VM_PAGE(pa);
 	KASSERT(pg != NULL);
 	return (pg);
-}
+}
+
+void
+uvm_pause(void)
+{
+	KERNEL_UNLOCK();
+	KERNEL_LOCK();
+	if (curcpu()->ci_schedstate.spc_schedflags & SPCF_SHOULDYIELD)
+		preempt(NULL);
+}
diff --git a/sys/uvm/uvm_map.c b/sys/uvm/uvm_map.c
index f35a3864029..41a266cbb53 100644
--- a/sys/uvm/uvm_map.c
+++ b/sys/uvm/uvm_map.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: uvm_map.c,v 1.164 2014/01/23 22:06:30 miod Exp $	*/
+/*	$OpenBSD: uvm_map.c,v 1.165 2014/04/03 21:40:10 tedu Exp $	*/
 /*	$NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $	*/
 
 /*
@@ -1381,8 +1381,12 @@ void
 uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
 {
 	struct vm_map_entry *entry;
+	int waitable;
 
+	waitable = flags & UVM_PLA_WAITOK;
 	while ((entry = TAILQ_FIRST(deadq)) != NULL) {
+		if (waitable)
+			uvm_pause();
 		/*
 		 * Drop reference to amap, if we've got one.
 		 */
@@ -1390,7 +1394,7 @@ uvm_unmap_detach(struct uvm_map_deadq *deadq, int flags)
 			amap_unref(entry->aref.ar_amap,
 			    entry->aref.ar_pageoff,
 			    atop(entry->end - entry->start),
-			    flags);
+			    flags & AMAP_REFALL);
 
 		/*
 		 * Drop reference to our backing object, if we've got one.
@@ -2322,13 +2326,15 @@ void
 uvm_map_teardown(struct vm_map *map)
 {
 	struct uvm_map_deadq dead_entries;
-	int i;
+	int i, waitable = 0;
 	struct vm_map_entry *entry, *tmp;
 #ifdef VMMAP_DEBUG
 	size_t numq, numt;
 #endif
 
-	if ((map->flags & VM_MAP_INTRSAFE) == 0) {
+	if ((map->flags & VM_MAP_INTRSAFE) == 0)
+		waitable = 1;
+	if (waitable) {
 		if (rw_enter(&map->lock, RW_NOSLEEP | RW_WRITE) != 0)
 			panic("uvm_map_teardown: rw_enter failed on free map");
 	}
@@ -2366,6 +2372,8 @@ uvm_map_teardown(struct vm_map *map)
 	if ((entry = RB_ROOT(&map->addr)) != NULL)
 		DEAD_ENTRY_PUSH(&dead_entries, entry);
 	while (entry != NULL) {
+		if (waitable)
+			uvm_pause();
 		uvm_unmap_kill_entry(map, entry);
 		if ((tmp = RB_LEFT(entry, daddrs.addr_entry)) != NULL)
 			DEAD_ENTRY_PUSH(&dead_entries, tmp);
@@ -2375,7 +2383,7 @@ uvm_map_teardown(struct vm_map *map)
 		entry = TAILQ_NEXT(entry, dfree.deadq);
 	}
 
-	if ((map->flags & VM_MAP_INTRSAFE) == 0)
+	if (waitable)
 		rw_exit(&map->lock);
 
 #ifdef VMMAP_DEBUG
@@ -2386,7 +2394,7 @@ uvm_map_teardown(struct vm_map *map)
 		numq++;
 	KASSERT(numt == numq);
 #endif
-	uvm_unmap_detach(&dead_entries, 0);
+	uvm_unmap_detach(&dead_entries, waitable ? UVM_PLA_WAITOK : 0);
 	pmap_destroy(map->pmap);
 	map->pmap = NULL;
 }