author     2000-03-16 22:11:02 +0000
committer  2000-03-16 22:11:02 +0000
commit     7cb53682cfa1278ff20707d41fced713da20d463
tree       e2721f4c1f592f234243f2ee723ac6ce9bfa0742  /sys/uvm/uvm_fault.c
parent     -pedantic: signed vs. unsigned, void*-arithm, etc
Bring in some new UVM code from NetBSD (not current).
- Introduce a new type of map that is interrupt safe and never allows faults
  in it.  mb_map and kmem_map are made intrsafe.
- Add "access protection" to uvm_vslock (to be passed down to uvm_fault and
later to pmap_enter).
- madvise(2) now works.
- various cleanups.
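
For context, a rough idea of how the uvm_vslock() plumbing described above hangs
together.  uvm_vslock() lives in uvm_glue.c and is not part of this diff, so the
body below is only a sketch based on the description in the commit message, not
the committed code:

/*
 * Sketch only (assumed, not from this diff): uvm_vslock() takes the
 * access type the caller needs and hands it to uvm_fault_wire(), which
 * passes it on to uvm_fault() for every page in the range.
 */
void
uvm_vslock(p, addr, len, access_type)
	struct proc *p;
	caddr_t addr;
	size_t len;
	vm_prot_t access_type;
{
	uvm_fault_wire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len), access_type);
}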
Diffstat (limited to 'sys/uvm/uvm_fault.c')
-rw-r--r--  sys/uvm/uvm_fault.c  45
1 file changed, 37 insertions(+), 8 deletions(-)
diff --git a/sys/uvm/uvm_fault.c b/sys/uvm/uvm_fault.c
index d05c5c30282..b4002aceb87 100644
--- a/sys/uvm/uvm_fault.c
+++ b/sys/uvm/uvm_fault.c
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_fault.c,v 1.28 1999/04/11 04:04:11 chs Exp $	*/
+/*	$NetBSD: uvm_fault.c,v 1.33 1999/06/04 23:38:41 thorpej Exp $	*/
 
 /*
  *
@@ -587,6 +587,19 @@ uvm_fault(orig_map, vaddr, fault_type, access_type)
 	narrow = FALSE;		/* normal fault */
 
 	/*
+	 * before we do anything else, if this is a fault on a kernel
+	 * address, check to see if the address is managed by an
+	 * interrupt-safe map.  If it is, we fail immediately.  Intrsafe
+	 * maps are never pageable, and this approach avoids an evil
+	 * locking mess.
+	 */
+	if (orig_map == kernel_map && uvmfault_check_intrsafe(&ufi)) {
+		UVMHIST_LOG(maphist, "<- VA 0x%lx in intrsafe map %p",
+		    ufi.orig_rvaddr, ufi.map, 0, 0);
+		return (KERN_FAILURE);
+	}
+
+	/*
 	 * "goto ReFault" means restart the page fault from ground zero.
 	 */
 ReFault:
@@ -614,6 +627,17 @@ ReFault:
 	}
 
 	/*
+	 * if the map is not a pageable map, a page fault always fails.
+	 */
+
+	if ((ufi.map->flags & VM_MAP_PAGEABLE) == 0) {
+		UVMHIST_LOG(maphist,
+		    "<- map %p not pageable", ufi.map, 0, 0, 0);
+		uvmfault_unlockmaps(&ufi, FALSE);
+		return (KERN_FAILURE);
+	}
+
+	/*
 	 * "enter_prot" is the protection we want to enter the page in at.
 	 * for certain pages (e.g. copy-on-write pages) this protection can
 	 * be more strict than ufi.entry->protection.  "wired" means either
@@ -1689,9 +1713,10 @@ Case2:
 	 */
 
 int
-uvm_fault_wire(map, start, end)
+uvm_fault_wire(map, start, end, access_type)
 	vm_map_t map;
 	vaddr_t start, end;
+	vm_prot_t access_type;
 {
 	vaddr_t va;
 	pmap_t pmap;
@@ -1713,10 +1738,10 @@ uvm_fault_wire(map, start, end)
 	 */
 	for (va = start ; va < end ; va += PAGE_SIZE) {
-		rv = uvm_fault(map, va, VM_FAULT_WIRE, VM_PROT_NONE);
+		rv = uvm_fault(map, va, VM_FAULT_WIRE, access_type);
 		if (rv) {
 			if (va != start) {
-				uvm_fault_unwire(map->pmap, start, va);
+				uvm_fault_unwire(map, start, va);
 			}
 			return (rv);
 		}
 	}
@@ -1727,19 +1752,23 @@
 /*
  * uvm_fault_unwire(): unwire range of virtual space.
- *
- * => caller holds reference to pmap (via its map)
  */
 
 void
-uvm_fault_unwire(pmap, start, end)
-	struct pmap *pmap;
+uvm_fault_unwire(map, start, end)
+	vm_map_t map;
 	vaddr_t start, end;
 {
+	pmap_t pmap = vm_map_pmap(map);
 	vaddr_t va;
 	paddr_t pa;
 	struct vm_page *pg;
 
+#ifdef DIAGNOSTIC
+	if (map->flags & VM_MAP_INTRSAFE)
+		panic("uvm_fault_unwire: intrsafe map");
+#endif
+
 	/*
 	 * we assume that the area we are unwiring has actually been wired
 	 * in the first place.  this means that we should be able to extract
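
To make the uvm_fault_wire()/uvm_fault_unwire() interface change concrete, here
is a hypothetical caller (example_wire_buffer is invented for illustration and
exists nowhere in the tree): wiring now states the protection it actually needs
instead of VM_PROT_NONE, and the matching unwire takes the map rather than
map->pmap, which lets uvm_fault_unwire() derive the pmap itself and sanity-check
VM_MAP_INTRSAFE.

/*
 * Hypothetical example, not part of this commit: wire a range for
 * read/write access and later unwire it using the new interface.
 */
int
example_wire_buffer(map, start, end)
	vm_map_t map;
	vaddr_t start, end;
{
	int rv;

	/* fault the pages in, asking up front for the access we will do */
	rv = uvm_fault_wire(map, start, end, VM_PROT_READ | VM_PROT_WRITE);
	if (rv)
		return (rv);		/* KERN_* error from uvm_fault() */

	/* ... operate on the wired range ... */

	/* unwire by map; the pmap is now looked up via vm_map_pmap() */
	uvm_fault_unwire(map, start, end);
	return (KERN_SUCCESS);
}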