| author | 1998-02-02 20:14:06 +0000 |
|---|---|
| committer | 1998-02-02 20:14:06 +0000 |
| commit | 79791de65c61f1fc314a1a909ade791af85c62e6 (patch) |
| tree | b67ebe2d4b6d524db0b717e7f5d51abfc5d48a80 |
| parent | The "fix" for running out of kernel maps, from FreeBSD. Integrated by (diff) |
| download | wireguard-openbsd-79791de65c61f1fc314a1a909ade791af85c62e6.tar.xz wireguard-openbsd-79791de65c61f1fc314a1a909ade791af85c62e6.zip |
Replay revisions 1.8 and 1.9.
-rw-r--r-- | sys/vm/vm_map.c | 47
1 files changed, 28 insertions, 19 deletions
```diff
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 2511e25dd0a..4d8c849e772 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vm_map.c,v 1.10 1998/02/02 18:39:49 downsj Exp $	*/
+/*	$OpenBSD: vm_map.c,v 1.11 1998/02/02 20:14:06 downsj Exp $	*/
 /*	$NetBSD: vm_map.c,v 1.23 1996/02/10 00:08:08 christos Exp $	*/
 
 /*
@@ -36,7 +36,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- *	@(#)vm_map.c	8.3 (Berkeley) 1/12/94
+ *	@(#)vm_map.c	8.9 (Berkeley) 5/17/95
  *
  *
  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
@@ -77,7 +77,6 @@
 #include <vm/vm.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
-#include <vm/vm_object.h>
 
 /*
  * Virtual memory maps provide for the mapping, protection,
@@ -285,7 +284,7 @@ vm_map_init(map, min, max, pageable)
 	map->first_free = &map->header;
 	map->hint = &map->header;
 	map->timestamp = 0;
-	lock_init(&map->lock, TRUE);
+	lockinit(&map->lock, PVM, "thrd_sleep", 0, 0);
 	simple_lock_init(&map->ref_lock);
 	simple_lock_init(&map->hint_lock);
 }
@@ -458,12 +457,14 @@ vm_map_deallocate(map)
 	 *	to it.
 	 */
 
-	vm_map_lock(map);
+	vm_map_lock_drain_interlock(map);
 
 	(void) vm_map_delete(map, map->min_offset,
 			map->max_offset);
 
 	pmap_destroy(map->pmap);
 
+	vm_map_unlock(map);
+
 	FREE(map, M_VMMAP);
 }
@@ -1252,7 +1253,7 @@ vm_map_pageable(map, start, end, new_pageable)
 		 * If a region becomes completely unwired,
 		 * unwire its physical pages and mappings.
 		 */
-		lock_set_recursive(&map->lock);
+		vm_map_set_recursive(&map->lock);
 
 		entry = start_entry;
 		while ((entry != &map->header) && (entry->start < end)) {
@@ -1264,7 +1265,7 @@ vm_map_pageable(map, start, end, new_pageable)
 
 			entry = entry->next;
 		}
-		lock_clear_recursive(&map->lock);
+		vm_map_clear_recursive(&map->lock);
 	}
 
 	else {
@@ -1373,8 +1374,8 @@ vm_map_pageable(map, start, end, new_pageable)
 			vm_map_unlock(map); /* trust me ... */
 		}
 		else {
-			lock_set_recursive(&map->lock);
-			lock_write_to_read(&map->lock);
+			vm_map_set_recursive(&map->lock);
+			lockmgr(&map->lock, LK_DOWNGRADE, (void *)0, curproc);
 		}
 
 		rv = 0;
@@ -1405,7 +1406,7 @@ vm_map_pageable(map, start, end, new_pageable)
 			vm_map_lock(map);
 		}
 		else {
-			lock_clear_recursive(&map->lock);
+			vm_map_clear_recursive(&map->lock);
 		}
 		if (rv) {
 			vm_map_unlock(map);
@@ -1450,7 +1451,8 @@ vm_map_clean(map, start, end, syncio, invalidate)
 	}
 
 	/*
-	 * Make a first pass to check for holes.
+	 * Make a first pass to check for holes, and (if invalidating)
+	 * wired pages.
 	 */
 	for (current = entry; current->start < end; current = current->next) {
 		if (current->is_sub_map) {
@@ -1463,6 +1465,10 @@ vm_map_clean(map, start, end, syncio, invalidate)
 			vm_map_unlock_read(map);
 			return(KERN_INVALID_ADDRESS);
 		}
+		if (current->wired_count) {
+			vm_map_unlock_read(map);
+			return(KERN_PAGES_LOCKED);
+		}
 	}
 
 	/*
@@ -2059,7 +2065,7 @@ vm_map_copy(dst_map, src_map,
 	else {
 		new_src_map = src_map;
 		new_src_start = src_entry->start;
-		lock_set_recursive(&src_map->lock);
+		vm_map_set_recursive(&src_map->lock);
 	}
 
 	if (dst_entry->is_a_map) {
@@ -2097,7 +2103,7 @@ vm_map_copy(dst_map, src_map,
 	else {
 		new_dst_map = dst_map;
 		new_dst_start = dst_entry->start;
-		lock_set_recursive(&dst_map->lock);
+		vm_map_set_recursive(&dst_map->lock);
 	}
 
 	/*
@@ -2109,9 +2115,9 @@ vm_map_copy(dst_map, src_map,
 			FALSE, FALSE);
 
 	if (dst_map == new_dst_map)
-		lock_clear_recursive(&dst_map->lock);
+		vm_map_clear_recursive(&dst_map->lock);
 	if (src_map == new_src_map)
-		lock_clear_recursive(&src_map->lock);
+		vm_map_clear_recursive(&src_map->lock);
 }
 
 /*
@@ -2480,7 +2486,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
 			 *	share map to the new object.
 			 */
 
-			if (lock_read_to_write(&share_map->lock)) {
+			if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+			    (void *)0, curproc)) {
 				if (share_map != map)
 					vm_map_unlock_read(map);
 				goto RetryLookup;
@@ -2493,7 +2500,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
 
 			entry->needs_copy = FALSE;
 
-			lock_write_to_read(&share_map->lock);
+			lockmgr(&share_map->lock, LK_DOWNGRADE,
+			    (void *)0, curproc);
 		}
 		else {
 			/*
@@ -2510,7 +2518,8 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
 	 */
 	if (entry->object.vm_object == NULL) {
 
-		if (lock_read_to_write(&share_map->lock)) {
+		if (lockmgr(&share_map->lock, LK_EXCLUPGRADE,
+		    (void *)0, curproc)) {
 			if (share_map != map)
 				vm_map_unlock_read(map);
 			goto RetryLookup;
@@ -2519,7 +2528,7 @@ vm_map_lookup(var_map, vaddr, fault_type, out_entry,
 		entry->object.vm_object = vm_object_allocate(
 				(vm_size_t)(entry->end - entry->start));
 		entry->offset = 0;
-		lock_write_to_read(&share_map->lock);
+		lockmgr(&share_map->lock, LK_DOWNGRADE, (void *)0, curproc);
 	}
 
 	/*
```
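The replayed revisions move vm_map from the old Mach-style lock_* primitives to the 4.4BSD-Lite2 lockmgr(9) interface: lock_init() is replaced by lockinit(), read/write upgrades and downgrades become LK_EXCLUPGRADE and LK_DOWNGRADE requests, and recursive locking is toggled through vm_map_set_recursive()/vm_map_clear_recursive(). The sketch below shows one way such wrappers can be built on top of lockmgr; it is not the vm_map.h that accompanies this commit, and the lk_interlock/lk_flags member names are assumptions made for illustration.

```c
/*
 * Illustrative sketch only -- not the header shipped with this commit.
 * It shows one way the vm_map locking helpers used in the diff can be
 * expressed on top of the 4.4BSD-Lite2 lockmgr(9) interface.
 */
#include <sys/param.h>		/* PVM sleep priority */
#include <sys/lock.h>		/* struct lock, lockinit(), lockmgr(), LK_* flags */
#include <sys/proc.h>		/* curproc */

/* lock_init(&map->lock, TRUE) becomes an initialized lockmgr lock. */
#define vm_map_lock_init(map) \
	lockinit(&(map)->lock, PVM, "thrd_sleep", 0, 0)

/* Exclusive (write) lock and release. */
#define vm_map_lock(map) \
	(void)lockmgr(&(map)->lock, LK_EXCLUSIVE, (void *)0, curproc)
#define vm_map_unlock(map) \
	(void)lockmgr(&(map)->lock, LK_RELEASE, (void *)0, curproc)

/*
 * Recursion control: lock_set_recursive()/lock_clear_recursive() become
 * flag manipulation on the lockmgr lock.  The lk_interlock and lk_flags
 * member names are assumptions for this sketch.
 */
#define vm_map_set_recursive(lkp) do {					\
	simple_lock(&(lkp)->lk_interlock);				\
	(lkp)->lk_flags |= LK_CANRECURSE;				\
	simple_unlock(&(lkp)->lk_interlock);				\
} while (0)
#define vm_map_clear_recursive(lkp) do {				\
	simple_lock(&(lkp)->lk_interlock);				\
	(lkp)->lk_flags &= ~LK_CANRECURSE;				\
	simple_unlock(&(lkp)->lk_interlock);				\
} while (0)
```

In the diff itself, lock_read_to_write() is replaced by an LK_EXCLUPGRADE request, whose non-zero return means the upgrade failed and vm_map_lookup() retries via RetryLookup, and lock_write_to_read() is replaced by LK_DOWNGRADE, matching the error handling already present around the old calls.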