author		kettenis <kettenis@openbsd.org>	2019-06-01 11:45:01 +0000
committer	kettenis <kettenis@openbsd.org>	2019-06-01 11:45:01 +0000
commit		e52ba23703a885a96256f86cdc327f424dba1405 (patch)
tree		f88247058c23781f1066d84a76b1c38b5a385530
parent		Fix warnings when building zlib with ZLIB_CONST defined (diff)
On machines with large amounts of physical memory we fail to initialize
uvm because we don't have enough kernel memory available in the early
bootstrap phase to allocate the vm_page structures.  Fix this by making
pmap_growkernel() work before uvm is initialized, like we do on other
architectures that don't use a direct map, and by explicitly calling it
after enumerating the available physical memory, with an estimate of how
much KVA we need to initialize uvm.

ok patrick@
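The estimate passed to pmap_growkernel() in this commit is simple
arithmetic: the 1 GB of kernel address space that pmap_bootstrap()
already maps, plus room for one struct vm_page per page of physical
memory.  A standalone sketch of that calculation follows; the machine
parameters (16 GB of RAM, 4 KB pages, a 128-byte struct vm_page) are
assumptions for illustration, not values taken from the commit.

/*
 * Hypothetical worked example of the KVA estimate; not kernel code.
 */
#include <stdio.h>

int
main(void)
{
	/* Assumed machine: 16 GB of RAM in 4 KB pages. */
	unsigned long long physmem = 16ULL * 1024 * 1024 * 1024 / 4096;
	/* Assumed sizeof(struct vm_page); the real size is MD/version dependent. */
	unsigned long long vm_page_sz = 128;
	unsigned long long estimate;

	/* 1 GB already mapped by pmap_bootstrap(), plus the vm_page array. */
	estimate = 1024ULL * 1024 * 1024 + physmem * vm_page_sz;
	printf("grow KVA to VM_MIN_KERNEL_ADDRESS + %llu MB\n", estimate >> 20);
	return 0;
}

On such a machine the call asks for roughly 1.5 GB of KVA, comfortably
more than the vm_page array alone needs.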
-rw-r--r--	sys/arch/arm64/arm64/machdep.c	10
-rw-r--r--	sys/arch/arm64/arm64/pmap.c	69
2 files changed, 66 insertions(+), 13 deletions(-)
diff --git a/sys/arch/arm64/arm64/machdep.c b/sys/arch/arm64/arm64/machdep.c
index f8e1a990700..9040ed4d536 100644
--- a/sys/arch/arm64/arm64/machdep.c
+++ b/sys/arch/arm64/arm64/machdep.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: machdep.c,v 1.40 2019/05/28 20:32:30 patrick Exp $	*/
+/*	$OpenBSD: machdep.c,v 1.41 2019/06/01 11:45:01 kettenis Exp $	*/
 /*
  * Copyright (c) 2014 Patrick Wildt <patrick@blueri.se>
  *
@@ -1066,6 +1066,14 @@ initarm(struct arm64_bootparams *abp)
 		}
 	}
 
+	/*
+	 * Make sure that we have enough KVA to initialize UVM.  In
+	 * particular, we need enough KVA to be able to allocate the
+	 * vm_page structures.
+	 */
+	pmap_growkernel(VM_MIN_KERNEL_ADDRESS + 1024 * 1024 * 1024 +
+	    physmem * sizeof(struct vm_page));
+
 #ifdef DDB
 	db_machine_init();
diff --git a/sys/arch/arm64/arm64/pmap.c b/sys/arch/arm64/arm64/pmap.c
index d55d733b57a..a3d41083ffd 100644
--- a/sys/arch/arm64/arm64/pmap.c
+++ b/sys/arch/arm64/arm64/pmap.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pmap.c,v 1.62 2019/05/28 20:32:30 patrick Exp $	*/
+/*	$OpenBSD: pmap.c,v 1.63 2019/06/01 11:45:01 kettenis Exp $	*/
 /*
  * Copyright (c) 2008-2009,2014-2016 Dale Rahn <drahn@dalerahn.com>
  *
@@ -963,6 +963,7 @@ pmap_vp_destroy(pmap_t pm)
 }
 
 vaddr_t virtual_avail, virtual_end;
+int pmap_virtual_space_called;
 
 static inline uint64_t
 VP_Lx(paddr_t pa)
@@ -975,6 +976,12 @@ VP_Lx(paddr_t pa)
 }
 
 /*
+ * In pmap_bootstrap() we allocate the page tables for the first GB
+ * of the kernel address space.
+ */
+vaddr_t pmap_maxkvaddr = VM_MIN_KERNEL_ADDRESS + 1024 * 1024 * 1024;
+
+/*
  * Allocator for growing the kernel page tables. We use a dedicated
  * submap to make sure we have the space to map them as we are called
  * when address space is tight!
@@ -990,7 +997,30 @@ const struct kmem_va_mode kv_kvp = {
 void *
 pmap_kvp_alloc(void)
 {
-	return km_alloc(sizeof(struct pmapvp0), &kv_kvp, &kp_zero, &kd_nowait);
+	void *kvp;
+
+	if (!uvm.page_init_done && !pmap_virtual_space_called) {
+		paddr_t pa[2];
+		vaddr_t va;
+
+		if (!uvm_page_physget(&pa[0]) || !uvm_page_physget(&pa[1]))
+			panic("%s: out of memory", __func__);
+
+		va = virtual_avail;
+		virtual_avail += 2 * PAGE_SIZE;
+		KASSERT(virtual_avail <= pmap_maxkvaddr);
+		kvp = (void *)va;
+
+		pmap_kenter_pa(va, pa[0], PROT_READ|PROT_WRITE);
+		pmap_kenter_pa(va + PAGE_SIZE, pa[1], PROT_READ|PROT_WRITE);
+		pagezero_cache(va);
+		pagezero_cache(va + PAGE_SIZE);
+	} else {
+		kvp = km_alloc(sizeof(struct pmapvp0), &kv_kvp, &kp_zero,
+		    &kd_nowait);
+	}
+
+	return kvp;
 }
 
 struct pte_desc *
@@ -1000,9 +1030,27 @@ pmap_kpted_alloc(void)
 	static int npted;
 
 	if (npted == 0) {
-		pted = km_alloc(PAGE_SIZE, &kv_kvp, &kp_zero, &kd_nowait);
-		if (pted == NULL)
-			return NULL;
+		if (!uvm.page_init_done && !pmap_virtual_space_called) {
+			paddr_t pa;
+			vaddr_t va;
+
+			if (!uvm_page_physget(&pa))
+				panic("%s: out of memory", __func__);
+
+			va = virtual_avail;
+			virtual_avail += PAGE_SIZE;
+			KASSERT(virtual_avail <= pmap_maxkvaddr);
+			pted = (struct pte_desc *)va;
+
+			pmap_kenter_pa(va, pa, PROT_READ|PROT_WRITE);
+			pagezero_cache(va);
+		} else {
+			pted = km_alloc(PAGE_SIZE, &kv_kvp, &kp_zero,
+			    &kd_nowait);
+			if (pted == NULL)
+				return NULL;
+		}
+
 		npted = PAGE_SIZE / sizeof(struct pte_desc);
 	}
 
@@ -1010,12 +1058,6 @@ pmap_kpted_alloc(void)
 	return pted++;
 }
 
-/*
- * In pmap_bootstrap() we allocate the page tables for the first GB
- * of the kernel address space.
- */
-vaddr_t pmap_maxkvaddr = VM_MIN_KERNEL_ADDRESS + 1024 * 1024 * 1024;
-
 vaddr_t
 pmap_growkernel(vaddr_t maxkvaddr)
 {
@@ -1097,7 +1139,7 @@ void pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo);
  * ALL of the code which deals with avail needs rewritten as an actual
  * memory allocation.
  */
-CTASSERT(sizeof(struct pmapvp0) == 8192);
+CTASSERT(sizeof(struct pmapvp0) == 2 * PAGE_SIZE);
 
 int mappings_allocated = 0;
 int pted_allocated = 0;
@@ -1911,6 +1953,9 @@ pmap_virtual_space(vaddr_t *start, vaddr_t *end)
 {
 	*start = virtual_avail;
 	*end = virtual_end;
+
+	/* Prevent further KVA stealing. */
+	pmap_virtual_space_called = 1;
 }
 
 void
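The new early-boot paths in pmap_kvp_alloc() and pmap_kpted_alloc()
above share one pattern: while uvm.page_init_done is false and
pmap_virtual_space() has not yet handed the remaining KVA to uvm, pages
are pulled straight off the physical free list with uvm_page_physget()
and mapped at virtual_avail, which is bumped one page at a time and
must stay below pmap_maxkvaddr.  The following is a simplified
userspace model of that pattern; every name in it is a stand-in for the
kernel interface it mimics, not the real OpenBSD API.

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define NFREEPAGES	8

/* Fake physical free list, standing in for uvm_page_physget(). */
static unsigned long long freepages[NFREEPAGES];
static int nfree;

/* KVA window reserved at "bootstrap"; bumped one page at a time. */
static unsigned long long virtual_avail = 0xffff800000000000ULL;
static unsigned long long pmap_max = 0xffff800000000000ULL + 4 * PAGE_SIZE;

static int
page_physget(unsigned long long *pa)
{
	if (nfree == 0)
		return 0;
	*pa = freepages[--nfree];
	return 1;
}

/*
 * Shape of the early-boot branch in pmap_kvp_alloc(): take a physical
 * page, carve a virtual page out of virtual_avail, and pair them up.
 * The kernel would pmap_kenter_pa() and zero the page here, and panic
 * rather than return 0 when the free list is empty.
 */
static unsigned long long
bootstrap_alloc_page(void)
{
	unsigned long long pa, va;

	if (!page_physget(&pa))
		return 0;

	va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	assert(virtual_avail <= pmap_max);

	printf("backed va %#llx with pa %#llx\n", va, pa);
	return va;
}

int
main(void)
{
	int i;

	for (i = 0; i < NFREEPAGES; i++)
		freepages[i] = 0x40000000ULL + i * PAGE_SIZE;
	nfree = NFREEPAGES;

	bootstrap_alloc_page();
	bootstrap_alloc_page();
	return 0;
}

Once uvm is initialized, the same allocators fall back to km_alloc(),
and pmap_virtual_space() sets pmap_virtual_space_called so that no
further KVA is stolen behind uvm's back.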