summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorvisa <visa@openbsd.org>2016-12-22 15:33:36 +0000
committervisa <visa@openbsd.org>2016-12-22 15:33:36 +0000
commitf39ec411eac2ee7ad8fd5ee4e1acd655f3b38978 (patch)
tree45f33033a421dbb23c66deabaab875b7fda276c0
parentLearn remote switch flow tables properties to find out where to install (diff)
downloadwireguard-openbsd-f39ec411eac2ee7ad8fd5ee4e1acd655f3b38978.tar.xz
wireguard-openbsd-f39ec411eac2ee7ad8fd5ee4e1acd655f3b38978.zip
Extend the size of user virtual address space from 2GB to 1TB on mips64
by adding another level to page directories. This improves ASLR and complements W^X added earlier on some systems, giving a notable update to the architecture's security. Besides, there is now more room for running tasks that hog memory. Testing help from deraadt@ and fcambus@. Platforms tested: loongson, octeon, sgi/IP27 and sgi/IP30 (IP30 also with 4KB pages).
-rw-r--r--sys/arch/mips64/include/pmap.h63
-rw-r--r--sys/arch/mips64/include/vmparam.h10
-rw-r--r--sys/arch/mips64/mips64/exception_tfp.S9
-rw-r--r--sys/arch/mips64/mips64/genassym.cf4
-rw-r--r--sys/arch/mips64/mips64/pmap.c302
-rw-r--r--sys/arch/mips64/mips64/r4000_errata.c17
-rw-r--r--sys/arch/mips64/mips64/tlbhandler.S8
7 files changed, 255 insertions, 158 deletions
diff --git a/sys/arch/mips64/include/pmap.h b/sys/arch/mips64/include/pmap.h
index f1d6852fd75..33fbc7cb094 100644
--- a/sys/arch/mips64/include/pmap.h
+++ b/sys/arch/mips64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.45 2016/12/06 16:27:33 visa Exp $ */
+/* $OpenBSD: pmap.h,v 1.46 2016/12/22 15:33:36 visa Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -45,20 +45,22 @@
#include <machine/pte.h>
/*
- * The user address space is currently limited to 2Gb (0x0 - 0x80000000).
+ * The user address space is currently limited to 1TB (0x0 - 0x10000000000).
*
* The user address space is mapped using a two level structure where
* the virtual addresses bits are split in three groups:
- * segment:page:offset
+ * segment:directory:page:offset
* where:
* - offset are the in-page offsets (PAGE_SHIFT bits)
- * - page are the second level page table index
- * (PMAP_L2SHIFT - Log2(pt_entry_t) bits)
+ * - page are the third level page table index
+ * (PMAP_PGSHIFT - Log2(pt_entry_t) bits)
+ * - directory are the second level page table (directory) index
+ * (PMAP_PGSHIFT - Log2(void *) bits)
* - segment are the first level page table (segment) index
- * (PMAP_L2SHIFT - Log2(void *) bits)
+ * (PMAP_PGSHIFT - Log2(void *) bits)
*
- * This scheme allows Segment and page tables have the same size
- * (1 << PMAP_L2SHIFT bytes, regardless of the pt_entry_t size) to be able to
+ * This scheme allows Segment, directory and page tables have the same size
+ * (1 << PMAP_PGSHIFT bytes, regardless of the pt_entry_t size) to be able to
* share the same allocator.
*
* Note: The kernel doesn't use the same data structures as user programs.
@@ -66,37 +68,43 @@
* dynamically allocated at boot time.
*/
+#if defined(MIPS_PTE64) && PAGE_SHIFT == 12
+#error "Cannot use MIPS_PTE64 with 4KB pages."
+#endif
+
/*
- * Size of second level page structs (page tables, and segment table) used
- * by this pmap.
+ * Size of page table structs (page tables, page directories,
+ * and segment table) used by this pmap.
*/
-#ifdef MIPS_PTE64
-#define PMAP_L2SHIFT 14
-#else
-#define PMAP_L2SHIFT 12
-#endif
-#define PMAP_L2SIZE (1UL << PMAP_L2SHIFT)
+#define PMAP_PGSHIFT 12
+#define PMAP_PGSIZE (1UL << PMAP_PGSHIFT)
-#define NPTEPG (PMAP_L2SIZE / sizeof(pt_entry_t))
+#define NPDEPG (PMAP_PGSIZE / sizeof(void *))
+#define NPTEPG (PMAP_PGSIZE / sizeof(pt_entry_t))
/*
* Segment sizes
*/
-#define SEGSHIFT (PAGE_SHIFT + PMAP_L2SHIFT - PTE_LOG)
+#define SEGSHIFT (PAGE_SHIFT+PMAP_PGSHIFT*2-PTE_LOG-3)
+#define DIRSHIFT (PAGE_SHIFT+PMAP_PGSHIFT-PTE_LOG)
#define NBSEG (1UL << SEGSHIFT)
+#define NBDIR (1UL << DIRSHIFT)
#define SEGOFSET (NBSEG - 1)
+#define DIROFSET (NBDIR - 1)
#define mips_trunc_seg(x) ((vaddr_t)(x) & ~SEGOFSET)
+#define mips_trunc_dir(x) ((vaddr_t)(x) & ~DIROFSET)
#define mips_round_seg(x) (((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET)
+#define mips_round_dir(x) (((vaddr_t)(x) + DIROFSET) & ~DIROFSET)
#define pmap_segmap(m, v) ((m)->pm_segtab->seg_tab[((v) >> SEGSHIFT)])
/* number of segments entries */
-#define PMAP_SEGTABSIZE (PMAP_L2SIZE / sizeof(void *))
+#define PMAP_SEGTABSIZE (PMAP_PGSIZE / sizeof(void *))
struct segtab {
- pt_entry_t *seg_tab[PMAP_SEGTABSIZE];
+ pt_entry_t **seg_tab[PMAP_SEGTABSIZE];
};
struct pmap_asid_info {
@@ -123,7 +131,6 @@ typedef struct pmap {
(ALIGN(sizeof(struct pmap) + \
(sizeof(struct pmap_asid_info) * ((x) - 1))))
-
/* machine-dependent pg_flags */
#define PGF_UNCACHED PG_PMAP0 /* Page is explicitely uncached */
#define PGF_CACHED PG_PMAP1 /* Page is currently cached */
@@ -157,7 +164,6 @@ vaddr_t pmap_prefer(vaddr_t, vaddr_t);
int pmap_emulate_modify(pmap_t, vaddr_t);
void pmap_page_cache(vm_page_t, u_int);
-#define pmap_collect(x) do { /* nothing */ } while (0)
#define pmap_unuse_final(p) do { /* nothing yet */ } while (0)
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
@@ -188,6 +194,19 @@ vm_page_t pmap_unmap_direct(vaddr_t);
(Sysmap + (((vaddr_t)(va) - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT))
/* User virtual address to pte page entry */
#define uvtopte(va) (((va) >> PAGE_SHIFT) & (NPTEPG -1))
+#define uvtopde(va) (((va) >> DIRSHIFT) & (NPDEPG - 1))
+
+static inline pt_entry_t *
+pmap_pte_lookup(struct pmap *pmap, vaddr_t va)
+{
+ pt_entry_t **pde, *pte;
+
+ if ((pde = pmap_segmap(pmap, va)) == NULL)
+ return NULL;
+ if ((pte = pde[uvtopde(va)]) == NULL)
+ return NULL;
+ return pte + uvtopte(va);
+}
extern pt_entry_t *Sysmap; /* kernel pte table */
extern u_int Sysmapsize; /* number of pte's in Sysmap */
diff --git a/sys/arch/mips64/include/vmparam.h b/sys/arch/mips64/include/vmparam.h
index ef8ba3edc5a..672e2042313 100644
--- a/sys/arch/mips64/include/vmparam.h
+++ b/sys/arch/mips64/include/vmparam.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: vmparam.h,v 1.27 2014/01/30 18:16:41 miod Exp $ */
+/* $OpenBSD: vmparam.h,v 1.28 2016/12/22 15:33:36 visa Exp $ */
/* $NetBSD: vmparam.h,v 1.5 1994/10/26 21:10:10 cgd Exp $ */
/*
@@ -57,7 +57,7 @@
#define DFLDSIZ (128*1024*1024) /* initial data size limit */
#endif
#ifndef MAXDSIZ
-#define MAXDSIZ (1*1024*1024*1024) /* max data size */
+#define MAXDSIZ (16UL*1024*1024*1024) /* max data size */
#endif
#ifndef BRKSIZ
#define BRKSIZ MAXDSIZ /* heap gap size */
@@ -97,14 +97,14 @@
/* user/kernel map constants */
#define VM_MIN_ADDRESS ((vaddr_t)0x0000000000004000L)
-#define VM_MAXUSER_ADDRESS ((vaddr_t)0x0000000080000000L)
+#define VM_MAXUSER_ADDRESS ((vaddr_t)0x0000010000000000L)
#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
#define VM_MIN_KERNEL_ADDRESS ((vaddr_t)0xc000000000000000L)
#define VM_MAX_KERNEL_ADDRESS ((vaddr_t)0xc000000040000000L)
-/* map PIE below 256MB (non-pie link address) to avoid mmap pressure */
+/* map PIE below 256GB (non-pie link address) to avoid mmap pressure */
#define VM_PIE_MIN_ADDR PAGE_SIZE
-#define VM_PIE_MAX_ADDR (0x10000000UL)
+#define VM_PIE_MAX_ADDR (0x4000000000UL)
/* virtual sizes (bytes) for various kernel submaps */
#define VM_PHYS_SIZE (USRIOSIZE*PAGE_SIZE)
diff --git a/sys/arch/mips64/mips64/exception_tfp.S b/sys/arch/mips64/mips64/exception_tfp.S
index fb603a1b257..1169ffafb72 100644
--- a/sys/arch/mips64/mips64/exception_tfp.S
+++ b/sys/arch/mips64/mips64/exception_tfp.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: exception_tfp.S,v 1.5 2016/09/06 13:41:29 visa Exp $ */
+/* $OpenBSD: exception_tfp.S,v 1.6 2016/12/22 15:33:36 visa Exp $ */
/*
* Copyright (c) 2012 Miodrag Vallat.
@@ -96,6 +96,13 @@ utlb_miss:
MFC0_HAZARD
PTR_SLL k0, LOGREGSZ
PTR_ADDU k1, k0
+ PTR_L k1, 0(k1) # get pointer to page directory
+ DMFC0 k0, COP_0_WORK0 # saved COP_0_VADDR
+ MFC0_HAZARD
+ PTR_SRL k0, (DIRSHIFT - LOGREGSZ)
+ beqz k1, _inv_seg
+ andi k0, (NPDEPG - 1) << LOGREGSZ
+ PTR_ADDU k1, k0
PTR_L k1, 0(k1) # get pointer to page table
DMFC0 k0, COP_0_WORK0 # saved COP_0_VADDR
MFC0_HAZARD
diff --git a/sys/arch/mips64/mips64/genassym.cf b/sys/arch/mips64/mips64/genassym.cf
index 16c107d2b93..d1e399db392 100644
--- a/sys/arch/mips64/mips64/genassym.cf
+++ b/sys/arch/mips64/mips64/genassym.cf
@@ -1,4 +1,4 @@
-# $OpenBSD: genassym.cf,v 1.13 2016/08/16 13:03:58 visa Exp $
+# $OpenBSD: genassym.cf,v 1.14 2016/12/22 15:33:36 visa Exp $
#
# Copyright (c) 1997 Per Fogelstrom / Opsycon AB
#
@@ -68,9 +68,11 @@ member VMSPACE_PMAP vm_map.pmap
struct pmap
member PM_ASID pm_asid[0].pma_asid
+export NPDEPG
export NPTEPG
export PMAP_SEGTABSIZE
export SEGSHIFT
+export DIRSHIFT
export CCA_CACHED
export CCA_COHERENT_EXCLWRITE
diff --git a/sys/arch/mips64/mips64/pmap.c b/sys/arch/mips64/mips64/pmap.c
index ef4f1f53b40..ac77c453ba4 100644
--- a/sys/arch/mips64/mips64/pmap.c
+++ b/sys/arch/mips64/mips64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.95 2016/11/21 13:50:22 visa Exp $ */
+/* $OpenBSD: pmap.c,v 1.96 2016/12/22 15:33:36 visa Exp $ */
/*
* Copyright (c) 2001-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -360,7 +360,7 @@ pmap_bootstrap(void)
"pmappl", NULL);
pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, IPL_VM, 0,
"pvpl", NULL);
- pool_init(&pmap_pg_pool, PMAP_L2SIZE, PMAP_L2SIZE, IPL_VM, 0,
+ pool_init(&pmap_pg_pool, PMAP_PGSIZE, PMAP_PGSIZE, IPL_VM, 0,
"pmappgpl", &pmap_pg_allocator);
pmap_kernel()->pm_count = 1;
@@ -502,8 +502,7 @@ extern struct user *proc0paddr;
mtx_init(&pmap->pm_mtx, IPL_VM);
pmap->pm_count = 1;
- pmap->pm_segtab = (struct segtab *)pool_get(&pmap_pg_pool,
- PR_WAITOK | PR_ZERO);
+ pmap->pm_segtab = pool_get(&pmap_pg_pool, PR_WAITOK | PR_ZERO);
if (pmap == vmspace0.vm_map.pmap) {
/*
@@ -534,7 +533,12 @@ extern struct user *proc0paddr;
void
pmap_destroy(pmap_t pmap)
{
+ pt_entry_t **pde, *pte;
int count;
+ unsigned int i, j;
+#ifdef PARANOIA
+ unsigned int k;
+#endif
DPRINTF(PDB_FOLLOW|PDB_CREATE, ("pmap_destroy(%p)\n", pmap));
@@ -543,24 +547,27 @@ pmap_destroy(pmap_t pmap)
return;
if (pmap->pm_segtab) {
- pt_entry_t *pte;
- int i;
-#ifdef PARANOIA
- int j;
-#endif
-
for (i = 0; i < PMAP_SEGTABSIZE; i++) {
/* get pointer to segment map */
- pte = pmap->pm_segtab->seg_tab[i];
- if (!pte)
+ if ((pde = pmap->pm_segtab->seg_tab[i]) == NULL)
continue;
+ for (j = 0; j < NPDEPG; j++) {
+ if ((pte = pde[j]) == NULL)
+ continue;
#ifdef PARANOIA
- for (j = 0; j < NPTEPG; j++) {
- if (pte[j] != PG_NV)
- panic("pmap_destroy: segmap %p not empty at index %d", pte, j);
- }
+ for (k = 0; k < NPTEPG; k++) {
+ if (pte[k] != PG_NV)
+ panic("pmap_destroy(%p): "
+ "pgtab %p not empty at "
+ "index %u", pmap, pte, k);
+ }
#endif
- pool_put(&pmap_pg_pool, pte);
+ pool_put(&pmap_pg_pool, pte);
+#ifdef PARANOIA
+ pde[j] = NULL;
+#endif
+ }
+ pool_put(&pmap_pg_pool, pde);
#ifdef PARANOIA
pmap->pm_segtab->seg_tab[i] = NULL;
#endif
@@ -574,6 +581,59 @@ pmap_destroy(pmap_t pmap)
pool_put(&pmap_pmap_pool, pmap);
}
+void
+pmap_collect(pmap_t pmap)
+{
+ void *pmpg;
+ pt_entry_t **pde, *pte;
+ unsigned int i, j, k;
+ unsigned int m, n;
+
+ DPRINTF(PDB_FOLLOW, ("pmap_collect(%p)\n", pmap));
+
+ /* There is nothing to garbage collect in the kernel pmap. */
+ if (pmap == pmap_kernel())
+ return;
+
+ pmap_lock(pmap);
+
+ /*
+ * When unlinking a directory page, the subsequent call to
+ * pmap_shootdown_page() lets any parallel lockless directory
+ * traversals end before the page gets freed.
+ */
+
+ for (i = 0; i < PMAP_SEGTABSIZE; i++) {
+ if ((pde = pmap->pm_segtab->seg_tab[i]) == NULL)
+ continue;
+ m = 0;
+ for (j = 0; j < NPDEPG; j++) {
+ if ((pte = pde[j]) == NULL)
+ continue;
+ m++;
+ n = 0;
+ for (k = 0; k < NPTEPG; k++) {
+ if (pte[k] & PG_V)
+ n++;
+ }
+ if (n == 0) {
+ pmpg = pde[j];
+ pde[j] = NULL;
+ pmap_shootdown_page(pmap, 0);
+ pool_put(&pmap_pg_pool, pmpg);
+ }
+ }
+ if (m == 0) {
+ pmpg = pmap->pm_segtab->seg_tab[i];
+ pmap->pm_segtab->seg_tab[i] = NULL;
+ pmap_shootdown_page(pmap, 0);
+ pool_put(&pmap_pg_pool, pmpg);
+ }
+ }
+
+ pmap_unlock(pmap);
+}
+
/*
* Add a reference to the specified pmap.
*/
@@ -623,8 +683,8 @@ pmap_deactivate(struct proc *p)
void
pmap_do_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
{
- vaddr_t nssva;
- pt_entry_t *pte, entry;
+ vaddr_t ndsva, nssva;
+ pt_entry_t ***seg, **pde, *pte, entry;
paddr_t pa;
struct cpu_info *ci = curcpu();
@@ -670,40 +730,41 @@ pmap_do_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
if (eva > VM_MAXUSER_ADDRESS)
panic("pmap_remove: uva not in range");
#endif
- while (sva < eva) {
+ /*
+ * Invalidate every valid mapping within the range.
+ */
+ seg = &pmap_segmap(pmap, sva);
+ for ( ; sva < eva; sva = nssva, seg++) {
nssva = mips_trunc_seg(sva) + NBSEG;
- if (nssva == 0 || nssva > eva)
- nssva = eva;
- /*
- * If VA belongs to an unallocated segment,
- * skip to the next segment boundary.
- */
- if (!(pte = pmap_segmap(pmap, sva))) {
- sva = nssva;
+ if (*seg == NULL)
continue;
- }
- /*
- * Invalidate every valid mapping within this segment.
- */
- pte += uvtopte(sva);
- for (; sva < nssva; sva += PAGE_SIZE, pte++) {
- entry = *pte;
- if (!(entry & PG_V))
+ pde = *seg + uvtopde(sva);
+ for ( ; sva < eva && sva < nssva; sva = ndsva, pde++) {
+ ndsva = mips_trunc_dir(sva) + NBDIR;
+ if (*pde == NULL)
continue;
- if (entry & PG_WIRED)
- atomic_dec_long(&pmap->pm_stats.wired_count);
- atomic_dec_long(&pmap->pm_stats.resident_count);
- pa = pfn_to_pad(entry);
- if ((entry & PG_CACHEMODE) == PG_CACHED)
- Mips_SyncDCachePage(ci, sva, pa);
- pmap_remove_pv(pmap, sva, pa);
- *pte = PG_NV;
- /*
- * Flush the TLB for the given address.
- */
- pmap_invalidate_user_page(pmap, sva);
- pmap_shootdown_page(pmap, sva);
- stat_count(remove_stats.flushes);
+ pte = *pde + uvtopte(sva);
+ for ( ; sva < eva && sva < ndsva;
+ sva += PAGE_SIZE, pte++) {
+ entry = *pte;
+ if (!(entry & PG_V))
+ continue;
+ if (entry & PG_WIRED)
+ atomic_dec_long(
+ &pmap->pm_stats.wired_count);
+ atomic_dec_long(&pmap->pm_stats.resident_count);
+ pa = pfn_to_pad(entry);
+ if ((entry & PG_CACHEMODE) == PG_CACHED)
+ Mips_SyncDCachePage(ci, sva, pa);
+ pmap_remove_pv(pmap, sva, pa);
+ *pte = PG_NV;
+ /*
+ * Flush the TLB for the given address.
+ */
+ pmap_invalidate_user_page(pmap, sva);
+ pmap_shootdown_page(pmap, sva);
+ stat_count(remove_stats.flushes);
+ }
}
}
}
@@ -751,9 +812,9 @@ pmap_page_wrprotect(struct vm_page *pg, vm_prot_t prot)
pmap_update_kernel_page(pv->pv_va, entry);
pmap_shootdown_page(pmap_kernel(), pv->pv_va);
} else if (pv->pv_pmap != NULL) {
- if ((pte = pmap_segmap(pv->pv_pmap, pv->pv_va)) == NULL)
+ pte = pmap_pte_lookup(pv->pv_pmap, pv->pv_va);
+ if (pte == NULL)
continue;
- pte += uvtopte(pv->pv_va);
entry = *pte;
if (!(entry & PG_V))
continue;
@@ -856,8 +917,8 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
void
pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
- vaddr_t nssva;
- pt_entry_t *pte, entry, p;
+ vaddr_t ndsva, nssva;
+ pt_entry_t ***seg, **pde, *pte, entry, p;
struct cpu_info *ci = curcpu();
DPRINTF(PDB_FOLLOW|PDB_PROTECT,
@@ -915,40 +976,40 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
if (eva > VM_MAXUSER_ADDRESS)
panic("pmap_protect: uva not in range");
#endif
- while (sva < eva) {
+ /*
+ * Change protection on every valid mapping within the range.
+ */
+ seg = &pmap_segmap(pmap, sva);
+ for ( ; sva < eva; sva = nssva, seg++) {
nssva = mips_trunc_seg(sva) + NBSEG;
- if (nssva == 0 || nssva > eva)
- nssva = eva;
- /*
- * If VA belongs to an unallocated segment,
- * skip to the next segment boundary.
- */
- if (!(pte = pmap_segmap(pmap, sva))) {
- sva = nssva;
+ if (*seg == NULL)
continue;
- }
- /*
- * Change protection on every valid mapping within this segment.
- */
- pte += uvtopte(sva);
- for (; sva < nssva; sva += PAGE_SIZE, pte++) {
- entry = *pte;
- if (!(entry & PG_V))
+ pde = *seg + uvtopde(sva);
+ for ( ; sva < eva && sva < nssva; sva = ndsva, pde++) {
+ ndsva = mips_trunc_dir(sva) + NBDIR;
+ if (*pde == NULL)
continue;
- if ((entry & PG_M) != 0 /* && p != PG_M */ &&
- (entry & PG_CACHEMODE) == PG_CACHED) {
- if (prot & PROT_EXEC) {
- /* This will also sync D$. */
- pmap_invalidate_icache(pmap, sva,
- entry);
- } else
- Mips_SyncDCachePage(ci, sva,
- pfn_to_pad(entry));
+ pte = *pde + uvtopte(sva);
+ for ( ; sva < eva && sva < ndsva;
+ sva += PAGE_SIZE, pte++) {
+ entry = *pte;
+ if (!(entry & PG_V))
+ continue;
+ if ((entry & PG_M) != 0 /* && p != PG_M */ &&
+ (entry & PG_CACHEMODE) == PG_CACHED) {
+ if (prot & PROT_EXEC) {
+ /* This will also sync D$. */
+ pmap_invalidate_icache(pmap,
+ sva, entry);
+ } else
+ Mips_SyncDCachePage(ci, sva,
+ pfn_to_pad(entry));
+ }
+ entry = (entry & ~(PG_M | PG_RO | PG_XI)) | p;
+ *pte = entry;
+ pmap_update_user_page(pmap, sva, entry);
+ pmap_shootdown_page(pmap, sva);
}
- entry = (entry & ~(PG_M | PG_RO | PG_XI)) | p;
- *pte = entry;
- pmap_update_user_page(pmap, sva, entry);
- pmap_shootdown_page(pmap, sva);
}
}
@@ -967,7 +1028,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
int
pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
- pt_entry_t *pte, npte;
+ pt_entry_t **pde, *pte, npte;
vm_page_t pg;
struct cpu_info *ci = curcpu();
u_long cpuid = ci->ci_cpuid;
@@ -1088,7 +1149,20 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
/*
* User space mapping. Do table build.
*/
- if ((pte = pmap_segmap(pmap, va)) == NULL) {
+ if ((pde = pmap_segmap(pmap, va)) == NULL) {
+ pde = pool_get(&pmap_pg_pool, PR_NOWAIT | PR_ZERO);
+ if (pde == NULL) {
+ if (flags & PMAP_CANFAIL) {
+ if (pg != NULL)
+ mtx_leave(&pg->mdpage.pv_mtx);
+ pmap_unlock(pmap);
+ return ENOMEM;
+ }
+ panic("%s: out of memory", __func__);
+ }
+ pmap_segmap(pmap, va) = pde;
+ }
+ if ((pte = pde[uvtopde(va)]) == NULL) {
pte = pool_get(&pmap_pg_pool, PR_NOWAIT | PR_ZERO);
if (pte == NULL) {
if (flags & PMAP_CANFAIL) {
@@ -1099,8 +1173,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
}
panic("%s: out of memory", __func__);
}
-
- pmap_segmap(pmap, va) = pte;
+ pde[uvtopde(va)] = pte;
}
if (pg != NULL) {
@@ -1269,11 +1342,9 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
if (pmap == pmap_kernel())
pte = kvtopte(va);
else {
- if ((pte = pmap_segmap(pmap, va)) == NULL) {
- pmap_unlock(pmap);
- return;
- }
- pte += uvtopte(va);
+ pte = pmap_pte_lookup(pmap, va);
+ if (pte == NULL)
+ goto out;
}
if (*pte & PG_V) {
@@ -1283,6 +1354,7 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
}
}
+out:
pmap_unlock(pmap);
}
@@ -1323,16 +1395,15 @@ pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
rv = FALSE;
}
} else {
- if (!(pte = pmap_segmap(pmap, va)))
+ pte = pmap_pte_lookup(pmap, va);
+ if (pte == NULL) {
rv = FALSE;
- else {
- pte += uvtopte(va);
- if (*pte & PG_V)
- pa = pfn_to_pad(*pte) | (va & PAGE_MASK);
- else
- rv = FALSE;
+ goto out;
}
+ if (*pte & PG_V)
+ pa = pfn_to_pad(*pte) | (va & PAGE_MASK);
}
+out:
if (rv != FALSE)
*pap = pa;
@@ -1515,9 +1586,9 @@ pmap_clear_modify(struct vm_page *pg)
pmap_shootdown_page(pmap_kernel(), pv->pv_va);
}
} else if (pv->pv_pmap != NULL) {
- if ((pte = pmap_segmap(pv->pv_pmap, pv->pv_va)) == NULL)
+ pte = pmap_pte_lookup(pv->pv_pmap, pv->pv_va);
+ if (pte == NULL)
continue;
- pte += uvtopte(pv->pv_va);
entry = *pte;
if ((entry & PG_V) != 0 && (entry & PG_M) != 0) {
if (pg->pg_flags & PGF_CACHED)
@@ -1604,10 +1675,10 @@ pmap_emulate_modify(pmap_t pmap, vaddr_t va)
if (pmap == pmap_kernel()) {
pte = kvtopte(va);
} else {
- if ((pte = pmap_segmap(pmap, va)) == NULL)
- panic("%s: invalid segmap in pmap %p va %p", __func__,
+ pte = pmap_pte_lookup(pmap, va);
+ if (pte == NULL)
+ panic("%s: invalid page dir in pmap %p va %p", __func__,
pmap, (void *)va);
- pte += uvtopte(va);
}
entry = *pte;
if (!(entry & PG_V) || (entry & PG_M)) {
@@ -1688,17 +1759,16 @@ pmap_do_page_cache(vm_page_t pg, u_int mode)
pmap_shootdown_page(pmap_kernel(), pv->pv_va);
}
} else if (pv->pv_pmap != NULL) {
- if ((pte = pmap_segmap(pv->pv_pmap, pv->pv_va))) {
- pte += uvtopte(pv->pv_va);
- entry = *pte;
- if (entry & PG_V) {
- entry = (entry & ~PG_CACHEMODE) | newmode;
- *pte = entry;
- pmap_update_user_page(pv->pv_pmap,
- pv->pv_va, entry);
- pmap_shootdown_page(pv->pv_pmap,
- pv->pv_va);
- }
+ pte = pmap_pte_lookup(pv->pv_pmap, pv->pv_va);
+ if (pte == NULL)
+ continue;
+ entry = *pte;
+ if (entry & PG_V) {
+ entry = (entry & ~PG_CACHEMODE) | newmode;
+ *pte = entry;
+ pmap_update_user_page(pv->pv_pmap, pv->pv_va,
+ entry);
+ pmap_shootdown_page(pv->pv_pmap, pv->pv_va);
}
}
}
diff --git a/sys/arch/mips64/mips64/r4000_errata.c b/sys/arch/mips64/mips64/r4000_errata.c
index 869ab692c0f..23a8180c198 100644
--- a/sys/arch/mips64/mips64/r4000_errata.c
+++ b/sys/arch/mips64/mips64/r4000_errata.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: r4000_errata.c,v 1.6 2016/03/06 19:42:27 mpi Exp $ */
+/* $OpenBSD: r4000_errata.c,v 1.7 2016/12/22 15:33:36 visa Exp $ */
/*
* Copyright (c) 2014 Miodrag Vallat.
@@ -171,11 +171,9 @@ eop_tlb_miss_handler(struct trapframe *trapframe, struct cpu_info *ci,
faultva = trunc_page((vaddr_t)trapframe->badvaddr);
pmap = map->pmap;
- pte = pmap_segmap(pmap, faultva);
+ pte = pmap_pte_lookup(pmap, faultva);
if (pte == NULL)
return 0;
-
- pte += uvtopte(faultva);
entry = *pte;
if ((entry & PG_SP) == 0)
return 0;
@@ -207,10 +205,7 @@ eop_tlb_miss_handler(struct trapframe *trapframe, struct cpu_info *ci,
*/
va = faultva + PAGE_SIZE;
- pte = pmap_segmap(pmap, va);
- if (pte != NULL)
- pte += uvtopte(va);
-
+ pte = pmap_pte_lookup(pmap, va);
if (pte == NULL || (*pte & PG_V) == 0) {
onfault = pcb->pcb_onfault;
pcb->pcb_onfault = 0;
@@ -238,15 +233,13 @@ eop_tlb_miss_handler(struct trapframe *trapframe, struct cpu_info *ci,
tlb_set_wired((UPAGES / 2) + npairs);
for (i = 0, va = faultva & PG_HVPN; i != npairs;
i++, va += 2 * PAGE_SIZE) {
- pte = pmap_segmap(pmap, va);
+ pte = pmap_pte_lookup(pmap, va);
if (pte == NULL)
tlb_update_indexed(va | asid,
PG_NV, PG_NV, (UPAGES / 2) + i);
- else {
- pte += uvtopte(va);
+ else
tlb_update_indexed(va | asid,
pte[0], pte[1], (UPAGES / 2) + i);
- }
}
/*
diff --git a/sys/arch/mips64/mips64/tlbhandler.S b/sys/arch/mips64/mips64/tlbhandler.S
index 5b603ed3ebe..4c726a75db4 100644
--- a/sys/arch/mips64/mips64/tlbhandler.S
+++ b/sys/arch/mips64/mips64/tlbhandler.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: tlbhandler.S,v 1.46 2016/09/06 13:41:29 visa Exp $ */
+/* $OpenBSD: tlbhandler.S,v 1.47 2016/12/22 15:33:36 visa Exp $ */
/*
* Copyright (c) 1995-2004 Opsycon AB (www.opsycon.se / www.opsycon.com)
@@ -135,6 +135,12 @@ xtlb_miss:
PTR_SLL k0, k0, LOGREGSZ
PTR_L k1, PCB_SEGTAB(k1)
PTR_ADDU k1, k1, k0
+ PTR_L k1, 0(k1) # get pointer to page directory
+ dmfc0 k0, COP_0_BAD_VADDR
+ PTR_SRL k0, k0, (DIRSHIFT - LOGREGSZ)
+ beqz k1, _inv_seg
+ andi k0, k0, (NPDEPG - 1) << LOGREGSZ
+ PTR_ADDU k1, k1, k0
PTR_L k1, 0(k1) # get pointer to page table
dmfc0 k0, COP_0_BAD_VADDR
PTR_SRL k0, k0, PGSHIFT - PTE_LOG