summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorkettenis <kettenis@openbsd.org>2017-03-16 20:15:07 +0000
committerkettenis <kettenis@openbsd.org>2017-03-16 20:15:07 +0000
commitcad94a34f9369dfa93e2e77a0bdfa80da497cdd4 (patch)
tree8347105e2c211efd0ee07cce3a3f1cb360ba8768
parentdocument the "type" built-in, marked as xsi by posix; (diff)
downloadwireguard-openbsd-cad94a34f9369dfa93e2e77a0bdfa80da497cdd4.tar.xz
wireguard-openbsd-cad94a34f9369dfa93e2e77a0bdfa80da497cdd4.zip
Remove some unused variables, unused types, duplicated/unused function
prototypes and duplicate defines. Also remove PMAP_CACHE_PTE, which is unused and misleading since the page tables are mapped as normal write-back cached memory. Fix a bunch of KNF issues as well. ok patrick@
-rw-r--r--sys/arch/arm64/arm64/pmap.c81
-rw-r--r--sys/arch/arm64/include/pmap.h18
-rw-r--r--sys/arch/arm64/include/pte.h5
3 files changed, 39 insertions, 65 deletions
diff --git a/sys/arch/arm64/arm64/pmap.c b/sys/arch/arm64/arm64/pmap.c
index 90f98deb985..4d0c58c3663 100644
--- a/sys/arch/arm64/arm64/pmap.c
+++ b/sys/arch/arm64/arm64/pmap.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.c,v 1.25 2017/03/13 23:20:12 kettenis Exp $ */
+/* $OpenBSD: pmap.c,v 1.26 2017/03/16 20:15:07 kettenis Exp $ */
/*
* Copyright (c) 2008-2009,2014-2016 Dale Rahn <drahn@dalerahn.com>
*
@@ -178,7 +178,6 @@ void pmap_release(pmap_t pm);
paddr_t arm_kvm_stolen;
paddr_t pmap_steal_avail(size_t size, int align, void **kva);
void pmap_remove_avail(paddr_t base, paddr_t end);
-void pmap_avail_fixup(void);
vaddr_t pmap_map_stolen(vaddr_t);
void pmap_physload_avail(void);
extern caddr_t msgbufaddr;
@@ -475,8 +474,6 @@ pmap_remove_pv(struct pte_desc *pted)
LIST_REMOVE(pted, pted_pv_list);
}
-volatile int supportuserland;
-
int
pmap_enter(pmap_t pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
@@ -771,7 +768,6 @@ pmap_kremove(vaddr_t va, vsize_t len)
pmap_kremove_pg(va);
}
-
void
pmap_fill_pte(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
vm_prot_t prot, int flags, int cache)
@@ -788,8 +784,6 @@ pmap_fill_pte(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
break;
case PMAP_CACHE_DEV:
break;
- case PMAP_CACHE_PTE:
- break;
default:
panic("pmap_fill_pte:invalid cache mode");
}
@@ -806,7 +800,6 @@ pmap_fill_pte(pmap_t pm, vaddr_t va, paddr_t pa, struct pte_desc *pted,
pted->pted_pte |= flags & (PROT_READ|PROT_WRITE|PROT_EXEC);
}
-
/*
* Garbage collects the physical map system for pages which are
* no longer used. Success need not be guaranteed -- that is, there
@@ -902,6 +895,7 @@ pmap_pinit(pmap_t pm)
}
int pmap_vp_poolcache = 0; // force vp poolcache to allocate late.
+
/*
* Create and return a physical map.
*/
@@ -1107,7 +1101,8 @@ VP_Lx(paddr_t pa)
return pa | Lx_TYPE_PT;
}
-void pmap_setup_avail( uint64_t ram_start, uint64_t ram_end, uint64_t kvo);
+void pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo);
+
/*
* Initialize pmap setup.
* ALL of the code which deals with avail needs rewritten as an actual
@@ -1219,9 +1214,6 @@ pmap_bootstrap(long kvo, paddr_t lpt1, long kernelstart, long kernelend,
}
}
- pmap_curmaxkvaddr = VM_MAX_KERNEL_ADDRESS;
-
-
// XXX should this extend the l2 bootstrap mappings for kernel entries?
/* now that we have mapping space for everything, lets map it */
@@ -1472,7 +1464,6 @@ pmap_extract(pmap_t pm, vaddr_t va, paddr_t *pa)
return TRUE;
}
-
void
pmap_page_ro(pmap_t pm, vaddr_t va, vm_prot_t prot)
{
@@ -1528,7 +1519,6 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
splx(s);
}
-
void
pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
@@ -1548,7 +1538,7 @@ pmap_protect(pmap_t pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
}
void
-pmap_init()
+pmap_init(void)
{
uint64_t tcr;
@@ -1583,8 +1573,7 @@ pmap_proc_iflush(struct process *pr, vaddr_t va, vsize_t len)
cpu_icache_sync_range(va, len);
}
-
-STATIC uint64_t ap_bits_user [8] = {
+STATIC uint64_t ap_bits_user[8] = {
[PROT_NONE] = ATTR_nG|ATTR_PXN|ATTR_UXN|ATTR_AP(2),
[PROT_READ] = ATTR_nG|ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(3),
[PROT_WRITE] = ATTR_nG|ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(1),
@@ -1595,7 +1584,7 @@ STATIC uint64_t ap_bits_user [8] = {
[PROT_EXEC|PROT_WRITE|PROT_READ]= ATTR_nG|ATTR_PXN|ATTR_AF|ATTR_AP(1),
};
-STATIC uint64_t ap_bits_kern [8] = {
+STATIC uint64_t ap_bits_kern[8] = {
[PROT_NONE] = ATTR_PXN|ATTR_UXN|ATTR_AP(2),
[PROT_READ] = ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(2),
[PROT_WRITE] = ATTR_PXN|ATTR_UXN|ATTR_AF|ATTR_AP(0),
@@ -1647,10 +1636,6 @@ pmap_pte_update(struct pte_desc *pted, uint64_t *pl3)
attr |= ATTR_IDX(PTE_ATTR_DEV);
attr |= ATTR_SH(SH_INNER);
break;
- case PMAP_CACHE_PTE:
- attr |= ATTR_IDX(PTE_ATTR_CI); // inner and outer uncached, XXX?
- attr |= ATTR_SH(SH_INNER);
- break;
default:
panic("pmap_pte_insert: invalid cache mode");
}
@@ -1714,7 +1699,8 @@ pmap_pte_remove(struct pte_desc *pted, int remove_pted)
* It's purpose is to tell the caller that a fault was generated either
* for this emulation, or to tell the caller that it's a legit fault.
*/
-int pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
+int
+pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
{
struct pte_desc *pted;
struct vm_page *pg;
@@ -1820,7 +1806,11 @@ int pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, int user)
return 1;
}
-void pmap_postinit(void) {}
+void
+pmap_postinit(void)
+{
+}
+
void
pmap_map_section(vaddr_t l1_addr, vaddr_t va, paddr_t pa, int flags, int cache)
{
@@ -1849,9 +1839,6 @@ panic("%s called", __func__);
case PMAP_CACHE_CI:
cache_bits = L1_MODE_DEV;
break;
- case PMAP_CACHE_PTE:
- cache_bits = L1_MODE_PTE;
- break;
}
l1[va>>VP_IDX1_POS] = (pa & L1_S_RPGN) | ap_flag | cache_bits | L1_TYPE_S;
@@ -1867,8 +1854,8 @@ vsize_t pmap_map_chunk(vaddr_t l1, vaddr_t va, paddr_t pa, vsize_t sz, int prot,
return 0;
}
-
-void pmap_update()
+void
+pmap_update(pmap_t pm)
{
}
@@ -1877,18 +1864,20 @@ vaddr_t zero_page;
vaddr_t copy_src_page;
vaddr_t copy_dst_page;
-
-int pmap_is_referenced(struct vm_page *pg)
+int
+pmap_is_referenced(struct vm_page *pg)
{
return ((pg->pg_flags & PG_PMAP_REF) != 0);
}
-int pmap_is_modified(struct vm_page *pg)
+int
+pmap_is_modified(struct vm_page *pg)
{
return ((pg->pg_flags & PG_PMAP_MOD) != 0);
}
-int pmap_clear_modify(struct vm_page *pg)
+int
+pmap_clear_modify(struct vm_page *pg)
{
struct pte_desc *pted;
uint64_t *pl3 = NULL;
@@ -1918,7 +1907,8 @@ int pmap_clear_modify(struct vm_page *pg)
* When this turns off read permissions it also disables write permissions
* so that mod is correctly tracked after clear_ref; FAULT_READ; FAULT_WRITE;
*/
-int pmap_clear_reference(struct vm_page *pg)
+int
+pmap_clear_reference(struct vm_page *pg)
{
struct pte_desc *pted;
@@ -1941,12 +1931,15 @@ int pmap_clear_reference(struct vm_page *pg)
return 0;
}
-void pmap_copy(pmap_t src_pmap, pmap_t dst_pmap, vaddr_t src, vsize_t sz, vaddr_t dst)
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vaddr_t dst_addr,
+ vsize_t len, vaddr_t src_addr)
{
- //printf("%s\n", __func__);
+ /* NOOP */
}
-void pmap_unwire(pmap_t pm, vaddr_t va)
+void
+pmap_unwire(pmap_t pm, vaddr_t va)
{
struct pte_desc *pted;
@@ -1959,23 +1952,21 @@ void pmap_unwire(pmap_t pm, vaddr_t va)
}
}
-void pmap_remove_holes(struct vmspace *vm)
+void
+pmap_remove_holes(struct vmspace *vm)
{
/* NOOP */
}
-void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
+void
+pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
*start = virtual_avail;
*end = virtual_end;
}
-vaddr_t pmap_curmaxkvaddr;
-
-void pmap_avail_fixup(void);
-
void
-pmap_setup_avail( uint64_t ram_start, uint64_t ram_end, uint64_t kvo)
+pmap_setup_avail(uint64_t ram_start, uint64_t ram_end, uint64_t kvo)
{
/* This makes several assumptions
* 1) kernel will be located 'low' in memory
@@ -1993,7 +1984,6 @@ pmap_setup_avail( uint64_t ram_start, uint64_t ram_end, uint64_t kvo)
pmap_avail[0].start = ram_start;
pmap_avail[0].size = ram_end-ram_start;
-
// XXX - support more than one region
pmap_memregions[0].start = ram_start;
pmap_memregions[0].end = ram_end;
@@ -2211,6 +2201,7 @@ pmap_physload_avail(void)
}
}
+
void
pmap_show_mapping(uint64_t va)
{
diff --git a/sys/arch/arm64/include/pmap.h b/sys/arch/arm64/include/pmap.h
index d2f463aa18e..413bdaf53c1 100644
--- a/sys/arch/arm64/include/pmap.h
+++ b/sys/arch/arm64/include/pmap.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pmap.h,v 1.2 2017/03/12 16:35:56 kettenis Exp $ */
+/* $OpenBSD: pmap.h,v 1.3 2017/03/16 20:15:07 kettenis Exp $ */
/*
* Copyright (c) 2008,2009,2014 Dale Rahn <drahn@dalerahn.com>
*
@@ -43,8 +43,7 @@ void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);
#define PMAP_CACHE_CI (PMAP_MD0) /* cache inhibit */
#define PMAP_CACHE_WT (PMAP_MD1) /* writethru */
#define PMAP_CACHE_WB (PMAP_MD1|PMAP_MD0) /* writeback */
-#define PMAP_CACHE_PTE (PMAP_MD2) /* PTE mapping */
-#define PMAP_CACHE_DEV (PMAP_MD2|PMAP_MD0) /* device mapping */
+#define PMAP_CACHE_DEV (PMAP_MD2) /* device mapping */
#define PMAP_CACHE_BITS (PMAP_MD0|PMAP_MD1|PMAP_MD2)
#define PTED_VA_MANAGED_M (PMAP_MD3)
@@ -99,19 +98,6 @@ void pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
-/*
- * Physical / virtual address structure. In a number of places (particularly
- * during bootstrapping) we need to keep track of the physical and virtual
- * addresses of various pages
- */
-typedef struct pv_addr {
- SLIST_ENTRY(pv_addr) pv_list;
- paddr_t pv_pa;
- vaddr_t pv_va;
-} pv_addr_t;
-
-extern vaddr_t pmap_curmaxkvaddr;
-
#ifndef _LOCORE
#define __HAVE_VM_PAGE_MD
diff --git a/sys/arch/arm64/include/pte.h b/sys/arch/arm64/include/pte.h
index 7aeffb4ec9a..e2f5a6ab76d 100644
--- a/sys/arch/arm64/include/pte.h
+++ b/sys/arch/arm64/include/pte.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: pte.h,v 1.3 2017/02/17 19:14:58 patrick Exp $ */
+/* $OpenBSD: pte.h,v 1.4 2017/03/16 20:15:07 kettenis Exp $ */
/*
* Copyright (c) 2014 Dale Rahn <drahn@dalerahn.com>
*
@@ -87,9 +87,6 @@
#define L2_BLOCK L0_BLOCK
//#define L2_TABLE L0_TABLE
-#define L2_SHIFT 21
-#define L2_SIZE (1 << L2_SHIFT)
-
// page mapping
#define L3_P (3)