author      2003-01-24 09:57:41 +0000
committer   2003-01-24 09:57:41 +0000
commit      237c7940ee9990f41d9d3c0ed97deeb714ba9f0e (patch)
tree        75702ab372055e63980f1e1f31fa8a1799680672
parent      bring in 3 yet unused helper functions before I lose 'em again (diff)
download    wireguard-openbsd-237c7940ee9990f41d9d3c0ed97deeb714ba9f0e.tar.xz
            wireguard-openbsd-237c7940ee9990f41d9d3c0ed97deeb714ba9f0e.zip
Convert m88k pmap from physseg to VM_PAGE_MD.
This allows us to remove some ambiguities in how some functions are called,
remove some diagnostic checks for conditions that can never happen and
remove the ugly hack with "pmap_initialized".
Then, rework internal function interfaces and some logic so as to stop
converting between a vm_page and its physical address over and over - this
makes some pmap operations run much faster.
While there, apply various KNF and whitespace fixes, and rename some
structure fields to match the m68k pmap more closely.
per art@'s idea.
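
The heart of the conversion is easiest to see side by side. Under the old
physseg scheme, every physical-address-to-pv lookup had to search the
vm_physmem[] segment array; with __HAVE_VM_PAGE_MD the pv list head (and the
cached modify/reference bits, now pv_flags) lives inside each struct vm_page,
so the lookup is a single dereference. A minimal sketch, condensed from the
pmap.c hunks below:

	/* Old physseg scheme: search the segment array on every lookup. */
	#define pa_to_pvh(pa)						\
	({								\
		int bank_, pg_;						\
		bank_ = vm_physseg_find(atop((pa)), &pg_);		\
		&vm_physmem[bank_].pmseg.pvent[pg_];			\
	})

	/* New VM_PAGE_MD scheme: the pv head is embedded in the vm_page. */
	static __inline pv_entry_t
	pg_to_pvh(struct vm_page *pg)
	{
		return &pg->mdpage.pvent;
	}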
-rw-r--r--   sys/arch/mvme88k/include/mmu.h     |    4
-rw-r--r--   sys/arch/mvme88k/include/pmap.h    |   43
-rw-r--r--   sys/arch/mvme88k/include/vmparam.h |   26
-rw-r--r--   sys/arch/mvme88k/mvme88k/pmap.c    | 1137
4 files changed, 486 insertions, 724 deletions
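
For reference, the machine-dependent page data that replaces struct
pmap_physseg is collected here as it appears in the vmparam.h hunk below;
only the comments are ours:

	/*
	 * Per-page pmap data, embedded in each struct vm_page
	 * via __HAVE_VM_PAGE_MD.
	 */
	struct pv_entry {
		struct pv_entry	*pv_next;  /* next pv_entry on this list */
		struct pmap	*pv_pmap;  /* pmap where mapping lies */
		vaddr_t		pv_va;     /* virtual address for mapping */
		int		pv_flags;  /* cached PG_M/PG_U attributes */
	};

	struct vm_page_md {
		struct pv_entry pvent;     /* pv list head, inline */
	};

	/* Run by uvm whenever a vm_page is initialized. */
	#define	VM_MDPAGE_INIT(pg) do {					\
		(pg)->mdpage.pvent.pv_next = NULL;			\
		(pg)->mdpage.pvent.pv_pmap = PMAP_NULL;			\
		(pg)->mdpage.pvent.pv_va = 0;				\
		(pg)->mdpage.pvent.pv_flags = 0;			\
	} while (0)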
diff --git a/sys/arch/mvme88k/include/mmu.h b/sys/arch/mvme88k/include/mmu.h index 5754b0d8641..a6820fde23c 100644 --- a/sys/arch/mvme88k/include/mmu.h +++ b/sys/arch/mvme88k/include/mmu.h @@ -1,4 +1,4 @@ -/* $OpenBSD: mmu.h,v 1.17 2002/03/14 01:26:39 millert Exp $ */ +/* $OpenBSD: mmu.h,v 1.18 2003/01/24 09:57:41 miod Exp $ */ /* * This file bears almost no resemblance to the original m68k file, @@ -215,7 +215,7 @@ typedef u_int32_t pt_ind_entry_t; #define PDTIDX(va) (((va) & PDT_MASK) >> PDT_SHIFT) /* XXX uses knowledge of pmap structure */ -#define SDTENT(map, va) ((sdt_entry_t *)((map)->sdt_vaddr + SDTIDX(va))) +#define SDTENT(map, va) ((sdt_entry_t *)((map)->pm_stab + SDTIDX(va))) /* * Size of a PDT table group. diff --git a/sys/arch/mvme88k/include/pmap.h b/sys/arch/mvme88k/include/pmap.h index e95c489bb95..116c14c1c91 100644 --- a/sys/arch/mvme88k/include/pmap.h +++ b/sys/arch/mvme88k/include/pmap.h @@ -1,4 +1,4 @@ -/* $OpenBSD: pmap.h,v 1.28 2003/01/24 00:51:52 miod Exp $ */ +/* $OpenBSD: pmap.h,v 1.29 2003/01/24 09:57:41 miod Exp $ */ /* * Mach Operating System * Copyright (c) 1991 Carnegie Mellon University @@ -17,34 +17,25 @@ #include <machine/mmu.h> /* batc_template_t, BATC_MAX, etc.*/ #include <machine/pcb.h> /* pcb_t, etc.*/ -#include <machine/psl.h> /* get standard goodies */ - -typedef sdt_entry_t *sdt_ptr_t; /* * PMAP structure */ -typedef struct pmap *pmap_t; /* #define PMAP_USE_BATC */ struct pmap { - sdt_ptr_t sdt_paddr; /* physical pointer to sdt */ - sdt_ptr_t sdt_vaddr; /* virtual pointer to sdt */ - int ref_count; /* reference count */ - struct simplelock lock; - struct pmap_statistics stats; /* pmap statistics */ + sdt_entry_t *pm_stpa; /* physical pointer to sdt */ + sdt_entry_t *pm_stab; /* virtual pointer to sdt */ + int pm_count; /* reference count */ + struct simplelock pm_lock; + struct pmap_statistics pm_stats; /* pmap statistics */ /* cpus using of this pmap; NCPU must be <= 32 */ - u_int32_t cpus_using; + u_int32_t pm_cpus; #ifdef PMAP_USE_BATC - batc_template_t i_batc[BATC_MAX]; /* instruction BATCs */ - batc_template_t d_batc[BATC_MAX]; /* data BATCs */ -#endif - -#ifdef DEBUG - pmap_t next; - pmap_t prev; + batc_template_t pm_ibatc[BATC_MAX]; /* instruction BATCs */ + batc_template_t pm_dbatc[BATC_MAX]; /* data BATCs */ #endif }; @@ -57,11 +48,10 @@ struct pmap { * pv_head_table. This is used by things like pmap_remove, when we must * find and remove all mappings for a particular physical page. 
*/ -typedef struct pv_entry { - struct pv_entry *next; /* next pv_entry */ - pmap_t pmap; /* pmap where mapping lies */ - vaddr_t va; /* virtual address for mapping */ -} *pv_entry_t; +/* XXX - struct pv_entry moved to vmparam.h because of include ordering issues */ + +typedef struct pmap *pmap_t; +typedef struct pv_entry *pv_entry_t; #ifdef _KERNEL @@ -70,11 +60,12 @@ extern struct pmap kernel_pmap_store; extern caddr_t vmmap; #define pmap_kernel() (&kernel_pmap_store) -#define pmap_resident_count(pmap) ((pmap)->stats.resident_count) -#define pmap_wired_count(pmap) ((pmap)->stats.wired_count) +#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) +#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) #define pmap_phys_address(frame) ((paddr_t)(ptoa(frame))) -#define pmap_update(pmap) /* nothing (yet) */ +#define pmap_copy(dp,sp,d,l,s) do { /* nothing */ } while (0) +#define pmap_update(pmap) do { /* nothing (yet) */ } while (0) void pmap_bootstrap(vaddr_t, paddr_t *, paddr_t *, vaddr_t *, vaddr_t *); void pmap_cache_ctrl(pmap_t, vaddr_t, vaddr_t, u_int); diff --git a/sys/arch/mvme88k/include/vmparam.h b/sys/arch/mvme88k/include/vmparam.h index 99dbe816ac9..c1565f01437 100644 --- a/sys/arch/mvme88k/include/vmparam.h +++ b/sys/arch/mvme88k/include/vmparam.h @@ -1,4 +1,4 @@ -/* $OpenBSD: vmparam.h,v 1.20 2002/02/17 22:59:53 maja Exp $ */ +/* $OpenBSD: vmparam.h,v 1.21 2003/01/24 09:57:41 miod Exp $ */ /* * Mach Operating System * Copyright (c) 1992 Carnegie Mellon University @@ -107,11 +107,27 @@ /* * pmap-specific data stored in the vm_physmem[] array. */ -#define __HAVE_PMAP_PHYSSEG -struct pmap_physseg { - struct pv_entry *pvent; /* pv table for this seg */ - char *attrs; /* page modify list for this seg */ + +/* XXX - belongs in pmap.h, but put here because of ordering issues */ +struct pv_entry { + struct pv_entry *pv_next; /* next pv_entry */ + struct pmap *pv_pmap; /* pmap where mapping lies */ + vaddr_t pv_va; /* virtual address for mapping */ + int pv_flags; +}; + +#define __HAVE_VM_PAGE_MD +struct vm_page_md { + struct pv_entry pvent; }; + +#define VM_MDPAGE_INIT(pg) do { \ + (pg)->mdpage.pvent.pv_next = NULL; \ + (pg)->mdpage.pvent.pv_pmap = PMAP_NULL; \ + (pg)->mdpage.pvent.pv_va = 0; \ + (pg)->mdpage.pvent.pv_flags = 0; \ +} while (0) + #endif /* _LOCORE */ #endif /* _MACHINE_VM_PARAM_ */ diff --git a/sys/arch/mvme88k/mvme88k/pmap.c b/sys/arch/mvme88k/mvme88k/pmap.c index 337e377d4db..1f5da45967d 100644 --- a/sys/arch/mvme88k/mvme88k/pmap.c +++ b/sys/arch/mvme88k/mvme88k/pmap.c @@ -1,6 +1,6 @@ -/* $OpenBSD: pmap.c,v 1.67 2002/09/10 18:29:43 art Exp $ */ +/* $OpenBSD: pmap.c,v 1.68 2003/01/24 09:57:44 miod Exp $ */ /* - * Copyright (c) 2001, 2002 Miodrag Vallat + * Copyright (c) 2001, 2002, 2003 Miodrag Vallat * Copyright (c) 1998-2001 Steve Murphree, Jr. * Copyright (c) 1996 Nivas Madhur * All rights reserved. @@ -70,7 +70,7 @@ extern vaddr_t avail_start, avail_end; extern vaddr_t virtual_avail, virtual_end; /* - * Macros to operate cpus_using field + * Macros to operate pm_cpus field */ #define SETBIT_CPUSET(cpu_number, cpuset) (*(cpuset)) |= (1 << (cpu_number)); #define CLRBIT_CPUSET(cpu_number, cpuset) (*(cpuset)) &= ~(1 << (cpu_number)); @@ -144,17 +144,17 @@ kpdt_entry_t kpdt_free; /* * Size of kernel page tables for mapping onboard IO space. 
*/ -#if defined(MVME188) +#if defined(MVME188) #define M188_PDT_SIZE (atop(UTIL_SIZE) * sizeof(pt_entry_t)) #else #define M188_PDT_SIZE 0 -#endif +#endif #if defined(MVME187) || defined(MVME197) #define M1x7_PDT_SIZE (atop(OBIO_SIZE) * sizeof(pt_entry_t)) #else #define M1x7_PDT_SIZE 0 -#endif +#endif #if defined(MVME188) && defined(MVME187) || defined(MVME197) #define OBIO_PDT_SIZE ((brdtyp == BRD_188) ? M188_PDT_SIZE : M1x7_PDT_SIZE) @@ -170,36 +170,15 @@ kpdt_entry_t kpdt_free; */ vaddr_t phys_map_vaddr1, phys_map_vaddr2; -/* - * Cached page flags - * - * This is an array, one byte per physical page, which keeps track - * of some flags for pages which are no longer containd in any - * pmap (for mapped pages, the modified flags are in the PTE). - */ -u_int8_t *pmap_cached_attributes; - #define PV_ENTRY_NULL ((pv_entry_t) 0) -pv_entry_t pv_head_table; /* array of entries, one per page */ +static pv_entry_t pg_to_pvh(struct vm_page *); -#define PMAP_MANAGED(pa) \ - (pmap_initialized && IS_VM_PHYSADDR(pa)) - -#define pa_to_pvh(pa) \ -({ \ - int bank_, pg_; \ - \ - bank_ = vm_physseg_find(atop((pa)), &pg_); \ - &vm_physmem[bank_].pmseg.pvent[pg_]; \ -}) -#define pa_to_attribute(pa) \ -({ \ - int bank_, pg_; \ - \ - bank_ = vm_physseg_find(atop((pa)), &pg_); \ - &vm_physmem[bank_].pmseg.attrs[pg_]; \ -}) +static __inline pv_entry_t +pg_to_pvh(struct vm_page *pg) +{ + return &pg->mdpage.pvent; +} /* * Locking primitives @@ -215,28 +194,24 @@ pv_entry_t pv_head_table; /* array of entries, one per page */ #define PMAP_LOCK(pmap,spl) \ do { \ SPLVM(spl); \ - simple_lock(&(pmap)->lock); \ + simple_lock(&(pmap)->pm_lock); \ } while (0) #define PMAP_UNLOCK(pmap, spl) \ do { \ - simple_unlock(&(pmap)->lock); \ + simple_unlock(&(pmap)->pm_lock); \ SPLX(spl); \ } while (0) -#define PV_TABLE_SIZE(n) ((vsize_t)((n) * sizeof(struct pv_entry))) - #define ETHERPAGES 16 -void *etherbuf=NULL; -int etherlen; - -boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */ +void *etherbuf = NULL; +int etherlen; #ifdef PMAP_USE_BATC /* * number of BATC entries used */ -int batc_used; +int batc_used; /* * keep track BATC mapping @@ -260,9 +235,9 @@ void pmap_expand(pmap_t, vaddr_t); void pmap_release(pmap_t); vaddr_t pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t, u_int); pt_entry_t *pmap_pte(pmap_t, vaddr_t); -void pmap_remove_all(paddr_t); -void pmap_changebit(paddr_t, int, int); -boolean_t pmap_testbit(paddr_t, int); +void pmap_remove_all(struct vm_page *); +void pmap_changebit(struct vm_page *, int, int); +boolean_t pmap_testbit(struct vm_page *, int); /* * quick PTE field checking macros @@ -279,7 +254,7 @@ boolean_t pmap_testbit(paddr_t, int); * Convert machine-independent protection code to M88K protection bits. */ static __inline u_int32_t -m88k_protection(pmap_t map, vm_prot_t prot) +m88k_protection(pmap_t pmap, vm_prot_t prot) { pt_entry_t p; @@ -291,19 +266,19 @@ m88k_protection(pmap_t map, vm_prot_t prot) #ifdef M88110 if (cputyp == CPU_88110) { p |= PG_U; - /* if the map is the kernel's map and since this - * is not a paged kernel, we go ahead and mark - * the page as modified to avoid an exception - * upon writing to the page the first time. XXX smurph + /* if the map is the kernel's map and since this + * is not a paged kernel, we go ahead and mark + * the page as modified to avoid an exception + * upon writing to the page the first time. 
XXX smurph */ - if (map == kernel_pmap) { + if (pmap == kernel_pmap) { if (p & PG_PROT) p |= PG_M; } } -#endif - return (p); -} /* m88k_protection */ +#endif + return p; +} /* * Routine: FLUSH_ATC_ENTRY @@ -319,21 +294,19 @@ m88k_protection(pmap_t map, vm_prot_t prot) * va virtual address that should be flushed * kernel TRUE if supervisor mode, FALSE if user mode */ -void -flush_atc_entry(users, va, kernel) - long users; - vaddr_t va; - boolean_t kernel; +void +flush_atc_entry(long users, vaddr_t va, boolean_t kernel) { - int cpu; - long tusers = users; + int cpu; + long tusers = users; #ifdef DIAGNOSTIC if ((tusers != 0) && (ff1(tusers) >= MAX_CPUS)) { printf("ff1 users = %d!\n", ff1(tusers)); panic("bogus amount of users!!!"); } -#endif +#endif + while ((cpu = ff1(tusers)) != 32) { if (cpu_sets[cpu]) { /* just checking to make sure */ cmmu_flush_remote_tlb(cpu, kernel, va, PAGE_SIZE); @@ -359,30 +332,27 @@ flush_atc_entry(users, va, kernel) * the page table index is added, and the result is returned. */ pt_entry_t * -pmap_pte(map, virt) - pmap_t map; - vaddr_t virt; +pmap_pte(pmap_t pmap, vaddr_t virt) { - sdt_entry_t *sdt; + sdt_entry_t *sdt; #ifdef DIAGNOSTIC /*XXX will this change if physical memory is not contiguous? */ /* take a look at PDTIDX XXXnivas */ - if (map == PMAP_NULL) + if (pmap == PMAP_NULL) panic("pmap_pte: pmap is NULL"); #endif - sdt = SDTENT(map,virt); + sdt = SDTENT(pmap,virt); /* * Check whether page table is exist or not. */ if (!SDT_VALID(sdt)) return (PT_ENTRY_NULL); - return ((pt_entry_t *)(PG_PFNUM(*(sdt + SDT_ENTRIES))<<PDT_SHIFT) + - PDTIDX(virt)); - -} /* pmap_pte */ + return (pt_entry_t *)(PG_PFNUM(*(sdt + SDT_ENTRIES)) << PDT_SHIFT) + + PDTIDX(virt); +} /* * Routine: PMAP_EXPAND_KMAP (internal) @@ -417,20 +387,19 @@ pmap_pte(map, virt) * */ pt_entry_t * -pmap_expand_kmap(virt, prot) - vaddr_t virt; - vm_prot_t prot; +pmap_expand_kmap(vaddr_t virt, vm_prot_t prot) { - sdt_entry_t template, *sdt; - kpdt_entry_t kpdt_ent; + sdt_entry_t template, *sdt; + kpdt_entry_t kpdt_ent; #ifdef DEBUG if ((pmap_con_dbg & (CD_KMAP | CD_FULL)) == (CD_KMAP | CD_FULL)) printf("(pmap_expand_kmap: %x) v %x\n", curproc,virt); #endif + template = m88k_protection(kernel_pmap, prot) | SG_V; - /* segment table entry derivate from map and virt. */ + /* segment table entry derivate from map and virt. 
*/ sdt = SDTENT(kernel_pmap, virt); if (SDT_VALID(sdt)) panic("pmap_expand_kmap: segment table entry VALID"); @@ -447,8 +416,8 @@ pmap_expand_kmap(virt, prot) kpdt_ent->phys = (paddr_t)0; kpdt_ent->next = NULL; - return ((pt_entry_t *)(kpdt_ent) + PDTIDX(virt)); -} /* pmap_expand_kmap() */ + return (pt_entry_t *)(kpdt_ent) + PDTIDX(virt); +} /* * Routine: PMAP_MAP @@ -489,19 +458,15 @@ pmap_expand_kmap(virt, prot) * */ vaddr_t -pmap_map(virt, start, end, prot, cmode) - vaddr_t virt; - paddr_t start, end; - vm_prot_t prot; - u_int cmode; +pmap_map(vaddr_t virt, paddr_t start, paddr_t end, vm_prot_t prot, u_int cmode) { - u_int npages; - u_int num_phys_pages; - pt_entry_t template, *pte; - paddr_t page; + u_int npages; + u_int num_phys_pages; + pt_entry_t template, *pte; + paddr_t page; #ifdef PMAP_USE_BATC batc_template_t batctmp; - int i; + int i; #endif #ifdef DEBUG @@ -518,7 +483,6 @@ pmap_map(virt, start, end, prot, cmode) template = m88k_protection(kernel_pmap, prot) | cmode | PG_V; #ifdef PMAP_USE_BATC - batctmp.bits = 0; batctmp.field.sup = 1; /* supervisor */ if (template & CACHE_WT) @@ -530,13 +494,11 @@ pmap_map(virt, start, end, prot, cmode) if (template & PG_PROT) batctmp.field.wp = 1; /* protection */ batctmp.field.v = 1; /* valid */ - #endif page = trunc_page(start); npages = atop(round_page(end) - page); for (num_phys_pages = npages; num_phys_pages != 0; num_phys_pages--) { - #ifdef PMAP_USE_BATC #ifdef DEBUG @@ -548,7 +510,7 @@ pmap_map(virt, start, end, prot, cmode) BATC_BLK_ALIGNED(page)); #endif - if (BATC_BLK_ALIGNED(virt) && BATC_BLK_ALIGNED(page) && + if (BATC_BLK_ALIGNED(virt) && BATC_BLK_ALIGNED(page) && num_phys_pages >= BATC_BLKBYTES/PAGE_SIZE && batc_used < BATC_MAX ) { /* @@ -559,7 +521,7 @@ pmap_map(virt, start, end, prot, cmode) for (i = 0; i < MAX_CPUS; i++) if (cpu_sets[i]) - cmmu_set_pair_batc_entry(i, batc_used, + cmmu_set_pair_batc_entry(i, batc_used, batctmp.bits); batc_entry[batc_used] = batctmp.field; #ifdef DEBUG @@ -597,7 +559,7 @@ pmap_map(virt, start, end, prot, cmode) page += PAGE_SIZE; } return virt; -} /* pmap_map() */ +} /* * Routine: PMAP_CACHE_CONTROL @@ -607,7 +569,7 @@ pmap_map(virt, start, end, prot, cmode) * the specified virtual address range. * * Parameters: - * pmap_t map + * pmap_t pmap * vaddr_t s * vaddr_t e * u_int mode @@ -624,17 +586,14 @@ pmap_map(virt, start, end, prot, cmode) * */ void -pmap_cache_ctrl(pmap, s, e, mode) - pmap_t pmap; - vaddr_t s, e; - u_int mode; +pmap_cache_ctrl(pmap_t pmap, vaddr_t s, vaddr_t e, u_int mode) { - int spl; - pt_entry_t *pte; - vaddr_t va, pteva; - boolean_t kflush; - int cpu; - u_int users; + int spl; + pt_entry_t *pte; + vaddr_t va, pteva; + boolean_t kflush; + int cpu; + u_int users; #ifdef DEBUG if (mode & CACHE_MASK) { @@ -653,7 +612,7 @@ pmap_cache_ctrl(pmap, s, e, mode) PMAP_LOCK(pmap, spl); - users = pmap->cpus_using; + users = pmap->pm_cpus; if (pmap == kernel_pmap) { kflush = TRUE; } else { @@ -685,7 +644,7 @@ pmap_cache_ctrl(pmap, s, e, mode) cmmu_flush_remote_cache(cpu, pteva, PAGE_SIZE); } PMAP_UNLOCK(pmap, spl); -} /* pmap_cache_ctrl */ +} /* * Routine: PMAP_BOOTSTRAP @@ -728,34 +687,27 @@ pmap_cache_ctrl(pmap, s, e, mode) * purposes. They are arbitrarily mapped when needed. They are used, * for example, by pmap_copy_page and pmap_zero_page. * - * For m88k, we have to map BUG memory also. This is a read only + * For m88k, we have to map BUG memory also. This is a read only * mapping for 0x10000 bytes. 
We will end up having load_start as * 0 and VM_MIN_KERNEL_ADDRESS as 0 - yes sir, we have one-to-one * mapping!!! */ void -pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) - vaddr_t load_start; - paddr_t *phys_start, *phys_end; - vaddr_t *virt_start, *virt_end; +pmap_bootstrap(vaddr_t load_start, paddr_t *phys_start, paddr_t *phys_end, + vaddr_t *virt_start, vaddr_t *virt_end) { - kpdt_entry_t kpdt_virt; - sdt_entry_t *kmap; - vaddr_t vaddr, - virt, - kernel_pmap_size, - pdt_size; - paddr_t s_text, - e_text, - kpdt_phys; - apr_template_t apr_data; - pt_entry_t *pte; - int i; - pmap_table_t ptable; - extern void *kernelstart, *etext; - -#ifdef DEBUG + kpdt_entry_t kpdt_virt; + sdt_entry_t *kmap; + vaddr_t vaddr, virt, kernel_pmap_size, pdt_size; + paddr_t s_text, e_text, kpdt_phys; + apr_template_t apr_data; + pt_entry_t *pte; + int i; + pmap_table_t ptable; + extern void *kernelstart, *etext; + +#ifdef DEBUG if ((pmap_con_dbg & (CD_BOOT | CD_NORM)) == (CD_BOOT | CD_NORM)) { printf("pmap_bootstrap: \"load_start\" 0x%x\n", load_start); } @@ -765,17 +717,16 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) panic("pmap_bootstrap: \"load_start\" not on the m88k page boundary: 0x%x", load_start); #endif - simple_lock_init(&kernel_pmap->lock); + simple_lock_init(&kernel_pmap->pm_lock); /* * Allocate the kernel page table from the front of available - * physical memory, - * i.e. just after where the kernel image was loaded. + * physical memory, i.e. just after where the kernel image was loaded. */ /* - * The calling sequence is + * The calling sequence is * ... - * pmap_bootstrap(&kernelstart,...) + * pmap_bootstrap(&kernelstart,...); * kernelstart is the first symbol in the load image. * We link the kernel such that &kernelstart == 0x10000 (size of * BUG ROM) @@ -790,23 +741,20 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) /* * Initialize kernel_pmap structure */ - kernel_pmap->ref_count = 1; - kernel_pmap->cpus_using = 0; - kernel_pmap->sdt_paddr = kmap = (sdt_entry_t *)(*phys_start); - kernel_pmap->sdt_vaddr = (sdt_entry_t *)(*virt_start); + kernel_pmap->pm_count = 1; + kernel_pmap->pm_cpus = 0; + kernel_pmap->pm_stpa = kmap = (sdt_entry_t *)(*phys_start); + kernel_pmap->pm_stab = (sdt_entry_t *)(*virt_start); kmapva = *virt_start; #ifdef DEBUG if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) { - printf("kernel_pmap->sdt_paddr = 0x%x\n",kernel_pmap->sdt_paddr); - printf("kernel_pmap->sdt_vaddr = 0x%x\n",kernel_pmap->sdt_vaddr); + printf("kernel_pmap->pm_stpa = 0x%x\n",kernel_pmap->pm_stpa); + printf("kernel_pmap->pm_stab = 0x%x\n",kernel_pmap->pm_stab); } - /* init double-linked list of pmap structure */ - kernel_pmap->next = kernel_pmap; - kernel_pmap->prev = kernel_pmap; #endif - /* + /* * Reserve space for segment table entries. * One for the regular segment table and one for the shadow table * The shadow table keeps track of the virtual address of page @@ -818,17 +766,17 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) * Just to be consistent, we will maintain the shadow table for * kernel pmap also. 
*/ - kernel_pmap_size = 2*SDT_SIZE; + kernel_pmap_size = 2 * SDT_SIZE; #ifdef DEBUG if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) { - printf(" kernel segment start = 0x%x\n", kernel_pmap->sdt_paddr); + printf(" kernel segment start = 0x%x\n", kernel_pmap->pm_stpa); printf("kernel segment table size = 0x%x\n", kernel_pmap_size); - printf(" kernel segment end = 0x%x\n", ((paddr_t)kernel_pmap->sdt_paddr) + kernel_pmap_size); + printf(" kernel segment end = 0x%x\n", ((paddr_t)kernel_pmap->pm_stpa) + kernel_pmap_size); } -#endif +#endif /* init all segment descriptors to zero */ - bzero(kernel_pmap->sdt_vaddr, kernel_pmap_size); + bzero(kernel_pmap->pm_stab, kernel_pmap_size); *phys_start += kernel_pmap_size; *virt_start += kernel_pmap_size; @@ -856,7 +804,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) printf(" kernel page table size = 0x%x\n", pdt_size); printf(" kernel page end = 0x%x\n", *phys_start); } -#endif +#endif #ifdef DEBUG if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) { @@ -883,8 +831,8 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) */ s_text = load_start; /* paddr of text */ - e_text = load_start + ((vaddr_t)&etext - - trunc_page((vaddr_t)&kernelstart)); + e_text = load_start + + ((vaddr_t)&etext - trunc_page((vaddr_t)&kernelstart)); /* paddr of end of text section*/ e_text = round_page(e_text); @@ -919,9 +867,9 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) VM_PROT_WRITE | VM_PROT_READ, CACHE_INH); if (vaddr != *virt_start) { - /* + /* * This should never happen because we now round the PDT - * table size up to a page boundry in the quest to get + * table size up to a page boundry in the quest to get * mc88110 working. - XXX smurph */ #ifdef DEBUG @@ -950,7 +898,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) VM_PROT_WRITE | VM_PROT_READ, CACHE_INH); *virt_start += etherlen; - *phys_start += etherlen; + *phys_start += etherlen; if (vaddr != *virt_start) { #ifdef DEBUG @@ -994,7 +942,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) * OBIO should be mapped cache inhibited. */ - ptable = pmap_table_build(0); /* see pmap_table.c for details */ + ptable = pmap_table_build(0); #ifdef DEBUG if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) { printf("pmap_bootstrap: -> pmap_table_build\n"); @@ -1060,7 +1008,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) */ apr_data.bits = 0; - apr_data.field.st_base = atop(kernel_pmap->sdt_paddr); + apr_data.field.st_base = atop(kernel_pmap->pm_stpa); apr_data.field.wt = 1; apr_data.field.g = 1; apr_data.field.ci = 0; @@ -1069,7 +1017,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) { show_apr(apr_data.bits); } -#endif +#endif /* Invalidate entire kernel TLB. */ #ifdef DEBUG if ((pmap_con_dbg & (CD_BOOT | CD_FULL)) == (CD_BOOT | CD_FULL)) { @@ -1083,8 +1031,8 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) cmmu_flush_remote_tlb(i, 1, 0, -1); /* still physical */ /* - * Set valid bit to DT_INVALID so that the very first - * pmap_enter() on these won't barf in + * Set valid bit to DT_INVALID so that the very first + * pmap_enter() on these won't barf in * pmap_remove_range(). 
*/ pte = pmap_pte(kernel_pmap, phys_map_vaddr1); @@ -1098,7 +1046,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) printf("Processor %d running virtual.\n", i); } #endif - SETBIT_CPUSET(i, &kernel_pmap->cpus_using); + SETBIT_CPUSET(i, &kernel_pmap->pm_cpus); } #ifdef DEBUG @@ -1106,7 +1054,7 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) printf("running virtual - avail_next 0x%x\n", *phys_start); } #endif -} /* pmap_bootstrap() */ +} /* * Routine: PMAP_INIT @@ -1121,88 +1069,26 @@ pmap_bootstrap(load_start, phys_start, phys_end, virt_start, virt_end) * phys_end physical address of last available page * * Extern/Globals - * pv_head_table - * pmap_cached_attributes * pmap_phys_start * pmap_phys_end - * pmap_initialized * * Calls: - * uvm_km_zalloc - * - * This routine does not really have much to do. It allocates space - * for the pv_head_table, pmap_cached_attributes; and sets these - * pointers. It also initializes zones for pmap structures, pv_entry - * structures, and segment tables. + * pool_init * - * Last, it sets the pmap_phys_start and pmap_phys_end global - * variables. These define the range of pages 'managed' by pmap. These - * are pages for which pmap must maintain the PV list and the modify - * list. (All other pages are kernel-specific and are permanently - * wired.) - * - * uvm_km_zalloc() memory for pv_table - * uvm_km_zalloc() memory for modify_bits + * This routine does not really have much to do. It initializes + * pools for pmap structures and pv_entry structures. */ void pmap_init(void) { - long npages; - vaddr_t addr; - vsize_t s; - pv_entry_t pv; - u_int8_t *attr; - int bank; - #ifdef DEBUG if ((pmap_con_dbg & (CD_INIT | CD_NORM)) == (CD_INIT | CD_NORM)) printf("pmap_init()\n"); #endif - /* - * Allocate memory for the pv_head_table, - * the modify bit array, and the pte_page table. - */ - for (npages = 0, bank = 0; bank < vm_nphysseg; bank++) - npages += vm_physmem[bank].end - vm_physmem[bank].start; - - s = PV_TABLE_SIZE(npages); /* pv_list */ - s += npages * sizeof(u_int8_t); /* pmap_cached_attributes */ - -#ifdef DEBUG - if ((pmap_con_dbg & (CD_INIT | CD_FULL)) == (CD_INIT | CD_FULL)) { - printf("(pmap_init) nbr of managed pages = %x\n", npages); - printf("(pmap_init) size of pv_list = %x\n", - npages * sizeof(struct pv_entry)); - } -#endif - - s = round_page(s); - addr = (vaddr_t)uvm_km_zalloc(kernel_map, s); - - pv_head_table = (pv_entry_t)addr; - addr += PV_TABLE_SIZE(npages); - - pmap_cached_attributes = (u_int8_t *)addr; - - /* - * Now that the pv and attribute tables have been allocated, - * assign them to the memory segments. - */ - pv = pv_head_table; - attr = pmap_cached_attributes; - for (bank = 0; bank < vm_nphysseg; bank++) { - npages = vm_physmem[bank].end - vm_physmem[bank].start; - vm_physmem[bank].pmseg.pvent = pv; - vm_physmem[bank].pmseg.attrs = attr; - pv += npages; - attr += npages; - } pool_init(&pmappool, sizeof(struct pmap), 0, 0, 0, "pmappl", &pool_allocator_nointr); pool_init(&pvpool, sizeof(pv_entry_t), 0, 0, 0, "pvpl", NULL); - - pmap_initialized = TRUE; } /* pmap_init() */ /* @@ -1212,7 +1098,7 @@ pmap_init(void) * Zeroes the specified page. 
* * Parameters: - * phys PA of page to zero + * pg page to zero * * Extern/Global: * phys_map_vaddr1 @@ -1230,11 +1116,11 @@ pmap_init(void) void pmap_zero_page(struct vm_page *pg) { - paddr_t phys = VM_PAGE_TO_PHYS(pg); - vaddr_t srcva; - int spl; - int cpu; - pt_entry_t *srcpte; + paddr_t pa = VM_PAGE_TO_PHYS(pg); + vaddr_t srcva; + int spl; + int cpu; + pt_entry_t *srcpte; cpu = cpu_number(); srcva = (vaddr_t)(phys_map_vaddr1 + (cpu * PAGE_SIZE)); @@ -1242,15 +1128,15 @@ pmap_zero_page(struct vm_page *pg) SPLVM(spl); cmmu_flush_tlb(TRUE, srcva, PAGE_SIZE); - *srcpte = trunc_page(phys) | + *srcpte = trunc_page(pa) | m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE) | CACHE_GLOBAL | PG_V; SPLX(spl); bzero((void *)srcva, PAGE_SIZE); /* force the data out */ - cmmu_flush_remote_data_cache(cpu, phys, PAGE_SIZE); -} /* pmap_zero_page() */ + cmmu_flush_remote_data_cache(cpu, pa, PAGE_SIZE); +} /* * Routine: PMAP_CREATE @@ -1266,16 +1152,15 @@ pmap_zero_page(struct vm_page *pg) struct pmap * pmap_create(void) { - pmap_t p; - sdt_entry_t *segdt; - u_int s; - + pmap_t pmap; + sdt_entry_t *segdt; + u_int s; #ifdef PMAP_USE_BATC - int i; + int i; #endif - p = pool_get(&pmappool, PR_WAITOK); - bzero(p, sizeof(*p)); + pmap = pool_get(&pmappool, PR_WAITOK); + bzero(pmap, sizeof(*pmap)); /* * Allocate memory for *actual* segment table and *shadow* table. @@ -1295,19 +1180,19 @@ pmap_create(void) /* * Initialize pointer to segment table both virtual and physical. */ - p->sdt_vaddr = segdt; + pmap->pm_stab = segdt; if (pmap_extract(kernel_pmap, (vaddr_t)segdt, - (paddr_t *)&p->sdt_paddr) == FALSE) + (paddr_t *)&pmap->pm_stpa) == FALSE) panic("pmap_create: pmap_extract failed!"); - if (!PAGE_ALIGNED(p->sdt_paddr)) + if (!PAGE_ALIGNED(pmap->pm_stpa)) panic("pmap_create: sdt_table 0x%x not aligned on page boundary", - (int)p->sdt_paddr); + (int)pmap->pm_stpa); #ifdef DEBUG if ((pmap_con_dbg & (CD_CREAT | CD_NORM)) == (CD_CREAT | CD_NORM)) { - printf("(pmap_create: %x) pmap=0x%p, sdt_vaddr=0x%x, sdt_paddr=0x%x\n", - curproc, p, p->sdt_vaddr, p->sdt_paddr); + printf("(pmap_create: %x) pmap=0x%p, pm_stab=0x%x, pm_stpa=0x%x\n", + curproc, pmap, pmap->pm_stab, pmap->pm_stpa); } #endif @@ -1332,28 +1217,20 @@ pmap_create(void) /* * Initialize pmap structure. */ - p->ref_count = 1; - simple_lock_init(&p->lock); - p->cpus_using = 0; + pmap->pm_count = 1; + simple_lock_init(&pmap->pm_lock); + pmap->pm_cpus = 0; #ifdef PMAP_USE_BATC /* initialize block address translation cache */ for (i = 0; i < BATC_MAX; i++) { - p->i_batc[i].bits = 0; - p->d_batc[i].bits = 0; + pmap->pm_ibatc[i].bits = 0; + pmap->pm_dbatc[i].bits = 0; } #endif -#ifdef DEBUG - /* link into list of pmaps, just after kernel pmap */ - p->next = kernel_pmap->next; - p->prev = kernel_pmap; - kernel_pmap->next = p; - p->next->prev = p; -#endif - - return (p); -} /* pmap_create() */ + return pmap; +} /* * Routine: PMAP_RELEASE @@ -1371,7 +1248,7 @@ pmap_create(void) * * Special Assumptions: * No locking is needed, since this is only called which the - * ref_count field of the pmap structure goes to zero. + * pm_count field of the pmap structure goes to zero. * * This routine sequences of through the user address space, releasing * all translation table space back to the system using PT_FREE. 
@@ -1380,23 +1257,22 @@ pmap_create(void) * */ void -pmap_release(pmap) - pmap_t pmap; +pmap_release(pmap_t pmap) { - unsigned long sdt_va; /* outer loop index */ - sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */ - pt_entry_t *gdttbl; /* ptr to first entry in a page table */ - u_int i,j; + unsigned long sdt_va; /* outer loop index */ + sdt_entry_t *sdttbl; /* ptr to first entry in the segment table */ + pt_entry_t *gdttbl; /* ptr to first entry in a page table */ + u_int i, j; #ifdef DEBUG if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM)) printf("(pmap_release: %x) pmap %x\n", curproc, pmap); #endif - sdttbl = pmap->sdt_vaddr; /* addr of segment table */ - /* - This contortion is here instead of the natural loop - because of integer overflow/wraparound if VM_MAX_ADDRESS + sdttbl = pmap->pm_stab; /* addr of segment table */ + /* + This contortion is here instead of the natural loop + because of integer overflow/wraparound if VM_MAX_ADDRESS is near 0xffffffff */ i = VM_MIN_ADDRESS / PDT_TABLE_GROUP_VA_SPACE; @@ -1429,12 +1305,9 @@ pmap_release(pmap) #ifdef DEBUG if ((pmap_con_dbg & (CD_FREE | CD_NORM)) == (CD_FREE | CD_NORM)) - printf("(pmap_release: %x) ref_count = 0\n", curproc); - /* unlink from list of pmap structs */ - pmap->prev->next = pmap->next; - pmap->next->prev = pmap->prev; + printf("(pmap_release: %x) pm_count = 0\n", curproc); #endif -} /* pmap_release() */ +} /* * Routine: PMAP_DESTROY @@ -1459,24 +1332,23 @@ pmap_release(pmap) * pmap structure. */ void -pmap_destroy(p) - pmap_t p; +pmap_destroy(pmap_t pmap) { int count; #ifdef DIAGNOSTIC - if (p == kernel_pmap) + if (pmap == kernel_pmap) panic("pmap_destroy: Attempt to destroy kernel pmap"); #endif - simple_lock(&p->lock); - count = --p->ref_count; - simple_unlock(&p->lock); + simple_lock(&pmap->pm_lock); + count = --pmap->pm_count; + simple_unlock(&pmap->pm_lock); if (count == 0) { - pmap_release(p); - pool_put(&pmappool, p); + pmap_release(pmap); + pool_put(&pmappool, pmap); } -} /* pmap_destroy() */ +} /* @@ -1488,18 +1360,17 @@ pmap_destroy(p) * Parameters: * pmap pointer to pmap structure * - * Under a pmap read lock, the ref_count field of the pmap structure + * Under a pmap read lock, the pm_count field of the pmap structure * is incremented. The function then returns. */ void -pmap_reference(p) - pmap_t p; +pmap_reference(pmap_t pmap) { - simple_lock(&p->lock); - p->ref_count++; - simple_unlock(&p->lock); -} /* pmap_reference */ + simple_lock(&pmap->pm_lock); + pmap->pm_count++; + simple_unlock(&pmap->pm_lock); +} /* * Routine: PMAP_REMOVE_RANGE (internal) @@ -1516,7 +1387,6 @@ pmap_reference(p) * * External/Global: * pv lists - * pmap_cached_attributes * * Calls: * pmap_pte @@ -1535,29 +1405,26 @@ pmap_reference(p) * * If the PTE is valid, the routine must invalidated the entry. The * 'modified' bit, if on, is referenced to the VM, and into the appropriate - * entry in the pmap_cached_attributes. Next, the function must find the PV + * entry in the PV list entry. Next, the function must find the PV * list entry associated with this pmap/va (if it doesn't exist - the function * panics). The PV list entry is unlinked from the list, and returned to * its zone. 
*/ void -pmap_remove_range(pmap, s, e) - pmap_t pmap; - vaddr_t s, e; +pmap_remove_range(pmap_t pmap, vaddr_t s, vaddr_t e) { - pt_entry_t *pte; - pv_entry_t prev, cur; - pv_entry_t pvl; - paddr_t pa; - vaddr_t va; - u_int users; - pt_entry_t opte; - boolean_t kflush; + pt_entry_t *pte, opte; + pv_entry_t prev, cur, pvl; + struct vm_page *pg; + paddr_t pa; + vaddr_t va; + u_int users; + boolean_t kflush; /* - * Pmap has been locked by the caller. + * pmap has been locked by the caller. */ - users = pmap->cpus_using; + users = pmap->pm_cpus; if (pmap == kernel_pmap) { kflush = TRUE; } else { @@ -1584,7 +1451,7 @@ pmap_remove_range(pmap, s, e) continue; } - pte = pmap_pte(pmap,va); + pte = pmap_pte(pmap, va); if (!PDT_VALID(pte)) { continue; /* no page mapping */ @@ -1593,43 +1460,44 @@ pmap_remove_range(pmap, s, e) /* * Update statistics. */ - pmap->stats.resident_count--; + pmap->pm_stats.resident_count--; if (pmap_pte_w(pte)) - pmap->stats.wired_count--; + pmap->pm_stats.wired_count--; pa = ptoa(PG_PFNUM(*pte)); + pg = PHYS_TO_VM_PAGE(pa); - if (PMAP_MANAGED(pa)) { + if (pg != NULL) { /* * Remove the mapping from the pvlist for * this physical page. */ - pvl = pa_to_pvh(pa); + pvl = pg_to_pvh(pg); #ifdef DIAGNOSTIC - if (pvl->pmap == PMAP_NULL) + if (pvl->pv_pmap == PMAP_NULL) panic("pmap_remove_range: null pv_list"); #endif - if (pvl->va == va && pvl->pmap == pmap) { + if (pvl->pv_va == va && pvl->pv_pmap == pmap) { /* * Hander is the pv_entry. Copy the next one * to hander and free the next one (we can't * free the hander) */ - cur = pvl->next; + cur = pvl->pv_next; if (cur != PV_ENTRY_NULL) { *pvl = *cur; pool_put(&pvpool, cur); } else { - pvl->pmap = PMAP_NULL; + pvl->pv_pmap = PMAP_NULL; } } else { for (cur = pvl; cur != PV_ENTRY_NULL; - cur = cur->next) { - if (cur->va == va && cur->pmap == pmap) + cur = cur->pv_next) { + if (cur->pv_va == va && cur->pv_pmap == pmap) break; prev = cur; } @@ -1639,10 +1507,10 @@ pmap_remove_range(pmap, s, e) panic("pmap_remove_range: mapping not in pv_list"); } - prev->next = cur->next; + prev->pv_next = cur->pv_next; pool_put(&pvpool, cur); } - } /* if PMAP_MANAGED */ + } /* if (pg != NULL) */ /* * Reflect modify bits to pager and zero (invalidate, @@ -1657,14 +1525,14 @@ pmap_remove_range(pmap, s, e) flush_atc_entry(users, va, kflush); if (opte & PG_M) { - if (PMAP_MANAGED(pa)) { + if (pg != NULL) { /* keep track ourselves too */ - *pa_to_attribute(pa) |= PG_M; + pvl->pv_flags |= PG_M; } } - } /* end for( va = s; ...) */ -} /* pmap_remove_range */ + } /* for (va = s; ...) */ +} /* * Routine: PMAP_REMOVE @@ -1675,7 +1543,7 @@ pmap_remove_range(pmap, s, e) * size. * * Parameters: - * map pointer to pmap structure + * pmap pointer to pmap structure * s * e * @@ -1689,18 +1557,16 @@ pmap_remove_range(pmap, s, e) * real work. 
*/ void -pmap_remove(map, s, e) - pmap_t map; - vaddr_t s, e; +pmap_remove(pmap_t pmap, vaddr_t s, vaddr_t e) { int spl; - if (map == PMAP_NULL) + if (pmap == PMAP_NULL) return; #ifdef DEBUG if ((pmap_con_dbg & (CD_RM | CD_NORM)) == (CD_RM | CD_NORM)) - printf("(pmap_remove: %x) map %x s %x e %x\n", curproc, map, s, e); + printf("(pmap_remove: %x) map %x s %x e %x\n", curproc, pmap, s, e); #endif #ifdef DIAGNOSTIC @@ -1708,10 +1574,10 @@ pmap_remove(map, s, e) panic("pmap_remove: start greater than end address"); #endif - PMAP_LOCK(map, spl); - pmap_remove_range(map, s, e); - PMAP_UNLOCK(map, spl); -} /* pmap_remove() */ + PMAP_LOCK(pmap, spl); + pmap_remove_range(pmap, s, e); + PMAP_UNLOCK(pmap, spl); +} /* * Routine: PMAP_REMOVE_ALL @@ -1721,12 +1587,11 @@ pmap_remove(map, s, e) * resides. Reflects back modify bits to the pager. * * Parameters: - * phys physical address of pages which is to + * pg physical pages which is to * be removed from all maps * * Extern/Global: - * pv_head_array, pv lists - * pmap_cached_attributes + * pv lists * * Calls: * simple_lock @@ -1739,7 +1604,7 @@ pmap_remove(map, s, e) * obtain a pointer to the page table entry (PTE) associated with the * va (the PTE must exist and be valid, otherwise the routine panics). * The hardware 'modified' bit in the PTE is examined. If it is on, the - * corresponding bit in the pmap_cached_attributes entry corresponding + * corresponding bit in the PV list entry corresponding * to the physical page is set to 1. * Then, the PTE is invalidated, and the PV list entry is unlinked and * freed. @@ -1748,23 +1613,22 @@ pmap_remove(map, s, e) * will be null. */ void -pmap_remove_all(phys) - paddr_t phys; +pmap_remove_all(struct vm_page *pg) { - pv_entry_t pvl; - pt_entry_t *pte; - vaddr_t va; - pmap_t pmap; - int spl; + pt_entry_t *pte; + pv_entry_t pvl; + vaddr_t va; + pmap_t pmap; + int spl; #ifdef DEBUG - int dbgcnt = 0; + int dbgcnt = 0; #endif - if (!PMAP_MANAGED(phys)) { + if (pg == NULL) { /* not a managed page. */ #ifdef DEBUG if (pmap_con_dbg & CD_RMAL) - printf("(pmap_remove_all: %x) phys addr 0x%x not a managed page\n", curproc, phys); + printf("(pmap_remove_all: %x) vm page 0x%x not a managed page\n", curproc, pg); #endif return; } @@ -1772,19 +1636,19 @@ pmap_remove_all(phys) SPLVM(spl); /* * Walk down PV list, removing all mappings. - * We don't have to lock the pv_head, since we have the entire pmap + * We don't have to lock the pv list, since we have the entire pmap * system. 
*/ remove_all_Retry: - pvl = pa_to_pvh(phys); + pvl = pg_to_pvh(pg); /* * Loop for each entry on the pv list */ - while ((pmap = pvl->pmap) != PMAP_NULL) { - va = pvl->va; - if (!simple_lock_try(&pmap->lock)) + while ((pmap = pvl->pv_pmap) != PMAP_NULL) { + va = pvl->pv_va; + if (!simple_lock_try(&pmap->pm_lock)) goto remove_all_Retry; pte = pmap_pte(pmap, va); @@ -1796,23 +1660,23 @@ remove_all_Retry: #ifdef DIAGNOSTIC if (pte == PT_ENTRY_NULL) { #ifdef DEBUG - printf("(pmap_remove_all: %p) phys %p pmap %x va %x dbgcnt %x\n", - curproc, phys, pmap, va, dbgcnt); + printf("(pmap_remove_all: %p) vm page %p pmap %x va %x dbgcnt %x\n", + curproc, pg, pmap, va, dbgcnt); #endif panic("pmap_remove_all: pte NULL"); } #endif /* DIAGNOSTIC */ if (!PDT_VALID(pte)) { - pvl = pvl->next; + pvl = pvl->pv_next; goto next; /* no page mapping */ } if (pmap_pte_w(pte)) { #ifdef DEBUG if (pmap_con_dbg & CD_RMAL) printf("pmap_remove_all: wired mapping for %lx not removed\n", - phys); + pg); #endif - pvl = pvl->next; + pvl = pvl->pv_next; goto next; } @@ -1826,10 +1690,10 @@ remove_all_Retry: * leaves that for when VM calls pmap_collect(). */ next: - simple_unlock(&pmap->lock); + simple_unlock(&pmap->pm_lock); } SPLX(spl); -} /* pmap_remove_all() */ +} /* * Routine: PMAP_PROTECT @@ -1858,16 +1722,13 @@ next: * Otherwise, the PTE's protection attributes are adjusted as specified. */ void -pmap_protect(pmap, s, e, prot) - pmap_t pmap; - vaddr_t s, e; - vm_prot_t prot; +pmap_protect(pmap_t pmap, vaddr_t s, vaddr_t e, vm_prot_t prot) { - int spl; - pt_entry_t ap, *pte; - vaddr_t va; - u_int users; - boolean_t kflush; + int spl; + pt_entry_t *pte, ap; + vaddr_t va; + u_int users; + boolean_t kflush; if (pmap == PMAP_NULL || prot & VM_PROT_WRITE) return; @@ -1885,7 +1746,7 @@ pmap_protect(pmap, s, e, prot) PMAP_LOCK(pmap, spl); - users = pmap->cpus_using; + users = pmap->pm_cpus; if (pmap == kernel_pmap) { kflush = TRUE; } else { @@ -1924,8 +1785,8 @@ pmap_protect(pmap, s, e, prot) } /* - * Invalidate pte temporarily to avoid the - * modified bit and/or the reference bit being + * Invalidate pte temporarily to avoid the + * modified bit and/or the reference bit being * written back by any other cpu. */ *pte = (invalidate_pte(pte) & ~PG_PROT) | ap; @@ -1933,7 +1794,7 @@ pmap_protect(pmap, s, e, prot) pte++; } PMAP_UNLOCK(pmap, spl); -} /* pmap_protect() */ +} /* * Routine: PMAP_EXPAND @@ -1949,7 +1810,7 @@ pmap_protect(pmap, s, e, prot) * forever, since page table aren't shrunk.) * * Parameters: - * map point to map structure + * pmap point to pmap structure * v VA indicating which tables are needed * * Extern/Global: @@ -1971,40 +1832,27 @@ pmap_protect(pmap, s, e, prot) * 2: The page table entries (PTEs) are initialized (set invalid), and * the corresponding segment table entry is set to point to the new * page table. 
- * - * - * if (kernel_pmap) - * pmap_expand_kmap() - * ptva = kmem_alloc(user_pt_map) - * */ void -pmap_expand(map, v) - pmap_t map; - vaddr_t v; +pmap_expand(pmap_t pmap, vaddr_t v) { - int i, spl; - vaddr_t pdt_vaddr; - paddr_t pdt_paddr; - sdt_entry_t *sdt; - pt_entry_t *pte; - -#ifdef DIAGNOSTIC - if (map == PMAP_NULL) - panic("pmap_expand: pmap is NULL"); -#endif + int i, spl; + vaddr_t pdt_vaddr; + paddr_t pdt_paddr; + sdt_entry_t *sdt; + pt_entry_t *pte; #ifdef DEBUG if ((pmap_con_dbg & (CD_EXP | CD_NORM)) == (CD_EXP | CD_NORM)) - printf ("(pmap_expand: %x) map %x v %x\n", curproc, map, v); + printf ("(pmap_expand: %x) map %x v %x\n", curproc, pmap, v); #endif - CHECK_PAGE_ALIGN (v, "pmap_expand"); + CHECK_PAGE_ALIGN(v, "pmap_expand"); /* XXX */ pdt_vaddr = uvm_km_zalloc(kernel_map, PAGE_SIZE); if (pmap_extract(kernel_pmap, pdt_vaddr, &pdt_paddr) == FALSE) - panic("pmap_expand: pmap_extract failed!"); + panic("pmap_expand: pmap_extract failed"); #ifdef MVME188 if (brdtyp == BRD_188) { @@ -2015,14 +1863,14 @@ pmap_expand(map, v) } #endif - PMAP_LOCK(map, spl); + PMAP_LOCK(pmap, spl); - if ((pte = pmap_pte(map, v)) != PT_ENTRY_NULL) { + if ((pte = pmap_pte(pmap, v)) != PT_ENTRY_NULL) { /* * Someone else caused us to expand * during our vm_allocate. */ - PMAP_UNLOCK(map, spl); + PMAP_UNLOCK(pmap, spl); /* XXX */ uvm_km_free(kernel_map, pdt_vaddr, PAGE_SIZE); @@ -2042,7 +1890,7 @@ pmap_expand(map, v) */ v &= ~((1 << (LOG2_PDT_TABLE_GROUP_SIZE + PDT_BITS + PG_BITS)) - 1); - sdt = SDTENT(map,v); + sdt = SDTENT(pmap,v); /* * Init each of the segment entries to point the freshly allocated @@ -2055,8 +1903,8 @@ pmap_expand(map, v) pdt_paddr += PDT_SIZE; pdt_vaddr += PDT_SIZE; } - PMAP_UNLOCK(map, spl); -} /* pmap_expand() */ + PMAP_UNLOCK(pmap, spl); +} /* * Routine: PMAP_ENTER @@ -2079,8 +1927,7 @@ pmap_expand(map, v) * wired wired attribute for page * * Extern/Global: - * pv_head_array, pv lists - * pmap_cached_attributes + * pv lists * * Calls: * m88k_protection @@ -2126,51 +1973,42 @@ pmap_expand(map, v) * */ int -pmap_enter(pmap, va, pa, prot, flags) - pmap_t pmap; - vaddr_t va; - paddr_t pa; - vm_prot_t prot; - int flags; +pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags) { - int spl; - pv_entry_t pv_e; - pt_entry_t ap, *pte; - paddr_t old_pa; - pt_entry_t template; - pv_entry_t pvl; - u_int users; - boolean_t kflush; - boolean_t wired = (flags & PMAP_WIRED) != 0; + int spl; + pt_entry_t *pte, ap, template; + paddr_t old_pa; + pv_entry_t pv_e, pvl; + u_int users; + boolean_t kflush; + boolean_t wired = (flags & PMAP_WIRED) != 0; + struct vm_page *pg; CHECK_PAGE_ALIGN(va, "pmap_entry - VA"); CHECK_PAGE_ALIGN(pa, "pmap_entry - PA"); - /* - * Range check no longer use, since we use whole address space - */ - #ifdef DEBUG if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) { if (pmap == kernel_pmap) printf("(pmap_enter: %x) pmap kernel va %x pa %x\n", curproc, va, pa); else - printf("(pmap_enter: %x) pmap %x va %x pa %x\n", curproc, pmap, va, pa); + printf("(pmap_enter: %x) pmap %x va %x pa %x\n", curproc, pmap, va, pa); } #endif + ap = m88k_protection(pmap, prot); /* - * Must allocate a new pvlist entry while we're unlocked; - * zalloc may cause pageout (which will lock the pmap system). - * If we determine we need a pvlist entry, we will unlock - * and allocate one. Then will retry, throwing away - * the allocated entry later (if we no longer need it). 
+ * Must allocate a new pvlist entry while we're unlocked; + * zalloc may cause pageout (which will lock the pmap system). + * If we determine we need a pvlist entry, we will unlock + * and allocate one. Then will retry, throwing away + * the allocated entry later (if we no longer need it). */ pv_e = PV_ENTRY_NULL; PMAP_LOCK(pmap, spl); - users = pmap->cpus_using; + users = pmap->pm_cpus; Retry: /* @@ -2205,9 +2043,9 @@ Retry: */ if (wired && !(pmap_pte_w(pte))) - pmap->stats.wired_count++; + pmap->pm_stats.wired_count++; else if (!wired && pmap_pte_w(pte)) - pmap->stats.wired_count--; + pmap->pm_stats.wired_count--; if ((unsigned long)pa >= MAXPHYSMEM) template = CACHE_INH | PG_V; @@ -2236,12 +2074,14 @@ Retry: /* * Remove old mapping from the PV list if necessary. */ + + pg = PHYS_TO_VM_PAGE(pa); #ifdef DEBUG if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) { if (va == phys_map_vaddr1 || va == phys_map_vaddr2) { - printf("vaddr1 0x%x vaddr2 0x%x va 0x%x pa 0x%x managed %x\n", + printf("vaddr1 0x%x vaddr2 0x%x va 0x%x pa 0x%x managed %x\n", phys_map_vaddr1, phys_map_vaddr2, va, old_pa, - PMAP_MANAGED(pa) ? 1 : 0); + pg != NULL ? 1 : 0); printf("pte %x pfn %x valid %x\n", pte, PG_PFNUM(*pte), PDT_VALID(pte)); } @@ -2253,7 +2093,7 @@ Retry: pmap_remove_range(pmap, va, va + PAGE_SIZE); } - if (PMAP_MANAGED(pa)) { + if (pg != NULL) { #ifdef DEBUG if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) { if (va == phys_map_vaddr1 || va == phys_map_vaddr2) { @@ -2265,15 +2105,15 @@ Retry: * Enter the mappimg in the PV list for this * physical page. */ - pvl = pa_to_pvh(pa); + pvl = pg_to_pvh(pg); - if (pvl->pmap == PMAP_NULL) { + if (pvl->pv_pmap == PMAP_NULL) { /* * No mappings yet */ - pvl->va = va; - pvl->pmap = pmap; - pvl->next = PV_ENTRY_NULL; + pvl->pv_va = va; + pvl->pv_pmap = pmap; + pvl->pv_next = PV_ENTRY_NULL; } else { #ifdef DEBUG @@ -2283,9 +2123,9 @@ Retry: { pv_entry_t e = pvl; while (e != PV_ENTRY_NULL) { - if (e->pmap == pmap && e->va == va) + if (e->pv_pmap == pmap && e->pv_va == va) panic("pmap_enter: already in pv_list"); - e = e->next; + e = e->pv_next; } } #endif @@ -2296,10 +2136,10 @@ Retry: pv_e = pool_get(&pvpool, PR_NOWAIT); goto Retry; } - pv_e->va = va; - pv_e->pmap = pmap; - pv_e->next = pvl->next; - pvl->next = pv_e; + pv_e->pv_va = va; + pv_e->pv_pmap = pmap; + pv_e->pv_next = pvl->pv_next; + pvl->pv_next = pv_e; /* * Remember that we used the pvlist entry. */ @@ -2310,9 +2150,9 @@ Retry: /* * And count the mapping. */ - pmap->stats.resident_count++; + pmap->pm_stats.resident_count++; if (wired) - pmap->stats.wired_count++; + pmap->pm_stats.wired_count++; if ((unsigned long)pa >= MAXPHYSMEM) template = CACHE_INH | PG_V; @@ -2328,15 +2168,15 @@ Retry: *pte = template | ap | trunc_page(pa); - } /* if ( pa == old_pa ) ... else */ + } /* if (pa == old_pa) ... else */ PMAP_UNLOCK(pmap, spl); if (pv_e != PV_ENTRY_NULL) pool_put(&pvpool, pv_e); - return (0); -} /* pmap_enter */ + return 0; +} /* * Routine: pmap_unwire @@ -2354,26 +2194,24 @@ Retry: * The mapping must already exist in the pmap. 
*/ void -pmap_unwire(map, v) - pmap_t map; - vaddr_t v; +pmap_unwire(pmap_t pmap, vaddr_t v) { - pt_entry_t *pte; - int spl; + pt_entry_t *pte; + int spl; - PMAP_LOCK(map, spl); + PMAP_LOCK(pmap, spl); - if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL) + if ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) panic("pmap_unwire: pte missing"); if (pmap_pte_w(pte)) { /* unwired mapping */ - map->stats.wired_count--; + pmap->pm_stats.wired_count--; *pte &= ~PG_W; } - PMAP_UNLOCK(map, spl); -} /* pmap_unwire() */ + PMAP_UNLOCK(pmap, spl); +} /* * Routine: PMAP_EXTRACT @@ -2392,7 +2230,7 @@ pmap_unwire(map, v) * pmap_pte * * If BATC mapping is enabled and the specified pmap is kernel_pmap, - * batc_entry is scanned to find out the mapping. + * batc_entry is scanned to find out the mapping. * * Then the routine calls pmap_pte to get a (virtual) pointer to * the page table entry (PTE) associated with the given virtual @@ -2401,18 +2239,15 @@ pmap_unwire(map, v) * the PTE is returned. */ boolean_t -pmap_extract(pmap, va, pap) - pmap_t pmap; - vaddr_t va; - paddr_t *pap; +pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap) { - pt_entry_t *pte; - paddr_t pa; - int spl; - boolean_t rv = FALSE; + pt_entry_t *pte; + paddr_t pa; + int spl; + boolean_t rv = FALSE; #ifdef PMAP_USE_BATC - int i; + int i; #endif #ifdef DIAGNOSTIC @@ -2428,9 +2263,9 @@ pmap_extract(pmap, va, pap) for (i = batc_used - 1; i != 0; i--) if (batc_entry[i].lba == M88K_BTOBLK(va)) { if (pap != NULL) - *pap = (batc_entry[i].pba << BATC_BLKSHIFT) | + *pap = (batc_entry[i].pba << BATC_BLKSHIFT) | (va & BATC_BLKMASK); - return (TRUE); + return TRUE; } #endif @@ -2448,36 +2283,8 @@ pmap_extract(pmap, va, pap) } PMAP_UNLOCK(pmap, spl); - return (rv); -} /* pmap_extract() */ - -/* - * Routine: PMAP_COPY - * - * Function: - * Copy the range specigfied by src_adr/len from the source map - * to the range dst_addr/len in the destination map. This routine - * is only advisory and need not do anything. - * - * Parameters: - * dst_pmap pointer to destination pmap structure - * src_pmap pointer to source pmap structure - * dst_addr VA in destination map - * len length of address space being copied - * src_addr VA in source map - * - * At this time, the 88200 pmap implementation does nothing in this - * function. Translation tables in the destination map will be allocated - * at VM fault time. - */ -/* ARGSUSED */ -void -pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) - pmap_t dst_pmap, src_pmap; - vaddr_t dst_addr, src_addr; - vsize_t len; -{ -}/* pmap_copy() */ + return rv; +} /* * Routine: PMAP_COLLECT @@ -2516,30 +2323,21 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) * module deals with a single table at a time.] 
*/ void -pmap_collect(pmap) - pmap_t pmap; +pmap_collect( pmap_t pmap) { - vaddr_t sdt_va; /* outer loop index */ - vaddr_t sdt_vt; /* end of segment */ - sdt_entry_t *sdttbl; /* ptr to first entry in seg table */ - sdt_entry_t *sdtp; /* ptr to index into segment table */ - sdt_entry_t *sdt; /* ptr to index into segment table */ - pt_entry_t *gdttbl; /* ptr to first entry in a page table */ - pt_entry_t *gdttblend; /* ptr to byte after last entry in - table group */ - pt_entry_t *gdtp; /* ptr to index into a page table */ - boolean_t found_gdt_wired; /* flag indicating a wired page exists - in a page table's address range */ - int spl; - u_int i, j; - -#ifdef DIAGNOSTIC - if (pmap == PMAP_NULL) - panic("pmap_collect: pmap is NULL"); - - if (pmap == kernel_pmap) - panic("pmap_collect attempted on kernel pmap"); -#endif + vaddr_t sdt_va; /* outer loop index */ + vaddr_t sdt_vt; /* end of segment */ + sdt_entry_t *sdttbl; /* ptr to first entry in seg table */ + sdt_entry_t *sdtp; /* ptr to index into segment table */ + sdt_entry_t *sdt; /* ptr to index into segment table */ + pt_entry_t *gdttbl; /* ptr to first entry in a page table */ + pt_entry_t *gdttblend; /* ptr to byte after last entry in + table group */ + pt_entry_t *gdtp; /* ptr to index into a page table */ + boolean_t found_gdt_wired; /* flag indicating a wired page exists + in a page table's address range */ + int spl; + u_int i, j; #ifdef DEBUG if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM)) @@ -2548,12 +2346,12 @@ pmap_collect(pmap) PMAP_LOCK(pmap, spl); - sdttbl = pmap->sdt_vaddr; /* addr of segment table */ + sdttbl = pmap->pm_stab; /* addr of segment table */ sdtp = sdttbl; - /* - This contortion is here instead of the natural loop - because of integer overflow/wraparound if VM_MAX_ADDRESS + /* + This contortion is here instead of the natural loop + because of integer overflow/wraparound if VM_MAX_ADDRESS is near 0xffffffff */ i = VM_MIN_ADDRESS / PDT_TABLE_GROUP_VA_SPACE; @@ -2601,26 +2399,25 @@ pmap_collect(pmap) /* * we have to unlock before freeing the table, since PT_FREE - * calls uvm_km_free or free, which will invoke another + * calls uvm_km_free or free, which will invoke another * pmap routine */ PMAP_UNLOCK(pmap, spl); PT_FREE(gdttbl); PMAP_LOCK(pmap, spl); - - } /* Segment table Loop */ + } PMAP_UNLOCK(pmap, spl); #ifdef DEBUG if ((pmap_con_dbg & (CD_COL | CD_NORM)) == (CD_COL | CD_NORM)) - printf ("(pmap_collect: %x) done\n", curproc); + printf("(pmap_collect: %x) done\n", curproc); #endif -} /* pmap collect() */ +} /* * Routine: PMAP_ACTIVATE - * + * * Function: * Binds the given physical map to the given * processor, and returns a hardware map description. @@ -2642,15 +2439,14 @@ pmap_collect(pmap) * */ void -pmap_activate(p) - struct proc *p; +pmap_activate(struct proc *p) { - apr_template_t apr_data; - pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map); - int cpu = cpu_number(); + apr_template_t apr_data; + pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map); + int cpu = cpu_number(); #ifdef PMAP_USE_BATC - int n; + int n; #endif #ifdef DEBUG @@ -2660,12 +2456,12 @@ pmap_activate(p) if (pmap != kernel_pmap) { /* - * Lock the pmap to put this cpu in its active set. + * Lock the pmap to put this cpu in its active set. 
*/ - simple_lock(&pmap->lock); + simple_lock(&pmap->pm_lock); apr_data.bits = 0; - apr_data.field.st_base = atop(pmap->sdt_paddr); + apr_data.field.st_base = atop(pmap->pm_stpa); apr_data.field.wt = 0; apr_data.field.g = 1; apr_data.field.ci = 0; @@ -2673,15 +2469,15 @@ pmap_activate(p) #ifdef PMAP_USE_BATC /* - * cmmu_pmap_activate will set the uapr and the batc entries, - * then flush the *USER* TLB. IF THE KERNEL WILL EVER CARE - * ABOUT THE BATC ENTRIES, THE SUPERVISOR TLBs SHOULB BE + * cmmu_pmap_activate will set the uapr and the batc entries, + * then flush the *USER* TLB. IF THE KERNEL WILL EVER CARE + * ABOUT THE BATC ENTRIES, THE SUPERVISOR TLBs SHOULB BE * FLUSHED AS WELL. */ - cmmu_pmap_activate(cpu, apr_data.bits, - pmap->i_batc, pmap->d_batc); + cmmu_pmap_activate(cpu, apr_data.bits, + pmap->pm_ibatc, pmap->pm_dbatc); for (n = 0; n < BATC_MAX; n++) - *(register_t *)&batc_entry[n] = pmap->i_batc[n].bits; + *(register_t *)&batc_entry[n] = pmap->pm_ibatc[n].bits; #else cmmu_set_uapr(apr_data.bits); cmmu_flush_tlb(FALSE, 0, -1); @@ -2690,11 +2486,11 @@ pmap_activate(p) /* * Mark that this cpu is using the pmap. */ - SETBIT_CPUSET(cpu, &(pmap->cpus_using)); + SETBIT_CPUSET(cpu, &(pmap->pm_cpus)); - simple_unlock(&pmap->lock); + simple_unlock(&pmap->pm_lock); } -} /* pmap_activate() */ +} /* * Routine: PMAP_DEACTIVATE @@ -2706,25 +2502,24 @@ pmap_activate(p) * Parameters: * p pointer to proc structure * - * pmap_deactive simply clears the cpus_using field in given pmap structure. + * pmap_deactive simply clears the pm_cpus field in given pmap structure. * */ void -pmap_deactivate(p) - struct proc *p; +pmap_deactivate(struct proc *p) { - pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map); - int cpu = cpu_number(); + pmap_t pmap = vm_map_pmap(&p->p_vmspace->vm_map); + int cpu = cpu_number(); if (pmap != kernel_pmap) { /* * we expect the spl is already raised to sched level. */ - simple_lock(&pmap->lock); - CLRBIT_CPUSET(cpu, &(pmap->cpus_using)); - simple_unlock(&pmap->lock); + simple_lock(&pmap->pm_lock); + CLRBIT_CPUSET(cpu, &(pmap->pm_cpus)); + simple_unlock(&pmap->pm_lock); } -} /* pmap_deactivate() */ +} /* * Routine: PMAP_COPY_PAGE @@ -2755,16 +2550,16 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg) { paddr_t src = VM_PAGE_TO_PHYS(srcpg); paddr_t dst = VM_PAGE_TO_PHYS(dstpg); - vaddr_t dstva, srcva; - int spl; - pt_entry_t template, *dstpte, *srcpte; - int cpu = cpu_number(); + vaddr_t dstva, srcva; + int spl; + pt_entry_t template, *dstpte, *srcpte; + int cpu = cpu_number(); template = m88k_protection(kernel_pmap, VM_PROT_READ | VM_PROT_WRITE) | CACHE_GLOBAL | PG_V; /* - * Map source physical address. + * Map source physical address. */ srcva = (vaddr_t)(phys_map_vaddr1 + (cpu << PAGE_SHIFT)); dstva = (vaddr_t)(phys_map_vaddr2 + (cpu << PAGE_SHIFT)); @@ -2777,7 +2572,7 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg) *srcpte = template | trunc_page(src); /* - * Map destination physical address. + * Map destination physical address. */ cmmu_flush_tlb(TRUE, dstva, PAGE_SIZE); *dstpte = template | trunc_page(dst); @@ -2787,7 +2582,7 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg) /* flush source, dest out of cache? */ cmmu_flush_remote_data_cache(cpu, src, PAGE_SIZE); cmmu_flush_remote_data_cache(cpu, dst, PAGE_SIZE); -} /* pmap_copy_page() */ +} /* * Routine: PMAP_CHANGEBIT @@ -2796,16 +2591,14 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg) * Update the pte bits on the specified physical page. 
* * Parameters: - * pg vm_page + * pg physical page * set bits to set * mask bits to mask * * Extern/Global: - * pv_head_table, pv_lists - * pmap_cached_attributes + * pv_lists * * Calls: - * pa_to_pvh * pmap_pte * * The pte bits corresponding to the page's frame index will be changed as @@ -2815,38 +2608,30 @@ pmap_copy_page(struct vm_page *srcpg, struct vm_page *dstpg) * a TLB flush will be performed. */ void -pmap_changebit(pg, set, mask) - paddr_t pg; - int set, mask; +pmap_changebit(struct vm_page *pg, int set, int mask) { - pv_entry_t pvl; - pv_entry_t pvep; - pt_entry_t *pte, npte; - pmap_t pmap; - int spl; - vaddr_t va; - u_int users; - boolean_t kflush; - -#ifdef DIAGNOSTIC - if (!PMAP_MANAGED(pg)) - panic("pmap_changebit: not managed?"); -#endif + pv_entry_t pvl, pvep; + pt_entry_t *pte, npte; + pmap_t pmap; + int spl; + vaddr_t va; + u_int users; + boolean_t kflush; SPLVM(spl); changebit_Retry: - pvl = pa_to_pvh(pg); + pvl = pg_to_pvh(pg); /* * Clear saved attributes (modify, reference) */ - *pa_to_attribute(pg) &= mask; + pvl->pv_flags &= mask; - if (pvl->pmap == PMAP_NULL) { + if (pvl->pv_pmap == PMAP_NULL) { #ifdef DEBUG if ((pmap_con_dbg & (CD_CBIT | CD_NORM)) == (CD_CBIT | CD_NORM)) - printf("(pmap_changebit: %x) phys addr 0x%x not mapped\n", + printf("(pmap_changebit: %x) vm page 0x%x not mapped\n", curproc, pg); #endif SPLX(spl); @@ -2854,13 +2639,13 @@ changebit_Retry: } /* for each listed pmap, update the affected bits */ - for (pvep = pvl; pvep != PV_ENTRY_NULL; pvep = pvep->next) { - pmap = pvep->pmap; - va = pvep->va; - if (!simple_lock_try(&pmap->lock)) { + for (pvep = pvl; pvep != PV_ENTRY_NULL; pvep = pvep->pv_next) { + pmap = pvep->pv_pmap; + va = pvep->pv_va; + if (!simple_lock_try(&pmap->pm_lock)) { goto changebit_Retry; } - users = pmap->cpus_using; + users = pmap->pm_cpus; if (pmap == kernel_pmap) { kflush = TRUE; } else { @@ -2877,7 +2662,7 @@ changebit_Retry: panic("pmap_changebit: bad pv list entry."); if (!PDT_VALID(pte)) panic("pmap_changebit: invalid pte"); - if (ptoa(PG_PFNUM(*pte)) != pg) + if (ptoa(PG_PFNUM(*pte)) != VM_PAGE_TO_PHYS(pg)) panic("pmap_changebit: pte doesn't point to page"); #endif @@ -2890,7 +2675,7 @@ changebit_Retry: /* * Flush TLB of which cpus using pmap. * - * Invalidate pte temporarily to avoid the modified bit + * Invalidate pte temporarily to avoid the modified bit * and/or the reference being written back by any other cpu. */ if (npte != *pte) { @@ -2898,10 +2683,10 @@ changebit_Retry: flush_atc_entry(users, va, kflush); } - simple_unlock(&pmap->lock); + simple_unlock(&pmap->pm_lock); } SPLX(spl); -} /* pmap_changebit() */ +} /* * Routine: PMAP_CLEAR_MODIFY @@ -2910,14 +2695,12 @@ changebit_Retry: * Clears the modify bits on the specified physical page. */ boolean_t -pmap_clear_modify(pg) - struct vm_page *pg; +pmap_clear_modify(struct vm_page *pg) { - paddr_t pa = VM_PAGE_TO_PHYS(pg); boolean_t rv; - rv = pmap_testbit(pa, PG_M); - pmap_changebit(pa, 0, ~PG_M); + rv = pmap_testbit(pg, PG_M); + pmap_changebit(pg, 0, ~PG_M); return rv; } @@ -2928,16 +2711,14 @@ pmap_clear_modify(pg) * Test the modified/referenced bits of a physical page. * * Parameters: - * pg vm_page + * pg physical page * bit bit to test * * Extern/Global: - * pv_head_array, pv lists - * pmap_cached_attributes + * pv lists * * Calls: * simple_lock, simple_unlock - * pa_to_pvh * pmap_pte * * If the attribute list for the given page has the bit, this routine @@ -2950,27 +2731,19 @@ pmap_clear_modify(pg) * attribute list. 
@@ -2928,16 +2711,14 @@ pmap_clear_modify(pg)
 *	Test the modified/referenced bits of a physical page.
 *
 * Parameters:
- *	pg	vm_page
+ *	pg	physical page
 *	bit	bit to test
 *
 * Extern/Global:
- *	pv_head_array, pv lists
- *	pmap_cached_attributes
+ *	pv lists
 *
 * Calls:
 *	simple_lock, simple_unlock
- *	pa_to_pvh
 *	pmap_pte
 *
 * If the attribute list for the given page has the bit, this routine
@@ -2950,27 +2731,19 @@ pmap_clear_modify(pg)
 * attribute list.
 */
 boolean_t
-pmap_testbit(pg, bit)
-	paddr_t pg;
-	int bit;
+pmap_testbit(struct vm_page *pg, int bit)
 {
-	pv_entry_t pvl;
-	pv_entry_t pvep;
-	pt_entry_t *ptep;
-	int spl;
-	boolean_t rv;
-
-#ifdef DIAGNOSTIC
-	if (!PMAP_MANAGED(pg))
-		panic("pmap_testbit: not managed?");
-#endif
+	pv_entry_t pvl, pvep;
+	pt_entry_t *pte;
+	int spl;
+	boolean_t rv;

 	SPLVM(spl);

-	pvl = pa_to_pvh(pg);
+	pvl = pg_to_pvh(pg);
testbit_Retry:
-	if (*pa_to_attribute(pg) & bit) {
+	if (pvl->pv_flags & bit) {
 		/* we've already cached this flag
		   for this page, no use looking further... */
 #ifdef DEBUG
@@ -2982,13 +2755,13 @@ testbit_Retry:
 		return (TRUE);
 	}

-	if (pvl->pmap == PMAP_NULL) {
+	if (pvl->pv_pmap == PMAP_NULL) {
 		/* unmapped page - get info from attribute array
		   maintained by pmap_remove_range/pmap_remove_all */
-		rv = (boolean_t)(*pa_to_attribute(pg) & bit);
+		rv = (boolean_t)(pvl->pv_flags & bit);
 #ifdef DEBUG
 		if ((pmap_con_dbg & (CD_TBIT | CD_NORM)) == (CD_TBIT | CD_NORM))
-			printf("(pmap_testbit: %x) phys addr 0x%x not mapped\n",
+			printf("(pmap_testbit: %x) vm page 0x%x not mapped\n",
 			    curproc, pg);
 #endif
 		SPLX(spl);
@@ -2998,32 +2771,32 @@ testbit_Retry:

 	/* for each listed pmap, check modified bit for given page */
 	pvep = pvl;
 	while (pvep != PV_ENTRY_NULL) {
-		if (!simple_lock_try(&pvep->pmap->lock)) {
+		if (!simple_lock_try(&pvep->pv_pmap->pm_lock)) {
 			goto testbit_Retry;
 		}
-		ptep = pmap_pte(pvep->pmap, pvep->va);
-		if (ptep == PT_ENTRY_NULL) {
-			printf("pmap_testbit: pte from pv_list not in map virt = 0x%x\n", pvep->va);
+		pte = pmap_pte(pvep->pv_pmap, pvep->pv_va);
+		if (pte == PT_ENTRY_NULL) {
+			printf("pmap_testbit: pte from pv_list not in map virt = 0x%x\n", pvep->pv_va);
 			panic("pmap_testbit: bad pv list entry");
 		}
-		if (*ptep & bit) {
-			simple_unlock(&pvep->pmap->lock);
-			*pa_to_attribute(pg) |= bit;
+		if (*pte & bit) {
+			simple_unlock(&pvep->pv_pmap->pm_lock);
+			pvl->pv_flags |= bit;
 #ifdef DEBUG
 			if ((pmap_con_dbg & (CD_TBIT | CD_FULL)) == (CD_TBIT | CD_FULL))
-				printf("(pmap_testbit: %x) modified page pte@0x%p\n", curproc, ptep);
+				printf("(pmap_testbit: %x) modified page pte@0x%p\n", curproc, pte);
 #endif
 			SPLX(spl);
 			return (TRUE);
 		}
-		simple_unlock(&pvep->pmap->lock);
-		pvep = pvep->next;
+		simple_unlock(&pvep->pv_pmap->pm_lock);
+		pvep = pvep->pv_next;
 	}
 	SPLX(spl);
 	return (FALSE);
-}	/* pmap_testbit() */
+}

 /*
 * Routine:	PMAP_IS_MODIFIED
@@ -3033,13 +2806,10 @@ testbit_Retry:
 *	by any physical maps.
 */
 boolean_t
-pmap_is_modified(pg)
-	struct vm_page *pg;
+pmap_is_modified(struct vm_page *pg)
 {
-	paddr_t phys = VM_PAGE_TO_PHYS(pg);
-
-	return (pmap_testbit(phys, PG_M));
-}	/* pmap_is_modified() */
+	return pmap_testbit(pg, PG_M);
+}

 /*
 * Routine:	PMAP_CLEAR_REFERENCE
@@ -3048,14 +2818,12 @@ pmap_is_modified(pg)
 *	Clear the reference bit on the specified physical page.
 */
 boolean_t
-pmap_clear_reference(pg)
-	struct vm_page *pg;
+pmap_clear_reference(struct vm_page *pg)
 {
-	paddr_t pa = VM_PAGE_TO_PHYS(pg);
 	boolean_t rv;

-	rv = pmap_testbit(pa, PG_U);
-	pmap_changebit(pa, 0, ~PG_U);
+	rv = pmap_testbit(pg, PG_U);
+	pmap_changebit(pg, 0, ~PG_U);
 	return rv;
 }

@@ -3067,12 +2835,9 @@ pmap_clear_reference(pg)
 *	any physical maps.
 */
 boolean_t
-pmap_is_referenced(pg)
-	struct vm_page *pg;
+pmap_is_referenced(struct vm_page *pg)
 {
-	paddr_t phys = VM_PAGE_TO_PHYS(pg);
-
-	return (pmap_testbit(phys, PG_U));
+	return pmap_testbit(pg, PG_U);
 }
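The latching behavior in pmap_testbit() above (return immediately when pv_flags already has the bit, otherwise scan the mappings and cache a positive result) can be modelled in a few lines. The types and the PG_M value below are placeholders, not the real m88k definitions:

#include <stdio.h>

#define PG_M	0x10	/* placeholder value, not the real m88k PG_M */

/* Toy page descriptor: cached flags plus the ptes mapping the page. */
struct page {
	int	pv_flags;	/* cached modified/referenced bits */
	int	nptes;
	int	ptes[4];
};

static int
testbit(struct page *pg, int bit)
{
	int i;

	if (pg->pv_flags & bit)		/* cached: no scan needed */
		return (1);
	for (i = 0; i < pg->nptes; i++) {
		if (pg->ptes[i] & bit) {
			pg->pv_flags |= bit;	/* latch for next time */
			return (1);
		}
	}
	return (0);
}

int
main(void)
{
	struct page pg = { 0, 2, { 0, PG_M } };

	printf("first query:  %d\n", testbit(&pg, PG_M));  /* scans ptes */
	printf("second query: %d\n", testbit(&pg, PG_M));  /* hits cache */
	return (0);
}

pmap_changebit() is what keeps this cache honest: clearing PG_M or PG_U through it also clears the latched copy via the mask argument.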
 /*
@@ -3085,47 +2850,39 @@ pmap_is_referenced(pg)
 *	Lower the permission for all mappings to a given page.
 */
 void
-pmap_page_protect(pg, prot)
-	struct vm_page *pg;
-	vm_prot_t prot;
+pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
-	paddr_t phys = VM_PAGE_TO_PHYS(pg);
-
 	switch (prot) {
 	case VM_PROT_READ:
 	case VM_PROT_READ|VM_PROT_EXECUTE:
 		/* copy on write */
-		pmap_changebit(phys, PG_RO, ~0);
+		pmap_changebit(pg, PG_RO, ~0);
 		break;
 	case VM_PROT_READ|VM_PROT_WRITE:
 	case VM_PROT_ALL:
 		break;
 	default:
-		pmap_remove_all(phys);
+		pmap_remove_all(pg);
 		break;
 	}
 }

 void
-pmap_virtual_space(startp, endp)
-	vaddr_t *startp, *endp;
+pmap_virtual_space(vaddr_t *startp, vaddr_t *endp)
 {
 	*startp = virtual_avail;
 	*endp = virtual_end;
 }

 void
-pmap_kenter_pa(va, pa, prot)
-	vaddr_t va;
-	paddr_t pa;
-	vm_prot_t prot;
+pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
 {
-	int spl;
-	pt_entry_t template, *pte;
-	u_int users;
+	int spl;
+	pt_entry_t template, *pte;
+	u_int users;

-	CHECK_PAGE_ALIGN (va, "pmap_kenter_pa - VA");
-	CHECK_PAGE_ALIGN (pa, "pmap_kenter_pa - PA");
+	CHECK_PAGE_ALIGN(va, "pmap_kenter_pa - VA");
+	CHECK_PAGE_ALIGN(pa, "pmap_kenter_pa - PA");

 #ifdef DEBUG
 	if ((pmap_con_dbg & (CD_ENT | CD_NORM)) == (CD_ENT | CD_NORM)) {
@@ -3134,7 +2891,7 @@ pmap_kenter_pa(va, pa, prot)
 #endif

 	PMAP_LOCK(kernel_pmap, spl);
-	users = kernel_pmap->cpus_using;
+	users = kernel_pmap->pm_cpus;

 	template = m88k_protection(kernel_pmap, prot);

@@ -3148,8 +2905,8 @@ pmap_kenter_pa(va, pa, prot)
 	/*
	 * And count the mapping.
	 */
-	kernel_pmap->stats.resident_count++;
-	kernel_pmap->stats.wired_count++;
+	kernel_pmap->pm_stats.resident_count++;
+	kernel_pmap->pm_stats.wired_count++;

 	invalidate_pte(pte);
 	if ((unsigned long)pa >= MAXPHYSMEM)
@@ -3163,12 +2920,10 @@ pmap_kenter_pa(va, pa, prot)
 }

 void
-pmap_kremove(va, len)
-	vaddr_t va;
-	vsize_t len;
+pmap_kremove(vaddr_t va, vsize_t len)
 {
-	int spl;
-	u_int users;
+	int spl;
+	u_int users;

 #ifdef DEBUG
 	if ((pmap_con_dbg & (CD_RM | CD_NORM)) == (CD_RM | CD_NORM))
@@ -3179,7 +2934,7 @@ pmap_kremove(va, len)
 	CHECK_PAGE_ALIGN(len, "pmap_kremove len");

 	PMAP_LOCK(kernel_pmap, spl);
-	users = kernel_pmap->cpus_using;
+	users = kernel_pmap->pm_cpus;

 	for (len >>= PAGE_SHIFT; len != 0; len--, va += PAGE_SIZE) {
 		vaddr_t e = va + PAGE_SIZE;
@@ -3200,8 +2955,8 @@ pmap_kremove(va, len)
 		/*
		 * Update the counts
		 */
-		kernel_pmap->stats.resident_count--;
-		kernel_pmap->stats.wired_count--;
+		kernel_pmap->pm_stats.resident_count--;
+		kernel_pmap->pm_stats.wired_count--;

 		pte = pmap_pte(kernel_pmap, va);
 		invalidate_pte(pte);
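pmap_kremove() above tears its range down one page at a time, decrementing the resident and wired counts once per page before invalidating each pte. A trimmed-down model of the loop arithmetic, assuming 4KB pages and reducing the kernel_pmap counters to plain variables:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4KB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int
main(void)
{
	unsigned long va = 0x1000UL;
	unsigned long len = 3 * PAGE_SIZE;
	long resident = 3, wired = 3;

	/* The loop header mirrors the one in pmap_kremove(). */
	for (len >>= PAGE_SHIFT; len != 0; len--, va += PAGE_SIZE) {
		printf("invalidate pte for va 0x%lx\n", va);
		resident--;
		wired--;
	}
	printf("resident %ld, wired %ld\n", resident, wired);
	return (0);
}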