author    Thomas Gleixner <tglx@mtd.linutronix.de>    2005-11-06 15:36:37 +0100
committer Thomas Gleixner <tglx@mtd.linutronix.de>    2005-11-06 15:36:37 +0100
commit    2fc2991175bf77395e6b15fe6b2304d3bf72da40 (patch)
tree      b0ff38c09240e7c00e1577d447ebe89143d752dc /arch/sparc64/mm
parent    [MTD] mtdchar: Return EINVAL for bad seeks instead of fixing up to valid byte (diff)
parent    [PATCH] nvidiafb: Geforce 7800 series support added (diff)
download  linux-dev-2fc2991175bf77395e6b15fe6b2304d3bf72da40.tar.xz
          linux-dev-2fc2991175bf77395e6b15fe6b2304d3bf72da40.zip

Merge branch 'master' of /home/tglx/work/mtd/git/linux-2.6.git/
Diffstat (limited to 'arch/sparc64/mm')
 -rw-r--r--  arch/sparc64/mm/Makefile  |   2
 -rw-r--r--  arch/sparc64/mm/extable.c |  80
 -rw-r--r--  arch/sparc64/mm/fault.c   |  77
 -rw-r--r--  arch/sparc64/mm/generic.c |  40
 -rw-r--r--  arch/sparc64/mm/init.c    | 871
 -rw-r--r--  arch/sparc64/mm/tlb.c     |   7
 -rw-r--r--  arch/sparc64/mm/ultra.S   | 153
 7 files changed, 446 insertions(+), 784 deletions(-)
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
index cda87333a77b..9d0960e69f48 100644
--- a/arch/sparc64/mm/Makefile
+++ b/arch/sparc64/mm/Makefile
@@ -5,6 +5,6 @@
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror
-obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o
+obj-y := ultra.o tlb.o fault.o init.o generic.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c
deleted file mode 100644
index ec334297ff4f..000000000000
--- a/arch/sparc64/mm/extable.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * linux/arch/sparc64/mm/extable.c
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <asm/uaccess.h>
-
-extern const struct exception_table_entry __start___ex_table[];
-extern const struct exception_table_entry __stop___ex_table[];
-
-void sort_extable(struct exception_table_entry *start,
- struct exception_table_entry *finish)
-{
-}
-
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *start,
- const struct exception_table_entry *last,
- unsigned long value)
-{
- const struct exception_table_entry *walk;
-
- /* Single insn entries are encoded as:
- * word 1: insn address
- * word 2: fixup code address
- *
- * Range entries are encoded as:
- * word 1: first insn address
- * word 2: 0
- * word 3: last insn address + 4 bytes
- * word 4: fixup code address
- *
- * See asm/uaccess.h for more details.
- */
-
- /* 1. Try to find an exact match. */
- for (walk = start; walk <= last; walk++) {
- if (walk->fixup == 0) {
- /* A range entry, skip both parts. */
- walk++;
- continue;
- }
-
- if (walk->insn == value)
- return walk;
- }
-
- /* 2. Try to find a range match. */
- for (walk = start; walk <= (last - 1); walk++) {
- if (walk->fixup)
- continue;
-
- if (walk[0].insn <= value && walk[1].insn > value)
- return walk;
-
- walk++;
- }
-
- return NULL;
-}
-
-/* Special extable search, which handles ranges. Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
- const struct exception_table_entry *entry;
-
- entry = search_exception_tables(addr);
- if (!entry)
- return 0;
-
- /* Inside range? Fix g2 and return correct fixup */
- if (!entry->fixup) {
- *g2 = (addr - entry->insn) / 4;
- return (entry + 1)->fixup;
- }
-
- return entry->fixup;
-}
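
The deleted search_extable() walks the table twice because range entries occupy two consecutive slots. A minimal userspace model of that encoding and lookup (struct and function names here are illustrative, not kernel API):

        #include <stddef.h>

        struct ex_entry { unsigned long insn; unsigned long fixup; };

        /* Old sparc64 encoding: a single-insn entry is { insn, fixup };
         * a range entry spans two slots, { first_insn, 0 } followed by
         * { last_insn + 4, fixup }.
         */
        static const struct ex_entry *
        model_search(const struct ex_entry *start,
                     const struct ex_entry *last, unsigned long value)
        {
                const struct ex_entry *walk;

                /* Pass 1: exact match on single-insn entries. */
                for (walk = start; walk <= last; walk++) {
                        if (walk->fixup == 0) {
                                walk++;  /* skip both halves of a range */
                                continue;
                        }
                        if (walk->insn == value)
                                return walk;
                }

                /* Pass 2: range entries, using the paired slots. */
                for (walk = start; walk <= last - 1; walk++) {
                        if (walk->fixup)
                                continue;
                        if (walk[0].insn <= value && walk[1].insn > value)
                                return walk;
                        walk++;
                }
                return NULL;
        }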
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 52e9375288a9..31fbc67719a1 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -18,6 +18,7 @@
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/kprobes.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -31,8 +32,6 @@
#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
-extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
-
/*
* To debug kernel to catch accesses to certain virtual/physical addresses.
* Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
@@ -70,55 +69,9 @@ void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
: "memory");
}
-/* Nice, simple, prom library does all the sweating for us. ;) */
-unsigned long __init prom_probe_memory (void)
-{
- register struct linux_mlist_p1275 *mlist;
- register unsigned long bytes, base_paddr, tally;
- register int i;
-
- i = 0;
- mlist = *prom_meminfo()->p1275_available;
- bytes = tally = mlist->num_bytes;
- base_paddr = mlist->start_adr;
-
- sp_banks[0].base_addr = base_paddr;
- sp_banks[0].num_bytes = bytes;
-
- while (mlist->theres_more != (void *) 0) {
- i++;
- mlist = mlist->theres_more;
- bytes = mlist->num_bytes;
- tally += bytes;
- if (i >= SPARC_PHYS_BANKS-1) {
- printk ("The machine has more banks than "
- "this kernel can support\n"
- "Increase the SPARC_PHYS_BANKS "
- "setting (currently %d)\n",
- SPARC_PHYS_BANKS);
- i = SPARC_PHYS_BANKS-1;
- break;
- }
-
- sp_banks[i].base_addr = mlist->start_adr;
- sp_banks[i].num_bytes = mlist->num_bytes;
- }
-
- i++;
- sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
- sp_banks[i].num_bytes = 0;
-
- /* Now mask all bank sizes on a page boundary, it is all we can
- * use anyways.
- */
- for (i = 0; sp_banks[i].num_bytes != 0; i++)
- sp_banks[i].num_bytes &= PAGE_MASK;
-
- return tally;
-}
-
-static void unhandled_fault(unsigned long address, struct task_struct *tsk,
- struct pt_regs *regs)
+static void __kprobes unhandled_fault(unsigned long address,
+ struct task_struct *tsk,
+ struct pt_regs *regs)
{
if ((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL "
@@ -240,7 +193,6 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
unsigned int insn, unsigned long address)
{
- unsigned long g2;
unsigned char asi = ASI_P;
if ((!insn) && (regs->tstate & TSTATE_PRIV))
@@ -271,11 +223,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
}
}
- g2 = regs->u_regs[UREG_G2];
-
/* Is this in ex_table? */
if (regs->tstate & TSTATE_PRIV) {
- unsigned long fixup;
+ const struct exception_table_entry *entry;
if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
if (insn & 0x2000)
@@ -286,10 +236,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
/* Look in asi.h: All _S asis have LS bit set */
if ((asi & 0x1) &&
- (fixup = search_extables_range(regs->tpc, &g2))) {
- regs->tpc = fixup;
+ (entry = search_exception_tables(regs->tpc))) {
+ regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
- regs->u_regs[UREG_G2] = g2;
return;
}
} else {
@@ -304,7 +253,7 @@ cannot_handle:
unhandled_fault (address, current, regs);
}
-asmlinkage void do_sparc64_fault(struct pt_regs *regs)
+asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -459,7 +408,7 @@ good_area:
}
up_read(&mm->mmap_sem);
- goto fault_done;
+ return;
/*
* Something tried to access memory that isn't in our memory map..
@@ -471,8 +420,7 @@ bad_area:
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
-
- goto fault_done;
+ return;
/*
* We ran out of memory, or some other thing happened to us that made
@@ -503,9 +451,4 @@ do_sigbus:
/* Kernel mode? Handle exceptions or die */
if (regs->tstate & TSTATE_PRIV)
goto handle_kernel_fault;
-
-fault_done:
- /* These values are no longer needed, clear them. */
- set_thread_fault_code(0);
- current_thread_info()->fault_address = 0;
}
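
Net effect of the fault.c hunks: the privileged-fault path now uses the generic search_exception_tables() and no longer threads a range index through %g2. A minimal sketch of the redirect, assuming an illustrative fake_regs rather than the kernel's pt_regs:

        struct fake_regs { unsigned long tpc, tnpc; };

        /* Redirect a faulting privileged access to its fixup stub. */
        static int apply_fixup(struct fake_regs *regs, unsigned long fixup)
        {
                if (!fixup)
                        return 0;            /* no entry: unhandled fault */
                regs->tpc  = fixup;          /* resume at the fixup code */
                regs->tnpc = regs->tpc + 4;  /* next insn follows by 4 */
                return 1;
        }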
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 6b31f6117a95..112c316e7cd2 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -116,37 +116,6 @@ static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned
return 0;
}
-int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
-{
- int error = 0;
- pgd_t * dir;
- unsigned long beg = from;
- unsigned long end = from + size;
- struct mm_struct *mm = vma->vm_mm;
-
- prot = __pgprot(pg_iobits);
- offset -= from;
- dir = pgd_offset(mm, from);
- flush_cache_range(vma, beg, end);
-
- spin_lock(&mm->page_table_lock);
- while (from < end) {
- pud_t *pud = pud_alloc(mm, dir, from);
- error = -ENOMEM;
- if (!pud)
- break;
- error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
- if (error)
- break;
- from = (from + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- }
- flush_tlb_range(vma, beg, end);
- spin_unlock(&mm->page_table_lock);
-
- return error;
-}
-
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long pfn, unsigned long size, pgprot_t prot)
{
@@ -158,14 +127,16 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
int space = GET_IOSPACE(pfn);
unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+ /* See comment in mm/memory.c remap_pfn_range */
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+
prot = __pgprot(pg_iobits);
offset -= from;
dir = pgd_offset(mm, from);
flush_cache_range(vma, beg, end);
- spin_lock(&mm->page_table_lock);
while (from < end) {
- pud_t *pud = pud_alloc(current->mm, dir, from);
+ pud_t *pud = pud_alloc(mm, dir, from);
error = -ENOMEM;
if (!pud)
break;
@@ -175,8 +146,7 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
from = (from + PGDIR_SIZE) & PGDIR_MASK;
dir++;
}
- flush_tlb_range(vma, beg, end);
- spin_unlock(&mm->page_table_lock);
+ flush_tlb_range(vma, beg, end);
return error;
}
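
For context, a hedged sketch of how a driver of this era would call io_remap_pfn_range() from its mmap handler; MYDEV_PHYS is a made-up device base, and per the hunk above the helper now sets VM_IO | VM_RESERVED itself:

        /* Hypothetical driver mmap: map a physical I/O region into
         * userspace.  MYDEV_PHYS is a placeholder, not a real device.
         */
        static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
        {
                unsigned long size = vma->vm_end - vma->vm_start;

                return io_remap_pfn_range(vma, vma->vm_start,
                                          MYDEV_PHYS >> PAGE_SHIFT,
                                          size, vma->vm_page_prot);
        }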
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 3fbaf342a452..1e44ee26cee8 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -19,6 +19,9 @@
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
+#include <linux/kprobes.h>
+#include <linux/cache.h>
+#include <linux/sort.h>
#include <asm/head.h>
#include <asm/system.h>
@@ -39,24 +42,80 @@
extern void device_scan(void);
-struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+#define MAX_BANKS 32
-unsigned long *sparc64_valid_addr_bitmap;
+static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
+static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+static int pavail_ents __initdata;
+static int pavail_rescan_ents __initdata;
+
+static int cmp_p64(const void *a, const void *b)
+{
+ const struct linux_prom64_registers *x = a, *y = b;
+
+ if (x->phys_addr > y->phys_addr)
+ return 1;
+ if (x->phys_addr < y->phys_addr)
+ return -1;
+ return 0;
+}
+
+static void __init read_obp_memory(const char *property,
+ struct linux_prom64_registers *regs,
+ int *num_ents)
+{
+ int node = prom_finddevice("/memory");
+ int prop_size = prom_getproplen(node, property);
+ int ents, ret, i;
+
+ ents = prop_size / sizeof(struct linux_prom64_registers);
+ if (ents > MAX_BANKS) {
+ prom_printf("The machine has more %s property entries than "
+ "this kernel can support (%d).\n",
+ property, MAX_BANKS);
+ prom_halt();
+ }
+
+ ret = prom_getproperty(node, property, (char *) regs, prop_size);
+ if (ret == -1) {
+ prom_printf("Couldn't get %s property from /memory.\n");
+ prom_halt();
+ }
+
+ *num_ents = ents;
+
+ /* Sanitize what we got from the firmware, by page aligning
+ * everything.
+ */
+ for (i = 0; i < ents; i++) {
+ unsigned long base, size;
+
+ base = regs[i].phys_addr;
+ size = regs[i].reg_size;
+
+ size &= PAGE_MASK;
+ if (base & ~PAGE_MASK) {
+ unsigned long new_base = PAGE_ALIGN(base);
+
+ size -= new_base - base;
+ if ((long) size < 0L)
+ size = 0UL;
+ base = new_base;
+ }
+ regs[i].phys_addr = base;
+ regs[i].reg_size = size;
+ }
+ sort(regs, ents, sizeof(struct linux_prom64_registers),
+ cmp_p64, NULL);
+}
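
The sanitization step in read_obp_memory() can be modeled in isolation. A userspace sketch assuming sparc64's 8K page size (the MPAGE_* macro names are illustrative):

        #define MPAGE_SIZE 8192UL
        #define MPAGE_MASK (~(MPAGE_SIZE - 1))

        /* Trim a memory bank to whole pages: round the base up to a
         * page boundary and shave the size; a bank smaller than the
         * rounding slack collapses to zero, as in the code above.
         */
        static void page_align_bank(unsigned long *base, unsigned long *size)
        {
                *size &= MPAGE_MASK;
                if (*base & ~MPAGE_MASK) {
                        unsigned long new_base =
                                (*base + MPAGE_SIZE - 1) & MPAGE_MASK;

                        if (new_base - *base >= *size)
                                *size = 0UL;
                        else
                                *size -= new_base - *base;
                        *base = new_base;
                }
        }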
+
+unsigned long *sparc64_valid_addr_bitmap __read_mostly;
/* Ugly, but necessary... -DaveM */
-unsigned long phys_base;
-unsigned long kern_base;
-unsigned long kern_size;
-unsigned long pfn_base;
-
-/* This is even uglier. We have a problem where the kernel may not be
- * located at phys_base. However, initial __alloc_bootmem() calls need to
- * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
- * those page mappings wont work. Things are ok after inherit_prom_mappings
- * is called though. Dave says he'll clean this up some other time.
- * -- BenC
- */
-static unsigned long bootmap_base;
+unsigned long phys_base __read_mostly;
+unsigned long kern_base __read_mostly;
+unsigned long kern_size __read_mostly;
+unsigned long pfn_base __read_mostly;
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -72,7 +131,13 @@ extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;
-struct page *mem_map_zero;
+struct page *mem_map_zero __read_mostly;
+
+unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
+
+unsigned long sparc64_kern_pri_context __read_mostly;
+unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
+unsigned long sparc64_kern_sec_context __read_mostly;
int bigkernel = 0;
@@ -178,8 +243,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
: "g1", "g7");
}
-extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
-
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
struct page *page;
@@ -206,10 +269,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
put_cpu();
}
-
- if (get_thread_fault_code())
- __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
- address, pte, get_thread_fault_code());
}
void flush_dcache_page(struct page *page)
@@ -250,7 +309,7 @@ out:
put_cpu();
}
-void flush_icache_range(unsigned long start, unsigned long end)
+void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
/* Cheetah has coherent I-cache. */
if (tlb_type == spitfire) {
@@ -309,6 +368,11 @@ struct linux_prom_translation {
unsigned long data;
};
+/* Exported for kernel TLB miss handling in ktlb.S */
+struct linux_prom_translation prom_trans[512] __read_mostly;
+unsigned int prom_trans_ents __read_mostly;
+unsigned int swapper_pgd_zero __read_mostly;
+
extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
@@ -317,297 +381,162 @@ extern void register_prom_callbacks(void);
/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;
-void __init early_pgtable_allocfail(char *type)
-{
- prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
- prom_halt();
-}
-
-#define BASE_PAGE_SIZE 8192
-static pmd_t *prompmd;
-
/*
* Translate PROM's mapping we capture at boot time into physical address.
* The second parameter is only set from prom_callback() invocations.
*/
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
- pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
- pte_t *ptep;
- unsigned long base;
-
- if (pmd_none(*pmdp)) {
- if (error)
- *error = 1;
- return(0);
- }
- ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
- if (!pte_present(*ptep)) {
- if (error)
- *error = 1;
- return(0);
- }
- if (error) {
- *error = 0;
- return(pte_val(*ptep));
+ int i;
+
+ for (i = 0; i < prom_trans_ents; i++) {
+ struct linux_prom_translation *p = &prom_trans[i];
+
+ if (promva >= p->virt &&
+ promva < (p->virt + p->size)) {
+ unsigned long base = p->data & _PAGE_PADDR;
+
+ if (error)
+ *error = 0;
+ return base + (promva & (8192 - 1));
+ }
}
- base = pte_val(*ptep) & _PAGE_PADDR;
- return(base + (promva & (BASE_PAGE_SIZE - 1)));
+ if (error)
+ *error = 1;
+ return 0UL;
}
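
prom_virt_to_phys() is now a plain linear scan of the sorted prom_trans[] table. A self-contained model follows; the ~8191UL mask is a simplification of _PAGE_PADDR, which also clears high TTE flag bits:

        struct ptrans { unsigned long virt, size, data; };

        static unsigned long model_v2p(const struct ptrans *t,
                                       unsigned int n, unsigned long va,
                                       int *error)
        {
                unsigned int i;

                for (i = 0; i < n; i++) {
                        if (va >= t[i].virt &&
                            va < t[i].virt + t[i].size) {
                                if (error)
                                        *error = 0;
                                /* TTE physaddr + in-page offset (8K) */
                                return (t[i].data & ~8191UL) +
                                       (va & 8191UL);
                        }
                }
                if (error)
                        *error = 1;
                return 0UL;
        }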
-static void inherit_prom_mappings(void)
+/* The obp translations are saved based on 8k pagesize, since obp can
+ * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
+ * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte
+ * scheme (also, see rant in inherit_locked_prom_mappings()).
+ */
+static inline int in_obp_range(unsigned long vaddr)
{
- struct linux_prom_translation *trans;
- unsigned long phys_page, tte_vaddr, tte_data;
- void (*remap_func)(unsigned long, unsigned long, int);
- pmd_t *pmdp;
- pte_t *ptep;
- int node, n, i, tsz;
- extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];
+ return (vaddr >= LOW_OBP_ADDRESS &&
+ vaddr < HI_OBP_ADDRESS);
+}
+
+static int cmp_ptrans(const void *a, const void *b)
+{
+ const struct linux_prom_translation *x = a, *y = b;
+
+ if (x->virt > y->virt)
+ return 1;
+ if (x->virt < y->virt)
+ return -1;
+ return 0;
+}
+
+/* Read OBP translations property into 'prom_trans[]'. */
+static void __init read_obp_translations(void)
+{
+ int n, node, ents, first, last, i;
node = prom_finddevice("/virtual-memory");
n = prom_getproplen(node, "translations");
- if (n == 0 || n == -1) {
- prom_printf("Couldn't get translation property\n");
+ if (unlikely(n == 0 || n == -1)) {
+ prom_printf("prom_mappings: Couldn't get size.\n");
prom_halt();
}
- n += 5 * sizeof(struct linux_prom_translation);
- for (tsz = 1; tsz < n; tsz <<= 1)
- /* empty */;
- trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
- if (trans == NULL) {
- prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
+ if (unlikely(n > sizeof(prom_trans))) {
+ prom_printf("prom_mappings: Size %Zd is too big.\n", n);
prom_halt();
}
- memset(trans, 0, tsz);
- if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
- prom_printf("Couldn't get translation property\n");
+ if ((n = prom_getproperty(node, "translations",
+ (char *)&prom_trans[0],
+ sizeof(prom_trans))) == -1) {
+ prom_printf("prom_mappings: Couldn't get property.\n");
prom_halt();
}
- n = n / sizeof(*trans);
- /*
- * The obp translations are saved based on 8k pagesize, since obp can
- * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
- * ie obp range, are handled in entry.S and do not use the vpte scheme
- * (see rant in inherit_locked_prom_mappings()).
- */
-#define OBP_PMD_SIZE 2048
- prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
- if (prompmd == NULL)
- early_pgtable_allocfail("pmd");
- memset(prompmd, 0, OBP_PMD_SIZE);
- for (i = 0; i < n; i++) {
- unsigned long vaddr;
-
- if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
- for (vaddr = trans[i].virt;
- ((vaddr < trans[i].virt + trans[i].size) &&
- (vaddr < HI_OBP_ADDRESS));
- vaddr += BASE_PAGE_SIZE) {
- unsigned long val;
-
- pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
- if (pmd_none(*pmdp)) {
- ptep = __alloc_bootmem(BASE_PAGE_SIZE,
- BASE_PAGE_SIZE,
- bootmap_base);
- if (ptep == NULL)
- early_pgtable_allocfail("pte");
- memset(ptep, 0, BASE_PAGE_SIZE);
- pmd_set(pmdp, ptep);
- }
- ptep = (pte_t *)__pmd_page(*pmdp) +
- ((vaddr >> 13) & 0x3ff);
+ n = n / sizeof(struct linux_prom_translation);
- val = trans[i].data;
+ ents = n;
- /* Clear diag TTE bits. */
- if (tlb_type == spitfire)
- val &= ~0x0003fe0000000000UL;
+ sort(prom_trans, ents, sizeof(struct linux_prom_translation),
+ cmp_ptrans, NULL);
- set_pte_at(&init_mm, vaddr,
- ptep, __pte(val | _PAGE_MODIFIED));
- trans[i].data += BASE_PAGE_SIZE;
- }
- }
+ /* Now kick out all the non-OBP entries. */
+ for (i = 0; i < ents; i++) {
+ if (in_obp_range(prom_trans[i].virt))
+ break;
+ }
+ first = i;
+ for (; i < ents; i++) {
+ if (!in_obp_range(prom_trans[i].virt))
+ break;
}
- phys_page = __pa(prompmd);
- obp_iaddr_patch[0] |= (phys_page >> 10);
- obp_iaddr_patch[1] |= (phys_page & 0x3ff);
- flushi((long)&obp_iaddr_patch[0]);
- obp_daddr_patch[0] |= (phys_page >> 10);
- obp_daddr_patch[1] |= (phys_page & 0x3ff);
- flushi((long)&obp_daddr_patch[0]);
+ last = i;
- /* Now fixup OBP's idea about where we really are mapped. */
- prom_printf("Remapping the kernel... ");
+ for (i = 0; i < (last - first); i++) {
+ struct linux_prom_translation *src = &prom_trans[i + first];
+ struct linux_prom_translation *dest = &prom_trans[i];
- /* Spitfire Errata #32 workaround */
- /* NOTE: Using plain zero for the context value is
- * correct here, we are not using the Linux trap
- * tables yet so we should not use the special
- * UltraSPARC-III+ page size encodings yet.
- */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- switch (tlb_type) {
- default:
- case spitfire:
- phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
- break;
-
- case cheetah:
- case cheetah_plus:
- phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
- break;
- };
-
- phys_page &= _PAGE_PADDR;
- phys_page += ((unsigned long)&prom_boot_page -
- (unsigned long)KERNBASE);
+ *dest = *src;
+ }
+ for (; i < ents; i++) {
+ struct linux_prom_translation *dest = &prom_trans[i];
+ dest->virt = dest->size = dest->data = 0x0UL;
+ }
+
+ prom_trans_ents = last - first;
if (tlb_type == spitfire) {
- /* Lock this into i/d tlb entry 59 */
- __asm__ __volatile__(
- "stxa %%g0, [%2] %3\n\t"
- "stxa %0, [%1] %4\n\t"
- "membar #Sync\n\t"
- "flush %%g6\n\t"
- "stxa %%g0, [%2] %5\n\t"
- "stxa %0, [%1] %6\n\t"
- "membar #Sync\n\t"
- "flush %%g6"
- : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
- _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
- "r" (59 << 3), "r" (TLB_TAG_ACCESS),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
- "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
- : "memory");
- } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- /* Lock this into i/d tlb-0 entry 11 */
- __asm__ __volatile__(
- "stxa %%g0, [%2] %3\n\t"
- "stxa %0, [%1] %4\n\t"
- "membar #Sync\n\t"
- "flush %%g6\n\t"
- "stxa %%g0, [%2] %5\n\t"
- "stxa %0, [%1] %6\n\t"
- "membar #Sync\n\t"
- "flush %%g6"
- : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
- _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
- "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
- "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
- : "memory");
- } else {
- /* Implement me :-) */
- BUG();
+ /* Clear diag TTE bits. */
+ for (i = 0; i < prom_trans_ents; i++)
+ prom_trans[i].data &= ~0x0003fe0000000000UL;
}
+}
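
Because the table is sorted by virtual address, the OBP entries form one contiguous run [first, last), so the compaction in read_obp_translations() reduces to slide-and-zero. A standalone model, reusing the illustrative struct ptrans from the sketch above:

        static int compact_obp(struct ptrans *t, int ents,
                               int (*in_range)(unsigned long))
        {
                int i, first, last;

                for (i = 0; i < ents && !in_range(t[i].virt); i++)
                        ;
                first = i;
                for (; i < ents && in_range(t[i].virt); i++)
                        ;
                last = i;

                for (i = 0; i < last - first; i++)
                        t[i] = t[i + first];      /* slide run to front */
                for (; i < ents; i++)
                        t[i].virt = t[i].size = t[i].data = 0UL;

                return last - first;              /* new prom_trans_ents */
        }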
- tte_vaddr = (unsigned long) KERNBASE;
+static void __init remap_kernel(void)
+{
+ unsigned long phys_page, tte_vaddr, tte_data;
+ int tlb_ent = sparc64_highest_locked_tlbent();
- /* Spitfire Errata #32 workaround */
- /* NOTE: Using plain zero for the context value is
- * correct here, we are not using the Linux trap
- * tables yet so we should not use the special
- * UltraSPARC-III+ page size encodings yet.
- */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0),
- "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- if (tlb_type == spitfire)
- tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
- else
- tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
+ tte_vaddr = (unsigned long) KERNBASE;
+ phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+ tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
+ _PAGE_CP | _PAGE_CV | _PAGE_P |
+ _PAGE_L | _PAGE_W));
kern_locked_tte_data = tte_data;
- remap_func = (void *) ((unsigned long) &prom_remap -
- (unsigned long) &prom_boot_page);
-
-
- /* Spitfire Errata #32 workaround */
- /* NOTE: Using plain zero for the context value is
- * correct here, we are not using the Linux trap
- * tables yet so we should not use the special
- * UltraSPARC-III+ page size encodings yet.
- */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0),
- "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- remap_func((tlb_type == spitfire ?
- (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
- (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
- (unsigned long) KERNBASE,
- prom_get_mmu_ihandle());
-
- if (bigkernel)
- remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
- (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
-
- /* Flush out that temporary mapping. */
- spitfire_flush_dtlb_nucleus_page(0x0);
- spitfire_flush_itlb_nucleus_page(0x0);
-
- /* Now lock us back into the TLBs via OBP. */
- prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
- prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
+ /* Now lock us into the TLBs via OBP. */
+ prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
+ prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
if (bigkernel) {
- prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
- tte_vaddr + 0x400000);
- prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
- tte_vaddr + 0x400000);
+ tlb_ent -= 1;
+ prom_dtlb_load(tlb_ent,
+ tte_data + 0x400000,
+ tte_vaddr + 0x400000);
+ prom_itlb_load(tlb_ent,
+ tte_data + 0x400000,
+ tte_vaddr + 0x400000);
}
-
- /* Re-read translations property. */
- if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
- prom_printf("Couldn't get translation property\n");
- prom_halt();
+ sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+ if (tlb_type == cheetah_plus) {
+ sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
+ CTX_CHEETAH_PLUS_NUC);
+ sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
+ sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
}
- n = n / sizeof(*trans);
-
- for (i = 0; i < n; i++) {
- unsigned long vaddr = trans[i].virt;
- unsigned long size = trans[i].size;
-
- if (vaddr < 0xf0000000UL) {
- unsigned long avoid_start = (unsigned long) KERNBASE;
- unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);
-
- if (bigkernel)
- avoid_end += (4 * 1024 * 1024);
- if (vaddr < avoid_start) {
- unsigned long top = vaddr + size;
+}
- if (top > avoid_start)
- top = avoid_start;
- prom_unmap(top - vaddr, vaddr);
- }
- if ((vaddr + size) > avoid_end) {
- unsigned long bottom = vaddr;
- if (bottom < avoid_end)
- bottom = avoid_end;
- prom_unmap((vaddr + size) - bottom, bottom);
- }
- }
- }
+static void __init inherit_prom_mappings(void)
+{
+ read_obp_translations();
+ /* Now fixup OBP's idea about where we really are mapped. */
+ prom_printf("Remapping the kernel... ");
+ remap_kernel();
prom_printf("done.\n");
+ prom_printf("Registering callbacks... ");
register_prom_callbacks();
+ prom_printf("done.\n");
}
/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
@@ -791,8 +720,8 @@ void inherit_locked_prom_mappings(int save_p)
}
}
if (tlb_type == spitfire) {
- int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
- for (i = 0; i < high; i++) {
+ int high = sparc64_highest_unlocked_tlb_ent;
+ for (i = 0; i <= high; i++) {
unsigned long data;
/* Spitfire Errata #32 workaround */
@@ -880,9 +809,9 @@ void inherit_locked_prom_mappings(int save_p)
}
}
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
+ int high = sparc64_highest_unlocked_tlb_ent;
- for (i = 0; i < high; i++) {
+ for (i = 0; i <= high; i++) {
unsigned long data;
data = cheetah_get_ldtlb_data(i);
@@ -1275,14 +1204,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
int i;
#ifdef CONFIG_DEBUG_BOOTMEM
- prom_printf("bootmem_init: Scan sp_banks, ");
+ prom_printf("bootmem_init: Scan pavail, ");
#endif
bytes_avail = 0UL;
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
- end_of_phys_memory = sp_banks[i].base_addr +
- sp_banks[i].num_bytes;
- bytes_avail += sp_banks[i].num_bytes;
+ for (i = 0; i < pavail_ents; i++) {
+ end_of_phys_memory = pavail[i].phys_addr +
+ pavail[i].reg_size;
+ bytes_avail += pavail[i].reg_size;
if (cmdline_memory_size) {
if (bytes_avail > cmdline_memory_size) {
unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1290,12 +1219,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
bytes_avail -= slack;
end_of_phys_memory -= slack;
- sp_banks[i].num_bytes -= slack;
- if (sp_banks[i].num_bytes == 0) {
- sp_banks[i].base_addr = 0xdeadbeef;
+ pavail[i].reg_size -= slack;
+ if ((long)pavail[i].reg_size <= 0L) {
+ pavail[i].phys_addr = 0xdeadbeefUL;
+ pavail[i].reg_size = 0UL;
+ pavail_ents = i;
} else {
- sp_banks[i+1].num_bytes = 0;
- sp_banks[i+1].base_addr = 0xdeadbeef;
+ pavail[i+1].reg_size = 0Ul;
+ pavail[i+1].phys_addr = 0xdeadbeefUL;
+ pavail_ents = i + 1;
}
break;
}
@@ -1346,17 +1278,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
#endif
bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
- bootmap_base = bootmap_pfn << PAGE_SHIFT;
-
/* Now register the available physical memory with the
* allocator.
*/
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ for (i = 0; i < pavail_ents; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
- prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
- i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
+ prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
+ i, pavail[i].phys_addr, pavail[i].reg_size);
#endif
- free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+ free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -1397,121 +1327,167 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
return end_pfn;
}
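
The "mem=" trimming earlier in bootmem_init() walks the available banks, accumulating bytes until the command-line limit is exceeded, then shaves the slack off the current bank and drops the rest. A userspace model with made-up names:

        struct bank { unsigned long addr, size; };

        static int trim_to_limit(struct bank *b, int ents,
                                 unsigned long limit)
        {
                unsigned long total = 0UL;
                int i;

                for (i = 0; i < ents; i++) {
                        total += b[i].size;
                        if (total <= limit)
                                continue;
                        b[i].size -= total - limit;   /* shave the slack */
                        return b[i].size ? i + 1 : i; /* new bank count */
                }
                return ents;
        }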
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
+{
+ unsigned long vstart = PAGE_OFFSET + pstart;
+ unsigned long vend = PAGE_OFFSET + pend;
+ unsigned long alloc_bytes = 0UL;
+
+ if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
+ prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
+ vstart, vend);
+ prom_halt();
+ }
+
+ while (vstart < vend) {
+ unsigned long this_end, paddr = __pa(vstart);
+ pgd_t *pgd = pgd_offset_k(vstart);
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pud = pud_offset(pgd, vstart);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pud_populate(&init_mm, pud, new);
+ }
+
+ pmd = pmd_offset(pud, vstart);
+ if (!pmd_present(*pmd)) {
+ pte_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pmd_populate_kernel(&init_mm, pmd, new);
+ }
+
+ pte = pte_offset_kernel(pmd, vstart);
+ this_end = (vstart + PMD_SIZE) & PMD_MASK;
+ if (this_end > vend)
+ this_end = vend;
+
+ while (vstart < this_end) {
+ pte_val(*pte) = (paddr | pgprot_val(prot));
+
+ vstart += PAGE_SIZE;
+ paddr += PAGE_SIZE;
+ pte++;
+ }
+ }
+
+ return alloc_bytes;
+}
+
+static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+static int pall_ents __initdata;
+
+extern unsigned int kvmap_linear_patch[1];
+
+static void __init kernel_physical_mapping_init(void)
+{
+ unsigned long i, mem_alloced = 0UL;
+
+ read_obp_memory("reg", &pall[0], &pall_ents);
+
+ for (i = 0; i < pall_ents; i++) {
+ unsigned long phys_start, phys_end;
+
+ phys_start = pall[i].phys_addr;
+ phys_end = phys_start + pall[i].reg_size;
+ mem_alloced += kernel_map_range(phys_start, phys_end,
+ PAGE_KERNEL);
+ }
+
+ printk("Allocated %ld bytes for kernel page tables.\n",
+ mem_alloced);
+
+ kvmap_linear_patch[0] = 0x01000000; /* nop */
+ flushi(&kvmap_linear_patch[0]);
+
+ __flush_tlb_all();
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+ kernel_map_range(phys_start, phys_end,
+ (enable ? PAGE_KERNEL : __pgprot(0)));
+
+ /* we should perform an IPI and flush all tlbs,
+ * but that can deadlock->flush only current cpu.
+ */
+ __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
+ PAGE_OFFSET + phys_end);
+}
+#endif
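
Intended use of the DEBUG_PAGEALLOC machinery above: the page allocator unmaps pages as they are freed (enable=0 installs __pgprot(0)) so a stale access through the linear mapping faults at once, and remaps them with PAGE_KERNEL on allocation. A userspace model of those semantics; the pte array and KERNEL_PROT value are illustrative:

        #define KERNEL_PROT 0x1UL   /* stand-in for PAGE_KERNEL bits */

        static void model_map_pages(unsigned long *ptes,
                                    unsigned long first_pfn,
                                    int numpages, int enable)
        {
                int i;

                for (i = 0; i < numpages; i++)
                        ptes[first_pfn + i] = enable ? KERNEL_PROT : 0UL;
                /* the kernel then flushes only the local CPU's TLB for
                 * the range, since a cross-call here could deadlock.
                 */
        }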
+
+unsigned long __init find_ecache_flush_span(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < pavail_ents; i++) {
+ if (pavail[i].reg_size >= size)
+ return pavail[i].phys_addr;
+ }
+
+ return ~0UL;
+}
+
/* paging_init() sets up the page tables */
extern void cheetah_ecache_flush_init(void);
static unsigned long last_valid_pfn;
+pgd_t swapper_pg_dir[2048];
void __init paging_init(void)
{
- extern pmd_t swapper_pmd_dir[1024];
- extern unsigned int sparc64_vpte_patchme1[1];
- extern unsigned int sparc64_vpte_patchme2[1];
- unsigned long alias_base = kern_base + PAGE_OFFSET;
- unsigned long second_alias_page = 0;
- unsigned long pt, flags, end_pfn, pages_avail;
- unsigned long shift = alias_base - ((unsigned long)KERNBASE);
- unsigned long real_end;
+ unsigned long end_pfn, pages_avail, shift;
+ unsigned long real_end, i;
+
+ /* Find available physical memory... */
+ read_obp_memory("available", &pavail[0], &pavail_ents);
+
+ phys_base = 0xffffffffffffffffUL;
+ for (i = 0; i < pavail_ents; i++)
+ phys_base = min(phys_base, pavail[i].phys_addr);
+
+ pfn_base = phys_base >> PAGE_SHIFT;
+
+ kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+ kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
set_bit(0, mmu_context_bmap);
+ shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
+
real_end = (unsigned long)_end;
if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
bigkernel = 1;
-#ifdef CONFIG_BLK_DEV_INITRD
- if (sparc_ramdisk_image || sparc_ramdisk_image64)
- real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
-#endif
-
- /* We assume physical memory starts at some 4mb multiple,
- * if this were not true we wouldn't boot up to this point
- * anyways.
- */
- pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
- pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
- local_irq_save(flags);
- if (tlb_type == spitfire) {
- __asm__ __volatile__(
- " stxa %1, [%0] %3\n"
- " stxa %2, [%5] %4\n"
- " membar #Sync\n"
- " flush %%g6\n"
- " nop\n"
- " nop\n"
- " nop\n"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
- : "memory");
- if (real_end >= KERNBASE + 0x340000) {
- second_alias_page = alias_base + 0x400000;
- __asm__ __volatile__(
- " stxa %1, [%0] %3\n"
- " stxa %2, [%5] %4\n"
- " membar #Sync\n"
- " flush %%g6\n"
- " nop\n"
- " nop\n"
- " nop\n"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
- : "memory");
- }
- } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- __asm__ __volatile__(
- " stxa %1, [%0] %3\n"
- " stxa %2, [%5] %4\n"
- " membar #Sync\n"
- " flush %%g6\n"
- " nop\n"
- " nop\n"
- " nop\n"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
- : "memory");
- if (real_end >= KERNBASE + 0x340000) {
- second_alias_page = alias_base + 0x400000;
- __asm__ __volatile__(
- " stxa %1, [%0] %3\n"
- " stxa %2, [%5] %4\n"
- " membar #Sync\n"
- " flush %%g6\n"
- " nop\n"
- " nop\n"
- " nop\n"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
- : "memory");
- }
+ if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
+ prom_printf("paging_init: Kernel > 8MB, too large.\n");
+ prom_halt();
}
- local_irq_restore(flags);
-
- /* Now set kernel pgd to upper alias so physical page computations
+
+ /* Set kernel pgd to upper alias so physical page computations
* work.
*/
init_mm.pgd += ((shift) / (sizeof(pgd_t)));
- memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
+ memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
/* Now can init the kernel/bad page tables. */
pud_set(pud_offset(&swapper_pg_dir[0], 0),
- swapper_pmd_dir + (shift / sizeof(pgd_t)));
+ swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
- sparc64_vpte_patchme1[0] |=
- (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
- sparc64_vpte_patchme2[0] |=
- (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
- flushi((long)&sparc64_vpte_patchme1[0]);
+ swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
- /* Setup bootmem... */
- pages_avail = 0;
- last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
-
- /* Inherit non-locked OBP mappings. */
inherit_prom_mappings();
/* Ok, we can use our TLB miss and window trap handlers safely.
@@ -1526,13 +1502,16 @@ void __init paging_init(void)
inherit_locked_prom_mappings(1);
- /* We only created DTLB mapping of this stuff. */
- spitfire_flush_dtlb_nucleus_page(alias_base);
- if (second_alias_page)
- spitfire_flush_dtlb_nucleus_page(second_alias_page);
-
__flush_tlb_all();
+ /* Setup bootmem... */
+ pages_avail = 0;
+ last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ kernel_physical_mapping_init();
+#endif
+
{
unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES];
@@ -1553,128 +1532,35 @@ void __init paging_init(void)
device_scan();
}
-/* Ok, it seems that the prom can allocate some more memory chunks
- * as a side effect of some prom calls we perform during the
- * boot sequence. My most likely theory is that it is from the
- * prom_set_traptable() call, and OBP is allocating a scratchpad
- * for saving client program register state etc.
- */
-static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
-{
- int swapi = 0;
- int i, mitr;
- unsigned long tmpaddr, tmpsize;
- unsigned long lowest;
-
- for (i = 0; thislist[i].theres_more != 0; i++) {
- lowest = thislist[i].start_adr;
- for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
- if (thislist[mitr].start_adr < lowest) {
- lowest = thislist[mitr].start_adr;
- swapi = mitr;
- }
- if (lowest == thislist[i].start_adr)
- continue;
- tmpaddr = thislist[swapi].start_adr;
- tmpsize = thislist[swapi].num_bytes;
- for (mitr = swapi; mitr > i; mitr--) {
- thislist[mitr].start_adr = thislist[mitr-1].start_adr;
- thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
- }
- thislist[i].start_adr = tmpaddr;
- thislist[i].num_bytes = tmpsize;
- }
-}
-
-void __init rescan_sp_banks(void)
-{
- struct linux_prom64_registers memlist[64];
- struct linux_mlist_p1275 avail[64], *mlist;
- unsigned long bytes, base_paddr;
- int num_regs, node = prom_finddevice("/memory");
- int i;
-
- num_regs = prom_getproperty(node, "available",
- (char *) memlist, sizeof(memlist));
- num_regs = (num_regs / sizeof(struct linux_prom64_registers));
- for (i = 0; i < num_regs; i++) {
- avail[i].start_adr = memlist[i].phys_addr;
- avail[i].num_bytes = memlist[i].reg_size;
- avail[i].theres_more = &avail[i + 1];
- }
- avail[i - 1].theres_more = NULL;
- sort_memlist(avail);
-
- mlist = &avail[0];
- i = 0;
- bytes = mlist->num_bytes;
- base_paddr = mlist->start_adr;
-
- sp_banks[0].base_addr = base_paddr;
- sp_banks[0].num_bytes = bytes;
-
- while (mlist->theres_more != NULL){
- i++;
- mlist = mlist->theres_more;
- bytes = mlist->num_bytes;
- if (i >= SPARC_PHYS_BANKS-1) {
- printk ("The machine has more banks than "
- "this kernel can support\n"
- "Increase the SPARC_PHYS_BANKS "
- "setting (currently %d)\n",
- SPARC_PHYS_BANKS);
- i = SPARC_PHYS_BANKS-1;
- break;
- }
-
- sp_banks[i].base_addr = mlist->start_adr;
- sp_banks[i].num_bytes = mlist->num_bytes;
- }
-
- i++;
- sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
- sp_banks[i].num_bytes = 0;
-
- for (i = 0; sp_banks[i].num_bytes != 0; i++)
- sp_banks[i].num_bytes &= PAGE_MASK;
-}
-
static void __init taint_real_pages(void)
{
- struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
int i;
- for (i = 0; i < SPARC_PHYS_BANKS; i++) {
- saved_sp_banks[i].base_addr =
- sp_banks[i].base_addr;
- saved_sp_banks[i].num_bytes =
- sp_banks[i].num_bytes;
- }
-
- rescan_sp_banks();
+ read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
- /* Find changes discovered in the sp_bank rescan and
+ /* Find changes discovered in the physmem available rescan and
* reserve the lost portions in the bootmem maps.
*/
- for (i = 0; saved_sp_banks[i].num_bytes; i++) {
+ for (i = 0; i < pavail_ents; i++) {
unsigned long old_start, old_end;
- old_start = saved_sp_banks[i].base_addr;
+ old_start = pavail[i].phys_addr;
old_end = old_start +
- saved_sp_banks[i].num_bytes;
+ pavail[i].reg_size;
while (old_start < old_end) {
int n;
- for (n = 0; sp_banks[n].num_bytes; n++) {
+ for (n = 0; n < pavail_rescan_ents; n++) {
unsigned long new_start, new_end;
- new_start = sp_banks[n].base_addr;
- new_end = new_start + sp_banks[n].num_bytes;
+ new_start = pavail_rescan[n].phys_addr;
+ new_end = new_start +
+ pavail_rescan[n].reg_size;
if (new_start <= old_start &&
new_end >= (old_start + PAGE_SIZE)) {
- set_bit (old_start >> 22,
- sparc64_valid_addr_bitmap);
+ set_bit(old_start >> 22,
+ sparc64_valid_addr_bitmap);
goto do_next_page;
}
}
@@ -1694,8 +1580,7 @@ void __init mem_init(void)
i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
i += 1;
- sparc64_valid_addr_bitmap = (unsigned long *)
- __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
+ sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
if (sparc64_valid_addr_bitmap == NULL) {
prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
prom_halt();
@@ -1748,7 +1633,7 @@ void __init mem_init(void)
cheetah_ecache_flush_init();
}
-void free_initmem (void)
+void free_initmem(void)
{
unsigned long addr, initend;
diff --git a/arch/sparc64/mm/tlb.c b/arch/sparc64/mm/tlb.c
index 90ca99d0b89c..8b104be4662b 100644
--- a/arch/sparc64/mm/tlb.c
+++ b/arch/sparc64/mm/tlb.c
@@ -18,8 +18,7 @@
/* Heavily inspired by the ppc64 code. */
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) =
- { NULL, 0, 0, 0, 0, 0, { 0 }, { NULL }, };
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
void flush_tlb_pending(void)
{
@@ -72,7 +71,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t
no_cache_flush:
- if (mp->tlb_frozen)
+ if (mp->fullmm)
return;
nr = mp->tlb_nr;
@@ -97,7 +96,7 @@ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long
unsigned long nr = mp->tlb_nr;
long s = start, e = end, vpte_base;
- if (mp->tlb_frozen)
+ if (mp->fullmm)
return;
/* If start is greater than end, that is a real problem. */
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index 363770893797..e4c9151fa116 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -10,6 +10,7 @@
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
+#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
@@ -45,6 +46,8 @@ __flush_tlb_mm: /* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
nop
nop
nop
+ nop
+ nop
.align 32
.globl __flush_tlb_pending
@@ -73,6 +76,9 @@ __flush_tlb_pending:
retl
wrpr %g7, 0x0, %pstate
nop
+ nop
+ nop
+ nop
.align 32
.globl __flush_tlb_kernel_range
@@ -113,6 +119,7 @@ __spitfire_flush_tlb_mm_slow:
#else
#error unsupported PAGE_SIZE
#endif
+ .section .kprobes.text, "ax"
.align 32
.globl __flush_icache_page
__flush_icache_page: /* %o0 = phys_page */
@@ -137,42 +144,29 @@ __flush_icache_page: /* %o0 = phys_page */
#define DTAG_MASK 0x3
+ /* This routine is Spitfire specific so the hardcoded
+ * D-cache size and line-size are OK.
+ */
.align 64
.globl __flush_dcache_page
__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
sethi %uhi(PAGE_OFFSET), %g1
sllx %g1, 32, %g1
- sub %o0, %g1, %o0
- clr %o4
- srlx %o0, 11, %o0
- sethi %hi(1 << 14), %o2
-1: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group
- add %o4, (1 << 5), %o4 ! IEU0
- ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group
- add %o4, (1 << 5), %o4 ! IEU0
- ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available
- add %o4, (1 << 5), %o4 ! IEU0
- andn %o3, DTAG_MASK, %o3 ! IEU1
- ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group
- add %o4, (1 << 5), %o4 ! IEU0
- andn %g1, DTAG_MASK, %g1 ! IEU1
- cmp %o0, %o3 ! IEU1 Group
- be,a,pn %xcc, dflush1 ! CTI
- sub %o4, (4 << 5), %o4 ! IEU0 (Group)
- cmp %o0, %g1 ! IEU1 Group
- andn %g2, DTAG_MASK, %g2 ! IEU0
- be,a,pn %xcc, dflush2 ! CTI
- sub %o4, (3 << 5), %o4 ! IEU0 (Group)
- cmp %o0, %g2 ! IEU1 Group
- andn %g3, DTAG_MASK, %g3 ! IEU0
- be,a,pn %xcc, dflush3 ! CTI
- sub %o4, (2 << 5), %o4 ! IEU0 (Group)
- cmp %o0, %g3 ! IEU1 Group
- be,a,pn %xcc, dflush4 ! CTI
- sub %o4, (1 << 5), %o4 ! IEU0
-2: cmp %o4, %o2 ! IEU1 Group
- bne,pt %xcc, 1b ! CTI
- nop ! IEU0
+ sub %o0, %g1, %o0 ! physical address
+ srlx %o0, 11, %o0 ! make D-cache TAG
+ sethi %hi(1 << 14), %o2 ! D-cache size
+ sub %o2, (1 << 5), %o2 ! D-cache line size
+1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG
+ andcc %o3, DTAG_MASK, %g0 ! Valid?
+ be,pn %xcc, 2f ! Nope, branch
+ andn %o3, DTAG_MASK, %o3 ! Clear valid bits
+ cmp %o3, %o0 ! TAG match?
+ bne,pt %xcc, 2f ! Nope, branch
+ nop
+ stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG
+ membar #Sync
+2: brnz,pt %o2, 1b
+ sub %o2, (1 << 5), %o2 ! D-cache line size
/* The I-cache does not snoop local stores so we
* better flush that too when necessary.
@@ -182,58 +176,12 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
retl
nop
-dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG
- add %o4, (1 << 5), %o4
-dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG
- add %o4, (1 << 5), %o4
-dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG
- add %o4, (1 << 5), %o4
-dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG
- add %o4, (1 << 5), %o4
- membar #Sync
- ba,pt %xcc, 2b
- nop
#endif /* DCACHE_ALIASING_POSSIBLE */
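
The rewritten loop above trades the old unrolled four-way compare for a simple top-down scan of Spitfire's 16K direct-mapped D-cache, invalidating any valid line whose tag matches the page. A C model of that logic; dtags[] stands in for the ASI_DCACHE_TAG reads:

        #define DCACHE_SIZE (1 << 14)    /* 16K, Spitfire-specific */
        #define DCACHE_LINE (1 << 5)     /* 32-byte lines */
        #define DTAG_VALID  0x3UL        /* DTAG_MASK in the asm */

        static void model_dflush(unsigned long tag,
                                 unsigned long dtags[DCACHE_SIZE / DCACHE_LINE])
        {
                long off;

                for (off = DCACHE_SIZE - DCACHE_LINE; off >= 0;
                     off -= DCACHE_LINE) {
                        unsigned long t = dtags[off / DCACHE_LINE];

                        if (!(t & DTAG_VALID))        /* line not valid */
                                continue;
                        if ((t & ~DTAG_VALID) == tag)
                                dtags[off / DCACHE_LINE] = 0UL; /* kill it */
                }
        }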
- .align 32
-__prefill_dtlb:
- rdpr %pstate, %g7
- wrpr %g7, PSTATE_IE, %pstate
- mov TLB_TAG_ACCESS, %g1
- stxa %o5, [%g1] ASI_DMMU
- stxa %o2, [%g0] ASI_DTLB_DATA_IN
- flush %g6
- retl
- wrpr %g7, %pstate
-__prefill_itlb:
- rdpr %pstate, %g7
- wrpr %g7, PSTATE_IE, %pstate
- mov TLB_TAG_ACCESS, %g1
- stxa %o5, [%g1] ASI_IMMU
- stxa %o2, [%g0] ASI_ITLB_DATA_IN
- flush %g6
- retl
- wrpr %g7, %pstate
-
- .globl __update_mmu_cache
-__update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
- srlx %o1, PAGE_SHIFT, %o1
- andcc %o3, FAULT_CODE_DTLB, %g0
- sllx %o1, PAGE_SHIFT, %o5
- bne,pt %xcc, __prefill_dtlb
- or %o5, %o0, %o5
- ba,a,pt %xcc, __prefill_itlb
-
- /* Cheetah specific versions, patched at boot time.
- *
- * This writes of the PRIMARY_CONTEXT register in this file are
- * safe even on Cheetah+ and later wrt. the page size fields.
- * The nucleus page size fields do not matter because we make
- * no data references, and these instructions execute out of a
- * locked I-TLB entry sitting in the fully assosciative I-TLB.
- * This sequence should also never trap.
- */
-__cheetah_flush_tlb_mm: /* 15 insns */
+ .previous
+
+ /* Cheetah specific versions, patched at boot time. */
+__cheetah_flush_tlb_mm: /* 18 insns */
rdpr %pstate, %g7
andn %g7, PSTATE_IE, %g2
wrpr %g2, 0x0, %pstate
@@ -241,6 +189,9 @@ __cheetah_flush_tlb_mm: /* 15 insns */
mov PRIMARY_CONTEXT, %o2
mov 0x40, %g3
ldxa [%o2] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1
+ sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1
+ or %o0, %o1, %o0 /* Preserve nucleus page size fields */
stxa %o0, [%o2] ASI_DMMU
stxa %g0, [%g3] ASI_DMMU_DEMAP
stxa %g0, [%g3] ASI_IMMU_DEMAP
@@ -250,7 +201,7 @@ __cheetah_flush_tlb_mm: /* 15 insns */
retl
wrpr %g7, 0x0, %pstate
-__cheetah_flush_tlb_pending: /* 23 insns */
+__cheetah_flush_tlb_pending: /* 26 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
sllx %o1, 3, %o1
@@ -259,6 +210,9 @@ __cheetah_flush_tlb_pending: /* 23 insns */
wrpr %g0, 1, %tl
mov PRIMARY_CONTEXT, %o4
ldxa [%o4] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
+ sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
+ or %o0, %o3, %o0 /* Preserve nucleus page size fields */
stxa %o0, [%o4] ASI_DMMU
1: sub %o1, (1 << 3), %o1
ldx [%o2 + %o1], %o3
@@ -277,7 +231,7 @@ __cheetah_flush_tlb_pending: /* 23 insns */
wrpr %g7, 0x0, %pstate
#ifdef DCACHE_ALIASING_POSSIBLE
-flush_dcpage_cheetah: /* 11 insns */
+__cheetah_flush_dcache_page: /* 11 insns */
sethi %uhi(PAGE_OFFSET), %g1
sllx %g1, 32, %g1
sub %o0, %g1, %o0
@@ -311,20 +265,20 @@ cheetah_patch_cachetlbops:
sethi %hi(__cheetah_flush_tlb_mm), %o1
or %o1, %lo(__cheetah_flush_tlb_mm), %o1
call cheetah_patch_one
- mov 15, %o2
+ mov 18, %o2
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__cheetah_flush_tlb_pending), %o1
or %o1, %lo(__cheetah_flush_tlb_pending), %o1
call cheetah_patch_one
- mov 23, %o2
+ mov 26, %o2
#ifdef DCACHE_ALIASING_POSSIBLE
sethi %hi(__flush_dcache_page), %o0
or %o0, %lo(__flush_dcache_page), %o0
- sethi %hi(flush_dcpage_cheetah), %o1
- or %o1, %lo(flush_dcpage_cheetah), %o1
+ sethi %hi(__cheetah_flush_dcache_page), %o1
+ or %o1, %lo(__cheetah_flush_dcache_page), %o1
call cheetah_patch_one
mov 11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */
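
The insn counts passed to cheetah_patch_one() must match the patched routines exactly, which is why the counts above move from 15 to 18 and 23 to 26 after the three added nucleus-page-size instructions. A sketch of the patcher's presumed copy-and-flush loop (its signature is inferred from the call sites, not taken from this patch):

        static void model_patch_one(unsigned int *dst,
                                    const unsigned int *src, int insns)
        {
                int i;

                for (i = 0; i < insns; i++) {
                        dst[i] = src[i];
                        /* real code must also flush the I-cache for
                         * each patched word (flushi on sparc64).
                         */
                }
        }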
@@ -352,9 +306,12 @@ cheetah_patch_cachetlbops:
.globl xcall_flush_tlb_mm
xcall_flush_tlb_mm:
mov PRIMARY_CONTEXT, %g2
- mov 0x40, %g4
ldxa [%g2] ASI_DMMU, %g3
+ srlx %g3, CTX_PGSZ1_NUC_SHIFT, %g4
+ sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
+ or %g5, %g4, %g5 /* Preserve nucleus page size fields */
stxa %g5, [%g2] ASI_DMMU
+ mov 0x40, %g4
stxa %g0, [%g4] ASI_DMMU_DEMAP
stxa %g0, [%g4] ASI_IMMU_DEMAP
stxa %g3, [%g2] ASI_DMMU
@@ -366,6 +323,10 @@ xcall_flush_tlb_pending:
sllx %g1, 3, %g1
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
+ srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
+ sllx %g4, CTX_PGSZ1_NUC_SHIFT, %g4
+ or %g5, %g4, %g5
+ mov PRIMARY_CONTEXT, %g4
stxa %g5, [%g4] ASI_DMMU
1: sub %g1, (1 << 3), %g1
ldx [%g7 + %g1], %g5
@@ -492,22 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
nop
nop
- .globl xcall_promstop
-xcall_promstop:
- rdpr %pstate, %g2
- wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
- rdpr %pil, %g2
- wrpr %g0, 15, %pil
- sethi %hi(109f), %g7
- b,pt %xcc, etrap_irq
-109: or %g7, %lo(109b), %g7
- flushw
- call prom_stopself
- nop
- /* We should not return, just spin if we do... */
-1: b,a,pt %xcc, 1b
- nop
-
.data
errata32_hwbug: