author     Arnd Bergmann <arnd@arndb.de>    2018-03-07 21:36:19 +0100
committer  Arnd Bergmann <arnd@arndb.de>    2018-03-09 23:20:00 +0100
commit     553b085c2075f6a4a2591108554f830fa61e881f (patch)
tree       68d63911f2c12e0fb9fa23498df9300442a88f92 /arch/m32r/mm
parent     arch: remove frv port (diff)
arch: remove m32r port
The Mitsubishi/Renesas m32r architecture has been around for many years, but
the Linux port has been obsolete for a very long time as well, with the last
significant updates done for linux-2.6.14.

While some m32r microcontrollers are still being marketed by Renesas, those
are apparently no longer possible to support, mainly due to the lack of an
external memory interface.

Hirokazu Takata was the maintainer until the architecture got marked
Orphaned in 2014.

Link: http://www.linux-m32r.org/
Link: https://www.renesas.com/en-eu/products/microcontrollers-microprocessors/m32r.html
Cc: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'arch/m32r/mm')
-rw-r--r--  arch/m32r/mm/Makefile         |   13
-rw-r--r--  arch/m32r/mm/cache.c          |   89
-rw-r--r--  arch/m32r/mm/discontig.c      |  163
-rw-r--r--  arch/m32r/mm/extable.c        |   20
-rw-r--r--  arch/m32r/mm/fault-nommu.c    |  134
-rw-r--r--  arch/m32r/mm/fault.c          |  550
-rw-r--r--  arch/m32r/mm/init.c           |  152
-rw-r--r--  arch/m32r/mm/ioremap-nommu.c  |   52
-rw-r--r--  arch/m32r/mm/ioremap.c        |  111
-rw-r--r--  arch/m32r/mm/mmu.S            |  355
-rw-r--r--  arch/m32r/mm/page.S           |   82
11 files changed, 0 insertions(+), 1721 deletions(-)
diff --git a/arch/m32r/mm/Makefile b/arch/m32r/mm/Makefile
deleted file mode 100644
index cb20d90c51d1..000000000000
--- a/arch/m32r/mm/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Linux M32R-specific parts of the memory manager.
-#
-
-ifdef CONFIG_MMU
-obj-y := init.o fault.o mmu.o extable.o ioremap.o cache.o page.o
-else
-obj-y := init.o fault-nommu.o mmu.o extable.o ioremap-nommu.o cache.o page.o
-endif
-
-obj-$(CONFIG_DISCONTIGMEM) += discontig.o
-
diff --git a/arch/m32r/mm/cache.c b/arch/m32r/mm/cache.c
deleted file mode 100644
index 0d1ae744e56f..000000000000
--- a/arch/m32r/mm/cache.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/m32r/mm/cache.c
- *
- * Copyright (C) 2002-2005 Hirokazu Takata, Hayato Fujiwara
- */
-
-#include <asm/pgtable.h>
-
-#undef MCCR
-
-#if defined(CONFIG_CHIP_XNUX2) || defined(CONFIG_CHIP_M32700) \
- || defined(CONFIG_CHIP_VDEC2) || defined(CONFIG_CHIP_OPSP)
-/* Cache Control Register */
-#define MCCR ((volatile unsigned long*)0xfffffffc)
-#define MCCR_CC (1UL << 7) /* Cache mode modify bit */
-#define MCCR_IIV (1UL << 6) /* I-cache invalidate */
-#define MCCR_DIV (1UL << 5) /* D-cache invalidate */
-#define MCCR_DCB (1UL << 4) /* D-cache copy back */
-#define MCCR_ICM (1UL << 1) /* I-cache mode [0:off,1:on] */
-#define MCCR_DCM (1UL << 0) /* D-cache mode [0:off,1:on] */
-#define MCCR_ICACHE_INV (MCCR_CC|MCCR_IIV)
-#define MCCR_DCACHE_CB (MCCR_CC|MCCR_DCB)
-#define MCCR_DCACHE_CBINV (MCCR_CC|MCCR_DIV|MCCR_DCB)
-#define CHECK_MCCR(mccr) (mccr = *MCCR)
-#elif defined(CONFIG_CHIP_M32102)
-#define MCCR ((volatile unsigned char*)0xfffffffe)
-#define MCCR_IIV (1UL << 0) /* I-cache invalidate */
-#define MCCR_ICACHE_INV MCCR_IIV
-#elif defined(CONFIG_CHIP_M32104)
-#define MCCR ((volatile unsigned short*)0xfffffffe)
-#define MCCR_IIV (1UL << 8) /* I-cache invalidate */
-#define MCCR_DIV (1UL << 9) /* D-cache invalidate */
-#define MCCR_DCB (1UL << 10) /* D-cache copy back */
-#define MCCR_ICM (1UL << 0) /* I-cache mode [0:off,1:on] */
-#define MCCR_DCM (1UL << 1) /* D-cache mode [0:off,1:on] */
-#define MCCR_ICACHE_INV MCCR_IIV
-#define MCCR_DCACHE_CB MCCR_DCB
-#define MCCR_DCACHE_CBINV (MCCR_DIV|MCCR_DCB)
-#endif
-
-#ifndef MCCR
-#error Unknown cache type.
-#endif
-
-
-/* Copy back and invalidate D-cache and invalidate I-cache all */
-void _flush_cache_all(void)
-{
-#if defined(CONFIG_CHIP_M32102)
- unsigned char mccr;
- *MCCR = MCCR_ICACHE_INV;
-#elif defined(CONFIG_CHIP_M32104)
- unsigned short mccr;
-
- /* Copyback and invalidate D-cache */
- /* Invalidate I-cache */
- *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CBINV);
-#else
- unsigned long mccr;
-
- /* Copyback and invalidate D-cache */
- /* Invalidate I-cache */
- *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CBINV;
-#endif
- while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
-}
-
-/* Copy back D-cache and invalidate I-cache all */
-void _flush_cache_copyback_all(void)
-{
-#if defined(CONFIG_CHIP_M32102)
- unsigned char mccr;
- *MCCR = MCCR_ICACHE_INV;
-#elif defined(CONFIG_CHIP_M32104)
- unsigned short mccr;
-
- /* Copyback and invalidate D-cache */
- /* Invalidate I-cache */
- *MCCR |= (MCCR_ICACHE_INV | MCCR_DCACHE_CB);
-#else
- unsigned long mccr;
-
- /* Copyback D-cache */
- /* Invalidate I-cache */
- *MCCR = MCCR_ICACHE_INV | MCCR_DCACHE_CB;
-#endif
- while ((mccr = *MCCR) & MCCR_IIV); /* loop while invalidating... */
-}
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
deleted file mode 100644
index eb8e7966dcaf..000000000000
--- a/arch/m32r/mm/discontig.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/m32r/mm/discontig.c
- *
- * Discontig memory support
- *
- * Copyright (c) 2003 Hitoshi Yamamoto
- */
-
-#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/mmzone.h>
-#include <linux/initrd.h>
-#include <linux/nodemask.h>
-#include <linux/module.h>
-#include <linux/pfn.h>
-
-#include <asm/setup.h>
-
-extern char _end[];
-
-struct pglist_data *node_data[MAX_NUMNODES];
-EXPORT_SYMBOL(node_data);
-
-pg_data_t m32r_node_data[MAX_NUMNODES];
-
-/* Memory profile */
-typedef struct {
- unsigned long start_pfn;
- unsigned long pages;
- unsigned long holes;
- unsigned long free_pfn;
-} mem_prof_t;
-static mem_prof_t mem_prof[MAX_NUMNODES];
-
-extern unsigned long memory_start;
-extern unsigned long memory_end;
-
-static void __init mem_prof_init(void)
-{
- unsigned long start_pfn, holes, free_pfn;
- const unsigned long zone_alignment = 1UL << (MAX_ORDER - 1);
- unsigned long ul;
- mem_prof_t *mp;
-
- /* Node#0 SDRAM */
- mp = &mem_prof[0];
- mp->start_pfn = PFN_UP(CONFIG_MEMORY_START);
- mp->pages = PFN_DOWN(memory_end - memory_start);
- mp->holes = 0;
- mp->free_pfn = PFN_UP(__pa(_end));
-
- /* Node#1 internal SRAM */
- mp = &mem_prof[1];
- start_pfn = free_pfn = PFN_UP(CONFIG_IRAM_START);
- holes = 0;
- if (start_pfn & (zone_alignment - 1)) {
- ul = zone_alignment;
- while (start_pfn >= ul)
- ul += zone_alignment;
-
- start_pfn = ul - zone_alignment;
- holes = free_pfn - start_pfn;
- }
-
- mp->start_pfn = start_pfn;
- mp->pages = PFN_DOWN(CONFIG_IRAM_SIZE) + holes;
- mp->holes = holes;
- mp->free_pfn = PFN_UP(CONFIG_IRAM_START);
-}
-
-unsigned long __init setup_memory(void)
-{
- unsigned long bootmap_size;
- unsigned long min_pfn;
- int nid;
- mem_prof_t *mp;
-
- max_low_pfn = 0;
- min_low_pfn = -1;
-
- mem_prof_init();
-
- for_each_online_node(nid) {
- mp = &mem_prof[nid];
- NODE_DATA(nid)=(pg_data_t *)&m32r_node_data[nid];
- NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
- min_pfn = mp->start_pfn;
- max_pfn = mp->start_pfn + mp->pages;
- bootmap_size = init_bootmem_node(NODE_DATA(nid), mp->free_pfn,
- mp->start_pfn, max_pfn);
-
- free_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
- PFN_PHYS(mp->pages));
-
- reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(mp->start_pfn),
- PFN_PHYS(mp->free_pfn - mp->start_pfn) + bootmap_size,
- BOOTMEM_DEFAULT);
-
- if (max_low_pfn < max_pfn)
- max_low_pfn = max_pfn;
-
- if (min_low_pfn > min_pfn)
- min_low_pfn = min_pfn;
- }
-
-#ifdef CONFIG_BLK_DEV_INITRD
- if (LOADER_TYPE && INITRD_START) {
- if (INITRD_START + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
- reserve_bootmem_node(NODE_DATA(0), INITRD_START,
- INITRD_SIZE, BOOTMEM_DEFAULT);
- initrd_start = INITRD_START + PAGE_OFFSET;
- initrd_end = initrd_start + INITRD_SIZE;
- printk("initrd:start[%08lx],size[%08lx]\n",
- initrd_start, INITRD_SIZE);
- } else {
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%08llx)\ndisabling initrd\n",
- INITRD_START + INITRD_SIZE,
- (unsigned long long)PFN_PHYS(max_low_pfn));
-
- initrd_start = 0;
- }
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
-
- return max_low_pfn;
-}
-
-#define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn)
-#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
-
-void __init zone_sizes_init(void)
-{
- unsigned long zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES];
- unsigned long low, start_pfn;
- int nid, i;
- mem_prof_t *mp;
-
- for_each_online_node(nid) {
- mp = &mem_prof[nid];
- for (i = 0 ; i < MAX_NR_ZONES ; i++) {
- zones_size[i] = 0;
- zholes_size[i] = 0;
- }
- start_pfn = START_PFN(nid);
- low = MAX_LOW_PFN(nid);
- zones_size[ZONE_DMA] = low - start_pfn;
- zholes_size[ZONE_DMA] = mp->holes;
-
- node_set_state(nid, N_NORMAL_MEMORY);
- free_area_init_node(nid, zones_size, start_pfn, zholes_size);
- }
-
- /*
- * For test
- * Use all area of internal RAM.
- * see __alloc_pages()
- */
- NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;
- NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;
- NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;
-}
diff --git a/arch/m32r/mm/extable.c b/arch/m32r/mm/extable.c
deleted file mode 100644
index 066982756a4e..000000000000
--- a/arch/m32r/mm/extable.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/m32r/mm/extable.c
- */
-
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-int fixup_exception(struct pt_regs *regs)
-{
- const struct exception_table_entry *fixup;
-
- fixup = search_exception_tables(regs->bpc);
- if (fixup) {
- regs->bpc = fixup->fixup;
- return 1;
- }
-
- return 0;
-}
diff --git a/arch/m32r/mm/fault-nommu.c b/arch/m32r/mm/fault-nommu.c
deleted file mode 100644
index 240e00067d5e..000000000000
--- a/arch/m32r/mm/fault-nommu.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/m32r/mm/fault.c
- *
- * Copyright (c) 2001, 2002 Hitoshi Yamamoto, and H. Kondo
- *
- * Some code taken from i386 version.
- * Copyright (C) 1995 Linus Torvalds
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/vt_kern.h> /* For unblank_screen() */
-
-#include <asm/m32r.h>
-#include <linux/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-
-extern void die(const char *, struct pt_regs *, long);
-
-#ifndef CONFIG_SMP
-asmlinkage unsigned int tlb_entry_i_dat;
-asmlinkage unsigned int tlb_entry_d_dat;
-#define tlb_entry_i tlb_entry_i_dat
-#define tlb_entry_d tlb_entry_d_dat
-#else
-unsigned int tlb_entry_i_dat[NR_CPUS];
-unsigned int tlb_entry_d_dat[NR_CPUS];
-#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
-#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
-#endif
-
-void do_BUG(const char *file, int line)
-{
- bust_spinlocks(1);
- printk("kernel BUG at %s:%d!\n", file, line);
-}
-
-/*======================================================================*
- * do_page_fault()
- *======================================================================*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- *
- * ARGUMENT:
- * regs : M32R SP reg.
- * error_code : See below
- * address : M32R MMU MDEVA reg. (Operand ACE)
- * : M32R BPC reg. (Instruction ACE)
- *
- * error_code :
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- *======================================================================*/
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
-{
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-
- bust_spinlocks(1);
-
- if (address < PAGE_SIZE)
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
- else
- printk(KERN_ALERT "Unable to handle kernel paging request");
- printk(" at virtual address %08lx\n",address);
- printk(" printing bpc:\n");
- printk(KERN_ALERT "bpc = %08lx\n", regs->bpc);
-
- die("Oops", regs, error_code);
- bust_spinlocks(0);
- do_exit(SIGKILL);
-}
-
-/*======================================================================*
- * update_mmu_cache()
- *======================================================================*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t *ptep)
-{
- BUG();
-}
-
-/*======================================================================*
- * flush_tlb_page() : flushes one page
- *======================================================================*/
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- BUG();
-}
-
-/*======================================================================*
- * flush_tlb_range() : flushes a range of pages
- *======================================================================*/
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- BUG();
-}
-
-/*======================================================================*
- * flush_tlb_mm() : flushes the specified mm context TLB's
- *======================================================================*/
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- BUG();
-}
-
-/*======================================================================*
- * flush_tlb_all() : flushes all processes TLBs
- *======================================================================*/
-void local_flush_tlb_all(void)
-{
- BUG();
-}
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
deleted file mode 100644
index 46d9a5ca0e3a..000000000000
--- a/arch/m32r/mm/fault.c
+++ /dev/null
@@ -1,550 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/m32r/mm/fault.c
- *
- * Copyright (c) 2001, 2002 Hitoshi Yamamoto, and H. Kondo
- * Copyright (c) 2004 Naoto Sugai, NIIBE Yutaka
- *
- * Some code taken from i386 version.
- * Copyright (C) 1995 Linus Torvalds
- */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h> /* For unblank_screen() */
-#include <linux/highmem.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-#include <asm/m32r.h>
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-
-extern void die(const char *, struct pt_regs *, long);
-
-#ifndef CONFIG_SMP
-asmlinkage unsigned int tlb_entry_i_dat;
-asmlinkage unsigned int tlb_entry_d_dat;
-#define tlb_entry_i tlb_entry_i_dat
-#define tlb_entry_d tlb_entry_d_dat
-#else
-unsigned int tlb_entry_i_dat[NR_CPUS];
-unsigned int tlb_entry_d_dat[NR_CPUS];
-#define tlb_entry_i tlb_entry_i_dat[smp_processor_id()]
-#define tlb_entry_d tlb_entry_d_dat[smp_processor_id()]
-#endif
-
-extern void init_tlb(void);
-
-/*======================================================================*
- * do_page_fault()
- *======================================================================*
- * This routine handles page faults. It determines the address,
- * and the problem, and then passes it off to one of the appropriate
- * routines.
- *
- * ARGUMENT:
- * regs : M32R SP reg.
- * error_code : See below
- * address : M32R MMU MDEVA reg. (Operand ACE)
- * : M32R BPC reg. (Instruction ACE)
- *
- * error_code :
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- * bit 3 == 0 means data, 1 means instruction
- *======================================================================*/
-#define ACE_PROTECTION 1
-#define ACE_WRITE 2
-#define ACE_USERMODE 4
-#define ACE_INSTRUCTION 8
-
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
-{
- struct task_struct *tsk;
- struct mm_struct *mm;
- struct vm_area_struct * vma;
- unsigned long page, addr;
- unsigned long flags = 0;
- int fault;
- siginfo_t info;
-
- /*
- * If BPSW IE bit enable --> set PSW IE bit
- */
- if (regs->psw & M32R_PSW_BIE)
- local_irq_enable();
-
- tsk = current;
-
- info.si_code = SEGV_MAPERR;
-
- /*
- * We fault-in kernel-space virtual memory on-demand. The
- * 'reference' page table is init_mm.pgd.
- *
- * NOTE! We MUST NOT take any locks for this case. We may
- * be in an interrupt or a critical region, and should
- * only copy the information from the master page table,
- * nothing more.
- *
- * This verifies that the fault happens in kernel space
- * (error_code & ACE_USERMODE) == 0, and that the fault was not a
- * protection error (error_code & ACE_PROTECTION) == 0.
- */
- if (address >= TASK_SIZE && !(error_code & ACE_USERMODE))
- goto vmalloc_fault;
-
- mm = tsk->mm;
-
- /*
- * If we're in an interrupt or have no user context or have pagefaults
- * disabled then we must not take the fault.
- */
- if (faulthandler_disabled() || !mm)
- goto bad_area_nosemaphore;
-
- if (error_code & ACE_USERMODE)
- flags |= FAULT_FLAG_USER;
-
- /* When running in the kernel we expect faults to occur only to
- * addresses in user space. All other faults represent errors in the
- * kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
- * we will deadlock attempting to validate the fault against the
- * address space. Luckily the kernel only validly references user
- * space from well defined areas of code, which are listed in the
- * exceptions table.
- *
- * As the vast majority of faults will be valid we will only perform
- * the source reference check when there is a possibility of a deadlock.
- * Attempt to lock the address space, if we cannot we then validate the
- * source. If this is invalid we can skip the address space check,
- * thus avoiding the deadlock.
- */
- if (!down_read_trylock(&mm->mmap_sem)) {
- if ((error_code & ACE_USERMODE) == 0 &&
- !search_exception_tables(regs->psw))
- goto bad_area_nosemaphore;
- down_read(&mm->mmap_sem);
- }
-
- vma = find_vma(mm, address);
- if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
-
- if (error_code & ACE_USERMODE) {
- /*
- * accessing the stack below "spu" is always a bug.
- * The "+ 4" is there due to the push instruction
- * doing pre-decrement on the stack and that
- * doesn't show up until later..
- */
- if (address + 4 < regs->spu)
- goto bad_area;
- }
-
- if (expand_stack(vma, address))
- goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
- info.si_code = SEGV_ACCERR;
- switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
- default: /* 3: write, present */
- /* fall through */
- case ACE_WRITE: /* write, not present */
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- flags |= FAULT_FLAG_WRITE;
- break;
- case ACE_PROTECTION: /* read, present */
- case 0: /* read, not present */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
- goto bad_area;
- }
-
- /*
- * For instruction access exception, check if the area is executable
- */
- if ((error_code & ACE_INSTRUCTION) && !(vma->vm_flags & VM_EXEC))
- goto bad_area;
-
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- addr = (address & PAGE_MASK);
- set_thread_fault_code(error_code);
- fault = handle_mm_fault(vma, addr, flags);
- if (unlikely(fault & VM_FAULT_ERROR)) {
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGSEGV)
- goto bad_area;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
- BUG();
- }
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
- set_thread_fault_code(0);
- up_read(&mm->mmap_sem);
- return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
- up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
- /* User mode accesses just cause a SIGSEGV */
- if (error_code & ACE_USERMODE) {
- tsk->thread.address = address;
- tsk->thread.error_code = error_code | (address >= TASK_SIZE);
- tsk->thread.trap_no = 14;
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void __user *)address;
- force_sig_info(SIGSEGV, &info, tsk);
- return;
- }
-
-no_context:
- /* Are we prepared to handle this kernel fault? */
- if (fixup_exception(regs))
- return;
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-
- bust_spinlocks(1);
-
- if (address < PAGE_SIZE)
- printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
- else
- printk(KERN_ALERT "Unable to handle kernel paging request");
- printk(" at virtual address %08lx\n",address);
- printk(KERN_ALERT " printing bpc:\n");
- printk("%08lx\n", regs->bpc);
- page = *(unsigned long *)MPTB;
- page = ((unsigned long *) page)[address >> PGDIR_SHIFT];
- printk(KERN_ALERT "*pde = %08lx\n", page);
- if (page & _PAGE_PRESENT) {
- page &= PAGE_MASK;
- address &= 0x003ff000;
- page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
- printk(KERN_ALERT "*pte = %08lx\n", page);
- }
- die("Oops", regs, error_code);
- bust_spinlocks(0);
- do_exit(SIGKILL);
-
-/*
- * We ran out of memory, or some other thing happened to us that made
- * us unable to handle the page fault gracefully.
- */
-out_of_memory:
- up_read(&mm->mmap_sem);
- if (!(error_code & ACE_USERMODE))
- goto no_context;
- pagefault_out_of_memory();
- return;
-
-do_sigbus:
- up_read(&mm->mmap_sem);
-
- /* Kernel mode? Handle exception or die */
- if (!(error_code & ACE_USERMODE))
- goto no_context;
-
- tsk->thread.address = address;
- tsk->thread.error_code = error_code;
- tsk->thread.trap_no = 14;
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGBUS, &info, tsk);
- return;
-
-vmalloc_fault:
- {
- /*
- * Synchronize this task's top level page-table
- * with the 'reference' page table.
- *
- * Do _not_ use "tsk" here. We might be inside
- * an interrupt in the middle of a task switch..
- */
- int offset = pgd_index(address);
- pgd_t *pgd, *pgd_k;
- pmd_t *pmd, *pmd_k;
- pte_t *pte_k;
-
- pgd = (pgd_t *)*(unsigned long *)MPTB;
- pgd = offset + (pgd_t *)pgd;
- pgd_k = init_mm.pgd + offset;
-
- if (!pgd_present(*pgd_k))
- goto no_context;
-
- /*
- * set_pgd(pgd, *pgd_k); here would be useless on PAE
- * and redundant with the set_pmd() on non-PAE.
- */
-
- pmd = pmd_offset(pgd, address);
- pmd_k = pmd_offset(pgd_k, address);
- if (!pmd_present(*pmd_k))
- goto no_context;
- set_pmd(pmd, *pmd_k);
-
- pte_k = pte_offset_kernel(pmd_k, address);
- if (!pte_present(*pte_k))
- goto no_context;
-
- addr = (address & PAGE_MASK);
- set_thread_fault_code(error_code);
- update_mmu_cache(NULL, addr, pte_k);
- set_thread_fault_code(0);
- return;
- }
-}
-
-/*======================================================================*
- * update_mmu_cache()
- *======================================================================*/
-#define TLB_MASK (NR_TLB_ENTRIES - 1)
-#define ITLB_END (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
-#define DTLB_END (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
- pte_t *ptep)
-{
- volatile unsigned long *entry1, *entry2;
- unsigned long pte_data, flags;
- unsigned int *entry_dat;
- int inst = get_thread_fault_code() & ACE_INSTRUCTION;
- int i;
-
- /* Ptrace may call this routine. */
- if (vma && current->active_mm != vma->vm_mm)
- return;
-
- local_irq_save(flags);
-
- vaddr = (vaddr & PAGE_MASK) | get_asid();
-
- pte_data = pte_val(*ptep);
-
-#ifdef CONFIG_CHIP_OPSP
- entry1 = (unsigned long *)ITLB_BASE;
- for (i = 0; i < NR_TLB_ENTRIES; i++) {
- if (*entry1++ == vaddr) {
- set_tlb_data(entry1, pte_data);
- break;
- }
- entry1++;
- }
- entry2 = (unsigned long *)DTLB_BASE;
- for (i = 0; i < NR_TLB_ENTRIES; i++) {
- if (*entry2++ == vaddr) {
- set_tlb_data(entry2, pte_data);
- break;
- }
- entry2++;
- }
-#else
- /*
- * Update TLB entries
- * entry1: ITLB entry address
- * entry2: DTLB entry address
- */
- __asm__ __volatile__ (
- "seth %0, #high(%4) \n\t"
- "st %2, @(%5, %0) \n\t"
- "ldi %1, #1 \n\t"
- "st %1, @(%6, %0) \n\t"
- "add3 r4, %0, %7 \n\t"
- ".fillinsn \n"
- "1: \n\t"
- "ld %1, @(%6, %0) \n\t"
- "bnez %1, 1b \n\t"
- "ld %0, @r4+ \n\t"
- "ld %1, @r4 \n\t"
- "st %3, @+%0 \n\t"
- "st %3, @+%1 \n\t"
- : "=&r" (entry1), "=&r" (entry2)
- : "r" (vaddr), "r" (pte_data), "i" (MMU_REG_BASE),
- "i" (MSVA_offset), "i" (MTOP_offset), "i" (MIDXI_offset)
- : "r4", "memory"
- );
-#endif
-
- if ((!inst && entry2 >= DTLB_END) || (inst && entry1 >= ITLB_END))
- goto notfound;
-
-found:
- local_irq_restore(flags);
-
- return;
-
- /* Valid entry not found */
-notfound:
- /*
- * Update ITLB or DTLB entry
- * entry1: TLB entry address
- * entry2: TLB base address
- */
- if (!inst) {
- entry2 = (unsigned long *)DTLB_BASE;
- entry_dat = &tlb_entry_d;
- } else {
- entry2 = (unsigned long *)ITLB_BASE;
- entry_dat = &tlb_entry_i;
- }
- entry1 = entry2 + (((*entry_dat - 1) & TLB_MASK) << 1);
-
- for (i = 0 ; i < NR_TLB_ENTRIES ; i++) {
- if (!(entry1[1] & 2)) /* Valid bit check */
- break;
-
- if (entry1 != entry2)
- entry1 -= 2;
- else
- entry1 += TLB_MASK << 1;
- }
-
- if (i >= NR_TLB_ENTRIES) { /* Empty entry not found */
- entry1 = entry2 + (*entry_dat << 1);
- *entry_dat = (*entry_dat + 1) & TLB_MASK;
- }
- *entry1++ = vaddr; /* Set TLB tag */
- set_tlb_data(entry1, pte_data);
-
- goto found;
-}
-
-/*======================================================================*
- * flush_tlb_page() : flushes one page
- *======================================================================*/
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
- if (vma->vm_mm && mm_context(vma->vm_mm) != NO_CONTEXT) {
- unsigned long flags;
-
- local_irq_save(flags);
- page &= PAGE_MASK;
- page |= (mm_context(vma->vm_mm) & MMU_CONTEXT_ASID_MASK);
- __flush_tlb_page(page);
- local_irq_restore(flags);
- }
-}
-
-/*======================================================================*
- * flush_tlb_range() : flushes a range of pages
- *======================================================================*/
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- struct mm_struct *mm;
-
- mm = vma->vm_mm;
- if (mm_context(mm) != NO_CONTEXT) {
- unsigned long flags;
- int size;
-
- local_irq_save(flags);
- size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (size > (NR_TLB_ENTRIES / 4)) { /* Too many TLB to flush */
- mm_context(mm) = NO_CONTEXT;
- if (mm == current->mm)
- activate_context(mm);
- } else {
- unsigned long asid;
-
- asid = mm_context(mm) & MMU_CONTEXT_ASID_MASK;
- start &= PAGE_MASK;
- end += (PAGE_SIZE - 1);
- end &= PAGE_MASK;
-
- start |= asid;
- end |= asid;
- while (start < end) {
- __flush_tlb_page(start);
- start += PAGE_SIZE;
- }
- }
- local_irq_restore(flags);
- }
-}
-
-/*======================================================================*
- * flush_tlb_mm() : flushes the specified mm context TLB's
- *======================================================================*/
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
- /* Invalidate all TLB of this process. */
- /* Instead of invalidating each TLB, we get new MMU context. */
- if (mm_context(mm) != NO_CONTEXT) {
- unsigned long flags;
-
- local_irq_save(flags);
- mm_context(mm) = NO_CONTEXT;
- if (mm == current->mm)
- activate_context(mm);
- local_irq_restore(flags);
- }
-}
-
-/*======================================================================*
- * flush_tlb_all() : flushes all processes TLBs
- *======================================================================*/
-void local_flush_tlb_all(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- __flush_tlb_all();
- local_irq_restore(flags);
-}
-
-/*======================================================================*
- * init_mmu()
- *======================================================================*/
-void __init init_mmu(void)
-{
- tlb_entry_i = 0;
- tlb_entry_d = 0;
- mmu_context_cache = MMU_CONTEXT_FIRST_VERSION;
- set_asid(mmu_context_cache & MMU_CONTEXT_ASID_MASK);
- *(volatile unsigned long *)MPTB = (unsigned long)swapper_pg_dir;
-}
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
deleted file mode 100644
index 93abc8c3a46e..000000000000
--- a/arch/m32r/mm/init.c
+++ /dev/null
@@ -1,152 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/m32r/mm/init.c
- *
- * Copyright (c) 2001, 2002 Hitoshi Yamamoto
- *
- * Some code taken from sh version.
- * Copyright (C) 1999 Niibe Yutaka
- * Based on linux/arch/i386/mm/init.c:
- * Copyright (C) 1995 Linus Torvalds
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/bootmem.h>
-#include <linux/swap.h>
-#include <linux/highmem.h>
-#include <linux/bitops.h>
-#include <linux/nodemask.h>
-#include <linux/pfn.h>
-#include <linux/gfp.h>
-#include <asm/types.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-#include <asm/setup.h>
-#include <asm/tlb.h>
-#include <asm/sections.h>
-
-pgd_t swapper_pg_dir[1024];
-
-/*
- * Cache of MMU context last used.
- */
-#ifndef CONFIG_SMP
-unsigned long mmu_context_cache_dat;
-#else
-unsigned long mmu_context_cache_dat[NR_CPUS];
-#endif
-
-/*
- * function prototype
- */
-void __init paging_init(void);
-void __init mem_init(void);
-void free_initmem(void);
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long, unsigned long);
-#endif
-
-/* It'd be good if these lines were in the standard header file. */
-#define START_PFN(nid) (NODE_DATA(nid)->bdata->node_min_pfn)
-#define MAX_LOW_PFN(nid) (NODE_DATA(nid)->bdata->node_low_pfn)
-
-#ifndef CONFIG_DISCONTIGMEM
-void __init zone_sizes_init(void)
-{
- unsigned long zones_size[MAX_NR_ZONES] = {0, };
- unsigned long start_pfn;
-
-#ifdef CONFIG_MMU
- {
- unsigned long low;
- unsigned long max_dma;
-
- start_pfn = START_PFN(0);
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- low = MAX_LOW_PFN(0);
-
- if (low < max_dma) {
- zones_size[ZONE_DMA] = low - start_pfn;
- zones_size[ZONE_NORMAL] = 0;
- } else {
- zones_size[ZONE_DMA] = low - start_pfn;
- zones_size[ZONE_NORMAL] = low - max_dma;
- }
- }
-#else
- zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
- zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
- start_pfn = __MEMORY_START >> PAGE_SHIFT;
-#endif /* CONFIG_MMU */
-
- free_area_init_node(0, zones_size, start_pfn, 0);
-}
-#else /* CONFIG_DISCONTIGMEM */
-extern void zone_sizes_init(void);
-#endif /* CONFIG_DISCONTIGMEM */
-
-/*======================================================================*
- * paging_init() : sets up the page tables
- *======================================================================*/
-void __init paging_init(void)
-{
-#ifdef CONFIG_MMU
- int i;
- pgd_t *pg_dir;
-
- /* We don't need kernel mapping as hardware support that. */
- pg_dir = swapper_pg_dir;
-
- for (i = 0 ; i < USER_PTRS_PER_PGD * 2 ; i++)
- pgd_val(pg_dir[i]) = 0;
-#endif /* CONFIG_MMU */
- zone_sizes_init();
-}
-
-/*======================================================================*
- * mem_init() :
- * orig : arch/sh/mm/init.c
- *======================================================================*/
-void __init mem_init(void)
-{
-#ifndef CONFIG_MMU
- extern unsigned long memory_end;
-
- high_memory = (void *)(memory_end & PAGE_MASK);
-#else
- high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0)));
-#endif /* CONFIG_MMU */
-
- /* clear the zero-page */
- memset(empty_zero_page, 0, PAGE_SIZE);
-
- set_max_mapnr(get_num_physpages());
- free_all_bootmem();
- mem_init_print_info(NULL);
-}
-
-/*======================================================================*
- * free_initmem() :
- * orig : arch/sh/mm/init.c
- *======================================================================*/
-void free_initmem(void)
-{
- free_initmem_default(-1);
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-/*======================================================================*
- * free_initrd_mem() :
- * orig : arch/sh/mm/init.c
- *======================================================================*/
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
- free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
diff --git a/arch/m32r/mm/ioremap-nommu.c b/arch/m32r/mm/ioremap-nommu.c
deleted file mode 100644
index 2759f2d48384..000000000000
--- a/arch/m32r/mm/ioremap-nommu.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * linux/arch/m32r/mm/ioremap-nommu.c
- *
- * Copyright (c) 2001, 2002 Hiroyuki Kondo
- *
- * Taken from mips version.
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001 Ralf Baechle
- */
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
-
-#include <linux/module.h>
-#include <asm/addrspace.h>
-#include <asm/byteorder.h>
-
-#include <linux/vmalloc.h>
-#include <asm/io.h>
-#include <asm/pgalloc.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-
-#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))
-
-void __iomem *
-__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{
- return (void *)phys_addr;
-}
-
-#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)
-
-void iounmap(volatile void __iomem *addr)
-{
-}
-
diff --git a/arch/m32r/mm/ioremap.c b/arch/m32r/mm/ioremap.c
deleted file mode 100644
index 5152c4e6ac80..000000000000
--- a/arch/m32r/mm/ioremap.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * linux/arch/m32r/mm/ioremap.c
- *
- * Copyright (c) 2001, 2002 Hiroyuki Kondo
- *
- * Taken from mips version.
- * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001 Ralf Baechle
- */
-
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
-
-#include <linux/module.h>
-#include <asm/addrspace.h>
-#include <asm/byteorder.h>
-
-#include <linux/vmalloc.h>
-#include <linux/io.h>
-#include <asm/pgalloc.h>
-
-/*
- * Generic mapping function (not visible outside):
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-
-#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))
-
-void __iomem *
-__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{
- void __iomem * addr;
- struct vm_struct * area;
- unsigned long offset, last_addr;
- pgprot_t pgprot;
-
- /* Don't allow wraparound or zero size */
- last_addr = phys_addr + size - 1;
- if (!size || last_addr < phys_addr)
- return NULL;
-
- /*
- * Map objects in the low 512mb of address space using KSEG1, otherwise
- * map using page tables.
- */
- if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
- return (void *) KSEG1ADDR(phys_addr);
-
- /*
- * Don't allow anybody to remap normal RAM that we're using..
- */
- if (phys_addr < virt_to_phys(high_memory)) {
- char *t_addr, *t_end;
- struct page *page;
-
- t_addr = __va(phys_addr);
- t_end = t_addr + (size - 1);
-
- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
- if(!PageReserved(page))
- return NULL;
- }
-
- pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
- | _PAGE_WRITE | flags);
-
- /*
- * Mappings have to be page-aligned
- */
- offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
- /*
- * Ok, go for it..
- */
- area = get_vm_area(size, VM_IOREMAP);
- if (!area)
- return NULL;
- area->phys_addr = phys_addr;
- addr = (void __iomem *) area->addr;
- if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
- phys_addr, pgprot)) {
- vunmap((void __force *) addr);
- return NULL;
- }
-
- return (void __iomem *) (offset + (char __iomem *)addr);
-}
-
-#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)
-
-void iounmap(volatile void __iomem *addr)
-{
- if (!IS_KSEG1(addr))
- vfree((void *) (PAGE_MASK & (unsigned long) addr));
-}
-
diff --git a/arch/m32r/mm/mmu.S b/arch/m32r/mm/mmu.S
deleted file mode 100644
index fd8f9c9b7b07..000000000000
--- a/arch/m32r/mm/mmu.S
+++ /dev/null
@@ -1,355 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/arch/m32r/mm/mmu.S
- *
- * Copyright (C) 2001 by Hiroyuki Kondo
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/smp.h>
-
- .text
-#ifdef CONFIG_MMU
-
-#include <asm/mmu_context.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/m32r.h>
-
-/*
- * TLB Miss Exception handler
- */
- .balign 16
-ENTRY(tme_handler)
- .global tlb_entry_i_dat
- .global tlb_entry_d_dat
-
- SWITCH_TO_KERNEL_STACK
-
-#if defined(CONFIG_ISA_M32R2)
- st r0, @-sp
- st r1, @-sp
- st r2, @-sp
- st r3, @-sp
-
- seth r3, #high(MMU_REG_BASE)
- ld r1, @(MESTS_offset, r3) ; r1: status (MESTS reg.)
- ld r0, @(MDEVP_offset, r3) ; r0: PFN + ASID (MDEVP reg.)
- st r1, @(MESTS_offset, r3) ; clear status (MESTS reg.)
- and3 r1, r1, #(MESTS_IT)
- bnez r1, 1f ; instruction TLB miss?
-
-;; data TLB miss
-;; input
-;; r0: PFN + ASID (MDEVP reg.)
-;; r1 - r3: free
-;; output
-;; r0: PFN + ASID
-;; r1: TLB entry base address
-;; r2: &tlb_entry_{i|d}_dat
-;; r3: free
-
-#ifndef CONFIG_SMP
- seth r2, #high(tlb_entry_d_dat)
- or3 r2, r2, #low(tlb_entry_d_dat)
-#else /* CONFIG_SMP */
- ldi r1, #-8192
- seth r2, #high(tlb_entry_d_dat)
- or3 r2, r2, #low(tlb_entry_d_dat)
- and r1, sp
- ld r1, @(16, r1) ; current_thread_info->cpu
- slli r1, #2
- add r2, r1
-#endif /* !CONFIG_SMP */
- seth r1, #high(DTLB_BASE)
- or3 r1, r1, #low(DTLB_BASE)
- bra 2f
-
- .balign 16
- .fillinsn
-1:
-;; instruction TLB miss
-;; input
-;; r0: MDEVP reg. (included ASID)
-;; r1 - r3: free
-;; output
-;; r0: PFN + ASID
-;; r1: TLB entry base address
-;; r2: &tlb_entry_{i|d}_dat
-;; r3: free
- ldi r3, #-4096
- and3 r0, r0, #(MMU_CONTEXT_ASID_MASK)
- mvfc r1, bpc
- and r1, r3
- or r0, r1 ; r0: PFN + ASID
-#ifndef CONFIG_SMP
- seth r2, #high(tlb_entry_i_dat)
- or3 r2, r2, #low(tlb_entry_i_dat)
-#else /* CONFIG_SMP */
- ldi r1, #-8192
- seth r2, #high(tlb_entry_i_dat)
- or3 r2, r2, #low(tlb_entry_i_dat)
- and r1, sp
- ld r1, @(16, r1) ; current_thread_info->cpu
- slli r1, #2
- add r2, r1
-#endif /* !CONFIG_SMP */
- seth r1, #high(ITLB_BASE)
- or3 r1, r1, #low(ITLB_BASE)
-
- .fillinsn
-2:
-;; select TLB entry
-;; input
-;; r0: PFN + ASID
-;; r1: TLB entry base address
-;; r2: &tlb_entry_{i|d}_dat
-;; r3: free
-;; output
-;; r0: PFN + ASID
-;; r1: TLB entry address
-;; r2, r3: free
-#ifdef CONFIG_ISA_DUAL_ISSUE
- ld r3, @r2 || srli r1, #3
-#else
- ld r3, @r2
- srli r1, #3
-#endif
- add r1, r3
- ; tlb_entry_{d|i}_dat++;
- addi r3, #1
- and3 r3, r3, #(NR_TLB_ENTRIES - 1)
-#ifdef CONFIG_ISA_DUAL_ISSUE
- st r3, @r2 || slli r1, #3
-#else
- st r3, @r2
- slli r1, #3
-#endif
-
-;; load pte
-;; input
-;; r0: PFN + ASID
-;; r1: TLB entry address
-;; r2, r3: free
-;; output
-;; r0: PFN + ASID
-;; r1: TLB entry address
-;; r2: pte_data
-;; r3: free
- ; pgd = *(unsigned long *)MPTB;
- ld24 r2, #(-MPTB - 1)
- srl3 r3, r0, #22
-#ifdef CONFIG_ISA_DUAL_ISSUE
- not r2, r2 || slli r3, #2 ; r3: pgd offset
-#else
- not r2, r2
- slli r3, #2
-#endif
- ld r2, @r2 ; r2: pgd base addr (MPTB reg.)
- or r3, r2 ; r3: pmd addr
-
- ; pmd = pmd_offset(pgd, address);
- ld r3, @r3 ; r3: pmd data
- beqz r3, 3f ; pmd_none(*pmd) ?
-
- and3 r2, r3, #0xfff
- add3 r2, r2, #-355 ; _KERNPG_TABLE(=0x163)
- bnez r2, 3f ; pmd_bad(*pmd) ?
- ldi r2, #-4096
-
- ; pte = pte_offset(pmd, address);
- and r2, r3 ; r2: pte base addr
- srl3 r3, r0, #10
- and3 r3, r3, #0xffc ; r3: pte offset
- or r3, r2
- seth r2, #0x8000
- or r3, r2 ; r3: pte addr
-
- ; pte_data = (unsigned long)pte_val(*pte);
- ld r2, @r3 ; r2: pte data
- and3 r3, r2, #2 ; _PAGE_PRESENT(=2) check
- beqz r3, 3f
-
- .fillinsn
-5:
-;; set tlb
-;; input
-;; r0: PFN + ASID
-;; r1: TLB entry address
-;; r2: pte_data
-;; r3: free
- st r0, @r1 ; set_tlb_tag(entry++, address);
- st r2, @+r1 ; set_tlb_data(entry, pte_data);
-
- .fillinsn
-6:
- ld r3, @sp+
- ld r2, @sp+
- ld r1, @sp+
- ld r0, @sp+
- rte
-
- .fillinsn
-3:
-;; error
-;; input
-;; r0: PFN + ASID
-;; r1: TLB entry address
-;; r2, r3: free
-;; output
-;; r0: PFN + ASID
-;; r1: TLB entry address
-;; r2: pte_data
-;; r3: free
-#ifdef CONFIG_ISA_DUAL_ISSUE
- bra 5b || ldi r2, #2
-#else
- ldi r2, #2 ; r2: pte_data = 0 | _PAGE_PRESENT(=2)
- bra 5b
-#endif
-
-#elif defined (CONFIG_ISA_M32R)
-
- st sp, @-sp
- st r0, @-sp
- st r1, @-sp
- st r2, @-sp
- st r3, @-sp
- st r4, @-sp
-
- seth r3, #high(MMU_REG_BASE)
- ld r0, @(MDEVA_offset,r3) ; r0: address (MDEVA reg.)
- mvfc r2, bpc ; r2: bpc
- ld r1, @(MESTS_offset,r3) ; r1: status (MESTS reg.)
- st r1, @(MESTS_offset,r3) ; clear status (MESTS reg.)
- and3 r1, r1, #(MESTS_IT)
- beqz r1, 1f ; data TLB miss?
-
-;; instruction TLB miss
- mv r0, r2 ; address = bpc;
- ; entry = (unsigned long *)ITLB_BASE+tlb_entry_i*2;
- seth r3, #shigh(tlb_entry_i_dat)
- ld r4, @(low(tlb_entry_i_dat),r3)
- sll3 r2, r4, #3
- seth r1, #high(ITLB_BASE)
- or3 r1, r1, #low(ITLB_BASE)
- add r2, r1 ; r2: entry
- addi r4, #1 ; tlb_entry_i++;
- and3 r4, r4, #(NR_TLB_ENTRIES-1)
- st r4, @(low(tlb_entry_i_dat),r3)
- bra 2f
- .fillinsn
-1:
-;; data TLB miss
- ; entry = (unsigned long *)DTLB_BASE+tlb_entry_d*2;
- seth r3, #shigh(tlb_entry_d_dat)
- ld r4, @(low(tlb_entry_d_dat),r3)
- sll3 r2, r4, #3
- seth r1, #high(DTLB_BASE)
- or3 r1, r1, #low(DTLB_BASE)
- add r2, r1 ; r2: entry
- addi r4, #1 ; tlb_entry_d++;
- and3 r4, r4, #(NR_TLB_ENTRIES-1)
- st r4, @(low(tlb_entry_d_dat),r3)
- .fillinsn
-2:
-;; load pte
-; r0: address, r2: entry
-; r1,r3,r4: (free)
- ; pgd = *(unsigned long *)MPTB;
- ld24 r1, #(-MPTB-1)
- not r1, r1
- ld r1, @r1
- srl3 r4, r0, #22
- sll3 r3, r4, #2
- add r3, r1 ; r3: pgd
- ; pmd = pmd_offset(pgd, address);
- ld r1, @r3 ; r1: pmd
- beqz r1, 3f ; pmd_none(*pmd) ?
-;
- and3 r1, r1, #0x3ff
- ldi r4, #0x163 ; _KERNPG_TABLE(=0x163)
- bne r1, r4, 3f ; pmd_bad(*pmd) ?
-
- .fillinsn
-4:
- ; pte = pte_offset(pmd, address);
- ld r4, @r3 ; r4: pte
- ldi r3, #-4096
- and r4, r3
- srl3 r3, r0, #10
- and3 r3, r3, #0xffc
- add r4, r3
- seth r3, #0x8000
- add r4, r3 ; r4: pte
- ; pte_data = (unsigned long)pte_val(*pte);
- ld r1, @r4 ; r1: pte_data
- and3 r3, r1, #2 ; _PAGE_PRESENT(=2) check
- beqz r3, 3f
-
- .fillinsn
-;; set tlb
-; r0: address, r1: pte_data, r2: entry
-; r3,r4: (free)
-5:
- ldi r3, #-4096 ; set_tlb_tag(entry++, address);
- and r3, r0
- seth r4, #shigh(MASID)
- ld r4, @(low(MASID),r4) ; r4: MASID
- and3 r4, r4, #(MMU_CONTEXT_ASID_MASK)
- or r3, r4
- st r3, @r2
- st r1, @(4,r2) ; set_tlb_data(entry, pte_data);
-
- ld r4, @sp+
- ld r3, @sp+
- ld r2, @sp+
- ld r1, @sp+
- ld r0, @sp+
- ld sp, @sp+
- rte
-
- .fillinsn
-3:
- ldi r1, #2 ; r1: pte_data = 0 | _PAGE_PRESENT(=2)
- bra 5b
-
-#else
-#error unknown isa configuration
-#endif
-
-ENTRY(init_tlb)
-;; Set MMU Register
- seth r0, #high(MMU_REG_BASE) ; Set MMU_REG_BASE higher
- or3 r0, r0, #low(MMU_REG_BASE) ; Set MMU_REG_BASE lower
- ldi r1, #0
- st r1, @(MPSZ_offset,r0) ; Set MPSZ Reg(Page size 4KB:0 16KB:1 64KB:2)
- ldi r1, #0
- st r1, @(MASID_offset,r0) ; Set ASID Zero
-
-;; Set TLB
- seth r0, #high(ITLB_BASE) ; Set ITLB_BASE higher
- or3 r0, r0, #low(ITLB_BASE) ; Set ITLB_BASE lower
- seth r1, #high(DTLB_BASE) ; Set DTLB_BASE higher
- or3 r1, r1, #low(DTLB_BASE) ; Set DTLB_BASE lower
- ldi r2, #0
- ldi r3, #NR_TLB_ENTRIES
- addi r0, #-4
- addi r1, #-4
-clear_tlb:
- st r2, @+r0 ; VPA <- 0
- st r2, @+r0 ; PPA <- 0
- st r2, @+r1 ; VPA <- 0
- st r2, @+r1 ; PPA <- 0
- addi r3, #-1
- bnez r3, clear_tlb
-;;
- jmp r14
-
-ENTRY(m32r_itlb_entrys)
-ENTRY(m32r_otlb_entrys)
-
-#endif /* CONFIG_MMU */
-
- .end
diff --git a/arch/m32r/mm/page.S b/arch/m32r/mm/page.S
deleted file mode 100644
index a2e9367dbf79..000000000000
--- a/arch/m32r/mm/page.S
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * linux/arch/m32r/mm/page.S
- *
- * Clear/Copy page with CPU
- *
- * Copyright (C) 2004 The Free Software Initiative of Japan
- *
- * Written by Niibe Yutaka
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- */
- .text
- .global copy_page
- /*
- * copy_page (to, from)
- *
- * PAGE_SIZE = 4096-byte
- * Cache line = 16-byte
- * 16 * 256
- */
- .align 4
-copy_page:
- ldi r2, #255
- ld r3, @r0 /* cache line allocate */
- ld r4, @r1+
- ld r5, @r1+
- ld r6, @r1+
- ld r7, @r1+
- .fillinsn
-0:
- st r4, @r0
- st r5, @+r0
- st r6, @+r0
- st r7, @+r0
- ld r4, @r1+
- addi r0, #4
- ld r5, @r1+
- ld r6, @r1+
- ld r7, @r1+
- ld r3, @r0 /* cache line allocate */
- addi r2, #-1
- bnez r2, 0b
-
- st r4, @r0
- st r5, @+r0
- st r6, @+r0
- st r7, @+r0
- jmp r14
-
- .text
- .global clear_page
- /*
- * clear_page (to)
- *
- * PAGE_SIZE = 4096-byte
- * Cache line = 16-byte
- * 16 * 256
- */
- .align 4
-clear_page:
- ldi r2, #255
- ldi r4, #0
- ld r3, @r0 /* cache line allocate */
- .fillinsn
-0:
- st r4, @r0
- st r4, @+r0
- st r4, @+r0
- st r4, @+r0
- addi r0, #4
- ld r3, @r0 /* cache line allocate */
- addi r2, #-1
- bnez r2, 0b
-
- st r4, @r0
- st r4, @+r0
- st r4, @+r0
- st r4, @+r0
- jmp r14