Diffstat (limited to 'arch/hexagon/mm')
 arch/hexagon/mm/Makefile   |  2
 arch/hexagon/mm/init.c     | 77
 arch/hexagon/mm/ioremap.c  | 44
 arch/hexagon/mm/uaccess.c  |  8
 arch/hexagon/mm/vm_fault.c | 30
 arch/hexagon/mm/vm_tlb.c   |  1
 6 files changed, 63 insertions(+), 99 deletions(-)
diff --git a/arch/hexagon/mm/Makefile b/arch/hexagon/mm/Makefile
index 49911a906fd0..ba4b04d962d6 100644
--- a/arch/hexagon/mm/Makefile
+++ b/arch/hexagon/mm/Makefile
@@ -3,5 +3,5 @@
 # Makefile for Hexagon memory management subsystem
 #
 
-obj-y := init.o ioremap.o uaccess.o vm_fault.o cache.o
+obj-y := init.o uaccess.o vm_fault.o cache.o
 obj-y += copy_to_user.o copy_from_user.o vm_tlb.o
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 3167a3b5c97b..34eb9d424b96 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -12,6 +12,7 @@
 #include <linux/highmem.h>
 #include <asm/tlb.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/vm_mmu.h>
 
 /*
@@ -42,32 +43,6 @@ DEFINE_SPINLOCK(kmap_gen_lock);
 /* checkpatch says don't init this to 0. */
 unsigned long long kmap_generation;
 
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Fixes up more stuff for HIGHMEM
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
-{
-	/* No idea where this is actually declared. Seems to evade LXR. */
-	memblock_free_all();
-
-	/*
-	 * To-Do: someone somewhere should wipe out the bootmem map
-	 * after we're done?
-	 */
-
-	/*
-	 * This can be moved to some more virtual-memory-specific
-	 * initialization hook at some point. Set the init_mm
-	 * descriptors "context" value to point to the initial
-	 * kernel segment table's physical address.
-	 */
-	init_mm.context.ptbase = __pa(init_mm.pgd);
-}
-
 void sync_icache_dcache(pte_t pte)
 {
 	unsigned long addr;
@@ -86,7 +61,7 @@ void sync_icache_dcache(pte_t pte)
  * In this mode, we only have one pg_data_t
  * structure: contig_mem_data.
  */
-void __init paging_init(void)
+static void __init paging_init(void)
 {
 	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
 
@@ -103,10 +78,10 @@ void __init paging_init(void)
 	free_area_init(max_zone_pfn);  /* sets up the zonelists and mem_map */
 
 	/*
-	 * Start of high memory area. Will probably need something more
-	 * fancy if we... get more fancy.
+	 * Set the init_mm descriptors "context" value to point to the
+	 * initial kernel segment table's physical address.
 	 */
-	high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
+	init_mm.context.ptbase = __pa(init_mm.pgd);
 }
 
 #ifndef DMA_RESERVE
@@ -234,3 +209,45 @@ void __init setup_arch_memory(void)
 	 * which is called by start_kernel() later on in the process
 	 */
 }
+
+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   CACHEDEF),
+	[VM_READ]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_READ | CACHEDEF),
+	[VM_WRITE]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   CACHEDEF),
+	[VM_WRITE | VM_READ]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_READ | CACHEDEF),
+	[VM_EXEC]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_EXECUTE | CACHEDEF),
+	[VM_EXEC | VM_READ]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_EXECUTE | _PAGE_READ |
+								   CACHEDEF),
+	[VM_EXEC | VM_WRITE]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_EXECUTE | CACHEDEF),
+	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_EXECUTE | _PAGE_READ |
+								   CACHEDEF),
+	[VM_SHARED]					= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   CACHEDEF),
+	[VM_SHARED | VM_READ]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_READ | CACHEDEF),
+	[VM_SHARED | VM_WRITE]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_WRITE | CACHEDEF),
+	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_READ | _PAGE_WRITE |
+								   CACHEDEF),
+	[VM_SHARED | VM_EXEC]				= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_EXECUTE | CACHEDEF),
+	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_EXECUTE | _PAGE_READ |
+								   CACHEDEF),
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_EXECUTE | _PAGE_WRITE |
+								   CACHEDEF),
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(_PAGE_PRESENT | _PAGE_USER |
+								   _PAGE_READ | _PAGE_EXECUTE |
+								   _PAGE_WRITE | CACHEDEF)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/hexagon/mm/ioremap.c b/arch/hexagon/mm/ioremap.c
deleted file mode 100644
index 255c5b1ee1a7..000000000000
--- a/arch/hexagon/mm/ioremap.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * I/O remap functions for Hexagon
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/io.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-
-void __iomem *ioremap(unsigned long phys_addr, unsigned long size)
-{
-	unsigned long last_addr, addr;
-	unsigned long offset = phys_addr & ~PAGE_MASK;
-	struct vm_struct *area;
-
-	pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_READ|_PAGE_WRITE
-					|(__HEXAGON_C_DEV << 6));
-
-	last_addr = phys_addr + size - 1;
-
-	/* Wrapping not allowed */
-	if (!size || (last_addr < phys_addr))
-		return NULL;
-
-	/* Rounds up to next page size, including whole-page offset */
-	size = PAGE_ALIGN(offset + size);
-
-	area = get_vm_area(size, VM_IOREMAP);
-	addr = (unsigned long)area->addr;
-
-	if (ioremap_page_range(addr, addr+size, phys_addr, prot)) {
-		vunmap((void *)addr);
-		return NULL;
-	}
-
-	return (void __iomem *) (offset + addr);
-}
-
-void iounmap(const volatile void __iomem *addr)
-{
-	vunmap((void *) ((unsigned long) addr & PAGE_MASK));
-}
diff --git a/arch/hexagon/mm/uaccess.c b/arch/hexagon/mm/uaccess.c
index 650bca92f0b7..3204e9ba6d6f 100644
--- a/arch/hexagon/mm/uaccess.c
+++ b/arch/hexagon/mm/uaccess.c
@@ -35,11 +35,3 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
 
 	return count;
 }
-
-unsigned long clear_user_hexagon(void __user *dest, unsigned long count)
-{
-	if (!access_ok(dest, count))
-		return count;
-	else
-		return __clear_user_hexagon(dest, count);
-}
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 4fac4b9eb316..3771fb453898 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -12,6 +12,7 @@
  */
 
 #include <asm/traps.h>
+#include <asm/vm_fault.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>
 #include <linux/sched/signal.h>
@@ -33,7 +34,7 @@
 /*
  * Canonical page fault handler
  */
-void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
+static void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
@@ -57,21 +58,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (unlikely(!vma))
+		goto bad_area_nosemaphore;
 
-	if (vma->vm_start <= address)
-		goto good_area;
-
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-
-	if (expand_stack(vma, address))
-		goto bad_area;
-
-good_area:
 	/* Address space is OK. Now check access rights. */
 	si_code = SEGV_ACCERR;
 
@@ -93,7 +83,14 @@ good_area:
 
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto no_context;
+		return;
+	}
+
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
 		return;
 
 	/* The most common case -- we are done. */
@@ -136,6 +133,7 @@ good_area:
 bad_area:
 	mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 		return;
diff --git a/arch/hexagon/mm/vm_tlb.c b/arch/hexagon/mm/vm_tlb.c
index 53482f2a9ff9..8b6405e2234b 100644
--- a/arch/hexagon/mm/vm_tlb.c
+++ b/arch/hexagon/mm/vm_tlb.c
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <asm/page.h>
 #include <asm/hexagon_vm.h>
+#include <asm/tlbflush.h>
 
 /*
 * Initial VM implementation has only one map active at a time, with
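
Note on the protection_map hunk in init.c: it moves the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
permission-to-PTE translation table out of generic mm code and into the architecture, and
DECLARE_VM_GET_PAGE_PROT emits the accessor the rest of the kernel calls. A sketch of roughly
what that macro expands to, based on include/linux/mm.h of this era of the kernel (treat the
exact expansion as an assumption, it is not part of this diff):

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* index the table by the low four permission bits only */
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}
	EXPORT_SYMBOL(vm_get_page_prot);

Because every entry is known at build time, the table can be static const here, where the old
generic table had to stay writable so architectures could patch it during boot.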
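Note on the ioremap.c deletion: the removed routine was an open-coded copy of the generic
ioremap logic (get_vm_area() plus ioremap_page_range(), with a device-type cacheability
attribute). Assuming this series converts hexagon to the generic GENERIC_IOREMAP
implementation, the only arch-specific piece left to supply is the protection value,
e.g. something like the following in arch/hexagon/include/asm/pgtable.h (a hypothetical
companion change shown here for illustration, mirroring the prot the deleted function
built by hand):

	/* assumed define consumed by the generic ioremap() path */
	#define _PAGE_IOREMAP	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 (__HEXAGON_C_DEV << 6))

As a side benefit, the deleted code never checked get_vm_area() for NULL before
dereferencing area->addr; the generic implementation handles that failure path.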
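Note on the vm_fault.c hunks: the open-coded lock/lookup/stack-expansion sequence is replaced
by the common lock_mm_and_find_vma() helper, which is why the new bad_area_nosemaphore label
exists: on failure the helper returns NULL with the mmap lock already dropped, so the error
path must not unlock again. A simplified sketch of the contract the caller now relies on (the
real helper in mm/memory.c also handles lock-upgrade details omitted here):

	struct vm_area_struct *vma;

	vma = lock_mm_and_find_vma(mm, address, regs);
	/*
	 * NULL:     no usable VMA, and the stack could not be grown to
	 *           cover address; the mmap lock is NOT held, so jump
	 *           past the mmap_read_unlock() in the error path.
	 * non-NULL: vma covers address and the mmap read lock IS held.
	 */

The VM_FAULT_COMPLETED check is similar bookkeeping: handle_mm_fault() can now complete the
fault and release the mmap lock itself, in which case the handler must return without
touching the lock again.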