Diffstat (limited to 'arch/nios2/mm')
-rw-r--r--  arch/nios2/mm/init.c    | 49
-rw-r--r--  arch/nios2/mm/pgtable.c |  3
-rw-r--r--  arch/nios2/mm/tlb.c     | 18
3 files changed, 43 insertions(+), 27 deletions(-)
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 7bc82ee889c9..94efa3de3933 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -26,6 +26,7 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/binfmts.h>
+#include <linux/execmem.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -50,7 +51,7 @@ void __init paging_init(void)
pagetable_init();
pgd_current = swapper_pg_dir;
- max_zone_pfn[ZONE_NORMAL] = max_mapnr;
+ max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
/* pass the memory from the bootmem allocator to the main allocator */
free_area_init(max_zone_pfn);
@@ -59,20 +60,6 @@ void __init paging_init(void)
(unsigned long)empty_zero_page + PAGE_SIZE);
}
-void __init mem_init(void)
-{
- unsigned long end_mem = memory_end; /* this must not include
- kernel stack at top */
-
- pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
-
- end_mem &= PAGE_MASK;
- high_memory = __va(end_mem);
-
- /* this will put all memory onto the freelists */
- memblock_free_all();
-}
-
void __init mmu_init(void)
{
flush_tlb_all();
@@ -81,6 +68,10 @@ void __init mmu_init(void)
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
pte_t invalid_pte_table[PTRS_PER_PTE] __aligned(PAGE_SIZE);
static struct page *kuser_page[1];
+static struct vm_special_mapping vdso_mapping = {
+ .name = "[vdso]",
+ .pages = kuser_page,
+};
static int alloc_kuser_page(void)
{
@@ -105,18 +96,18 @@ arch_initcall(alloc_kuser_page);
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mm_struct *mm = current->mm;
- int ret;
+ struct vm_area_struct *vma;
mmap_write_lock(mm);
/* Map kuser helpers to user space address */
- ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
+ vma = _install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD |
- VM_MAYEXEC, kuser_page);
+ VM_MAYEXEC, &vdso_mapping);
mmap_write_unlock(mm);
- return ret;
+ return IS_ERR(vma) ? PTR_ERR(vma) : 0;
}
const char *arch_vma_name(struct vm_area_struct *vma)
@@ -143,3 +134,23 @@ static const pgprot_t protection_map[16] = {
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = MKP(1, 1, 1)
};
DECLARE_VM_GET_PAGE_PROT
+
+#ifdef CONFIG_EXECMEM
+static struct execmem_info execmem_info __ro_after_init;
+
+struct execmem_info __init *execmem_arch_setup(void)
+{
+ execmem_info = (struct execmem_info){
+ .ranges = {
+ [EXECMEM_DEFAULT] = {
+ .start = MODULES_VADDR,
+ .end = MODULES_END,
+ .pgprot = PAGE_KERNEL_EXEC,
+ .alignment = 1,
+ },
+ },
+ };
+
+ return &execmem_info;
+}
+#endif /* CONFIG_EXECMEM */
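
Taken together, the init.c hunks above size ZONE_NORMAL from max_low_pfn instead of max_mapnr, drop the architecture-specific mem_init(), switch the kuser helper page to _install_special_mapping() with a named struct vm_special_mapping, and add execmem_arch_setup() so the generic execmem allocator places executable allocations in [MODULES_VADDR, MODULES_END). As a rough illustration only (not part of the patch), such allocations go through the <linux/execmem.h> API along these lines; alloc_exec_area()/free_exec_area() are made-up wrapper names:

#include <linux/execmem.h>

/*
 * Illustrative sketch: the generic execmem layer resolves EXECMEM_DEFAULT
 * to the range reported by execmem_arch_setup() above, so executable
 * allocations land between MODULES_VADDR and MODULES_END with
 * PAGE_KERNEL_EXEC protections.
 */
static void *alloc_exec_area(size_t size)
{
        return execmem_alloc(EXECMEM_DEFAULT, size);
}

static void free_exec_area(void *ptr)
{
        execmem_free(ptr);
}
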
diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c
index 7c76e8a7447a..6470ed378782 100644
--- a/arch/nios2/mm/pgtable.c
+++ b/arch/nios2/mm/pgtable.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <asm/cpuinfo.h>
+#include <asm/pgalloc.h>
/* pteaddr:
* ptbase | vpn* | zero
@@ -54,7 +55,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret, *init;
- ret = (pgd_t *) __get_free_page(GFP_KERNEL);
+ ret = __pgd_alloc(mm, 0);
if (ret) {
init = pgd_offset(&init_mm, 0UL);
pgd_init(ret);
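
The pgtable.c hunk swaps the open-coded __get_free_page() for the generic __pgd_alloc(mm, 0) helper, which allocates the PGD through the ptdesc machinery instead of as a bare page. Roughly, and leaving aside the PGD constructor and the init_mm GFP handling the real asm-generic helper performs, the allocation amounts to the simplified stand-in below (a sketch, not the actual implementation):

/*
 * Simplified sketch of a ptdesc-based PGD allocation: grab 2^order pages
 * with page-table GFP flags and return their virtual address. The real
 * __pgd_alloc() additionally selects kernel GFP flags when mm is &init_mm
 * and runs the PGD constructor so the page is accounted as a page table.
 */
static pgd_t *pgd_alloc_sketch(struct mm_struct *mm, unsigned int order)
{
        struct ptdesc *ptdesc;

        ptdesc = pagetable_alloc(GFP_PGTABLE_USER & ~__GFP_HIGHMEM, order);
        if (!ptdesc)
                return NULL;

        return ptdesc_address(ptdesc);
}
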
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
index f90ac35f05f3..a9cbe20f9e79 100644
--- a/arch/nios2/mm/tlb.c
+++ b/arch/nios2/mm/tlb.c
@@ -144,10 +144,11 @@ static void flush_tlb_one(unsigned long addr)
if (((pteaddr >> 2) & 0xfffff) != (addr >> PAGE_SHIFT))
continue;
+ tlbmisc = RDCTL(CTL_TLBMISC);
pr_debug("Flush entry by writing way=%dl pid=%ld\n",
- way, (pid_misc >> TLBMISC_PID_SHIFT));
+ way, ((tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK));
- tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+ tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT) | (tlbmisc & TLBMISC_PID);
WRCTL(CTL_TLBMISC, tlbmisc);
WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
WRCTL(CTL_TLBACC, 0);
@@ -237,7 +238,8 @@ void flush_tlb_pid(unsigned long mmu_pid)
if (pid != mmu_pid)
continue;
- tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT);
+ tlbmisc = TLBMISC_WE | (way << TLBMISC_WAY_SHIFT) |
+ (pid << TLBMISC_PID_SHIFT);
WRCTL(CTL_TLBMISC, tlbmisc);
WRCTL(CTL_TLBACC, 0);
}
@@ -272,15 +274,17 @@ void flush_tlb_all(void)
/* remember pid/way until we return */
get_misc_and_pid(&org_misc, &pid_misc);
- /* Start at way 0, way is auto-incremented after each TLBACC write */
- WRCTL(CTL_TLBMISC, TLBMISC_WE);
-
/* Map each TLB entry to physcal address 0 with no-access and a
bad ptbase */
for (line = 0; line < cpuinfo.tlb_num_lines; line++) {
WRCTL(CTL_PTEADDR, pteaddr_invalid(addr));
- for (way = 0; way < cpuinfo.tlb_num_ways; way++)
+ for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+ // Code such as replace_tlb_one_pid assumes that no duplicate entries exist
+ // for a single address across ways, so also use way as a dummy PID
+ WRCTL(CTL_TLBMISC, TLBMISC_WE | (way << TLBMISC_WAY_SHIFT) |
+ (way << TLBMISC_PID_SHIFT));
WRCTL(CTL_TLBACC, 0);
+ }
addr += PAGE_SIZE;
}
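
The tlb.c hunks make every CTL_TLBMISC write carry an explicit PID: flush_tlb_one() preserves the PID of the entry it invalidates, flush_tlb_pid() writes back the PID it matched, and flush_tlb_all() reuses the way index as a dummy PID so no two ways end up holding an identical (address, PID) pair, which code such as replace_tlb_one_pid relies on. A small helper like the hypothetical one below captures the field packing these hunks now do inline:

/*
 * Hypothetical helper (not in the patch): compose a CTL_TLBMISC value
 * that selects a way for the write and tags the entry with an explicit
 * PID, mirroring what the updated flush paths now do by hand.
 */
static unsigned long tlbmisc_for_write(unsigned long way, unsigned long pid)
{
        return TLBMISC_WE |
               (way << TLBMISC_WAY_SHIFT) |
               ((pid & TLBMISC_PID_MASK) << TLBMISC_PID_SHIFT);
}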