Diffstat (limited to 'arch/arc/mm')
-rw-r--r-- | arch/arc/mm/dma.c     |  3
-rw-r--r-- | arch/arc/mm/init.c    | 25
-rw-r--r-- | arch/arc/mm/ioremap.c |  6
-rw-r--r-- | arch/arc/mm/mmap.c    |  7
-rw-r--r-- | arch/arc/mm/tlb.c     |  4
-rw-r--r-- | arch/arc/mm/tlbex.S   |  8
6 files changed, 13 insertions, 40 deletions
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 197707bc7658..6b85e94f3275 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 /*
  * Plug in direct dma map ops.
  */
-void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-                        bool coherent)
+void arch_setup_dma_ops(struct device *dev, bool coherent)
 {
         /*
          * IOC hardware snoops all DMA traffic keeping the caches consistent
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 6a71b23f1383..a73cc94f806e 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -150,41 +150,18 @@ void __init setup_arch_memory(void)
          */
         max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
 
-        high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
-
         arch_pfn_offset = min(min_low_pfn, min_high_pfn);
         kmap_init();
-
-#else /* CONFIG_HIGHMEM */
-        /* pfn_valid() uses this when FLATMEM=y and HIGHMEM=n */
-        max_mapnr = max_low_pfn - min_low_pfn;
-
 #endif /* CONFIG_HIGHMEM */
 
         free_area_init(max_zone_pfn);
 }
 
-static void __init highmem_init(void)
+void __init arch_mm_preinit(void)
 {
 #ifdef CONFIG_HIGHMEM
-        unsigned long tmp;
-
         memblock_phys_free(high_mem_start, high_mem_sz);
-        for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
-                free_highmem_page(pfn_to_page(tmp));
 #endif
-}
-
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
-{
-        memblock_free_all();
-        highmem_init();
 
         BUILD_BUG_ON((PTRS_PER_PGD * sizeof(pgd_t)) > PAGE_SIZE);
         BUILD_BUG_ON((PTRS_PER_PUD * sizeof(pud_t)) > PAGE_SIZE);
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index b07004d53267..fd8897a0e52c 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -32,7 +32,7 @@ void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
                 return (void __iomem *)(u32)paddr;
 
         return ioremap_prot(paddr, size,
-                            pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+                            pgprot_noncached(PAGE_KERNEL));
 }
 EXPORT_SYMBOL(ioremap);
 
@@ -44,10 +44,8 @@ EXPORT_SYMBOL(ioremap);
  * might need finer access control (R/W/X)
  */
 void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
-                           unsigned long flags)
+                           pgprot_t prot)
 {
-        pgprot_t prot = __pgprot(flags);
-
         /* force uncached */
         return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
 }
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 3c1c7ae73292..2185afe8d59f 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -23,11 +23,12 @@
  */
 unsigned long
 arch_get_unmapped_area(struct file *filp, unsigned long addr,
-                unsigned long len, unsigned long pgoff, unsigned long flags)
+                unsigned long len, unsigned long pgoff,
+                unsigned long flags, vm_flags_t vm_flags)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma;
-        struct vm_unmapped_area_info info;
+        struct vm_unmapped_area_info info = {};
 
         /*
          * We enforce the MAP_FIXED case.
@@ -51,11 +52,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
                 return addr;
         }
 
-        info.flags = 0;
         info.length = len;
         info.low_limit = mm->mmap_base;
         info.high_limit = TASK_SIZE;
-        info.align_mask = 0;
         info.align_offset = pgoff << PAGE_SHIFT;
         return vm_unmapped_area(&info);
 }
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index ad702b49aeb3..cae4a7aae0ed 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -212,7 +212,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
         unsigned long flags;
 
         /* If range @start to @end is more than 32 TLB entries deep,
-         * its better to move to a new ASID rather than searching for
+         * it's better to move to a new ASID rather than searching for
          * individual entries and then shooting them down
          *
          * The calc above is rough, doesn't account for unaligned parts,
@@ -408,7 +408,7 @@ static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *p
          * -More importantly it makes this handler inconsistent with fast-path
          * TLB Refill handler which always deals with "current"
          *
-         * Lets see the use cases when current->mm != vma->mm and we land here
+         * Let's see the use cases when current->mm != vma->mm and we land here
          *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
          *     Here VM wants to pre-install a TLB entry for user stack while
          *     current->mm still points to pre-execve mm (hence the condition).
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index e054780a8fe0..dc65e87a531f 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -5,19 +5,19 @@
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  *
  * Vineetg: April 2011 :
- *  -MMU v1: moved out legacy code into a seperate file
+ *  -MMU v1: moved out legacy code into a separate file
  *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
  *           helps avoid a shift when preparing PD0 from PTE
  *
  * Vineetg: July 2009
- *  -For MMU V2, we need not do heuristics at the time of commiting a D-TLB
- *   entry, so that it doesn't knock out it's I-TLB entry
+ *  -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ *   entry, so that it doesn't knock out its I-TLB entry
  *  -Some more fine tuning:
  *   bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
  *
  * Vineetg: July 2009
  *  -Practically rewrote the I/D TLB Miss handlers
- *   Now 40 and 135 instructions a peice as compared to 131 and 449 resp.
+ *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
  *   Hence Leaner by 1.5 K
  *   Used Conditional arithmetic to replace excessive branching
  *   Also used short instructions wherever possible
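
Note on the ioremap.c hunks: ioremap_prot() now takes a pgprot_t rather than a raw unsigned long, so callers hand protection bits over directly instead of round-tripping through pgprot_val()/__pgprot(). A minimal caller-side sketch under the new prototype; map_regs() is a hypothetical driver helper and not part of this diff:

#include <linux/io.h>
#include <linux/pgtable.h>

/* Hypothetical helper: map a device register window uncached.
 * With the updated prototype the pgprot_t is passed as-is; no
 * pgprot_val()/__pgprot() conversion is needed. */
static void __iomem *map_regs(phys_addr_t base, size_t len)
{
	return ioremap_prot(base, len, pgprot_noncached(PAGE_KERNEL));
}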
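
Note on the mmap.c hunks: struct vm_unmapped_area_info is now zero-initialized at its definition with "= {}", which is why the explicit info.flags = 0 and info.align_mask = 0 stores become redundant and are dropped. A standalone userspace sketch of the same idiom (the struct and field names are illustrative stand-ins, not the kernel's):

#include <stdio.h>

/* Illustrative stand-in for vm_unmapped_area_info */
struct area_request {
	unsigned long flags;
	unsigned long length;
	unsigned long align_mask;
	unsigned long align_offset;
};

int main(void)
{
	struct area_request req = {};	/* every member starts out zero */

	req.length = 4096;		/* only the fields that matter are set */
	req.align_offset = 0x1000;

	printf("flags=%lu align_mask=%lu\n", req.flags, req.align_mask);
	return 0;
}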