Diffstat (limited to 'arch/mips/mm')
-rw-r--r--   arch/mips/mm/Makefile      1
-rw-r--r--   arch/mips/mm/c-octeon.c    5
-rw-r--r--   arch/mips/mm/c-r4k.c      45
-rw-r--r--   arch/mips/mm/c-tx39.c    414
-rw-r--r--   arch/mips/mm/cache.c       8
-rw-r--r--   arch/mips/mm/context.c     5
-rw-r--r--   arch/mips/mm/fault.c      29
-rw-r--r--   arch/mips/mm/init.c       14
-rw-r--r--   arch/mips/mm/page.c        5
-rw-r--r--   arch/mips/mm/pgtable.c     2
-rw-r--r--   arch/mips/mm/physaddr.c   14
-rw-r--r--   arch/mips/mm/tlb-r3k.c    40
-rw-r--r--   arch/mips/mm/tlbex.c      77
13 files changed, 56 insertions, 603 deletions
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 4acc4f3d31f8..304692391519 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -36,7 +36,6 @@ obj-$(CONFIG_CPU_R3K_TLB) += tlb-r3k.o obj-$(CONFIG_CPU_R4K_CACHE_TLB) += c-r4k.o cex-gen.o tlb-r4k.o obj-$(CONFIG_CPU_R3000) += c-r3k.o obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o -obj-$(CONFIG_CPU_TX39XX) += c-tx39.o obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c index ec2ae501539a..c7ed589de882 100644 --- a/arch/mips/mm/c-octeon.c +++ b/arch/mips/mm/c-octeon.c @@ -23,7 +23,6 @@ #include <asm/r4kcache.h> #include <asm/traps.h> #include <asm/mmu_context.h> -#include <asm/war.h> #include <asm/octeon/octeon.h> @@ -332,7 +331,7 @@ static void co_cache_error_call_notifiers(unsigned long val) } /* - * Called when the the exception is recoverable + * Called when the exception is recoverable */ asmlinkage void cache_parity_error_octeon_recoverable(void) @@ -341,7 +340,7 @@ asmlinkage void cache_parity_error_octeon_recoverable(void) } /* - * Called when the the exception is not recoverable + * Called when the exception is not recoverable */ asmlinkage void cache_parity_error_octeon_non_recoverable(void) diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 50261fd8eb21..a549fa98c2f4 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -33,7 +33,6 @@ #include <asm/r4kcache.h> #include <asm/sections.h> #include <asm/mmu_context.h> -#include <asm/war.h> #include <asm/cacheflush.h> /* for run_uncached() */ #include <asm/traps.h> #include <asm/mips-cps.h> @@ -1195,50 +1194,6 @@ static void probe_pcache(void) c->options |= MIPS_CPU_PREFETCH; break; - case CPU_VR4133: - write_c0_config(config & ~VR41_CONF_P4K); - fallthrough; - case CPU_VR4131: - /* Workaround for cache instruction bug of VR4131 */ - if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U || - c->processor_id == 0x0c82U) { - config |= 0x00400000U; - if (c->processor_id == 0x0c80U) - config |= VR41_CONF_BP; - write_c0_config(config); - } else - c->options |= MIPS_CPU_CACHE_CDEX_P; - - icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); - c->icache.linesz = 16 << ((config & CONF_IB) >> 5); - c->icache.ways = 2; - c->icache.waybit = __ffs(icache_size/2); - - dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); - c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); - c->dcache.ways = 2; - c->dcache.waybit = __ffs(dcache_size/2); - break; - - case CPU_VR41XX: - case CPU_VR4111: - case CPU_VR4121: - case CPU_VR4122: - case CPU_VR4181: - case CPU_VR4181A: - icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); - c->icache.linesz = 16 << ((config & CONF_IB) >> 5); - c->icache.ways = 1; - c->icache.waybit = 0; /* doesn't matter */ - - dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); - c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); - c->dcache.ways = 1; - c->dcache.waybit = 0; /* does not matter */ - - c->options |= MIPS_CPU_CACHE_CDEX_P; - break; - case CPU_RM7000: rm7k_erratum31(); diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c deleted file mode 100644 index 03dfbb40ec73..000000000000 --- a/arch/mips/mm/c-tx39.c +++ /dev/null @@ -1,414 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * r2300.c: R2000 and R3000 specific mmu/cache code. - * - * Copyright (C) 1996 David S. 
Miller (davem@davemloft.net) - * - * with a lot of changes to make this thing work for R3000s - * Tx39XX R4k style caches added. HK - * Copyright (C) 1998, 1999, 2000 Harald Koerfgen - * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov - */ -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/mm.h> - -#include <asm/cacheops.h> -#include <asm/page.h> -#include <asm/mmu_context.h> -#include <asm/isadep.h> -#include <asm/io.h> -#include <asm/bootinfo.h> -#include <asm/cpu.h> - -/* For R3000 cores with R4000 style caches */ -static unsigned long icache_size, dcache_size; /* Size in bytes */ - -#include <asm/r4kcache.h> - -/* This sequence is required to ensure icache is disabled immediately */ -#define TX39_STOP_STREAMING() \ -__asm__ __volatile__( \ - ".set push\n\t" \ - ".set noreorder\n\t" \ - "b 1f\n\t" \ - "nop\n\t" \ - "1:\n\t" \ - ".set pop" \ - ) - -/* TX39H-style cache flush routines. */ -static void tx39h_flush_icache_all(void) -{ - unsigned long flags, config; - - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - blast_icache16(); - write_c0_conf(config); - local_irq_restore(flags); -} - -static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size) -{ - /* Catch bad driver code */ - BUG_ON(size == 0); - - iob(); - blast_inv_dcache_range(addr, addr + size); -} - - -/* TX39H2,TX39H3 */ -static inline void tx39_blast_dcache_page(unsigned long addr) -{ - if (current_cpu_type() != CPU_TX3912) - blast_dcache16_page(addr); -} - -static inline void tx39_blast_dcache_page_indexed(unsigned long addr) -{ - blast_dcache16_page_indexed(addr); -} - -static inline void tx39_blast_dcache(void) -{ - blast_dcache16(); -} - -static inline void tx39_blast_icache_page(unsigned long addr) -{ - unsigned long flags, config; - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - blast_icache16_page(addr); - write_c0_conf(config); - local_irq_restore(flags); -} - -static inline void tx39_blast_icache_page_indexed(unsigned long addr) -{ - unsigned long flags, config; - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - blast_icache16_page_indexed(addr); - write_c0_conf(config); - local_irq_restore(flags); -} - -static inline void tx39_blast_icache(void) -{ - unsigned long flags, config; - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - blast_icache16(); - write_c0_conf(config); - local_irq_restore(flags); -} - -static void tx39__flush_cache_vmap(void) -{ - tx39_blast_dcache(); -} - -static void tx39__flush_cache_vunmap(void) -{ - tx39_blast_dcache(); -} - -static inline void tx39_flush_cache_all(void) -{ - if (!cpu_has_dc_aliases) - return; - - tx39_blast_dcache(); -} - -static inline void tx39___flush_cache_all(void) -{ - tx39_blast_dcache(); - tx39_blast_icache(); -} - -static void tx39_flush_cache_mm(struct mm_struct *mm) -{ - if (!cpu_has_dc_aliases) - return; - - if (cpu_context(smp_processor_id(), mm) != 0) - tx39_blast_dcache(); -} - -static void tx39_flush_cache_range(struct vm_area_struct *vma, - unsigned long start, unsigned long end) -{ - if (!cpu_has_dc_aliases) - return; - if (!(cpu_context(smp_processor_id(), 
vma->vm_mm))) - return; - - tx39_blast_dcache(); -} - -static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn) -{ - int exec = vma->vm_flags & VM_EXEC; - struct mm_struct *mm = vma->vm_mm; - pmd_t *pmdp; - pte_t *ptep; - - /* - * If ownes no valid ASID yet, cannot possibly have gotten - * this page into the cache. - */ - if (cpu_context(smp_processor_id(), mm) == 0) - return; - - page &= PAGE_MASK; - pmdp = pmd_off(mm, page); - ptep = pte_offset_kernel(pmdp, page); - - /* - * If the page isn't marked valid, the page cannot possibly be - * in the cache. - */ - if (!(pte_val(*ptep) & _PAGE_PRESENT)) - return; - - /* - * Doing flushes for another ASID than the current one is - * too difficult since stupid R4k caches do a TLB translation - * for every cache flush operation. So we do indexed flushes - * in that case, which doesn't overly flush the cache too much. - */ - if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) { - if (cpu_has_dc_aliases || exec) - tx39_blast_dcache_page(page); - if (exec) - tx39_blast_icache_page(page); - - return; - } - - /* - * Do indexed flush, too much work to get the (possible) TLB refills - * to work correctly. - */ - if (cpu_has_dc_aliases || exec) - tx39_blast_dcache_page_indexed(page); - if (exec) - tx39_blast_icache_page_indexed(page); -} - -static void local_tx39_flush_data_cache_page(void * addr) -{ - tx39_blast_dcache_page((unsigned long)addr); -} - -static void tx39_flush_data_cache_page(unsigned long addr) -{ - tx39_blast_dcache_page(addr); -} - -static void tx39_flush_icache_range(unsigned long start, unsigned long end) -{ - if (end - start > dcache_size) - tx39_blast_dcache(); - else - protected_blast_dcache_range(start, end); - - if (end - start > icache_size) - tx39_blast_icache(); - else { - unsigned long flags, config; - /* disable icache (set ICE#) */ - local_irq_save(flags); - config = read_c0_conf(); - write_c0_conf(config & ~TX39_CONF_ICE); - TX39_STOP_STREAMING(); - protected_blast_icache_range(start, end); - write_c0_conf(config); - local_irq_restore(flags); - } -} - -static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size) -{ - BUG(); -} - -static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size) -{ - unsigned long end; - - if (((size | addr) & (PAGE_SIZE - 1)) == 0) { - end = addr + size; - do { - tx39_blast_dcache_page(addr); - addr += PAGE_SIZE; - } while(addr != end); - } else if (size > dcache_size) { - tx39_blast_dcache(); - } else { - blast_dcache_range(addr, addr + size); - } -} - -static void tx39_dma_cache_inv(unsigned long addr, unsigned long size) -{ - unsigned long end; - - if (((size | addr) & (PAGE_SIZE - 1)) == 0) { - end = addr + size; - do { - tx39_blast_dcache_page(addr); - addr += PAGE_SIZE; - } while(addr != end); - } else if (size > dcache_size) { - tx39_blast_dcache(); - } else { - blast_inv_dcache_range(addr, addr + size); - } -} - -static __init void tx39_probe_cache(void) -{ - unsigned long config; - - config = read_c0_conf(); - - icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >> - TX39_CONF_ICS_SHIFT)); - dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >> - TX39_CONF_DCS_SHIFT)); - - current_cpu_data.icache.linesz = 16; - switch (current_cpu_type()) { - case CPU_TX3912: - current_cpu_data.icache.ways = 1; - current_cpu_data.dcache.ways = 1; - current_cpu_data.dcache.linesz = 4; - break; - - case CPU_TX3927: - current_cpu_data.icache.ways = 2; - current_cpu_data.dcache.ways = 2; - 
current_cpu_data.dcache.linesz = 16; - break; - - case CPU_TX3922: - default: - current_cpu_data.icache.ways = 1; - current_cpu_data.dcache.ways = 1; - current_cpu_data.dcache.linesz = 16; - break; - } -} - -void tx39_cache_init(void) -{ - extern void build_clear_page(void); - extern void build_copy_page(void); - unsigned long config; - - config = read_c0_conf(); - config &= ~TX39_CONF_WBON; - write_c0_conf(config); - - tx39_probe_cache(); - - switch (current_cpu_type()) { - case CPU_TX3912: - /* TX39/H core (writethru direct-map cache) */ - __flush_cache_vmap = tx39__flush_cache_vmap; - __flush_cache_vunmap = tx39__flush_cache_vunmap; - flush_cache_all = tx39h_flush_icache_all; - __flush_cache_all = tx39h_flush_icache_all; - flush_cache_mm = (void *) tx39h_flush_icache_all; - flush_cache_range = (void *) tx39h_flush_icache_all; - flush_cache_page = (void *) tx39h_flush_icache_all; - flush_icache_range = (void *) tx39h_flush_icache_all; - local_flush_icache_range = (void *) tx39h_flush_icache_all; - - local_flush_data_cache_page = (void *) tx39h_flush_icache_all; - flush_data_cache_page = (void *) tx39h_flush_icache_all; - - _dma_cache_wback_inv = tx39h_dma_cache_wback_inv; - - shm_align_mask = PAGE_SIZE - 1; - - break; - - case CPU_TX3922: - case CPU_TX3927: - default: - /* TX39/H2,H3 core (writeback 2way-set-associative cache) */ - /* board-dependent init code may set WBON */ - - __flush_cache_vmap = tx39__flush_cache_vmap; - __flush_cache_vunmap = tx39__flush_cache_vunmap; - - flush_cache_all = tx39_flush_cache_all; - __flush_cache_all = tx39___flush_cache_all; - flush_cache_mm = tx39_flush_cache_mm; - flush_cache_range = tx39_flush_cache_range; - flush_cache_page = tx39_flush_cache_page; - flush_icache_range = tx39_flush_icache_range; - local_flush_icache_range = tx39_flush_icache_range; - - __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range; - - local_flush_data_cache_page = local_tx39_flush_data_cache_page; - flush_data_cache_page = tx39_flush_data_cache_page; - - _dma_cache_wback_inv = tx39_dma_cache_wback_inv; - _dma_cache_wback = tx39_dma_cache_wback_inv; - _dma_cache_inv = tx39_dma_cache_inv; - - shm_align_mask = max_t(unsigned long, - (dcache_size / current_cpu_data.dcache.ways) - 1, - PAGE_SIZE - 1); - - break; - } - - __flush_icache_user_range = flush_icache_range; - __local_flush_icache_user_range = local_flush_icache_range; - - current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways; - current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways; - - current_cpu_data.icache.sets = - current_cpu_data.icache.waysize / current_cpu_data.icache.linesz; - current_cpu_data.dcache.sets = - current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz; - - if (current_cpu_data.dcache.waysize > PAGE_SIZE) - current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES; - - current_cpu_data.icache.waybit = 0; - current_cpu_data.dcache.waybit = 0; - - pr_info("Primary instruction cache %ldkB, linesize %d bytes\n", - icache_size >> 10, current_cpu_data.icache.linesz); - pr_info("Primary data cache %ldkB, linesize %d bytes\n", - dcache_size >> 10, current_cpu_data.dcache.linesz); - - build_clear_page(); - build_copy_page(); - tx39h_flush_icache_all(); -} diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 830ab91e574f..11b3e7ddafd5 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -159,6 +159,9 @@ EXPORT_SYMBOL(_page_cachable_default); #define PM(p) __pgprot(_page_cachable_default | (p)) +static pgprot_t protection_map[16] 
__ro_after_init; +DECLARE_VM_GET_PAGE_PROT + static inline void setup_protection_map(void) { protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); @@ -195,11 +198,6 @@ void cpu_cache_init(void) r4k_cache_init(); } - if (cpu_has_tx39_cache) { - extern void __weak tx39_cache_init(void); - - tx39_cache_init(); - } if (cpu_has_octeon_cache) { extern void __weak octeon_cache_init(void); diff --git a/arch/mips/mm/context.c b/arch/mips/mm/context.c index b25564090939..966f40066f03 100644 --- a/arch/mips/mm/context.c +++ b/arch/mips/mm/context.c @@ -67,7 +67,7 @@ static void flush_context(void) int cpu; /* Update the list of reserved MMIDs and the MMID bitmap */ - bitmap_clear(mmid_map, 0, num_mmids); + bitmap_zero(mmid_map, num_mmids); /* Reserve an MMID for kmap/wired entries */ __set_bit(MMID_KERNEL_WIRED, mmid_map); @@ -277,8 +277,7 @@ static int mmid_init(void) WARN_ON(num_mmids <= num_possible_cpus()); atomic64_set(&mmid_version, asid_first_version(0)); - mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map), - GFP_KERNEL); + mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL); if (!mmid_map) panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids); diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index e7abda9c013f..a27045f5a556 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -35,7 +35,7 @@ int show_unhandled_signals = 1; * and the problem, and then passes it off to one of the appropriate * routines. */ -static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, +static void __do_page_fault(struct pt_regs *regs, unsigned long write, unsigned long address) { struct vm_area_struct * vma = NULL; @@ -162,6 +162,10 @@ good_area: return; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -171,18 +175,17 @@ good_area: goto do_sigbus; BUG(); } - if (flags & FAULT_FLAG_ALLOW_RETRY) { - if (fault & VM_FAULT_RETRY) { - flags |= FAULT_FLAG_TRIED; - /* - * No need to mmap_read_unlock(mm) as we would - * have already released it in __lock_page_or_retry - * in mm/filemap.c. - */ + if (fault & VM_FAULT_RETRY) { + flags |= FAULT_FLAG_TRIED; - goto retry; - } + /* + * No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. 
+ */ + + goto retry; } mmap_read_unlock(mm); @@ -323,8 +326,9 @@ vmalloc_fault: } #endif } +NOKPROBE_SYMBOL(__do_page_fault); -asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, +asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write, unsigned long address) { enum ctx_state prev_state; @@ -333,3 +337,4 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, __do_page_fault(regs, write, address); exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_page_fault); diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 325e1552cbea..5a8002839550 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -519,17 +519,9 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) return node_distance(cpu_to_node(from), cpu_to_node(to)); } -static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, - size_t align) +static int __init pcpu_cpu_to_node(int cpu) { - return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS), - MEMBLOCK_ALLOC_ACCESSIBLE, - cpu_to_node(cpu)); -} - -static void __init pcpu_fc_free(void *ptr, size_t size) -{ - memblock_free(ptr, size); + return cpu_to_node(cpu); } void __init setup_per_cpu_areas(void) @@ -545,7 +537,7 @@ void __init setup_per_cpu_areas(void) rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, pcpu_cpu_distance, - pcpu_fc_alloc, pcpu_fc_free); + pcpu_cpu_to_node); if (rc < 0) panic("Failed to initialize percpu areas."); diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index 504bc4047c4c..d3b4459d0fe8 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -25,7 +25,6 @@ #include <asm/mipsregs.h> #include <asm/mmu_context.h> #include <asm/cpu.h> -#include <asm/war.h> #ifdef CONFIG_SIBYTE_DMA_PAGEOPS #include <asm/sibyte/sb1250.h> @@ -103,7 +102,9 @@ static int cache_line_size; static inline void pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off) { - if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) { + if (cpu_has_64bit_gp_regs && + IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) && + r4k_daddiu_bug()) { if (off > 0x7fff) { uasm_i_lui(buf, T9, uasm_rel_hi(off)); uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off)); diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c index 05560b042d82..3b7590660a04 100644 --- a/arch/mips/mm/pgtable.c +++ b/arch/mips/mm/pgtable.c @@ -12,7 +12,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *ret, *init; - ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER); + ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init((unsigned long)ret); diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c index a1ced5e44951..f9b8c85e9843 100644 --- a/arch/mips/mm/physaddr.c +++ b/arch/mips/mm/physaddr.c @@ -5,6 +5,7 @@ #include <linux/mmdebug.h> #include <linux/mm.h> +#include <asm/addrspace.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/page.h> @@ -12,15 +13,6 @@ static inline bool __debug_virt_addr_valid(unsigned long x) { - /* high_memory does not get immediately defined, and there - * are early callers of __pa() against PAGE_OFFSET - */ - if (!high_memory && x >= PAGE_OFFSET) - return true; - - if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) - return true; - /* * MAX_DMA_ADDRESS is a virtual address that may not correspond to an * actual physical address. 
Enough code relies on @@ -30,7 +22,9 @@ static inline bool __debug_virt_addr_valid(unsigned long x) if (x == MAX_DMA_ADDRESS) return true; - return false; + return x >= PAGE_OFFSET && (KSEGX(x) < KSEG2 || + IS_ENABLED(CONFIG_EVA) || + !IS_ENABLED(CONFIG_HIGHMEM)); } phys_addr_t __virt_to_phys(volatile const void *x) diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index a36622ebea55..53dfa2b9316b 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c @@ -36,8 +36,6 @@ extern void build_tlb_refill_handler(void); "nop\n\t" \ ".set pop\n\t") -int r3k_have_wired_reg; /* Should be in cpu_data? */ - /* TLB operations. */ static void local_flush_tlb_from(int entry) { @@ -62,7 +60,7 @@ void local_flush_tlb_all(void) printk("[tlball]"); #endif local_irq_save(flags); - local_flush_tlb_from(r3k_have_wired_reg ? read_c0_wired() : 8); + local_flush_tlb_from(8); local_irq_restore(flags); } @@ -224,34 +222,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, unsigned long old_ctx; static unsigned long wired = 0; - if (r3k_have_wired_reg) { /* TX39XX */ - unsigned long old_pagemask; - unsigned long w; - -#ifdef DEBUG_TLB - printk("[tlbwired<entry lo0 %8x, hi %8x\n, pagemask %8x>]\n", - entrylo0, entryhi, pagemask); -#endif - - local_irq_save(flags); - /* Save old context and create impossible VPN2 value */ - old_ctx = read_c0_entryhi() & asid_mask; - old_pagemask = read_c0_pagemask(); - w = read_c0_wired(); - write_c0_wired(w + 1); - write_c0_index(w << 8); - write_c0_pagemask(pagemask); - write_c0_entryhi(entryhi); - write_c0_entrylo0(entrylo0); - BARRIER; - tlb_write_indexed(); - - write_c0_entryhi(old_ctx); - write_c0_pagemask(old_pagemask); - local_flush_tlb_all(); - local_irq_restore(flags); - - } else if (wired < 8) { + if (wired < 8) { #ifdef DEBUG_TLB printk("[tlbwired<entry lo0 %8x, hi %8x\n>]\n", entrylo0, entryhi); @@ -272,13 +243,6 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, void tlb_init(void) { - switch (current_cpu_type()) { - case CPU_TX3922: - case CPU_TX3927: - r3k_have_wired_reg = 1; - write_c0_wired(0); /* Set to 8 on reset... */ - break; - } local_flush_tlb_from(0); build_tlb_refill_handler(); } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index b131e6a77383..80e05ee98d62 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -33,7 +33,6 @@ #include <asm/cacheflush.h> #include <asm/cpu-type.h> #include <asm/mmu_context.h> -#include <asm/war.h> #include <asm/uasm.h> #include <asm/setup.h> #include <asm/tlbex.h> @@ -587,25 +586,6 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l, tlbw(p); break; - case CPU_VR4111: - case CPU_VR4121: - case CPU_VR4122: - case CPU_VR4181: - case CPU_VR4181A: - uasm_i_nop(p); - uasm_i_nop(p); - tlbw(p); - uasm_i_nop(p); - uasm_i_nop(p); - break; - - case CPU_VR4131: - case CPU_VR4133: - uasm_i_nop(p); - uasm_i_nop(p); - tlbw(p); - break; - case CPU_XBURST: tlbw(p); uasm_i_nop(p); @@ -627,7 +607,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p, return; } - if (cpu_has_rixi && !!_PAGE_NO_EXEC) { + if (cpu_has_rixi && _PAGE_NO_EXEC != 0) { if (fill_includes_sw_bits) { UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); } else { @@ -819,7 +799,7 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, * everything but the lower xuseg addresses goes down * the module_alloc/vmalloc path. 
*/ - uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_bnez(p, r, ptr, label_vmalloc); } else { uasm_il_bltz(p, r, tmp, label_vmalloc); @@ -996,22 +976,6 @@ static void build_adjust_context(u32 **p, unsigned int ctx) unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); - switch (current_cpu_type()) { - case CPU_VR41XX: - case CPU_VR4111: - case CPU_VR4121: - case CPU_VR4122: - case CPU_VR4131: - case CPU_VR4181: - case CPU_VR4181A: - case CPU_VR4133: - shift += 2; - break; - - default: - break; - } - if (shift) UASM_i_SRL(p, ctx, ctx, shift); uasm_i_andi(p, ctx, ctx, mask); @@ -1128,7 +1092,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, UASM_i_SW(p, scratch, scratchpad_offset(0), 0); uasm_i_dsrl_safe(p, scratch, tmp, - PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_bnez(p, r, scratch, label_vmalloc); if (pgd_reg == -1) { @@ -1494,12 +1458,12 @@ static void setup_pw(void) #endif pgd_i = PGDIR_SHIFT; /* 1st level PGD */ #ifndef __PAGETABLE_PMD_FOLDED - pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER; + pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_TABLE_ORDER; pmd_i = PMD_SHIFT; /* 2nd level PMD */ pmd_w = PMD_SHIFT - PAGE_SHIFT; #else - pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER; + pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_TABLE_ORDER; #endif pt_i = PAGE_SHIFT; /* 3rd level PTE */ @@ -1537,7 +1501,7 @@ static void build_loongson3_tlb_refill_handler(void) if (check_for_high_segbits) { uasm_i_dmfc0(&p, K0, C0_BADVADDR); - uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3); + uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); uasm_il_beqz(&p, &r, K1, label_vmalloc); uasm_i_nop(&p); @@ -2066,7 +2030,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, UASM_i_MFC0(p, wr.r1, C0_BADVADDR); UASM_i_LW(p, wr.r2, 0, wr.r2); - UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2); + UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT - PTE_T_LOG2); uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2); UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1); @@ -2160,16 +2124,14 @@ static void build_r4000_tlb_load_handler(void) uasm_i_tlbr(&p); switch (current_cpu_type()) { - default: - if (cpu_has_mips_r2_exec_hazard) { - uasm_i_ehb(&p); - fallthrough; - case CPU_CAVIUM_OCTEON: case CPU_CAVIUM_OCTEON_PLUS: case CPU_CAVIUM_OCTEON2: - break; - } + break; + default: + if (cpu_has_mips_r2_exec_hazard) + uasm_i_ehb(&p); + break; } /* Examine entrylo 0 or 1 based on ptr. */ @@ -2236,15 +2198,14 @@ static void build_r4000_tlb_load_handler(void) uasm_i_tlbr(&p); switch (current_cpu_type()) { - default: - if (cpu_has_mips_r2_exec_hazard) { - uasm_i_ehb(&p); - case CPU_CAVIUM_OCTEON: case CPU_CAVIUM_OCTEON_PLUS: case CPU_CAVIUM_OCTEON2: - break; - } + break; + default: + if (cpu_has_mips_r2_exec_hazard) + uasm_i_ehb(&p); + break; } /* Examine entrylo 0 or 1 based on ptr. 
	 */
@@ -2569,7 +2530,7 @@ static void check_pabits(void)
 	unsigned long entry;
 	unsigned pabits, fillbits;
 
-	if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
+	if (!cpu_has_rixi || _PAGE_NO_EXEC == 0) {
 		/*
 		 * We'll only be making use of the fact that we can rotate bits
 		 * into the fill if the CPU supports RIXI, so don't bother
@@ -2615,7 +2576,7 @@ void build_tlb_refill_handler(void)
 	check_pabits();
 
 #ifdef CONFIG_64BIT
-	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3);
 #endif
 
 	if (cpu_has_3kex) {
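Aside on the context.c hunk above: it replaces the open-coded kcalloc(BITS_TO_LONGS(num_mmids), ...) allocation with bitmap_zalloc() and swaps bitmap_clear(mmid_map, 0, num_mmids) for bitmap_zero(). The sketch below shows that allocation pattern in isolation; it is not part of the patch, and the example_* helpers and their nbits parameter are hypothetical illustration only.

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/*
 * Illustrative only -- not part of the patch. Mirrors the mmid_map
 * change in context.c: allocate and clear the map with the dedicated
 * bitmap helpers instead of kcalloc()/bitmap_clear().
 */
static unsigned long *example_map;	/* hypothetical stand-in for mmid_map */

static int example_map_init(unsigned int nbits)
{
	/* bitmap_zalloc() sizes the buffer from the bit count and zeroes it */
	example_map = bitmap_zalloc(nbits, GFP_KERNEL);
	if (!example_map)
		return -ENOMEM;

	/* reserve bit 0, as the MMID code reserves MMID_KERNEL_WIRED */
	__set_bit(0, example_map);
	return 0;
}

static void example_map_reset(unsigned int nbits)
{
	/* bitmap_zero() takes the number of bits, not an offset + length */
	bitmap_zero(example_map, nbits);
}

static void example_map_exit(void)
{
	bitmap_free(example_map);
	example_map = NULL;
}

The helper pairs the allocation size with the bit count directly, so the BITS_TO_LONGS() arithmetic and the separate zeroing step in the old code are no longer needed.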