Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/book3s32/hash_low.S | 6 |
-rw-r--r-- | arch/powerpc/mm/book3s32/mmu.c | 7 |
-rw-r--r-- | arch/powerpc/mm/book3s64/hash_utils.c | 11 |
-rw-r--r-- | arch/powerpc/mm/book3s64/iommu_api.c | 10 |
-rw-r--r-- | arch/powerpc/mm/book3s64/pgtable.c | 7 |
-rw-r--r-- | arch/powerpc/mm/book3s64/radix_pgtable.c | 6 |
-rw-r--r-- | arch/powerpc/mm/book3s64/radix_tlb.c | 3 |
-rw-r--r-- | arch/powerpc/mm/fault.c | 11 |
-rw-r--r-- | arch/powerpc/mm/hugetlbpage.c | 29 |
-rw-r--r-- | arch/powerpc/mm/kasan/kasan_init_32.c | 85 |
-rw-r--r-- | arch/powerpc/mm/mem.c | 6 |
-rw-r--r-- | arch/powerpc/mm/mmu_decl.h | 6 |
-rw-r--r-- | arch/powerpc/mm/nohash/8xx.c | 13 |
-rw-r--r-- | arch/powerpc/mm/numa.c | 12 |
-rw-r--r-- | arch/powerpc/mm/pgtable_32.c | 1 |
-rw-r--r-- | arch/powerpc/mm/ptdump/ptdump.c | 6 |
16 files changed, 124 insertions, 95 deletions
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 8bbbd9775c8a..2015c4f96238 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -131,8 +131,10 @@ retry:
 	bne-	retry		/* retry if someone got there first */
 
 	mfsrin	r3,r4		/* get segment reg for segment */
+#ifndef CONFIG_VMAP_STACK
 	mfctr	r0
 	stw	r0,_CTR(r11)
+#endif
 	bl	create_hpte	/* add the hash table entry */
 
 #ifdef CONFIG_SMP
@@ -142,12 +144,16 @@ retry:
 	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
 #endif
 
+#ifdef CONFIG_VMAP_STACK
+	b	fast_hash_page_return
+#else
 	/* Return from the exception */
 	lwz	r5,_CTR(r11)
 	mtctr	r5
 	lwz	r0,GPR0(r11)
 	lwz	r8,GPR8(r11)
 	b	fast_exception_return
+#endif
 
 #ifdef CONFIG_SMP
 hash_page_out:
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 69b2419accef..f888cbb109b9 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -413,6 +413,7 @@ void __init MMU_init_hw(void)
 void __init MMU_init_hw_patch(void)
 {
 	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
+	unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;
 
 	if (ppc_md.progress)
 		ppc_md.progress("hash:patch", 0x345);
@@ -424,8 +425,7 @@ void __init MMU_init_hw_patch(void)
 	/*
 	 * Patch up the instructions in hashtable.S:create_hpte
 	 */
-	modify_instruction_site(&patch__hash_page_A0, 0xffff,
-				((unsigned int)Hash - PAGE_OFFSET) >> 16);
+	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
 	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
 	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
 	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
@@ -434,8 +434,7 @@ void __init MMU_init_hw_patch(void)
 	/*
 	 * Patch up the instructions in hashtable.S:flush_hash_page
 	 */
-	modify_instruction_site(&patch__flush_hash_A0, 0xffff,
-				((unsigned int)Hash - PAGE_OFFSET) >> 16);
+	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
 	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
 	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
 	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index b30435c7d804..523d4d39d11e 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -652,6 +652,7 @@ static void init_hpte_page_sizes(void)
 
 static void __init htab_init_page_sizes(void)
 {
+	bool aligned = true;
 	init_hpte_page_sizes();
 
 	if (!debug_pagealloc_enabled()) {
@@ -659,7 +660,15 @@ static void __init htab_init_page_sizes(void)
 		 * Pick a size for the linear mapping. Currently, we only
 		 * support 16M, 1M and 4K which is the default
 		 */
-		if (mmu_psize_defs[MMU_PAGE_16M].shift)
+		if (IS_ENABLED(STRICT_KERNEL_RWX) &&
+		    (unsigned long)_stext % 0x1000000) {
+			if (mmu_psize_defs[MMU_PAGE_16M].shift)
+				pr_warn("Kernel not 16M aligned, "
+					"disabling 16M linear map alignment");
+			aligned = false;
+		}
+
+		if (mmu_psize_defs[MMU_PAGE_16M].shift && aligned)
 			mmu_linear_psize = MMU_PAGE_16M;
 		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 			mmu_linear_psize = MMU_PAGE_1M;
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 56cc84520577..eba73ebd8ae5 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -103,7 +103,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
 	for (entry = 0; entry < entries; entry += chunk) {
 		unsigned long n = min(entries - entry, chunk);
 
-		ret = get_user_pages(ua + (entry << PAGE_SHIFT), n,
+		ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n,
 				FOLL_WRITE | FOLL_LONGTERM,
 				mem->hpages + entry, NULL);
 		if (ret == n) {
@@ -167,9 +167,8 @@ good_exit:
 	return 0;
 
 free_exit:
-	/* free the reference taken */
-	for (i = 0; i < pinned; i++)
-		put_page(mem->hpages[i]);
+	/* free the references taken */
+	unpin_user_pages(mem->hpages, pinned);
 
 	vfree(mem->hpas);
 	kfree(mem);
@@ -215,7 +214,8 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
 		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
 			SetPageDirty(page);
 
-		put_page(page);
+		unpin_user_page(page);
+
 		mem->hpas[i] = 0;
 	}
 }
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 75483b40fcb1..2bf7e1b4fd82 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -378,7 +378,6 @@ static inline void pgtable_free(void *table, int index)
 	}
 }
 
-#ifdef CONFIG_SMP
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
 	unsigned long pgf = (unsigned long)table;
@@ -395,12 +394,6 @@ void __tlb_remove_table(void *_table)
 
 	return pgtable_free(table, index);
 }
-#else
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
-{
-	return pgtable_free(table, index);
-}
-#endif
 
 #ifdef CONFIG_PROC_FS
 atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 974109bb85db..dd1bea45325c 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -337,7 +337,11 @@ static void __init radix_init_pgtable(void)
 	}
 
 	/* Find out how many PID bits are supported */
-	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+	if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
+		if (!mmu_pid_bits)
+			mmu_pid_bits = 20;
+		mmu_base_pid = 1;
+	} else if (cpu_has_feature(CPU_FTR_HVMODE)) {
 		if (!mmu_pid_bits)
 			mmu_pid_bits = 20;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index a95175c0972b..03f43c924e00 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1161,6 +1161,9 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
 	if (unlikely(pid == MMU_NO_CONTEXT))
 		return;
 
+	if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+		return;
+
 	/*
 	 * If this context hasn't run on that CPU before and KVM is
 	 * around, there's a slim chance that the guest on another
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index b5047f9b5dec..8db0507619e2 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -233,7 +233,7 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
 
 	// Read/write fault in a valid region (the exception table search passed
 	// above), but blocked by KUAP is bad, it can never succeed.
-	if (bad_kuap_fault(regs, is_write))
+	if (bad_kuap_fault(regs, address, is_write))
 		return true;
 
 	// What's left? Kernel fault on user in well defined regions (extable
@@ -279,12 +279,8 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
 		if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
 		    access_ok(nip, sizeof(*nip))) {
 			unsigned int inst;
-			int res;
 
-			pagefault_disable();
-			res = __get_user_inatomic(inst, nip);
-			pagefault_enable();
-			if (!res)
+			if (!probe_user_read(&inst, nip, sizeof(inst)))
 				return !store_updates_sp(inst);
 			*must_retry = true;
 		}
@@ -354,6 +350,9 @@ static void sanity_check_fault(bool is_write, bool is_user,
 	 * Userspace trying to access kernel address, we get PROTFAULT for that.
 	 */
 	if (is_user && address >= TASK_SIZE) {
+		if ((long)address == -1)
+			return;
+
 		pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
 				    current->comm, current->pid, address,
 				    from_kuid(&init_user_ns, current_uid()));
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 73d4873fc7f8..33b3461d91e8 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (pshift >= pdshift) {
 		cachep = PGT_CACHE(PTE_T_ORDER);
 		num_hugepd = 1 << (pshift - pdshift);
+		new = NULL;
 	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
-		cachep = PGT_CACHE(PTE_INDEX_SIZE);
+		cachep = NULL;
 		num_hugepd = 1;
+		new = pte_alloc_one(mm);
 	} else {
 		cachep = PGT_CACHE(pdshift - pshift);
 		num_hugepd = 1;
+		new = NULL;
 	}
 
-	if (!cachep) {
+	if (!cachep && !new) {
 		WARN_ONCE(1, "No page table cache created for hugetlb tables");
 		return -ENOMEM;
 	}
 
-	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+	if (cachep)
+		new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
 
 	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
 	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 	if (i < num_hugepd) {
 		for (i = i - 1 ; i >= 0; i--, hpdp--)
 			*hpdp = __hugepd(0);
-		kmem_cache_free(cachep, new);
+		if (cachep)
+			kmem_cache_free(cachep, new);
+		else
+			pte_free(mm, new);
 	} else {
 		kmemleak_ignore(new);
 	}
@@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
 	if (shift >= pdshift)
 		hugepd_free(tlb, hugepte);
 	else if (IS_ENABLED(CONFIG_PPC_8xx))
-		pgtable_free_tlb(tlb, hugepte,
-				 get_hugepd_cache_index(PTE_INDEX_SIZE));
+		pgtable_free_tlb(tlb, hugepte, 0);
 	else
 		pgtable_free_tlb(tlb, hugepte,
 				 get_hugepd_cache_index(pdshift - shift));
@@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
 		 * if we have pdshift and shift value same, we don't
 		 * use pgt cache for hugepd.
 		 */
-		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
-			pgtable_cache_add(PTE_INDEX_SIZE);
-		else if (pdshift > shift)
-			pgtable_cache_add(pdshift - shift);
-		else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
+		if (pdshift > shift) {
+			if (!IS_ENABLED(CONFIG_PPC_8xx))
+				pgtable_cache_add(pdshift - shift);
+		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
+			   IS_ENABLED(CONFIG_PPC_8xx)) {
 			pgtable_cache_add(PTE_T_ORDER);
+		}
 
 		configured = true;
 	}
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 0e6ed4413eea..d2bed3fcb719 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -12,7 +12,7 @@
 #include <asm/code-patching.h>
 #include <mm/mmu_decl.h>
 
-static pgprot_t kasan_prot_ro(void)
+static pgprot_t __init kasan_prot_ro(void)
 {
 	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return PAGE_READONLY;
@@ -20,7 +20,7 @@ static pgprot_t kasan_prot_ro(void)
 	return PAGE_KERNEL_RO;
 }
 
-static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 {
 	unsigned long va = (unsigned long)kasan_early_shadow_page;
 	phys_addr_t pa = __pa(kasan_early_shadow_page);
@@ -30,29 +30,25 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 		__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
 }
 
-static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
 {
 	pmd_t *pmd;
 	unsigned long k_cur, k_next;
-	pgprot_t prot = slab_is_available() ? kasan_prot_ro() : PAGE_KERNEL;
+	pte_t *new = NULL;
 
 	pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
 
 	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
-		pte_t *new;
-
 		k_next = pgd_addr_end(k_cur, k_end);
 		if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
 			continue;
 
-		if (slab_is_available())
-			new = pte_alloc_one_kernel(&init_mm);
-		else
+		if (!new)
 			new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
 		if (!new)
 			return -ENOMEM;
-		kasan_populate_pte(new, prot);
+		kasan_populate_pte(new, PAGE_KERNEL);
 
 		smp_wmb(); /* See comment in __pte_alloc */
@@ -63,39 +59,27 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
 			new = NULL;
 		}
 		spin_unlock(&init_mm.page_table_lock);
-
-		if (new && slab_is_available())
-			pte_free_kernel(&init_mm, new);
 	}
 	return 0;
 }
 
-static void __ref *kasan_get_one_page(void)
-{
-	if (slab_is_available())
-		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-
-	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
-}
-
-static int __ref kasan_init_region(void *start, size_t size)
+static int __init kasan_init_region(void *start, size_t size)
 {
 	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
 	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
 	unsigned long k_cur;
 	int ret;
-	void *block = NULL;
+	void *block;
 
 	ret = kasan_init_shadow_page_tables(k_start, k_end);
 	if (ret)
 		return ret;
 
-	if (!slab_is_available())
-		block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+	block = memblock_alloc(k_end - k_start, PAGE_SIZE);
 
 	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
 		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
-		void *va = block ? block + k_cur - k_start : kasan_get_one_page();
+		void *va = block + k_cur - k_start;
 		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
 
 		if (!va)
@@ -129,12 +113,32 @@ static void __init kasan_remap_early_shadow_ro(void)
 
 	flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
 }
 
+static void __init kasan_unmap_early_shadow_vmalloc(void)
+{
+	unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
+	unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
+	unsigned long k_cur;
+	phys_addr_t pa = __pa(kasan_early_shadow_page);
+
+	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
+		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+		pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+
+		if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
+			continue;
+
+		__set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
+	}
+	flush_tlb_kernel_range(k_start, k_end);
+}
+
 void __init kasan_mmu_init(void)
 {
 	int ret;
 	struct memblock_region *reg;
 
-	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
+	    IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
 		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
 		if (ret)
@@ -165,34 +169,21 @@ void __init kasan_init(void)
 	pr_info("KASAN init done\n");
 }
 
-#ifdef CONFIG_MODULES
-void *module_alloc(unsigned long size)
+void __init kasan_late_init(void)
 {
-	void *base;
-
-	base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
-				    GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-				    NUMA_NO_NODE, __builtin_return_address(0));
-
-	if (!base)
-		return NULL;
-
-	if (!kasan_init_region(base, size))
-		return base;
-
-	vfree(base);
-
-	return NULL;
+	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+		kasan_unmap_early_shadow_vmalloc();
 }
-#endif
 
 #ifdef CONFIG_PPC_BOOK3S_32
 u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
 
 static void __init kasan_early_hash_table(void)
 {
-	modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
-	modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);
+	unsigned int hash = __pa(early_hash);
+
+	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
+	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
 
 	Hash = (struct hash_pte *)early_hash;
 }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index f5535eae637f..1c07d5a3f543 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -49,6 +49,7 @@
 #include <asm/fixmap.h>
 #include <asm/swiotlb.h>
 #include <asm/rtas.h>
+#include <asm/kasan.h>
 
 #include <mm/mmu_decl.h>
 
@@ -301,6 +302,9 @@ void __init mem_init(void)
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 	set_max_mapnr(max_pfn);
+
+	kasan_late_init();
+
 	memblock_free_all();
 
 #ifdef CONFIG_HIGHMEM
@@ -369,7 +373,9 @@ static inline bool flush_coherent_icache(unsigned long addr)
 	 */
 	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
 		mb(); /* sync */
+		allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
 		icbi((void *)addr);
+		prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
 		mb(); /* sync */
 		isync();
 		return true;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 8e99649c24fc..7097e07a209a 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -181,3 +181,9 @@ void mmu_mark_rodata_ro(void);
 static inline void mmu_mark_initmem_nx(void) { }
 static inline void mmu_mark_rodata_ro(void) { }
 #endif
+
+#ifdef CONFIG_PPC_DEBUG_WX
+void ptdump_check_wx(void);
+#else
+static inline void ptdump_check_wx(void) { }
+#endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 96eb8e43f39b..3189308dece4 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -21,33 +21,34 @@ extern int __map_without_ltlbs;
 static unsigned long block_mapped_ram;
 
 /*
- * Return PA for this VA if it is in an area mapped with LTLBs.
+ * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
  * Otherwise, returns 0
  */
 phys_addr_t v_block_mapped(unsigned long va)
 {
 	unsigned long p = PHYS_IMMR_BASE;
 
-	if (__map_without_ltlbs)
-		return 0;
 	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
 		return p + va - VIRT_IMMR_BASE;
+	if (__map_without_ltlbs)
+		return 0;
 	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
 		return __pa(va);
 	return 0;
 }
 
 /*
- * Return VA for a given PA mapped with LTLBs or 0 if not mapped
+ * Return VA for a given PA mapped with LTLBs or fixmap
+ * Return 0 if not mapped
  */
 unsigned long p_block_mapped(phys_addr_t pa)
 {
 	unsigned long p = PHYS_IMMR_BASE;
 
-	if (__map_without_ltlbs)
-		return 0;
 	if (pa >= p && pa < p + IMMR_SIZE)
 		return VIRT_IMMR_BASE + pa - p;
+	if (__map_without_ltlbs)
+		return 0;
 	if (pa < block_mapped_ram)
 		return (unsigned long)__va(pa);
 	return 0;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 50d68d21ddcc..3c7dec70cda0 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1616,11 +1616,11 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
 	return count;
 }
 
-static const struct file_operations topology_ops = {
-	.read = seq_read,
-	.write = topology_write,
-	.open = topology_open,
-	.release = single_release
+static const struct proc_ops topology_proc_ops = {
+	.proc_read = seq_read,
+	.proc_write = topology_write,
+	.proc_open = topology_open,
+	.proc_release = single_release,
 };
 
 static int topology_update_init(void)
@@ -1630,7 +1630,7 @@ static int topology_update_init(void)
 	if (vphn_enabled)
 		topology_schedule_update();
 
-	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
+	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_proc_ops))
 		return -ENOMEM;
 
 	topology_inited = 1;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 73b84166d06a..5fb90edd865e 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -218,6 +218,7 @@ void mark_rodata_ro(void)
 
 	if (v_block_mapped((unsigned long)_sinittext)) {
 		mmu_mark_rodata_ro();
+		ptdump_check_wx();
 		return;
 	}
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 2f9ddc29c535..206156255247 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -24,6 +24,8 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 
+#include <mm/mmu_decl.h>
+
 #include "ptdump.h"
 
 /*
@@ -173,10 +175,12 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
 
 static void note_prot_wx(struct pg_state *st, unsigned long addr)
 {
+	pte_t pte = __pte(st->current_flags);
+
 	if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
 		return;
 
-	if (!((st->current_flags & pgprot_val(PAGE_KERNEL_X)) == pgprot_val(PAGE_KERNEL_X)))
+	if (!pte_write(pte) || !pte_exec(pte))
 		return;
 
 	WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",