author		Joel Stanley <joel@jms.id.au>	2021-10-14 08:04:38 +1030
committer	Michael Ellerman <mpe@ellerman.id.au>	2021-10-22 15:22:02 +1100
commit		4f703e7faa67a116016c4678fc88b507c12670c9 (patch)
tree		29b21d3ba6945f30efd81ef52076fc1708774ba4 /arch/powerpc/include/asm
parent		powerpc: Mark .opd section read-only (diff)
download	linux-dev-4f703e7faa67a116016c4678fc88b507c12670c9.tar.xz
		linux-dev-4f703e7faa67a116016c4678fc88b507c12670c9.zip
powerpc/s64: Clarify that radix lacks DEBUG_PAGEALLOC
The page_alloc.c code will call into __kernel_map_pages() when
DEBUG_PAGEALLOC is configured and enabled.

As the implementation assumes hash, this should crash spectacularly if
not for a bit of luck in __kernel_map_pages(): in that function
linear_map_hash_count is always zero, so the for loop exits without
doing any damage.

No other platform determines at runtime whether it supports
debug_pagealloc. Rather than adding code to mm/page_alloc.c to do
that, this change turns the map/unmap into a noop when in radix mode
and prints a warning once.

Signed-off-by: Joel Stanley <joel@jms.id.au>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
[mpe: Reformat if per Christophe's suggestion]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211013213438.675095-1-joel@jms.id.au
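The noop-plus-warning behaviour described above is implemented outside this diffstat (which is limited to arch/powerpc/include/asm), so the stub below is only a minimal sketch of the shape the commit message describes; the warning text and exact placement are assumptions, not the verbatim hunk:

/* Sketch of the radix-side handler: DEBUG_PAGEALLOC map/unmap becomes
 * a noop under radix, with a one-time warning so the missing support
 * is visible rather than silently ignored.
 */
void radix__kernel_map_pages(struct page *page, int numpages, int enable)
{
	pr_warn_once("DEBUG_PAGEALLOC not supported in radix mode\n");
}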
Diffstat (limited to 'arch/powerpc/include/asm')
-rw-r--r--	arch/powerpc/include/asm/book3s/64/hash.h	 2
-rw-r--r--	arch/powerpc/include/asm/book3s/64/pgtable.h	10
-rw-r--r--	arch/powerpc/include/asm/book3s/64/radix.h	 3
3 files changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index d959b0195ad9..674fe0e890dc 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -255,6 +255,8 @@ int hash__create_section_mapping(unsigned long start, unsigned long end,
int nid, pgprot_t prot);
int hash__remove_section_mapping(unsigned long start, unsigned long end);
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 5d34a8646f08..33e073d6b0c4 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1101,6 +1101,16 @@ static inline void vmemmap_remove_mapping(unsigned long start,
}
#endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	if (radix_enabled())
+		radix__kernel_map_pages(page, numpages, enable);
+	else
+		hash__kernel_map_pages(page, numpages, enable);
+}
+#endif
+
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte_raw(pmd_raw(pmd));
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index 59cab558e2f0..d090d9612348 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -316,5 +316,8 @@ int radix__create_section_mapping(unsigned long start, unsigned long end,
int nid, pgprot_t prot);
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */
+
+void radix__kernel_map_pages(struct page *page, int numpages, int enable);
+
#endif /* __ASSEMBLY__ */
#endif
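For reference, the hash-side path that the new __kernel_map_pages() wrapper falls back to follows roughly this shape. This is a simplified sketch of the pre-existing hash code the commit message refers to, not the hash_utils.c source: the helper names kernel_map_linear_page()/kernel_unmap_linear_page() and the locking details are assumptions. It illustrates why linear_map_hash_count being zero under radix made the old loop harmless:

void hash__kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long flags, vaddr, lmi;
	int i;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		vaddr = (unsigned long)page_address(page);
		lmi = __pa(vaddr) >> PAGE_SHIFT;
		/* Under radix linear_map_hash_count is 0, so every page is skipped. */
		if (lmi >= linear_map_hash_count)
			continue;
		if (enable)
			kernel_map_linear_page(vaddr, lmi);	/* assumed helper */
		else
			kernel_unmap_linear_page(vaddr, lmi);	/* assumed helper */
	}
	local_irq_restore(flags);
}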