author     Mike Rapoport <rppt@linux.ibm.com>          2022-09-21 10:48:41 +0300
committer  Catalin Marinas <catalin.marinas@arm.com>   2022-09-29 17:59:28 +0100
commit     b9dd04a20f81333e4b99662f1bbaf7c9e3a1e137 (patch)
tree       ea68c3c97d9a48dedd5e4858e156f7d195ec2202
parent     arm64: uaccess: simplify uaccess_mask_ptr() (diff)
arm64/mm: fold check for KFENCE into can_set_direct_map()
KFENCE requires the linear map to be mapped at page granularity, so that it is possible to protect/unprotect single pages, just like with rodata_full and DEBUG_PAGEALLOC.

Instead of repeating can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE), make can_set_direct_map() handle the KFENCE case.

This also prevents potential false positives in kernel_page_present(), which may return true for a non-present page if CONFIG_KFENCE is enabled.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Link: https://lore.kernel.org/r/20220921074841.382615-1-rppt@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--   arch/arm64/mm/mmu.c        8
-rw-r--r--   arch/arm64/mm/pageattr.c   8
2 files changed, 9 insertions, 7 deletions
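
The kernel_page_present() point in the commit message refers to the helper in arch/arm64/mm/pageattr.c, which treats every linear-map page as present whenever the linear map can never be modified. The user-space program below is only an illustrative model, not kernel code: the stubbed rodata_full value, the stubbed debug_pagealloc_enabled(), and the simplified kernel_page_present_model() are assumptions made for this sketch. It shows how, before this patch, a KFENCE-only configuration could report a protected (non-present) page as present.

/*
 * User-space model (not kernel code) of the decision change in this patch.
 * Names mirror the kernel helpers; the stubs are assumptions for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define CONFIG_KFENCE_ENABLED true   /* stand-in for IS_ENABLED(CONFIG_KFENCE) */

static bool rodata_full = false;                        /* rodata=full not requested */
static bool debug_pagealloc_enabled(void) { return false; }

/* Before this patch: KFENCE was not considered here. */
static bool can_set_direct_map_old(void)
{
        return rodata_full || debug_pagealloc_enabled();
}

/* After this patch: KFENCE alone also forces a page-granular linear map. */
static bool can_set_direct_map_new(void)
{
        return rodata_full || debug_pagealloc_enabled() ||
               CONFIG_KFENCE_ENABLED;
}

/*
 * Simplified shape of kernel_page_present(): if the linear map can never be
 * changed, every linear-map page is assumed present.  With the old helper and
 * only KFENCE enabled, a KFENCE-protected (unmapped) page is still reported
 * as present -- the false positive the commit message mentions.
 */
static bool kernel_page_present_model(bool (*can_set_direct_map)(void),
                                      bool pte_valid)
{
        if (!can_set_direct_map())
                return true;
        return pte_valid;
}

int main(void)
{
        bool pte_valid = false; /* models a KFENCE-protected page: PTE cleared */

        printf("old helper reports present: %d\n",
               kernel_page_present_model(can_set_direct_map_old, pte_valid));
        printf("new helper reports present: %d\n",
               kernel_page_present_model(can_set_direct_map_new, pte_valid));
        return 0;
}

Built with any C compiler, the model prints 1 for the old helper and 0 for the new one: folding the KFENCE check into can_set_direct_map() is what removes the false positive.
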
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 69deed27dec8..e8a48dfd550f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -535,7 +535,7 @@ static void __init map_mem(pgd_t *pgdp)
 	 */
 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
-	if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
+	if (can_set_direct_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -1542,11 +1542,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 
 	VM_BUG_ON(!mhp_range_allowed(start, size, true));
 
-	/*
-	 * KFENCE requires linear map to be mapped at page granularity, so that
-	 * it is possible to protect/unprotect single pages in the KFENCE pool.
-	 */
-	if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
+	if (can_set_direct_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 64e985eaa52d..d107c3d434e2 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -21,7 +21,13 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
 
 bool can_set_direct_map(void)
 {
-	return rodata_full || debug_pagealloc_enabled();
+	/*
+	 * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+	 * mapped at page granularity, so that it is possible to
+	 * protect/unprotect single pages.
+	 */
+	return rodata_full || debug_pagealloc_enabled() ||
+	       IS_ENABLED(CONFIG_KFENCE);
 }
 
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)