author	Jordan Niethe <jniethe5@gmail.com>	2021-06-09 11:34:25 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2021-06-21 21:13:20 +1000
commit	4fcc636615b1a309b39cab101a2b433cbf1f63f1 (patch)
tree	87ae49c74c699c3518fc4a603658bdb982d67c5d
parent	powerpc/lib/code-patching: Set up Strict RWX patching earlier (diff)
powerpc/modules: Make module_alloc() Strict Module RWX aware
Make module_alloc() use PAGE_KERNEL protections instead of PAGE_KERNEL_EXEC if Strict Module RWX is enabled.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210609013431.9805-4-jniethe5@gmail.com
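Put together, the allocation path after this patch reads roughly as follows; this is only a condensed view of the two hunks below, with the surrounding mmu.h and module.c context omitted:

static inline bool strict_module_rwx_enabled(void)
{
	/* Only true when built with CONFIG_STRICT_MODULE_RWX and strict
	 * kernel RWX is actually active at runtime. */
	return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
}

static __always_inline void *
__module_alloc(unsigned long size, unsigned long start, unsigned long end)
{
	/* Writable but non-executable when Strict Module RWX is in force,
	 * otherwise keep the historical RWX mapping. */
	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;

	return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
				    VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
				    NUMA_NO_NODE, __builtin_return_address(0));
}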
-rw-r--r--	arch/powerpc/include/asm/mmu.h	5
-rw-r--r--	arch/powerpc/kernel/module.c	4
2 files changed, 8 insertions, 1 deletion
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 998fe01dd1a8..27016b98ecb2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -345,6 +345,11 @@ static inline bool strict_kernel_rwx_enabled(void)
 	return false;
 }
 #endif
+
+static inline bool strict_module_rwx_enabled(void)
+{
+	return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
+}
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 3f35c8d20be7..ed04a3ba66fe 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -92,12 +92,14 @@ int module_finalize(const Elf_Ehdr *hdr,
 static __always_inline void *
 __module_alloc(unsigned long size, unsigned long start, unsigned long end)
 {
+	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+
 	/*
 	 * Don't do huge page allocations for modules yet until more testing
 	 * is done. STRICT_MODULE_RWX may require extra work to support this
 	 * too.
 	 */
-	return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, PAGE_KERNEL_EXEC,
+	return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
 				    VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
 				    NUMA_NO_NODE, __builtin_return_address(0));
 }
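
With this in place, module memory allocated under Strict Module RWX starts out writable and non-executable, so execute permission has to be granted (and write dropped) on the text later in the load path, once the module's code has been written out. A rough sketch of that kind of permission flip using the generic set_memory_*() helpers; the wrapper below is purely illustrative and not a function added by this patch or present in kernel/module.c:

#include <linux/set_memory.h>	/* set_memory_ro() / set_memory_x() */

/* Illustrative only: flip freshly written module text from RW to RX,
 * one page-granular permission change per helper call. The real
 * hand-off is handled by the generic module loader.
 */
static void example_protect_module_text(unsigned long text, unsigned long size)
{
	int pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	set_memory_ro(text, pages);	/* no longer writable */
	set_memory_x(text, pages);	/* now executable */
}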