From 4fcc636615b1a309b39cab101a2b433cbf1f63f1 Mon Sep 17 00:00:00 2001
From: Jordan Niethe
Date: Wed, 9 Jun 2021 11:34:25 +1000
Subject: [PATCH] powerpc/modules: Make module_alloc() Strict Module RWX aware

Make module_alloc() use PAGE_KERNEL protections instead of
PAGE_KERNEL_EXEC if Strict Module RWX is enabled.

Signed-off-by: Jordan Niethe
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20210609013431.9805-4-jniethe5@gmail.com
---
 arch/powerpc/include/asm/mmu.h | 5 +++++
 arch/powerpc/kernel/module.c   | 4 +++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 998fe01dd1a8..27016b98ecb2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -345,6 +345,11 @@ static inline bool strict_kernel_rwx_enabled(void)
 	return false;
 }
 #endif
+
+static inline bool strict_module_rwx_enabled(void)
+{
+	return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
+}
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 3f35c8d20be7..ed04a3ba66fe 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -92,12 +92,14 @@ int module_finalize(const Elf_Ehdr *hdr,
 static __always_inline void *
 __module_alloc(unsigned long size, unsigned long start, unsigned long end)
 {
+	pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+
 	/*
 	 * Don't do huge page allocations for modules yet until more testing
 	 * is done. STRICT_MODULE_RWX may require extra work to support this
 	 * too.
 	 */
-	return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, PAGE_KERNEL_EXEC,
+	return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
 				    VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
 				    NUMA_NO_NODE, __builtin_return_address(0));
 }
-- 
2.11.0
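
Note (not part of the patch): the sketch below is a minimal userspace model of the
protection selection the patch introduces. The macros IS_ENABLED, PAGE_KERNEL and
PAGE_KERNEL_EXEC are kernel-internal, so the stand-in names prefixed MODEL_ here are
purely illustrative assumptions, not kernel definitions; the logic it models is the
ternary added to __module_alloc().

/*
 * Userspace model of the prot selection added to __module_alloc().
 * All MODEL_* names are illustrative stand-ins for kernel macros.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for IS_ENABLED(CONFIG_STRICT_MODULE_RWX); assume it is set. */
#define MODEL_CONFIG_STRICT_MODULE_RWX 1

/* Stand-ins for the powerpc page protection constants. */
enum model_pgprot { MODEL_PAGE_KERNEL, MODEL_PAGE_KERNEL_EXEC };

/* Models strict_kernel_rwx_enabled(): whether kernel mappings are strict. */
static bool model_strict_kernel_rwx_enabled(void)
{
	return true;
}

/* Mirrors the new helper: strict module RWX needs both conditions. */
static bool model_strict_module_rwx_enabled(void)
{
	return MODEL_CONFIG_STRICT_MODULE_RWX && model_strict_kernel_rwx_enabled();
}

int main(void)
{
	/*
	 * Same ternary as in __module_alloc(): allocate non-executable
	 * by default when strict module RWX is on; the module loader is
	 * then expected to grant execute permission per section later.
	 */
	enum model_pgprot prot = model_strict_module_rwx_enabled() ?
				 MODEL_PAGE_KERNEL : MODEL_PAGE_KERNEL_EXEC;

	printf("module_alloc() would map with %s\n",
	       prot == MODEL_PAGE_KERNEL ? "PAGE_KERNEL" : "PAGE_KERNEL_EXEC");
	return 0;
}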