author    Ard Biesheuvel <ard.biesheuvel@linaro.org>  2019-06-25 19:08:54 +0200
committer Will Deacon <will@kernel.org>               2019-06-26 11:34:10 +0100
commit    6f496a555d93db7a11d4860b9220d904822f586a (patch)
tree      36fc4be44f0f094b0c3255a3081f936b2db21492
parent    615c48ad8f4275b4d39fa57df68d4015078be201 (diff)
download  linux-rt-devel-6f496a555d93db7a11d4860b9220d904822f586a.tar.gz
arm64: kaslr: keep modules inside module region when KASAN is enabled
When KASLR and KASAN are both enabled, we keep the modules where they are, and randomize the placement of the kernel so it is within 2 GB of the module region. The reason for this is that putting modules in the vmalloc region (like we normally do when KASLR is enabled) is not possible in this case, given that the entire vmalloc region is already backed by KASAN zero shadow pages, and so allocating dedicated KASAN shadow space as required by loaded modules is not possible.

The default module allocation window is set to [_etext - 128MB, _etext] in kaslr.c, which is appropriate for KASLR kernels booted without a seed or with 'nokaslr' on the command line. However, as it turns out, it is not quite correct for the KASAN case, since it still intersects the vmalloc region at the top, where attempts to allocate shadow pages will collide with the KASAN zero shadow pages, causing a WARN() and all kinds of other trouble.

So cap the top end to MODULES_END explicitly when running with KASAN.

Cc: <stable@vger.kernel.org> # 4.9+
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will@kernel.org>
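For illustration, here is a minimal user-space C sketch of the window selection the patch implements. The constant MODULES_VSIZE mirrors the arm64 headers (128 MB), but pick_module_alloc_end() itself is a hypothetical helper written for this sketch, not kernel code:

#include <stdbool.h>
#include <stdint.h>

#define SZ_128M        (128ULL * 1024 * 1024)
#define MODULES_VSIZE  SZ_128M  /* size of the static module region */

/* Exclusive upper bound of the module allocation window. */
static uint64_t pick_module_alloc_end(uint64_t module_alloc_base,
                                      uint64_t modules_end, bool kasan)
{
        /*
         * Default window: [base, base + 128 MB), i.e. effectively
         * [_etext - 128MB, _etext] when no KASLR seed was provided.
         */
        uint64_t end = module_alloc_base + MODULES_VSIZE;

        /*
         * With KASAN, the top of that window can reach into the vmalloc
         * region, which is already backed by zero shadow pages, so shadow
         * allocations for a loaded module would collide with it. Clamp
         * the window to the static module region instead.
         */
        if (kasan)
                end = modules_end;

        return end;
}

int main(void)
{
        /* Example (made-up addresses): with KASAN, clamp to modules_end. */
        uint64_t end = pick_module_alloc_end(0x1000000000ULL,
                                             0x1008000000ULL, true);
        return end == 0x1008000000ULL ? 0 : 1;
}

The clamped value is what the patch below passes as the upper bound to __vmalloc_node_range() in place of the unconditional module_alloc_base + MODULES_VSIZE.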
-rw-r--r--  arch/arm64/kernel/module.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index dd080837e6a9c1..ed3706d6b3a089 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -32,6 +32,7 @@
 
 void *module_alloc(unsigned long size)
 {
+	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
 	gfp_t gfp_mask = GFP_KERNEL;
 	void *p;
 
@@ -39,9 +40,12 @@ void *module_alloc(unsigned long size)
 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
 		gfp_mask |= __GFP_NOWARN;
 
+	if (IS_ENABLED(CONFIG_KASAN))
+		/* don't exceed the static module region - see below */
+		module_alloc_end = MODULES_END;
+
 	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-				module_alloc_base + MODULES_VSIZE,
-				gfp_mask, PAGE_KERNEL_EXEC, 0,
+				module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
 				NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&