author	Jeffrey Hugo <jhugo@codeaurora.org>	2018-05-11 16:01:42 -0700
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2018-06-21 04:01:41 +0900
commit	a3a0a24a9bfae8b537e51d08fa06d8643efa63ef (patch)
tree	4d0ed149988316febcd37e8fa638e1286b5cc14d
parent	b5e2b10d79679ce83d787d8aa609b8116a3bd8e9 (diff)
download	linux-a3a0a24a9bfae8b537e51d08fa06d8643efa63ef.tar.gz
init: fix false positives in W+X checking
[ Upstream commit ae646f0b9ca135b87bc73ff606ef996c3029780a ]

load_module() creates W+X mappings via __vmalloc_node_range() (from
layout_and_allocate()->move_module()->module_alloc()) by using
PAGE_KERNEL_EXEC. These mappings are later cleaned up via
"call_rcu_sched(&freeinit->rcu, do_free_init)" from do_init_module().

This is a problem because call_rcu_sched() queues work, which can be run
after debug_checkwx() is run, resulting in a race condition. If hit, the
race results in a nasty splat about insecure W+X mappings, which results
in a poor user experience as these are not the mappings that
debug_checkwx() is intended to catch.

This issue is observed on multiple arm64 platforms, and has been
artificially triggered on an x86 platform.

Address the race by flushing the queued work before running the
arch-defined mark_rodata_ro() which then calls debug_checkwx().

Link: http://lkml.kernel.org/r/1525103946-29526-1-git-send-email-jhugo@codeaurora.org
Fixes: e1a58320a38d ("x86/mm: Warn on W^X mappings")
Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
Reported-by: Timur Tabi <timur@codeaurora.org>
Reported-by: Jan Glauber <jan.glauber@caviumnetworks.com>
Acked-by: Kees Cook <keescook@chromium.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Laura Abbott <labbott@redhat.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
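A minimal sketch (not part of this patch) of the RCU idiom the fix relies
on: call_rcu_sched() only queues a callback to run after a grace period, so
any later code that must observe the callback's side effects has to flush
the queue first with rcu_barrier_sched(). The demo_* names below are
hypothetical; the sched-RCU APIs are the ones this kernel series uses
(later kernels merged them into call_rcu()/rcu_barrier()).

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_init_mem {
	struct rcu_head rcu;
	void *pages;		/* stands in for a module's init mappings */
};

/* Deferred cleanup, analogous to do_free_init(). */
static void demo_free_init(struct rcu_head *head)
{
	struct demo_init_mem *im = container_of(head, struct demo_init_mem, rcu);

	kfree(im->pages);
	kfree(im);
}

static void demo_load_done(struct demo_init_mem *im)
{
	/* Queues demo_free_init(); it may run long after we return. */
	call_rcu_sched(&im->rcu, demo_free_init);
}

/* Analogous to mark_readonly() after the fix. */
static void demo_check(void)
{
	/*
	 * Flush all queued sched-RCU callbacks first; otherwise the scan
	 * below could run before demo_free_init() and report mappings
	 * that are already scheduled for teardown -- the false positive
	 * this patch fixes.
	 */
	rcu_barrier_sched();
	/* ... now safe to scan for leftover W+X mappings ... */
}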
-rw-r--r--	init/main.c	7
-rw-r--r--	kernel/module.c	5
2 files changed, 12 insertions(+), 0 deletions(-)
diff --git a/init/main.c b/init/main.c
index 21efbf6ace93dd..dacaf589226a0c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -981,6 +981,13 @@ __setup("rodata=", set_debug_rodata);
 static void mark_readonly(void)
 {
 	if (rodata_enabled) {
+		/*
+		 * load_module() results in W+X mappings, which are cleaned up
+		 * with call_rcu_sched(). Let's make sure that queued work is
+		 * flushed so that we don't hit false positives looking for
+		 * insecure pages which are W+X.
+		 */
+		rcu_barrier_sched();
 		mark_rodata_ro();
 		rodata_test();
 	} else
diff --git a/kernel/module.c b/kernel/module.c
index bbb45c038321f3..c3cc1f8615e1f0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3521,6 +3521,11 @@ static noinline int do_init_module(struct module *mod)
 	 * walking this with preempt disabled. In all the failure paths, we
 	 * call synchronize_sched(), but we don't want to slow down the success
 	 * path, so use actual RCU here.
+	 * Note that module_alloc() on most architectures creates W+X page
+	 * mappings which won't be cleaned up until do_free_init() runs. Any
+	 * code such as mark_rodata_ro() which depends on those mappings to
+	 * be cleaned up needs to sync with the queued work - ie
+	 * rcu_barrier_sched()
 	 */
 	call_rcu_sched(&freeinit->rcu, do_free_init);
 	mutex_unlock(&module_mutex);
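For contrast, a hedged sketch (again with hypothetical demo_* names) of the
trade-off the comment above describes: synchronize_sched() blocks the
caller for a full grace period, which is acceptable on failure paths, while
call_rcu_sched() returns immediately and defers the work, keeping the
success path fast at the cost of requiring rcu_barrier_sched() in any code
that depends on the deferred cleanup having finished.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_freeinit {
	struct rcu_head rcu;
};

static void demo_do_free(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_freeinit, rcu));
}

/* Failure path: block until a grace period elapses, then free inline. */
static void demo_fail_path(struct demo_freeinit *f)
{
	synchronize_sched();
	kfree(f);
}

/* Success path: queue the free and return immediately. */
static void demo_success_path(struct demo_freeinit *f)
{
	call_rcu_sched(&f->rcu, demo_do_free);
}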