author	Dave Hansen <dave.hansen@intel.com>	2017-11-06 17:29:54 -0800
committer	Dave Hansen <dave.hansen@intel.com>	2017-11-06 17:29:54 -0800
commit	e8bbf6923dd0ace0de16ae302c6f9b7ad5c81308 (patch)
tree	4d0b0ea24b625cfa830539f9c386d804b0a8ddbb
parent	8e7073eede37963b866a18f84a6ce5ca0aa3f2ae (diff)
download	x86-kaiser-e8bbf6923dd0ace0de16ae302c6f9b7ad5c81308.tar.gz
x86, kaiser: map espfix structures
From: Dave Hansen <dave.hansen@linux.intel.com>

We have some rather arcane code to help when we IRET to 16-bit
segments: the "espfix" code.  This consists of a few per-cpu
variables:

	espfix_stack: tells us where we allocated the stack
		      (the bottom)
	espfix_waddr: tells us where we can actually point %rsp

and the stack itself.  We need all three things mapped for this
to work.

Note: the espfix code runs with a kernel GSBASE, but user
(shadow) page tables.  We could switch to the kernel page tables
here and then not have to map any of this, but just
user-pagetable-mapping is simpler.  To switch over to the kernel
copy, we would need some temporary storage which is in short
supply at this point.

The original KAISER patch missed this case.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
Cc: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
Cc: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
Cc: Richard Fellner <richard.fellner@student.tugraz.at>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Kees Cook <keescook@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: x86@kernel.org
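[Editor's note] The fix below follows the standard KAISER pattern: per-cpu
variables that the entry code reads while the user (shadow) page tables are
active are declared with DEFINE_PER_CPU_USER_MAPPED() so they land in the
user-mapped per-cpu section, and dynamically allocated pages are added to the
shadow page tables explicitly with kaiser_add_mapping().  A minimal sketch of
that pattern, assuming the interfaces introduced earlier in the series; the
variable and function names here are hypothetical, chosen only for
illustration:

	#include <linux/kaiser.h>
	#include <linux/percpu.h>
	#include <asm/pgtable_types.h>

	/*
	 * Hypothetical per-cpu variable: DEFINE_PER_CPU_USER_MAPPED()
	 * places it in the per-cpu section that KAISER maps into the
	 * user (shadow) page tables as well as the kernel ones.
	 */
	DEFINE_PER_CPU_USER_MAPPED(unsigned long, example_entry_scratch);

	/*
	 * Hypothetical helper: make one dynamically allocated page
	 * visible on the shadow page tables too, mirroring its kernel
	 * mapping.
	 */
	static void example_map_for_entry(void *page)
	{
		kaiser_add_mapping((unsigned long)page, PAGE_SIZE,
				   __PAGE_KERNEL | _PAGE_GLOBAL);
	}

The hunks below apply exactly this pattern to espfix_stack, espfix_waddr,
and the espfix stack page itself.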
-rw-r--r--	arch/x86/kernel/espfix_64.c	12
1 file changed, 9 insertions, 3 deletions
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 168ed69c828f3d..fc079ec8028ac3 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -33,6 +33,7 @@
 
 #include <linux/init.h>
 #include <linux/init_task.h>
+#include <linux/kaiser.h>
 #include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/gfp.h>
@@ -41,7 +42,6 @@
 #include <asm/pgalloc.h>
 #include <asm/setup.h>
 #include <asm/espfix.h>
-#include <asm/kaiser.h>
 
 /*
  * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
@@ -61,8 +61,8 @@
 #define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
-DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
-DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
+DEFINE_PER_CPU_USER_MAPPED(unsigned long, espfix_stack);
+DEFINE_PER_CPU_USER_MAPPED(unsigned long, espfix_waddr);
 
 /* Initialization mutex - should this be a spinlock? */
 static DEFINE_MUTEX(espfix_init_mutex);
@@ -225,4 +225,10 @@ done:
 	per_cpu(espfix_stack, cpu) = addr;
 	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
 		+ (addr & ~PAGE_MASK);
+	/*
+	 * _PAGE_GLOBAL is not really required. This is not a hot
+	 * path, but we do it here for consistency.
+	 */
+	kaiser_add_mapping((unsigned long)stack_page, PAGE_SIZE,
+			   __PAGE_KERNEL | _PAGE_GLOBAL);
 }