author     Dave Hansen <dave.hansen@intel.com>  2017-11-06 17:29:52 -0800
committer  Dave Hansen <dave.hansen@intel.com>  2017-11-06 17:29:52 -0800
commit     66c2620dc495d74ae0456e9f192410a0a89ee432 (patch)
tree       dbea10834af9f34b236caf1c040e643e088f2703
parent     ef4f9e8cd08df643cedbccf043489edfdf88bf71 (diff)
download   x86-kaiser-66c2620dc495d74ae0456e9f192410a0a89ee432.tar.gz
x86, kaiser: make sure static PGDs are 8k in size
From: Dave Hansen <dave.hansen@linux.intel.com>

We have a few PGDs that come out of the kernel binary instead of being
allocated dynamically.  Before this patch, they are all 8k-aligned, but
we also need them to be 8k in *size*.

The original KAISER patch did not do this.  It probably just lucked out
that it did not trample over data after the last PGD.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
Cc: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
Cc: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
Cc: Richard Fellner <richard.fellner@student.tugraz.at>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Kees Cook <keescook@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: x86@kernel.org
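The point of the second page is easiest to see in C.  The following is a
minimal sketch, not the kernel's actual set_pgd()/KAISER implementation;
the names shadow_pgd_of and set_pgd_pair are illustrative assumptions.
It shows why an 8k PGD pair lets a single helper write the user/shadow
half unconditionally:

    #include <stdint.h>
    #include <stddef.h>

    #define PAGE_SIZE     4096
    #define PTRS_PER_PGD  512          /* 512 entries * 8 bytes = one 4k page */

    typedef uint64_t pgdval_t;

    /* The shadow (user) PGD is assumed to sit exactly one page above the
     * kernel PGD, i.e. in the second 4k of an 8k, 8k-aligned pair. */
    static inline pgdval_t *shadow_pgd_of(pgdval_t *kernel_pgd)
    {
            return kernel_pgd + PTRS_PER_PGD;
    }

    /* Mirror one entry into both halves of the pair.  This is only safe
     * because every PGD -- including the static ones in head_64.S -- is
     * guaranteed to be a full 8k. */
    static inline void set_pgd_pair(pgdval_t *kernel_pgd, size_t index, pgdval_t val)
    {
            kernel_pgd[index] = val;
            shadow_pgd_of(kernel_pgd)[index] = val;
    }

If a static PGD were only 4k, the second store above would scribble over
whatever the linker placed after it, which is exactly the trampling the
patch description warns about.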
-rw-r--r--  arch/x86/kernel/head_64.S | 16
1 file changed, 16 insertions, 0 deletions
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3f13bacd7fe83f..f8c43243666dc1 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -340,11 +340,24 @@ GLOBAL(early_recursion_flag)
GLOBAL(name)
#ifdef CONFIG_KAISER
+/*
+ * Each PGD needs to be 8k long and 8k aligned. We do not
+ * ever go out to userspace with these, so we do not
+ * strictly *need* the second page, but this allows us to
+ * have a single set_pgd() implementation that does not
+ * need to worry about whether it has 4k or 8k to work
+ * with.
+ *
+ * This ensures PGDs are 8k long:
+ */
+#define KAISER_USER_PGD_FILL 512
+/* This ensures they are 8k-aligned: */
#define NEXT_PGD_PAGE(name) \
.balign 2 * PAGE_SIZE; \
GLOBAL(name)
#else
#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
+#define KAISER_USER_PGD_FILL 0
#endif
/* Automate the creation of 1 to 1 mapping pmd entries */
@@ -363,6 +376,7 @@ NEXT_PGD_PAGE(early_top_pgt)
#else
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
#endif
+ .fill KAISER_USER_PGD_FILL,8,0
NEXT_PAGE(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
@@ -372,6 +386,7 @@ NEXT_PAGE(early_dynamic_pgts)
#ifndef CONFIG_XEN
NEXT_PGD_PAGE(init_top_pgt)
.fill 512,8,0
+ .fill KAISER_USER_PGD_FILL,8,0
#else
NEXT_PGD_PAGE(init_top_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
@@ -380,6 +395,7 @@ NEXT_PGD_PAGE(init_top_pgt)
.org init_top_pgt + PGD_START_KERNEL*8, 0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
.quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
+ .fill KAISER_USER_PGD_FILL,8,0
NEXT_PAGE(level3_ident_pgt)
.quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
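
For reference, the arithmetic behind the two directives: .fill
KAISER_USER_PGD_FILL,8,0 emits 512 zero entries of 8 bytes each, i.e. the
extra 4k page, and .balign 2 * PAGE_SIZE puts each pair on an 8k boundary.
A tiny self-check in C (constants mirrored here by hand, not taken from
kernel headers):

    #include <assert.h>

    #define PAGE_SIZE             4096
    #define KAISER_USER_PGD_FILL  512   /* entries, as in the patch above */

    int main(void)
    {
            assert(KAISER_USER_PGD_FILL * 8 == PAGE_SIZE);  /* .fill adds one page  */
            assert(2 * PAGE_SIZE == 8192);                  /* .balign -> 8k aligned */
            return 0;
    }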