summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorBen Hutchings <ben@decadent.org.uk>2018-09-29 16:47:52 +0100
committerBen Hutchings <ben@decadent.org.uk>2018-09-29 18:26:20 +0100
commit6e7a94e2c6827d7fe3d6be5fee521b65d20f296a (patch)
tree64ef1ba80039653585056a7e562b55317a6e49a1
parentac5ee8cb77fe9723357b26d1b605a5e6907d4607 (diff)
downloadlinux-stable-queue-6e7a94e2c6827d7fe3d6be5fee521b65d20f296a.tar.gz
Add a 3.16-specific patch for L1TF mitigation
-rw-r--r--queue-3.16/series1
-rw-r--r--queue-3.16/x86-speculation-l1tf-protect-numa-balance-entries-against-l1tf.patch76
2 files changed, 77 insertions, 0 deletions
diff --git a/queue-3.16/series b/queue-3.16/series
index 2bad97d0..8e94089f 100644
--- a/queue-3.16/series
+++ b/queue-3.16/series
@@ -115,6 +115,7 @@ x86-speculation-l1tf-fix-up-pte-pfn-conversion-for-pae.patch
x86-speculation-l1tf-unbreak-__have_arch_pfn_modify_allowed.patch
x86-speculation-l1tf-invert-all-not-present-mappings.patch
x86-speculation-l1tf-exempt-zeroed-ptes-from-inversion.patch
+x86-speculation-l1tf-protect-numa-balance-entries-against-l1tf.patch
x86-speculation-l1tf-make-pmd-pud_mknotpresent-invert.patch
x86-mm-pat-make-set_memory_np-l1tf-safe.patch
x86-mm-kmmio-make-the-tracer-robust-against-l1tf.patch
diff --git a/queue-3.16/x86-speculation-l1tf-protect-numa-balance-entries-against-l1tf.patch b/queue-3.16/x86-speculation-l1tf-protect-numa-balance-entries-against-l1tf.patch
new file mode 100644
index 00000000..5e015022
--- /dev/null
+++ b/queue-3.16/x86-speculation-l1tf-protect-numa-balance-entries-against-l1tf.patch
@@ -0,0 +1,76 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Sat, 29 Sep 2018 16:31:50 +0100
+Subject: x86/speculation/l1tf: Protect NUMA-balance entries against L1TF
+
+NUMA balancing has its own functions that manipulate the PRESENT flag
+in PTEs and PMDs. These were not affected by the changes in commit
+6b28baca9b1f "x86/speculation/l1tf: Protect PROT_NONE PTEs against
+speculation".
+
+This is not a problem upstream because NUMA balancing was changed to
+use {pte,pmd}_modify() in Linux 4.0.
+
+Override the generic implementations for x86 with implementations
+that do the same inversion as {pte,pmd}_modify().
+
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: x86@kernel.org
+Cc: Mel Gorman <mgorman@suse.de>
+---
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -419,6 +419,54 @@ static inline pmd_t pmd_modify(pmd_t pmd
+ return __pmd(val);
+ }
+
++#ifdef CONFIG_NUMA_BALANCING
++
++static inline pte_t pte_mknonnuma(pte_t pte)
++{
++ pteval_t val = pte_val(pte), oldval = val;
++
++ val &= ~_PAGE_NUMA;
++ val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
++ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
++ return __pte(val);
++}
++#define pte_mknonnuma pte_mknonnuma
++
++static inline pte_t pte_mknuma(pte_t pte)
++{
++ pteval_t val = pte_val(pte), oldval = val;
++
++ val &= ~_PAGE_PRESENT;
++ val |= _PAGE_NUMA;
++ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
++ return __pte(val);
++}
++#define pte_mknuma pte_mknuma
++
++static inline pmd_t pmd_mknonnuma(pmd_t pmd)
++{
++ pmdval_t val = pmd_val(pmd), oldval = val;
++
++ val &= ~_PAGE_NUMA;
++ val |= (_PAGE_PRESENT|_PAGE_ACCESSED);
++ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
++ return __pmd(val);
++}
++#define pmd_mknonnuma pmd_mknonnuma
++
++static inline pmd_t pmd_mknuma(pmd_t pmd)
++{
++ pmdval_t val = pmd_val(pmd), oldval = val;
++
++ val &= ~_PAGE_PRESENT;
++ val |= _PAGE_NUMA;
++ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
++ return __pmd(val);
++}
++#define pmd_mknuma pmd_mknuma
++
++#endif /* CONFIG_NUMA_BALANCING */
++
+ /* mprotect needs to preserve PAT bits when updating vm_page_prot */
+ #define pgprot_modify pgprot_modify
+ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)