author     Ben Hutchings <ben@decadent.org.uk>   2018-09-20 23:17:04 +0100
committer  Ben Hutchings <ben@decadent.org.uk>   2018-09-20 23:17:04 +0100
commit     cd76c89e0795fd56c89f9d2f26d9807d5033a4fd
tree       5c85b9c62634c378b2ed55cecda667c87d072e52
parent     36c5c4dd8d80bd3ba458d908595b0f5834050b41
Add vmacache security fix
 queue-3.16/mm-get-rid-of-vmacache_flush_all-entirely.patch | 117 ++++++++++++++
 queue-3.16/series                                          |   1 +
 2 files changed, 118 insertions(+), 0 deletions(-)
diff --git a/queue-3.16/mm-get-rid-of-vmacache_flush_all-entirely.patch b/queue-3.16/mm-get-rid-of-vmacache_flush_all-entirely.patch
new file mode 100644
index 00000000..8cd6735a
--- /dev/null
+++ b/queue-3.16/mm-get-rid-of-vmacache_flush_all-entirely.patch
@@ -0,0 +1,117 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Wed, 12 Sep 2018 23:57:48 -1000
+Subject: mm: get rid of vmacache_flush_all() entirely
+
+commit 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2 upstream.
+
+Jann Horn points out that the vmacache_flush_all() function is not only
+potentially expensive, it's buggy too. It also happens to be entirely
+unnecessary, because the sequence number overflow case can be avoided by
+simply making the sequence number be 64-bit. That doesn't even grow the
+data structures in question, because the other adjacent fields are
+already 64-bit.
+
+So simplify the whole thing by just making the sequence number overflow
+case go away entirely, which gets rid of all the complications and makes
+the code faster too. Win-win.
+
+[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics
+ also just goes away entirely with this ]
+
+Reported-by: Jann Horn <jannh@google.com>
+Suggested-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[bwh: Backported to 3.16: drop changes to mm debug code]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -345,7 +345,7 @@ struct kioctx_table;
+ struct mm_struct {
+ struct vm_area_struct *mmap; /* list of VMAs */
+ struct rb_root mm_rb;
+- u32 vmacache_seqnum; /* per-thread vmacache */
++ u64 vmacache_seqnum; /* per-thread vmacache */
+ #ifdef CONFIG_MMU
+ unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long addr, unsigned long len,
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1291,7 +1291,7 @@ struct task_struct {
+ unsigned brk_randomized:1;
+ #endif
+ /* per-thread vma caching */
+- u32 vmacache_seqnum;
++ u64 vmacache_seqnum;
+ struct vm_area_struct *vmacache[VMACACHE_SIZE];
+ #if defined(SPLIT_RSS_COUNTING)
+ struct task_rss_stat rss_stat;
+--- a/include/linux/vmacache.h
++++ b/include/linux/vmacache.h
+@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct
+ memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
+ }
+
+-extern void vmacache_flush_all(struct mm_struct *mm);
+ extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
+ extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
+ unsigned long addr);
+@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_f
+ static inline void vmacache_invalidate(struct mm_struct *mm)
+ {
+ mm->vmacache_seqnum++;
+-
+- /* deal with overflows */
+- if (unlikely(mm->vmacache_seqnum == 0))
+- vmacache_flush_all(mm);
+ }
+
+ #endif /* __LINUX_VMACACHE_H */
+--- a/mm/vmacache.c
++++ b/mm/vmacache.c
+@@ -6,42 +6,6 @@
+ #include <linux/vmacache.h>
+
+ /*
+- * Flush vma caches for threads that share a given mm.
+- *
+- * The operation is safe because the caller holds the mmap_sem
+- * exclusively and other threads accessing the vma cache will
+- * have mmap_sem held at least for read, so no extra locking
+- * is required to maintain the vma cache.
+- */
+-void vmacache_flush_all(struct mm_struct *mm)
+-{
+- struct task_struct *g, *p;
+-
+- /*
+- * Single threaded tasks need not iterate the entire
+- * list of process. We can avoid the flushing as well
+- * since the mm's seqnum was increased and don't have
+- * to worry about other threads' seqnum. Current's
+- * flush will occur upon the next lookup.
+- */
+- if (atomic_read(&mm->mm_users) == 1)
+- return;
+-
+- rcu_read_lock();
+- for_each_process_thread(g, p) {
+- /*
+- * Only flush the vmacache pointers as the
+- * mm seqnum is already set and curr's will
+- * be set upon invalidation when the next
+- * lookup is done.
+- */
+- if (mm == p->mm)
+- vmacache_flush(p);
+- }
+- rcu_read_unlock();
+-}
+-
+-/*
+ * This task may be accessing a foreign mm via (for example)
+ * get_user_pages()->find_vma(). The vmacache is task-local and this
+ * task's vmacache pertains to a different mm (ie, its own). There is
diff --git a/queue-3.16/series b/queue-3.16/series
index f4939ed4..ee455509 100644
--- a/queue-3.16/series
+++ b/queue-3.16/series
@@ -60,3 +60,4 @@ x86-process-correct-and-optimize-tif_blockstep-switch.patch
x86-cpu-amd-fix-erratum-1076-cpb-bit.patch
x86-cpu-intel-add-knights-mill-to-intel-family.patch
kvm-x86-introduce-num_emulated_msrs.patch
+mm-get-rid-of-vmacache_flush_all-entirely.patch
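
For context, the invalidation scheme the patch keeps is simple enough to
sketch in a few lines. Below is a minimal, hypothetical userspace model
(the names shared_mm, thread_cache, invalidate and cache_find are
illustrative, not the kernel code): each thread snapshots a shared
64-bit sequence number, invalidating every thread's cache is a single
increment, and a stale snapshot makes the next lookup treat the cache
as empty. With a 64-bit counter the wraparound that
vmacache_flush_all() guarded against is unreachable in practice
(roughly 584 years at one increment per nanosecond), which is why the
flush-all path can be dropped entirely.

#include <stddef.h>
#include <stdint.h>

#define CACHE_SIZE 4

struct shared_mm {
	uint64_t seqnum;		/* bumped on every invalidation */
};

struct thread_cache {
	uint64_t seqnum;		/* snapshot of shared_mm.seqnum */
	void *slots[CACHE_SIZE];	/* per-thread cached lookups */
};

/* Invalidate every thread's cache: one increment, no thread iteration. */
static void invalidate(struct shared_mm *mm)
{
	mm->seqnum++;
}

/* Lazily revalidate on lookup; a stale seqnum means "cache is empty". */
static void *cache_find(struct thread_cache *tc,
			const struct shared_mm *mm, size_t idx)
{
	if (tc->seqnum != mm->seqnum) {
		for (size_t i = 0; i < CACHE_SIZE; i++)
			tc->slots[i] = NULL;
		tc->seqnum = mm->seqnum;
	}
	return tc->slots[idx];
}

int main(void)
{
	struct shared_mm mm = { .seqnum = 0 };
	struct thread_cache tc = { .seqnum = 0, .slots = { 0 } };
	int dummy;

	tc.slots[0] = &dummy;	/* populate one slot */
	invalidate(&mm);	/* the mm changed; bump the shared seqnum */

	/* The stale snapshot forces a lazy flush, so the lookup misses. */
	return cache_find(&tc, &mm, 0) == NULL ? 0 : 1;
}

This mirrors the design the upstream commit message describes: because
invalidation is just an increment and revalidation happens lazily in
each thread's own lookup path, no code ever needs to walk all tasks
sharing an mm, which is exactly the racy iteration the removed
vmacache_flush_all() performed.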