author    Ben Hutchings <ben@decadent.org.uk>  2018-12-09 21:27:47 +0000
committer Ben Hutchings <ben@decadent.org.uk>  2018-12-09 21:27:47 +0000
commit    72f2b13d0c06ac1afcd60c88a2a4f36a1b5f9965 (patch)
tree      cc0aecee12ba104b5c586105e8acb08889f2230d
parent    20b93096187b2e6267421d00d2a415d7377671fd (diff)
download  linux-stable-queue-72f2b13d0c06ac1afcd60c88a2a4f36a1b5f9965.tar.gz
Add various security fixes
-rw-r--r--  queue-3.16/cdrom-fix-improper-type-cast-which-can-leat-to-information-leak.patch    30
-rw-r--r--  queue-3.16/keys-encrypted-fix-buffer-overread-in-valid_master_desc.patch            63
-rw-r--r--  queue-3.16/mm-cleancache-fix-corruption-on-missed-inode-invalidation.patch          69
-rw-r--r--  queue-3.16/mremap-properly-flush-tlb-before-releasing-the-page.patch               136
-rw-r--r--  queue-3.16/posix-timers-sanitize-overrun-handling.patch                            142
-rw-r--r--  queue-3.16/series                                                                    7
-rw-r--r--  queue-3.16/wil6210-missing-length-check-in-wmi_set_ie.patch                         31
-rw-r--r--  queue-3.16/xfs-don-t-fail-when-converting-shortform-attr-to-long-form-during.patch   45
8 files changed, 523 insertions, 0 deletions
diff --git a/queue-3.16/cdrom-fix-improper-type-cast-which-can-leat-to-information-leak.patch b/queue-3.16/cdrom-fix-improper-type-cast-which-can-leat-to-information-leak.patch
new file mode 100644
index 00000000..c1bbeaaf
--- /dev/null
+++ b/queue-3.16/cdrom-fix-improper-type-cast-which-can-leat-to-information-leak.patch
@@ -0,0 +1,30 @@
+From: Young_X <YangX92@hotmail.com>
+Date: Wed, 3 Oct 2018 12:54:29 +0000
+Subject: cdrom: fix improper type cast, which can leat to information leak.
+
+commit e4f3aa2e1e67bb48dfbaaf1cad59013d5a5bc276 upstream.
+
+There is another cast from unsigned long to int which causes
+a bounds check to fail with specially crafted input. The value is
+then used as an index in the slot array in cdrom_slot_status().
+
+This issue is similar to CVE-2018-16658 and CVE-2018-10940.
+
+Signed-off-by: Young_X <YangX92@hotmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ drivers/cdrom/cdrom.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2427,7 +2427,7 @@ static int cdrom_ioctl_select_disc(struc
+ return -ENOSYS;
+
+ if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
+- if ((int)arg >= cdi->capacity)
++ if (arg >= cdi->capacity)
+ return -EINVAL;
+ }
+
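
For illustration, a minimal userspace sketch of why the old check could be
bypassed (the constants are assumed, this is not the kernel code): casting an
attacker-chosen unsigned long to int can yield a negative value, so the signed
comparison against cdi->capacity never rejects it, while the fixed unsigned
comparison does.

    #include <stdio.h>

    int main(void)
    {
        unsigned long arg = 0xffffffff80000000UL; /* crafted ioctl argument */
        int capacity = 4;                         /* stands in for cdi->capacity */

        /* Old check: (int)arg is a large negative value on common ABIs, so the
         * signed comparison is false and arg would go on to index the slot
         * array in cdrom_slot_status(). */
        if ((int)arg >= capacity)
            printf("old check: rejected\n");
        else
            printf("old check: accepted (out-of-bounds index %d)\n", (int)arg);

        /* Fixed check: the unsigned comparison rejects anything >= capacity. */
        if (arg >= (unsigned long)capacity)
            printf("new check: rejected with -EINVAL\n");
        else
            printf("new check: accepted\n");

        return 0;
    }
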
diff --git a/queue-3.16/keys-encrypted-fix-buffer-overread-in-valid_master_desc.patch b/queue-3.16/keys-encrypted-fix-buffer-overread-in-valid_master_desc.patch
new file mode 100644
index 00000000..476bacb4
--- /dev/null
+++ b/queue-3.16/keys-encrypted-fix-buffer-overread-in-valid_master_desc.patch
@@ -0,0 +1,63 @@
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 8 Jun 2017 14:48:18 +0100
+Subject: KEYS: encrypted: fix buffer overread in valid_master_desc()
+
+commit 794b4bc292f5d31739d89c0202c54e7dc9bc3add upstream.
+
+With the 'encrypted' key type it was possible for userspace to provide a
+data blob ending with a master key description shorter than expected,
+e.g. 'keyctl add encrypted desc "new x" @s'. When validating such a
+master key description, valid_master_desc() could read beyond the end
+of the buffer. Fix this by using strncmp() instead of memcmp(). [Also
+clean up the code to deduplicate some logic.]
+
+Cc: Mimi Zohar <zohar@linux.vnet.ibm.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: James Morris <james.l.morris@oracle.com>
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ security/keys/encrypted-keys/encrypted.c | 31 ++++++++++++------------
+ 1 file changed, 15 insertions(+), 16 deletions(-)
+
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -141,23 +141,22 @@ static int valid_ecryptfs_desc(const cha
+ */
+ static int valid_master_desc(const char *new_desc, const char *orig_desc)
+ {
+- if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) {
+- if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN)
+- goto out;
+- if (orig_desc)
+- if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN))
+- goto out;
+- } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) {
+- if (strlen(new_desc) == KEY_USER_PREFIX_LEN)
+- goto out;
+- if (orig_desc)
+- if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN))
+- goto out;
+- } else
+- goto out;
++ int prefix_len;
++
++ if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
++ prefix_len = KEY_TRUSTED_PREFIX_LEN;
++ else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
++ prefix_len = KEY_USER_PREFIX_LEN;
++ else
++ return -EINVAL;
++
++ if (!new_desc[prefix_len])
++ return -EINVAL;
++
++ if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
++ return -EINVAL;
++
+ return 0;
+-out:
+- return -EINVAL;
+ }
+
+ /*
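
As background, a small userspace sketch of the distinction this patch relies on
(the prefix value is assumed, not copied from the key code): memcmp() may
examine all n bytes of both buffers regardless of string length, so a
master-key description shorter than the prefix can be overread, whereas
strncmp() stops at the terminating NUL.

    #include <stdio.h>
    #include <string.h>

    #define KEY_USER_PREFIX     "user:"
    #define KEY_USER_PREFIX_LEN (sizeof(KEY_USER_PREFIX) - 1)

    int main(void)
    {
        /* A description shorter than the prefix, as produced by e.g.
         * 'keyctl add encrypted desc "new x" @s'.  In the kernel the string
         * sat at the end of a heap allocation; here it is padded so the
         * sketch itself stays well-defined. */
        const char new_desc[KEY_USER_PREFIX_LEN] = "x";

        /* memcmp() is permitted to read all KEY_USER_PREFIX_LEN bytes of
         * new_desc even though the string occupies only two of them -- with
         * an allocation ending right after the NUL, that is an overread. */
        int old_style = memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN);

        /* strncmp() compares at most KEY_USER_PREFIX_LEN bytes but stops at
         * the first NUL, so it never reads past the end of new_desc. */
        int new_style = strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN);

        printf("memcmp=%d strncmp=%d\n", old_style != 0, new_style != 0);
        return 0;
    }
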
diff --git a/queue-3.16/mm-cleancache-fix-corruption-on-missed-inode-invalidation.patch b/queue-3.16/mm-cleancache-fix-corruption-on-missed-inode-invalidation.patch
new file mode 100644
index 00000000..91c6b757
--- /dev/null
+++ b/queue-3.16/mm-cleancache-fix-corruption-on-missed-inode-invalidation.patch
@@ -0,0 +1,69 @@
+From: Pavel Tikhomirov <ptikhomirov@virtuozzo.com>
+Date: Fri, 30 Nov 2018 14:09:00 -0800
+Subject: mm: cleancache: fix corruption on missed inode invalidation
+
+commit 6ff38bd40230af35e446239396e5fc8ebd6a5248 upstream.
+
+If all pages are deleted from the mapping by memory reclaim and also
+moved to the cleancache:
+
+__delete_from_page_cache
+ (no shadow case)
+ unaccount_page_cache_page
+ cleancache_put_page
+ page_cache_delete
+ mapping->nrpages -= nr
+ (nrpages becomes 0)
+
+We don't clean the cleancache for an inode after final file truncation
+(removal).
+
+truncate_inode_pages_final
+ check (nrpages || nrexceptional) is false
+ no truncate_inode_pages
+ no cleancache_invalidate_inode(mapping)
+
+This way, when reading a new file created with the same inode, we may get
+these stale leftover pages from cleancache and see wrong data instead of
+the contents of the new file.
+
+Fix it by always calling truncate_inode_pages(), which already handles the
+nrpages == 0 && nrexceptional == 0 case and just invalidates the inode in
+cleancache.
+
+[akpm@linux-foundation.org: add comment, per Jan]
+Link: http://lkml.kernel.org/r/20181112095734.17979-1-ptikhomirov@virtuozzo.com
+Fixes: commit 91b0abe36a7b ("mm + fs: store shadow entries in page cache")
+Signed-off-by: Pavel Tikhomirov <ptikhomirov@virtuozzo.com>
+Reviewed-by: Vasily Averin <vvs@virtuozzo.com>
+Reviewed-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[bwh: Backported to 3.16: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ mm/truncate.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -461,9 +461,13 @@ void truncate_inode_pages_final(struct a
+ */
+ spin_lock_irq(&mapping->tree_lock);
+ spin_unlock_irq(&mapping->tree_lock);
+-
+- truncate_inode_pages(mapping, 0);
+ }
++
++ /*
++ * Cleancache needs notification even if there are no pages or shadow
++ * entries.
++ */
++ truncate_inode_pages(mapping, 0);
+ }
+ EXPORT_SYMBOL(truncate_inode_pages_final);
+
diff --git a/queue-3.16/mremap-properly-flush-tlb-before-releasing-the-page.patch b/queue-3.16/mremap-properly-flush-tlb-before-releasing-the-page.patch
new file mode 100644
index 00000000..641547af
--- /dev/null
+++ b/queue-3.16/mremap-properly-flush-tlb-before-releasing-the-page.patch
@@ -0,0 +1,136 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 12 Oct 2018 15:22:59 -0700
+Subject: mremap: properly flush TLB before releasing the page
+
+commit eb66ae030829605d61fbef1909ce310e29f78821 upstream.
+
+Jann Horn points out that our TLB flushing was subtly wrong for the
+mremap() case. What makes mremap() special is that we don't follow the
+usual "add page to list of pages to be freed, then flush tlb, and then
+free pages". No, mremap() obviously just _moves_ the page from one page
+table location to another.
+
+That matters, because mremap() thus doesn't directly control the
+lifetime of the moved page with a freelist: instead, the lifetime of the
+page is controlled by the page table locking, that serializes access to
+the entry.
+
+As a result, we need to flush the TLB not just before releasing the lock
+for the source location (to avoid any concurrent accesses to the entry),
+but also before we release the destination page table lock (to avoid the
+TLB being flushed after somebody else has already done something to that
+page).
+
+This also makes the whole "need_flush" logic unnecessary, since we now
+always end up flushing the TLB for every valid entry.
+
+Reported-and-tested-by: Jann Horn <jannh@google.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Tested-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[will: backport to 4.4 stable]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[bwh: Backported to 3.16: adjust context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ mm/huge_memory.c | 6 +++++-
+ mm/mremap.c | 21 ++++++++++++++++-----
+ 2 files changed, 21 insertions(+), 6 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1452,7 +1452,7 @@ int move_huge_pmd(struct vm_area_struct
+ spinlock_t *old_ptl, *new_ptl;
+ int ret = 0;
+ pmd_t pmd;
+-
++ bool force_flush = false;
+ struct mm_struct *mm = vma->vm_mm;
+
+ if ((old_addr & ~HPAGE_PMD_MASK) ||
+@@ -1480,6 +1480,8 @@ int move_huge_pmd(struct vm_area_struct
+ if (new_ptl != old_ptl)
+ spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+ pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
++ if (pmd_present(pmd))
++ force_flush = true;
+ VM_BUG_ON(!pmd_none(*new_pmd));
+
+ if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
+@@ -1488,6 +1490,8 @@ int move_huge_pmd(struct vm_area_struct
+ pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
+ }
+ set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
++ if (force_flush)
++ flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
+ spin_unlock(old_ptl);
+--- a/mm/mremap.c
++++ b/mm/mremap.c
+@@ -95,6 +95,8 @@ static void move_ptes(struct vm_area_str
+ struct mm_struct *mm = vma->vm_mm;
+ pte_t *old_pte, *new_pte, pte;
+ spinlock_t *old_ptl, *new_ptl;
++ bool force_flush = false;
++ unsigned long len = old_end - old_addr;
+
+ /*
+ * When need_rmap_locks is true, we take the i_mmap_mutex and anon_vma
+@@ -141,12 +143,26 @@ static void move_ptes(struct vm_area_str
+ if (pte_none(*old_pte))
+ continue;
+ pte = ptep_get_and_clear(mm, old_addr, old_pte);
++ /*
++ * If we are remapping a valid PTE, make sure
++ * to flush TLB before we drop the PTL for the PTE.
++ *
++ * NOTE! Both old and new PTL matter: the old one
++ * for racing with page_mkclean(), the new one to
++ * make sure the physical page stays valid until
++ * the TLB entry for the old mapping has been
++ * flushed.
++ */
++ if (pte_present(pte))
++ force_flush = true;
+ pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
+ pte = move_soft_dirty_pte(pte);
+ set_pte_at(mm, new_addr, new_pte, pte);
+ }
+
+ arch_leave_lazy_mmu_mode();
++ if (force_flush)
++ flush_tlb_range(vma, old_end - len, old_end);
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
+ pte_unmap(new_pte - 1);
+@@ -166,7 +182,6 @@ unsigned long move_page_tables(struct vm
+ {
+ unsigned long extent, next, old_end;
+ pmd_t *old_pmd, *new_pmd;
+- bool need_flush = false;
+ unsigned long mmun_start; /* For mmu_notifiers */
+ unsigned long mmun_end; /* For mmu_notifiers */
+
+@@ -204,7 +219,6 @@ unsigned long move_page_tables(struct vm
+ anon_vma_unlock_write(vma->anon_vma);
+ }
+ if (err > 0) {
+- need_flush = true;
+ continue;
+ } else if (!err) {
+ split_huge_page_pmd(vma, old_addr, old_pmd);
+@@ -221,10 +235,7 @@ unsigned long move_page_tables(struct vm
+ extent = LATENCY_LIMIT;
+ move_ptes(vma, old_pmd, old_addr, old_addr + extent,
+ new_vma, new_pmd, new_addr, need_rmap_locks);
+- need_flush = true;
+ }
+- if (likely(need_flush))
+- flush_tlb_range(vma, old_end-len, old_addr);
+
+ mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
+
diff --git a/queue-3.16/posix-timers-sanitize-overrun-handling.patch b/queue-3.16/posix-timers-sanitize-overrun-handling.patch
new file mode 100644
index 00000000..c6b59d9e
--- /dev/null
+++ b/queue-3.16/posix-timers-sanitize-overrun-handling.patch
@@ -0,0 +1,142 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 26 Jun 2018 15:21:32 +0200
+Subject: posix-timers: Sanitize overrun handling
+
+commit 78c9c4dfbf8c04883941445a195276bb4bb92c76 upstream.
+
+The posix timer overrun handling is broken because the forwarding functions
+can return a huge number of overruns which does not fit in an int. As a
+consequence timer_getoverrun(2) and siginfo::si_overrun can turn into
+random number generators.
+
+The k_clock::timer_forward() callbacks return a 64 bit value now. Make
+k_itimer::it_overrun[_last] 64bit as well, so the kernel internal
+accounting is correct. Remove the temporary (int) casts.
+
+Add a helper function which clamps the overrun value returned to user space
+via timer_getoverrun(2) or siginfo::si_overrun limited to a positive value
+between 0 and INT_MAX. INT_MAX is an indicator for user space that the
+overrun value has been clamped.
+
+Reported-by: Team OWL337 <icytxw@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: John Stultz <john.stultz@linaro.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Michael Kerrisk <mtk.manpages@gmail.com>
+Link: https://lkml.kernel.org/r/20180626132705.018623573@linutronix.de
+[bwh: Backported to 3.16: adjust filenames, context]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ include/linux/posix-timers.h | 4 ++--
+ kernel/posix-cpu-timers.c | 2 +-
+ kernel/posix-timers.c | 31 ++++++++++++++++++++-----------
+ 3 files changed, 23 insertions(+), 14 deletions(-)
+
+--- a/include/linux/posix-timers.h
++++ b/include/linux/posix-timers.h
+@@ -65,8 +65,8 @@ struct k_itimer {
+ spinlock_t it_lock;
+ clockid_t it_clock; /* which timer type */
+ timer_t it_id; /* timer id */
+- int it_overrun; /* overrun on pending signal */
+- int it_overrun_last; /* overrun on last delivered signal */
++ s64 it_overrun; /* overrun on pending signal */
++ s64 it_overrun_last; /* overrun on last delivered signal */
+ int it_requeue_pending; /* waiting to requeue this timer */
+ #define REQUEUE_PENDING 1
+ int it_sigev_notify; /* notify word of sigevent struct */
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -103,7 +103,7 @@ static void bump_cpu_timer(struct k_itim
+ continue;
+
+ timer->it.cpu.expires += incr;
+- timer->it_overrun += 1 << i;
++ timer->it_overrun += 1LL << i;
+ delta -= incr;
+ }
+ }
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -347,6 +347,17 @@ static __init int init_posix_timers(void
+
+ __initcall(init_posix_timers);
+
++/*
++ * The siginfo si_overrun field and the return value of timer_getoverrun(2)
++ * are of type int. Clamp the overrun value to INT_MAX
++ */
++static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
++{
++ s64 sum = timr->it_overrun_last + (s64)baseval;
++
++ return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
++}
++
+ static void schedule_next_timer(struct k_itimer *timr)
+ {
+ struct hrtimer *timer = &timr->it.real.timer;
+@@ -354,12 +365,11 @@ static void schedule_next_timer(struct k
+ if (timr->it.real.interval.tv64 == 0)
+ return;
+
+- timr->it_overrun += (unsigned int) hrtimer_forward(timer,
+- timer->base->get_time(),
+- timr->it.real.interval);
++ timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
++ timr->it.real.interval);
+
+ timr->it_overrun_last = timr->it_overrun;
+- timr->it_overrun = -1;
++ timr->it_overrun = -1LL;
+ ++timr->it_requeue_pending;
+ hrtimer_restart(timer);
+ }
+@@ -388,7 +398,7 @@ void do_schedule_next_timer(struct sigin
+ else
+ schedule_next_timer(timr);
+
+- info->si_overrun += timr->it_overrun_last;
++ info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
+ }
+
+ if (timr)
+@@ -483,9 +493,8 @@ static enum hrtimer_restart posix_timer_
+ now = ktime_add(now, kj);
+ }
+ #endif
+- timr->it_overrun += (unsigned int)
+- hrtimer_forward(timer, now,
+- timr->it.real.interval);
++ timr->it_overrun += hrtimer_forward(timer, now,
++ timr->it.real.interval);
+ ret = HRTIMER_RESTART;
+ ++timr->it_requeue_pending;
+ }
+@@ -633,7 +642,7 @@ SYSCALL_DEFINE3(timer_create, const cloc
+ it_id_set = IT_ID_SET;
+ new_timer->it_id = (timer_t) new_timer_id;
+ new_timer->it_clock = which_clock;
+- new_timer->it_overrun = -1;
++ new_timer->it_overrun = -1LL;
+
+ if (timer_event_spec) {
+ if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
+@@ -763,7 +772,7 @@ common_timer_get(struct k_itimer *timr,
+ * expiry is > now.
+ */
+ if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
+- timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
++ timr->it_overrun += hrtimer_forward(timer, now, iv);
+
+ remaining = ktime_sub(hrtimer_get_expires(timer), now);
+ /* Return 0 only, when the timer is expired and not pending */
+@@ -825,7 +834,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
+ if (!timr)
+ return -EINVAL;
+
+- overrun = timr->it_overrun_last;
++ overrun = timer_overrun_to_int(timr, 0);
+ unlock_timer(timr, flags);
+
+ return overrun;
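
As a standalone illustration of the new helper's behaviour (re-stated here for
a userspace build; the kernel version above is authoritative), the 64-bit
overrun accounting saturates at INT_MAX on its way back to user space:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors timer_overrun_to_int(): clamp the 64-bit overrun count to the
     * int range exposed by timer_getoverrun(2) and siginfo::si_overrun. */
    static int overrun_to_int(int64_t it_overrun_last, int baseval)
    {
        int64_t sum = it_overrun_last + (int64_t)baseval;

        return sum > (int64_t)INT_MAX ? INT_MAX : (int)sum;
    }

    int main(void)
    {
        printf("%d\n", overrun_to_int(42, 0));          /* 42, fits in an int */
        printf("%d\n", overrun_to_int(1LL << 40, 0));   /* clamped to INT_MAX */
        printf("%d\n", overrun_to_int(1LL << 40, 5));   /* still INT_MAX */
        return 0;
    }
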
diff --git a/queue-3.16/series b/queue-3.16/series
index 99c0fd53..9f158897 100644
--- a/queue-3.16/series
+++ b/queue-3.16/series
@@ -319,3 +319,10 @@ r8169-fix-napi-handling-under-high-load.patch
net-ipv6-fix-index-counter-for-unicast-addresses-in-in6_dump_addrs.patch
mtd-fsl-quadspi-fix-macro-collision-problems-with-read-write.patch
cpuidle-do-not-access-cpuidle_devices-when-config_cpu_idle.patch
+keys-encrypted-fix-buffer-overread-in-valid_master_desc.patch
+wil6210-missing-length-check-in-wmi_set_ie.patch
+posix-timers-sanitize-overrun-handling.patch
+mm-cleancache-fix-corruption-on-missed-inode-invalidation.patch
+mremap-properly-flush-tlb-before-releasing-the-page.patch
+xfs-don-t-fail-when-converting-shortform-attr-to-long-form-during.patch
+cdrom-fix-improper-type-cast-which-can-leat-to-information-leak.patch
diff --git a/queue-3.16/wil6210-missing-length-check-in-wmi_set_ie.patch b/queue-3.16/wil6210-missing-length-check-in-wmi_set_ie.patch
new file mode 100644
index 00000000..1d306b1c
--- /dev/null
+++ b/queue-3.16/wil6210-missing-length-check-in-wmi_set_ie.patch
@@ -0,0 +1,31 @@
+From: Lior David <qca_liord@qca.qualcomm.com>
+Date: Tue, 14 Nov 2017 15:25:39 +0200
+Subject: wil6210: missing length check in wmi_set_ie
+
+commit b5a8ffcae4103a9d823ea3aa3a761f65779fbe2a upstream.
+
+Add a length check in wmi_set_ie to detect unsigned integer
+overflow.
+
+Signed-off-by: Lior David <qca_liord@qca.qualcomm.com>
+Signed-off-by: Maya Erez <qca_merez@qca.qualcomm.com>
+Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
+[bwh: Backported to 3.16: return directly rather than via "out" label]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+--- a/drivers/net/wireless/ath/wil6210/wmi.c
++++ b/drivers/net/wireless/ath/wil6210/wmi.c
+@@ -958,7 +958,12 @@ int wmi_set_ie(struct wil6210_priv *wil,
+ {
+ int rc;
+ u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
+- struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
++ struct wmi_set_appie_cmd *cmd;
++
++ if (len < ie_len)
++ return -EINVAL;
++
++ cmd = kzalloc(len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
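
For reference, a minimal userspace sketch of the 16-bit wrap-around the new
check rejects (the header size is assumed; the real struct wmi_set_appie_cmd
differs): a large ie_len makes the u16 total length wrap, so the buffer
allocated with kzalloc(len, GFP_KERNEL) would be smaller than the IE data
copied into it.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint16_t hdr_len = 4;      /* assumed sizeof(struct wmi_set_appie_cmd) */
        uint16_t ie_len = 0xfffe;        /* attacker-influenced IE length */
        uint16_t len = hdr_len + ie_len; /* wraps modulo 65536: 65538 -> 2 */

        printf("ie_len=%u len=%u\n", ie_len, len);

        /* The check added by the patch: after a wrap, len < ie_len, so the
         * request is rejected before the undersized allocation happens. */
        if (len < ie_len)
            printf("rejected with -EINVAL\n");

        return 0;
    }
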
diff --git a/queue-3.16/xfs-don-t-fail-when-converting-shortform-attr-to-long-form-during.patch b/queue-3.16/xfs-don-t-fail-when-converting-shortform-attr-to-long-form-during.patch
new file mode 100644
index 00000000..d055e744
--- /dev/null
+++ b/queue-3.16/xfs-don-t-fail-when-converting-shortform-attr-to-long-form-during.patch
@@ -0,0 +1,45 @@
+From: "Darrick J. Wong" <darrick.wong@oracle.com>
+Date: Tue, 17 Apr 2018 19:10:15 -0700
+Subject: xfs: don't fail when converting shortform attr to long form during
+ ATTR_REPLACE
+
+commit 7b38460dc8e4eafba06c78f8e37099d3b34d473c upstream.
+
+Kanda Motohiro reported that expanding a tiny xattr into a large xattr
+fails on XFS because we remove the tiny xattr from a shortform fork and
+then try to re-add it after converting the fork to extents format having
+not removed the ATTR_REPLACE flag. This fails because the attr is no
+longer present, causing a fs shutdown.
+
+This is derived from the patch in his bug report, but we really
+shouldn't ignore a nonzero retval from the remove call.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=199119
+Reported-by: kanda.motohiro@gmail.com
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+[bwh: Backported to 3.16: adjust filename]
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+---
+ fs/xfs/xfs_attr.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/fs/xfs/xfs_attr.c
++++ b/fs/xfs/xfs_attr.c
+@@ -540,7 +540,14 @@ xfs_attr_shortform_addname(xfs_da_args_t
+ if (args->flags & ATTR_CREATE)
+ return(retval);
+ retval = xfs_attr_shortform_remove(args);
+- ASSERT(retval == 0);
++ if (retval)
++ return retval;
++ /*
++ * Since we have removed the old attr, clear ATTR_REPLACE so
++ * that the leaf format add routine won't trip over the attr
++ * not being around.
++ */
++ args->flags &= ~ATTR_REPLACE;
+ }
+
+ if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||