| author | Paul Gortmaker <paul.gortmaker@windriver.com> | 2016-12-26 11:21:17 -0500 |
|---|---|---|
| committer | Paul Gortmaker <paul.gortmaker@windriver.com> | 2016-12-26 11:38:08 -0500 |
| commit | 69022ed7dd23b951e3e4ddb1ac670e7a1ab2322e (patch) | |
| tree | 5c69c2ae99eaed56fcba3002ef1b4e47c3c947b6 | |
| parent | 5aa00714c982ea6e4c68fef0ec858a764981f392 (diff) | |
| download | 4.9-rt-patches-69022ed7dd23b951e3e4ddb1ac670e7a1ab2322e.tar.gz | |
rt: import patches new for v4.9-rt (rt-v4.9-rc1)
7 files changed, 260 insertions, 0 deletions
diff --git a/patches/arm-include-definition-for-cpumask_t.patch b/patches/arm-include-definition-for-cpumask_t.patch
new file mode 100644
index 00000000000000..3750303cfa6fd2
--- /dev/null
+++ b/patches/arm-include-definition-for-cpumask_t.patch
@@ -0,0 +1,24 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 22 Dec 2016 17:28:33 +0100
+Subject: [PATCH] arm: include definition for cpumask_t
+
+This definition gets pulled in by other files. With the (later) split of
+RCU and spinlock.h it won't compile anymore.
+The split is done in ("rbtree: don't include the rcu header").
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/include/asm/irq.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/include/asm/irq.h
++++ b/arch/arm/include/asm/irq.h
+@@ -22,6 +22,8 @@
+ #endif
+ 
+ #ifndef __ASSEMBLY__
++#include <linux/cpumask.h>
++
+ struct irqaction;
+ struct pt_regs;
+ extern void migrate_irqs(void);
diff --git a/patches/btrfs-drop-trace_btrfs_all_work_done-from-normal_wor.patch b/patches/btrfs-drop-trace_btrfs_all_work_done-from-normal_wor.patch
new file mode 100644
index 00000000000000..ca7952d7b49375
--- /dev/null
+++ b/patches/btrfs-drop-trace_btrfs_all_work_done-from-normal_wor.patch
@@ -0,0 +1,38 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Dec 2016 14:44:18 +0100
+Subject: [PATCH] btrfs: drop trace_btrfs_all_work_done() from
+ normal_work_helper()
+
+For btrfs_scrubparity_helper() the ->func() is set to
+scrub_parity_bio_endio_worker(). This functions invokes invokes
+scrub_free_parity() which kfrees() the worked object. All is good as
+long as trace events are not enabled because we boom with a backtrace
+like this:
+| Workqueue: btrfs-endio btrfs_endio_helper
+| RIP: 0010:[<ffffffff812f81ae>] [<ffffffff812f81ae>] trace_event_raw_event_btrfs__work__done+0x4e/0xa0
+| Call Trace:
+| [<ffffffff8136497d>] btrfs_scrubparity_helper+0x59d/0x780
+| [<ffffffff81364c49>] btrfs_endio_helper+0x9/0x10
+| [<ffffffff8108af8e>] process_one_work+0x26e/0x7b0
+| [<ffffffff8108b516>] worker_thread+0x46/0x560
+| [<ffffffff81091c4e>] kthread+0xee/0x110
+| [<ffffffff818e166a>] ret_from_fork+0x2a/0x40
+
+So in order to avoid this, I remove the trace point.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/btrfs/async-thread.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -318,8 +318,6 @@ static void normal_work_helper(struct bt
+ 		set_bit(WORK_DONE_BIT, &work->flags);
+ 		run_ordered_work(wq);
+ 	}
+-	if (!need_order)
+-		trace_btrfs_all_work_done(work);
+ }
+ 
+ void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
diff --git a/patches/btrfs-swap-free-and-trace-point-in-run_ordered_work.patch b/patches/btrfs-swap-free-and-trace-point-in-run_ordered_work.patch
new file mode 100644
index 00000000000000..41d0070ef22339
--- /dev/null
+++ b/patches/btrfs-swap-free-and-trace-point-in-run_ordered_work.patch
@@ -0,0 +1,33 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Dec 2016 12:28:52 +0100
+Subject: [PATCH] btrfs: swap free() and trace point in run_ordered_work()
+
+The previous patch removed a trace point due to a use after free problem
+with tracing enabled. While looking at the backtrace it took me a while
+to find the right spot. While doing so I noticed that this trace point
+could be used with two clean-up functions in run_ordered_work():
+- run_one_async_free()
+- async_cow_free()
+
+Both of them free the `work' item so a later use in the tracepoint is
+not possible.
+This patches swaps the order so we first have the trace point and then
+free the struct.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/btrfs/async-thread.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -288,8 +288,8 @@ static void run_ordered_work(struct __bt
+ 		 * we don't want to call the ordered free functions
+ 		 * with the lock held though
+ 		 */
+-		work->ordered_free(work);
+ 		trace_btrfs_all_work_done(work);
++		work->ordered_free(work);
+ 	}
+ 	spin_unlock_irqrestore(lock, flags);
+ }
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
new file mode 100644
index 00000000000000..986d5e1b363918
--- /dev/null
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -0,0 +1,60 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Nov 2016 19:31:08 +0100
+Subject: [PATCH] kernel/sched: move stack + kprobe clean up to
+ __put_task_struct()
+
+There is no need to free the stack before the task struct. This also
+comes handy on -RT because we can't free memory in preempt disabled
+region.
+
+Cc: stable-rt@vger.kernel.org #for kprobe_flush_task()
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/fork.c       | 10 ++++++++++
+ kernel/sched/core.c |  9 ---------
+ 2 files changed, 10 insertions(+), 9 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -76,6 +76,7 @@
+ #include <linux/compiler.h>
+ #include <linux/sysctl.h>
+ #include <linux/kcov.h>
++#include <linux/kprobes.h>
+ 
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -385,6 +386,15 @@ void __put_task_struct(struct task_struc
+ 	WARN_ON(atomic_read(&tsk->usage));
+ 	WARN_ON(tsk == current);
+ 
++	/*
++	 * Remove function-return probe instances associated with this
++	 * task and put them back on the free list.
++	 */
++	kprobe_flush_task(tsk);
++
++	/* Task is done with its stack. */
++	put_task_stack(tsk);
++
+ 	cgroup_free(tsk);
+ 	task_numa_free(tsk);
+ 	security_task_free(tsk);
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2795,15 +2795,6 @@ static struct rq *finish_task_switch(str
+ 		if (prev->sched_class->task_dead)
+ 			prev->sched_class->task_dead(prev);
+ 
+-		/*
+-		 * Remove function-return probe instances associated with this
+-		 * task and put them back on the free list.
+-		 */
+-		kprobe_flush_task(prev);
+-
+-		/* Task is done with its stack. */
+-		put_task_stack(prev);
+-
+ 		put_task_struct(prev);
+ 	}
+ 
diff --git a/patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch b/patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
new file mode 100644
index 00000000000000..0071ac7ce39cbf
--- /dev/null
+++ b/patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
@@ -0,0 +1,72 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Nov 2016 19:26:15 +0100
+Subject: [PATCH] locking/percpu-rwsem: use swait for the wating writer
+
+Use struct swait_queue_head instead of wait_queue_head_t for the waiting
+writer. The swait implementation is smaller and lightweight compared to
+wait_queue_head_t.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/percpu-rwsem.h  | 6 +++---
+ kernel/locking/percpu-rwsem.c | 6 +++---
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/include/linux/percpu-rwsem.h
++++ b/include/linux/percpu-rwsem.h
+@@ -4,7 +4,7 @@
+ #include <linux/atomic.h>
+ #include <linux/rwsem.h>
+ #include <linux/percpu.h>
+-#include <linux/wait.h>
++#include <linux/swait.h>
+ #include <linux/rcu_sync.h>
+ #include <linux/lockdep.h>
+ 
+@@ -12,7 +12,7 @@ struct percpu_rw_semaphore {
+ 	struct rcu_sync rss;
+ 	unsigned int __percpu *read_count;
+ 	struct rw_semaphore rw_sem;
+-	wait_queue_head_t writer;
++	struct swait_queue_head writer;
+ 	int readers_block;
+ };
+ 
+@@ -22,7 +22,7 @@ static struct percpu_rw_semaphore name =
+ 	.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),	\
+ 	.read_count = &__percpu_rwsem_rc_##name,			\
+ 	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
+-	.writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),		\
++	.writer = __SWAIT_QUEUE_HEAD_INITIALIZER(name.writer),		\
+ }
+ 
+ extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+--- a/kernel/locking/percpu-rwsem.c
++++ b/kernel/locking/percpu-rwsem.c
+@@ -18,7 +18,7 @@ int __percpu_init_rwsem(struct percpu_rw
+ 	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
+ 	rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
+ 	__init_rwsem(&sem->rw_sem, name, rwsem_key);
+-	init_waitqueue_head(&sem->writer);
++	init_swait_queue_head(&sem->writer);
+ 	sem->readers_block = 0;
+ 	return 0;
+ }
+@@ -103,7 +103,7 @@ void __percpu_up_read(struct percpu_rw_s
+ 	__this_cpu_dec(*sem->read_count);
+ 
+ 	/* Prod writer to recheck readers_active */
+-	wake_up(&sem->writer);
++	swake_up(&sem->writer);
+ }
+ EXPORT_SYMBOL_GPL(__percpu_up_read);
+ 
+@@ -160,7 +160,7 @@ void percpu_down_write(struct percpu_rw_
+ 	 */
+ 
+ 	/* Wait for all now active readers to complete. */
+-	wait_event(sem->writer, readers_active_check(sem));
++	swait_event(sem->writer, readers_active_check(sem));
+ }
+ EXPORT_SYMBOL_GPL(percpu_down_write);
+ 
diff --git a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
new file mode 100644
index 00000000000000..152606c2f649c8
--- /dev/null
+++ b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -0,0 +1,27 @@
+Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 27 Sep 2012 11:11:46 +0200
+
+The plain spinlock while sufficient does not update the local_lock
+internals. Use a proper local_lock function instead to ease debugging.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ mm/page_alloc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -286,9 +286,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+ 
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ # define cpu_lock_irqsave(cpu, flags)		\
+-	spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
++	local_lock_irqsave_on(pa_lock, flags, cpu)
+ # define cpu_unlock_irqrestore(cpu, flags)	\
+-	spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
++	local_unlock_irqrestore_on(pa_lock, flags, cpu)
+ #else
+ # define cpu_lock_irqsave(cpu, flags)		local_irq_save(flags)
+ # define cpu_unlock_irqrestore(cpu, flags)	local_irq_restore(flags)
diff --git a/patches/series b/patches/series
index 3fc42c6bafe0af..463203b04324e9 100644
--- a/patches/series
+++ b/patches/series
@@ -37,6 +37,9 @@ iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
 x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
 rxrpc-remove-unused-static-variables.patch
 rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
+locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
+btrfs-drop-trace_btrfs_all_work_done-from-normal_wor.patch
+btrfs-swap-free-and-trace-point-in-run_ordered_work.patch
 
 # Wants a different fix for upstream
 NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -236,6 +239,7 @@ slub-enable-irqs-for-no-wait.patch
 slub-disable-SLUB_CPU_PARTIAL.patch
 
 # MM
+mm-page-alloc-use-local-lock-on-target-cpu.patch
 mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
 mm-memcontrol-do_not_disable_irq.patch
 mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -267,6 +271,7 @@ posix-timers-thread-posix-cpu-timers-on-rt.patch
 sched-delay-put-task.patch
 sched-limit-nr-migrate.patch
 sched-mmdrop-delayed.patch
+kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
 sched-rt-mutex-wakeup.patch
 sched-might-sleep-do-not-account-rcu-depth.patch
 cond-resched-softirq-rt.patch
@@ -315,6 +320,7 @@ futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
 
 # RTMUTEX
 pid.h-include-atomic.h.patch
+arm-include-definition-for-cpumask_t.patch
 locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
 rtmutex-lock-killable.patch
 spinlock-types-separate-raw.patch
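
The ordering problem addressed by btrfs-swap-free-and-trace-point-in-run_ordered_work.patch is not btrfs specific: any trace point or logging hook that dereferences an object has to fire before the object's free callback runs. The following stand-alone C sketch illustrates that ordering; it is user-space and the names (work_item, trace_work_done, work_free) are illustrative only, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a work item with an ordered free callback. */
struct work_item {
        int id;
        void (*ordered_free)(struct work_item *w);
};

/* Free callback, playing the role of run_one_async_free()/async_cow_free(). */
static void work_free(struct work_item *w)
{
        free(w);                        /* after this, w is dangling */
}

/* Stand-in for a trace point that dereferences the work item. */
static void trace_work_done(struct work_item *w)
{
        printf("work %d done\n", w->id);
}

int main(void)
{
        struct work_item *w = malloc(sizeof(*w));

        if (!w)
                return 1;
        w->id = 1;
        w->ordered_free = work_free;

        /* Safe ordering, as after the patch: trace first, then free. */
        trace_work_done(w);
        w->ordered_free(w);

        /*
         * The reverse ordering -- w->ordered_free(w); then
         * trace_work_done(w); -- would read freed memory, which is the
         * use-after-free behind the backtrace quoted in
         * btrfs-drop-trace_btrfs_all_work_done-from-normal_wor.patch.
         */
        return 0;
}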