diff options
author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2017-06-07 11:04:13 +0200 |
---|---|---|
committer | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2017-06-07 22:24:56 +0200 |
commit | e435c24ff8bc5e5ba909c337419b0bcd9e61de55 (patch) | |
tree | 690f6ba81489a71f535d0b69b51ad01b1679fde2 | |
parent | 92747649c94d4d6b1cf3ee8b24caafe8c8c019e3 (diff) | |
download | 4.12-rt-patches-e435c24ff8bc5e5ba909c337419b0bcd9e61de55.tar.gz |
[ANNOUNCE] v4.9.30-rt21
Dear RT folks!
I'm pleased to announce the v4.9.30-rt21 patch set.
Changes since v4.9.30-rt20:
- gdb. While gdb is following a task it was possible that after a
fork() operation the task was waiting for gdb and gdb was waiting for
the task. Reported by Mathias Koehrer and David Hauck. Patched by
Thomas Gleixner.
- Since the change above the define TASK_ALL remains unused and got
removed by Peter Zijlstra.
Known issues
- CPU hotplug got a little better but can deadlock.
The delta patch against v4.9.30-rt20 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/incr/patch-4.9.30-rt20-rt21.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.9.30-rt21
The RT patch against v4.9.30 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patch-4.9.30-rt21.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.9/older/patches-4.9.30-rt21.tar.xz
Sebastian
-rw-r--r-- | patches/cond-resched-softirq-rt.patch | 2 | ||||
-rw-r--r-- | patches/cpu-rt-rework-cpu-down.patch | 4 | ||||
-rw-r--r-- | patches/localversion.patch | 2 | ||||
-rw-r--r-- | patches/mm-rt-kmap-atomic-scheduling.patch | 2 | ||||
-rw-r--r-- | patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch | 2 | ||||
-rw-r--r-- | patches/preempt-lazy-support.patch | 2 | ||||
-rw-r--r-- | patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 4 | ||||
-rw-r--r-- | patches/rt-add-rt-locks.patch | 4 | ||||
-rw-r--r-- | patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch | 77 | ||||
-rw-r--r-- | patches/sched-Remove-TASK_ALL.patch | 29 | ||||
-rw-r--r-- | patches/series | 2 | ||||
-rw-r--r-- | patches/softirq-split-locks.patch | 4 |
12 files changed, 121 insertions, 13 deletions
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch index cd245953aff9ff..5475b66575de39 100644 --- a/patches/cond-resched-softirq-rt.patch +++ b/patches/cond-resched-softirq-rt.patch @@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -3373,12 +3373,16 @@ extern int __cond_resched_lock(spinlock_ +@@ -3372,12 +3372,16 @@ extern int __cond_resched_lock(spinlock_ __cond_resched_lock(lock); \ }) diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch index ecfa0355015a38..00b2a2b57fcf29 100644 --- a/patches/cpu-rt-rework-cpu-down.patch +++ b/patches/cpu-rt-rework-cpu-down.patch @@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -2480,6 +2480,10 @@ extern void do_set_cpus_allowed(struct t +@@ -2479,6 +2479,10 @@ extern void do_set_cpus_allowed(struct t extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); @@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #else static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) -@@ -2492,6 +2496,9 @@ static inline int set_cpus_allowed_ptr(s +@@ -2491,6 +2495,9 @@ static inline int set_cpus_allowed_ptr(s return -EINVAL; return 0; } diff --git a/patches/localversion.patch b/patches/localversion.patch index d7c1a50b87ee55..bba4391fd1bdb9 100644 --- a/patches/localversion.patch +++ b/patches/localversion.patch @@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ -+-rt20 ++-rt21 diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch index 6e7e6203024a32..0b521f4b9a92da 100644 --- a/patches/mm-rt-kmap-atomic-scheduling.patch +++ b/patches/mm-rt-kmap-atomic-scheduling.patch @@ -229,7 +229,7 @@ Link: 
http://lkml.kernel.org/r/1311842631.5890.208.camel@twins #include <asm/page.h> #include <asm/ptrace.h> -@@ -1986,6 +1987,12 @@ struct task_struct { +@@ -1985,6 +1986,12 @@ struct task_struct { int softirq_nestcnt; unsigned int softirqs_raised; #endif diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch index 63795fe8c2bd32..6c6125c6d2aff2 100644 --- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch +++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch @@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1989,6 +1989,9 @@ struct task_struct { +@@ -1988,6 +1988,9 @@ struct task_struct { #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; #endif diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch index b1f3fcf35d1d7d..131245a37735a8 100644 --- a/patches/preempt-lazy-support.patch +++ b/patches/preempt-lazy-support.patch @@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -3349,6 +3349,43 @@ static inline int test_tsk_need_resched( +@@ -3348,6 +3348,43 @@ static inline int test_tsk_need_resched( return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch index f14263f8b2df7d..dd83349b49d15b 100644 --- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch +++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch @@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -243,10 +243,7 @@ extern char ___assert_task_state[1 - 2*! 
+@@ -242,10 +242,7 @@ extern char ___assert_task_state[1 - 2*! TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) @@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #define task_contributes_to_load(task) \ ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ (task->flags & PF_FROZEN) == 0 && \ -@@ -3366,6 +3363,51 @@ static inline int signal_pending_state(l +@@ -3365,6 +3362,51 @@ static inline int signal_pending_state(l return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch index 90426cd427bf5e..e53cd235d57bcc 100644 --- a/patches/rt-add-rt-locks.patch +++ b/patches/rt-add-rt-locks.patch @@ -600,7 +600,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +#endif --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -312,6 +312,11 @@ extern char ___assert_task_state[1 - 2*! +@@ -311,6 +311,11 @@ extern char ___assert_task_state[1 - 2*! #endif @@ -612,7 +612,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* Task command name length */ #define TASK_COMM_LEN 16 -@@ -1013,8 +1018,18 @@ struct wake_q_head { +@@ -1012,8 +1017,18 @@ struct wake_q_head { struct wake_q_head name = { WAKE_Q_TAIL, &name.first } extern void wake_q_add(struct wake_q_head *head, diff --git a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch new file mode 100644 index 00000000000000..93f554941463c8 --- /dev/null +++ b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch @@ -0,0 +1,77 @@ +From: Thomas Gleixner <tglx@linutronix.de> +Date: Tue, 6 Jun 2017 14:20:37 +0200 +Subject: sched: Prevent task state corruption by spurious lock wakeup + +Mathias and others reported GDB failures on RT. 
+ +The following scenario leads to task state corruption: + +CPU0 CPU1 + +T1->state = TASK_XXX; +spin_lock(&lock) + rt_spin_lock_slowlock(&lock->rtmutex) + raw_spin_lock(&rtm->wait_lock); + T1->saved_state = current->state; + T1->state = TASK_UNINTERRUPTIBLE; + spin_unlock(&lock) + task_blocks_on_rt_mutex(rtm) rt_spin_lock_slowunlock(&lock->rtmutex) + queue_waiter(rtm) raw_spin_lock(&rtm->wait_lock); + pi_chain_walk(rtm) + raw_spin_unlock(&rtm->wait_lock); + wake_top_waiter(T1) + + raw_spin_lock(&rtm->wait_lock); + + for (;;) { + if (__try_to_take_rt_mutex()) <- Succeeds + break; + ... + } + + T1->state = T1->saved_state; + try_to_wake_up(T1) + ttwu_do_wakeup(T1) + T1->state = TASK_RUNNING; + +In most cases this is harmless because waiting for some event, which is the +usual reason for TASK_[UN]INTERRUPTIBLE has to be safe against other forms +of spurious wakeups anyway. + +But in case of TASK_TRACED this is actually fatal, because the task loses +the TASK_TRACED state. In consequence it fails to consume SIGSTOP which was +sent from the debugger and actually delivers SIGSTOP to the task which +breaks the ptrace mechanics and brings the debugger into an unexpected +state. + +The TASK_TRACED state should prevent getting there due to the state +matching logic in try_to_wake_up(). But that's not true because +wake_up_lock_sleeper() uses TASK_ALL as state mask. That's bogus because +lock sleepers always use TASK_UNINTERRUPTIBLE, so the wakeup should use +that as well. + +The cure is way simpler as figuring it out: + +Change the mask used in wake_up_lock_sleeper() from TASK_ALL to +TASK_UNINTERRUPTIBLE. 
+ +Cc: stable-rt@vger.kernel.org +Reported-by: Mathias Koehrer <mathias.koehrer@etas.com> +Reported-by: David Hauck <davidh@netacquire.com> +Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +--- + kernel/sched/core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -2207,7 +2207,7 @@ EXPORT_SYMBOL(wake_up_process); + */ + int wake_up_lock_sleeper(struct task_struct *p) + { +- return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); ++ return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER); + } + + int wake_up_state(struct task_struct *p, unsigned int state) diff --git a/patches/sched-Remove-TASK_ALL.patch b/patches/sched-Remove-TASK_ALL.patch new file mode 100644 index 00000000000000..6b25930e3dedd9 --- /dev/null +++ b/patches/sched-Remove-TASK_ALL.patch @@ -0,0 +1,29 @@ +From: Peter Zijlstra <peterz@infradead.org> +Date: Wed, 7 Jun 2017 10:12:45 +0200 +Subject: [PATCH] sched: Remove TASK_ALL + +It's unused: + +$ git grep "\<TASK_ALL\>" | wc -l +1 + +And dangerous, kill the bugger. + +Cc: stable-rt@vger.kernel.org +Acked-by: Thomas Gleixner <tglx@linutronix.de> +Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> +Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +--- + include/linux/sched.h | 1 - + 1 file changed, 1 deletion(-) + +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -236,7 +236,6 @@ extern char ___assert_task_state[1 - 2*! 
+ + /* Convenience macros for the sake of wake_up */ + #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) +-#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) + + /* get_task_state() */ + #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ diff --git a/patches/series b/patches/series index 2dbc3f45e48a18..601935aca5cc30 100644 --- a/patches/series +++ b/patches/series @@ -305,6 +305,8 @@ sched-limit-nr-migrate.patch sched-mmdrop-delayed.patch kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch sched-rt-mutex-wakeup.patch +sched-Prevent-task-state-corruption-by-spurious-lock.patch +sched-Remove-TASK_ALL.patch sched-might-sleep-do-not-account-rcu-depth.patch cond-resched-softirq-rt.patch cond-resched-lock-rt-tweak.patch diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch index 7db052dc25eb87..9c2c45c2d96912 100644 --- a/patches/softirq-split-locks.patch +++ b/patches/softirq-split-locks.patch @@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> * Are we in NMI context? --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1971,6 +1971,8 @@ struct task_struct { +@@ -1970,6 +1970,8 @@ struct task_struct { #endif #ifdef CONFIG_PREEMPT_RT_BASE struct rcu_head put_rcu; @@ -181,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; -@@ -2287,6 +2289,7 @@ extern void thread_group_cputime_adjuste +@@ -2286,6 +2288,7 @@ extern void thread_group_cputime_adjuste /* * Per process flags */ |