summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2016-09-15 18:33:45 +0200
committerSebastian Andrzej Siewior <bigeasy@linutronix.de>2016-09-15 18:33:45 +0200
commit721e3970304532c88dddce11e794001cb90f374f (patch)
tree879abf5ab510458ef57280828707779b8afee6ec
parent813679b92ea1fca9d3ca9a9cac5fc06f99a046b5 (diff)
download4.8-rt-patches-v4.6-rt.tar.gz
[ANNOUNCE] 4.6.7-rt13v4.6.7-rt13-patchesv4.6-rt
Dear RT folks! I'm pleased to announce the v4.6.7-rt13 patch set. Changes since v4.6.7-rt12: - The dcache regression fix up introduced another problem. As pointed out by Thomas Gleixner we can't avoid cpu_chill() for !RT tasks because the owner might be preempted and we would spin until our time slice is used up. Therefore the sched class is ignored and we "chill" if the lock is taken and cond_resched() did not work. - Newer gcc makes some noise if __builtin_return_address(x) with x > 1 is used. The warning can be suppressed by a config option (Steven Rostedt) - might_resched() on x86 with lazy preempt might ignore the preemption counter. Now no more. Known issues - CPU hotplug got a little better but can deadlock. The delta patch against 4.6.7-rt12 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/incr/patch-4.6.7-rt12-rt13.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.6.7-rt13 The RT patch against 4.6.7 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patch-4.6.7-rt13.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.6/patches-4.6.7-rt13.tar.xz Sebastian Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--patches/fs-dcache-resched-chill-only-if-we-make-no-progress.patch22
-rw-r--r--patches/latency-hist.patch4
-rw-r--r--patches/localversion.patch2
-rw-r--r--patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch110
-rw-r--r--patches/series2
-rw-r--r--patches/x86-preempt-lazy-fixup-should_resched.patch49
6 files changed, 171 insertions, 18 deletions
diff --git a/patches/fs-dcache-resched-chill-only-if-we-make-no-progress.patch b/patches/fs-dcache-resched-chill-only-if-we-make-no-progress.patch
index a59001e85da832..185bf2e8cd886c 100644
--- a/patches/fs-dcache-resched-chill-only-if-we-make-no-progress.patch
+++ b/patches/fs-dcache-resched-chill-only-if-we-make-no-progress.patch
@@ -23,21 +23,12 @@ progress.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- fs/dcache.c | 19 +++++++++++++------
- 1 file changed, 13 insertions(+), 6 deletions(-)
+ fs/dcache.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -40,6 +40,8 @@
- #include <linux/ratelimit.h>
- #include <linux/list_lru.h>
- #include <linux/kasan.h>
-+#include <linux/sched/rt.h>
-+#include <linux/sched/deadline.h>
-
- #include "internal.h"
- #include "mount.h"
-@@ -748,6 +750,8 @@ static inline bool fast_dput(struct dent
+@@ -748,6 +748,8 @@ static inline bool fast_dput(struct dent
*/
void dput(struct dentry *dentry)
{
@@ -46,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(!dentry))
return;
-@@ -784,14 +788,17 @@ void dput(struct dentry *dentry)
+@@ -784,14 +786,18 @@ void dput(struct dentry *dentry)
return;
kill_it:
@@ -63,10 +54,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (parent == dentry) {
+ /* the task with the highest priority won't schedule */
+ r = cond_resched();
-+ if (!r && (rt_task(current) || dl_task(current)))
++ if (!r)
+ cpu_chill();
-+ } else
++ } else {
+ dentry = parent;
++ }
goto repeat;
}
}
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index ce45e4c337e494..8b0db63d4f3efd 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -436,7 +436,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
-@@ -211,6 +229,24 @@ config PREEMPT_TRACER
+@@ -212,6 +230,24 @@ config PREEMPT_TRACER
enabled. This option and the irqs-off timing option can be
used together or separately.)
@@ -461,7 +461,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
config SCHED_TRACER
bool "Scheduling Latency Tracer"
select GENERIC_TRACER
-@@ -221,6 +257,74 @@ config SCHED_TRACER
+@@ -222,6 +258,74 @@ config SCHED_TRACER
This tracer tracks the latency of the highest priority task
to be scheduled in, starting from the point it has woken up.
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 12bd473a33f5b0..25e5fadbaae8f1 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt12
++-rt13
diff --git a/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch b/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
new file mode 100644
index 00000000000000..bce6d3d9d0b223
--- /dev/null
+++ b/patches/lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
@@ -0,0 +1,110 @@
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Thu, 8 Sep 2016 12:34:33 -0400
+Subject: [PATCH] lockdep: Quiet gcc about dangerous __builtin_return_address()
+ operations
+
+[
+ Boris, does this quiet gcc for you?
+ I haven't fully tested this yet, as I still don't have a compiler
+ that does the warning.
+]
+
+Gcc's new warnings about __builtin_return_address(n) operations with
+n > 0 is popping up around the kernel. The operation is dangerous, and
+the warning is "good to know". But there's instances that we use
+__builtin_return_address(n) with n > 0 and are aware of the issues,
+and work around them. And its used mostly for tracing and debugging. In
+these cases, the warning becomes a distraction and is not helpful.
+
+To get better lock issue traces, a function like get_lock_parent_ip()
+uses __builtin_return_address() to find the caller of the lock, and
+skip over the internal callers of the lock itself. Currently it is only
+used in the kernel/ directory and only if certain configs are enabled.
+
+Create a new config called CONFIG_USING_GET_LOCK_PARENT_IP that gets
+selected when another config relies on get_lock_parent_ip(), and this
+will now enable the function get_lock_parent_ip(), otherwise it wont be
+defined. It will also disable the frame-address warnings from gcc in
+the kernel directory.
+
+Reported-by: Borislav Petkov <bp@alien8.de>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+---
+ include/linux/ftrace.h | 2 ++
+ kernel/Makefile | 7 +++++++
+ kernel/trace/Kconfig | 1 +
+ lib/Kconfig.debug | 10 ++++++++++
+ 4 files changed, 20 insertions(+)
+
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -713,6 +713,7 @@ static inline void __ftrace_enabled_rest
+ #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
+ #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
+
++#ifdef CONFIG_USING_GET_LOCK_PARENT_IP
+ static inline unsigned long get_lock_parent_ip(void)
+ {
+ unsigned long addr = CALLER_ADDR0;
+@@ -724,6 +725,7 @@ static inline unsigned long get_lock_par
+ return addr;
+ return CALLER_ADDR2;
+ }
++#endif
+
+ #ifdef CONFIG_IRQSOFF_TRACER
+ extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
+--- a/kernel/Makefile
++++ b/kernel/Makefile
+@@ -11,6 +11,13 @@ obj-y = fork.o exec_domain.o panic.o
+ notifier.o ksysfs.o cred.o reboot.o \
+ async.o range.o smpboot.o
+
++# Tracing may do some dangerous __builtin_return_address() operations
++# We know they are dangerous, we don't need gcc telling us that.
++ifdef CONFIG_USING_GET_LOCK_PARENT_IP
++FRAME_CFLAGS := $(call cc-disable-warning,frame-address)
++KBUILD_CFLAGS += $(FRAME_CFLAGS)
++endif
++
+ obj-$(CONFIG_MULTIUSER) += groups.o
+
+ ifdef CONFIG_FUNCTION_TRACER
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -197,6 +197,7 @@ config PREEMPT_TRACER
+ select RING_BUFFER_ALLOW_SWAP
+ select TRACER_SNAPSHOT
+ select TRACER_SNAPSHOT_PER_CPU_SWAP
++ select USING_GET_LOCK_PARENT_IP
+ help
+ This option measures the time spent in preemption-off critical
+ sections, with microsecond accuracy.
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -962,6 +962,7 @@ config TIMER_STATS
+ config DEBUG_PREEMPT
+ bool "Debug preemptible kernel"
+ depends on DEBUG_KERNEL && PREEMPT && TRACE_IRQFLAGS_SUPPORT
++ select USING_GET_LOCK_PARENT_IP
+ default y
+ help
+ If you say Y here then the kernel will use a debug variant of the
+@@ -1144,8 +1145,17 @@ config LOCK_TORTURE_TEST
+
+ endmenu # lock debugging
+
++config USING_GET_LOCK_PARENT_IP
++ bool
++ help
++ Enables the use of the function get_lock_parent_ip() that
++ will use __builtin_return_address(n) with n > 0 causing
++ some gcc warnings. When this is selected, those warnings
++ will be suppressed.
++
+ config TRACE_IRQFLAGS
+ bool
++ select USING_GET_LOCK_PARENT_IP
+ help
+ Enables hooks to interrupt enabling and disabling for
+ either tracing or lock debugging.
diff --git a/patches/series b/patches/series
index 84e90a36938452..1d2fa04282f757 100644
--- a/patches/series
+++ b/patches/series
@@ -49,6 +49,7 @@ crypto-ccp-remove-rwlocks_types.h.patch
infiniband-ulp-ipoib-remove-pkey_mutex.patch
sched-preempt-Fix-preempt_count-manipulations.patch
x86-mm-disable-preemption-during-CR3-read-write.patch
+lockdep-Quiet-gcc-about-dangerous-__builtin_return_a.patch
# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
@@ -586,6 +587,7 @@ preempt-lazy-support.patch
preempt-lazy-check-preempt_schedule.patch
sched-lazy_preempt-avoid-a-warning-in-the-RT-case.patch
x86-preempt-lazy.patch
+x86-preempt-lazy-fixup-should_resched.patch
arm-preempt-lazy-support.patch
arm-lazy-preempt-correct-resched-condition.patch
powerpc-preempt-lazy-support.patch
diff --git a/patches/x86-preempt-lazy-fixup-should_resched.patch b/patches/x86-preempt-lazy-fixup-should_resched.patch
new file mode 100644
index 00000000000000..5e771a368d9e78
--- /dev/null
+++ b/patches/x86-preempt-lazy-fixup-should_resched.patch
@@ -0,0 +1,49 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Sep 2016 19:18:47 +0200
+Subject: [PATCH] x86/preempt-lazy: fixup should_resched()
+
+should_resched() returns true if NEED_RESCHED is set and the
+preempt_count is 0 _or_ if NEED_RESCHED_LAZY is set ignoring the preempt
+counter. Ignoring the preempt counter is wrong. This patch takes this into
+account.
+While at it, __preempt_count_dec_and_test() ignores preempt_lazy_count
+while checking TIF_NEED_RESCHED_LAZY so we add this check, too.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/include/asm/preempt.h | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -89,6 +89,8 @@ static __always_inline bool __preempt_co
+ if (____preempt_count_dec_and_test())
+ return true;
+ #ifdef CONFIG_PREEMPT_LAZY
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+ #else
+ return false;
+@@ -101,8 +103,19 @@ static __always_inline bool __preempt_co
+ static __always_inline bool should_resched(int preempt_offset)
+ {
+ #ifdef CONFIG_PREEMPT_LAZY
+- return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset ||
+- test_thread_flag(TIF_NEED_RESCHED_LAZY));
++ u32 tmp;
++
++ tmp = raw_cpu_read_4(__preempt_count);
++ if (tmp == preempt_offset)
++ return true;
++
++ /* preempt count == 0 ? */
++ tmp &= ~PREEMPT_NEED_RESCHED;
++ if (tmp)
++ return false;
++ if (current_thread_info()->preempt_lazy_count)
++ return false;
++ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+ #else
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+ #endif