author      Paul Gortmaker <paul.gortmaker@windriver.com>   2013-08-25 09:50:02 -0400
committer   Paul Gortmaker <paul.gortmaker@windriver.com>   2013-08-25 09:50:02 -0400
commit      db8feb5429e13ebfe9d0d6a1124021e6015fe2be (patch)
tree        48668a727d37a6c52708608b4bf8eadb2ea16417
parent      be84710cfc392a11dca1a224a2dade0cea3d5ea2 (diff)
download    3.10-rt-patches-db8feb5429e13ebfe9d0d6a1124021e6015fe2be.tar.gz

patches-3.10.9-rt5.tar.xz (tag: v3.10.9-rt5)
md5sum: 847490ddf45287f2c8607188d4c54ee0  ../patches-3.10.9-rt5.tar.xz

Announce:
---------------
Dear RT folks!

I'm pleased to announce the v3.10.9-rt5 patch set.

Changes since v3.10.9-rt4
- swait fixes from Steven. They fix the issues with CONFIG_RCU_NOCB_CPU
  where the system suddenly froze and RCU wasn't doing its job anymore.
- hwlat improvements by Steven.

Known issues:

- SLAB support not working

- The cpsw network driver shows some issues.

- bcache does not compile.

- set_affinity callbacks result in a splat due to sleeping while atomic

- an ancient race (since we got sleeping spinlocks) where the
  TASK_TRACED state is temporarily replaced while waiting on a rw
  lock and the task can't be traced.

The delta patch against v3.10.9-rt4 is appended below and can be found
here:
   https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/incr/patch-3.10.9-rt4-rt5.patch.xz

The RT patch against 3.10.9 can be found here:
   https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patch-3.10.9-rt5.patch.xz

The split quilt queue is available at:
   https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.9-rt5.tar.xz

Sebastian

[delta diff snipped]
---------------

http://marc.info/?l=linux-rt-users&m=137719569422440&w=2

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--  patches/arm-disable-highmem-on-rt.patch | 2
-rw-r--r--  patches/arm-enable-highmem-for-rt.patch | 2
-rw-r--r--  patches/arm-preempt-lazy-support.patch | 2
-rw-r--r--  patches/cond-resched-lock-rt-tweak.patch | 2
-rw-r--r--  patches/cond-resched-softirq-rt.patch | 2
-rw-r--r--  patches/cpu-rt-rework-cpu-down.patch | 4
-rw-r--r--  patches/ftrace-migrate-disable-tracing.patch | 6
-rw-r--r--  patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch | 152
-rw-r--r--  patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch | 132
-rw-r--r--  patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch | 188
-rw-r--r--  patches/hwlat-detector-Use-trace_clock_local-if-available.patch | 98
-rw-r--r--  patches/latency-hist.patch | 2
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/mips-disable-highmem-on-rt.patch | 2
-rw-r--r--  patches/mm-prepare-pf-disable-discoupling.patch | 4
-rw-r--r--  patches/mm-remove-preempt-count-from-pf.patch | 4
-rw-r--r--  patches/mm-rt-kmap-atomic-scheduling.patch | 6
-rw-r--r--  patches/mm-shrink-the-page-frame-to-rt-size.patch | 2
-rw-r--r--  patches/oleg-signal-rt-fix.patch | 2
-rw-r--r--  patches/peter_zijlstra-frob-migrate_disable-2.patch | 6
-rw-r--r--  patches/peter_zijlstra-frob-pagefault_disable.patch | 4
-rw-r--r--  patches/peterz-raw_pagefault_disable.patch | 4
-rw-r--r--  patches/posix-timers-thread-posix-cpu-timers-on-rt.patch | 2
-rw-r--r--  patches/powerpc-52xx-provide-a-default-in-mpc52xx_irqhost_ma.patch | 9
-rw-r--r--  patches/powerpc-preempt-lazy-support.patch | 28
-rw-r--r--  patches/preempt-lazy-support.patch | 24
-rw-r--r--  patches/rcu-swait-Fix-RCU-conversion-of-wake_up_all-to-swait.patch | 34
-rw-r--r--  patches/sched-better-debug-output-for-might-sleep.patch | 2
-rw-r--r--  patches/sched-delay-put-task.patch | 6
-rw-r--r--  patches/sched-migrate-disable.patch | 6
-rw-r--r--  patches/sched-mmdrop-delayed.patch | 2
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 4
-rw-r--r--  patches/sched-teach-migrate_disable-about-atomic-contexts.patch | 2
-rw-r--r--  patches/series | 6
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 2
-rw-r--r--  patches/softirq-local-lock.patch | 2
-rw-r--r--  patches/softirq-make-serving-softirqs-a-task-flag.patch | 2
-rw-r--r--  patches/softirq-split-locks.patch | 2
-rw-r--r--  patches/swait-Add-memory-barrier-before-checking-list-empty.patch | 54
-rw-r--r--  patches/swait-Add-smp_mb-after-setting-h-list.patch | 65
-rw-r--r--  patches/vtime-split-lock-and-seqcount.patch | 2
-rw-r--r--  patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch | 61
42 files changed, 833 insertions, 110 deletions
diff --git a/patches/arm-disable-highmem-on-rt.patch b/patches/arm-disable-highmem-on-rt.patch
index cbfd519..fff3eb0 100644
--- a/patches/arm-disable-highmem-on-rt.patch
+++ b/patches/arm-disable-highmem-on-rt.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -1704,7 +1704,7 @@ config HAVE_ARCH_PFN_VALID
+@@ -1705,7 +1705,7 @@ config HAVE_ARCH_PFN_VALID
config HIGHMEM
bool "High Memory Support"
diff --git a/patches/arm-enable-highmem-for-rt.patch b/patches/arm-enable-highmem-for-rt.patch
index ed94171..f481778 100644
--- a/patches/arm-enable-highmem-for-rt.patch
+++ b/patches/arm-enable-highmem-for-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -1704,7 +1704,7 @@ config HAVE_ARCH_PFN_VALID
+@@ -1705,7 +1705,7 @@ config HAVE_ARCH_PFN_VALID
config HIGHMEM
bool "High Memory Support"
diff --git a/patches/arm-preempt-lazy-support.patch b/patches/arm-preempt-lazy-support.patch
index 864b36e..df191b1 100644
--- a/patches/arm-preempt-lazy-support.patch
+++ b/patches/arm-preempt-lazy-support.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -47,6 +47,7 @@ config ARM
+@@ -48,6 +48,7 @@ config ARM
select HAVE_MEMBLOCK
select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
select HAVE_PERF_EVENTS
diff --git a/patches/cond-resched-lock-rt-tweak.patch b/patches/cond-resched-lock-rt-tweak.patch
index bd6a153..62604a1 100644
--- a/patches/cond-resched-lock-rt-tweak.patch
+++ b/patches/cond-resched-lock-rt-tweak.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2486,7 +2486,7 @@ extern int _cond_resched(void);
+@@ -2487,7 +2487,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index c2b8f90..439ce2f 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -10,7 +10,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2497,12 +2497,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -2498,12 +2498,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index efaf1a1..05899ac 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1811,6 +1811,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -1812,6 +1812,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -1823,6 +1827,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -1824,6 +1828,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index 01a01db..3fa66a9 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define FTRACE_MAX_EVENT \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -399,7 +399,7 @@ int __trace_puts(unsigned long ip, const
+@@ -428,7 +428,7 @@ int __trace_puts(unsigned long ip, const
local_save_flags(irq_flags);
buffer = global_trace.trace_buffer.buffer;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
irq_flags, preempt_count());
if (!event)
return 0;
-@@ -1449,6 +1449,8 @@ tracing_generic_entry_update(struct trac
+@@ -1493,6 +1493,8 @@ tracing_generic_entry_update(struct trac
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -2352,9 +2354,10 @@ static void print_lat_help_header(struct
+@@ -2396,9 +2398,10 @@ static void print_lat_help_header(struct
seq_puts(m, "# | / _----=> need-resched \n");
seq_puts(m, "# || / _---=> hardirq/softirq \n");
seq_puts(m, "# ||| / _--=> preempt-depth \n");
diff --git a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
new file mode 100644
index 0000000..5dbd27d
--- /dev/null
+++ b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -0,0 +1,152 @@
+From 76666dbbdd40e963e7df84c123fc9aea4a2bcc69 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 21 Aug 2013 17:48:46 +0200
+Subject: [PATCH] genirq: do not invoke the affinity callback via a workqueue
+
+Joe Korty reported that __irq_set_affinity_locked() schedules a
+workqueue while holding a raw lock, which results in a might_sleep()
+warning.
+This patch moves the invocation into process context so that we only
+wake up a process while holding the lock.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 1 +
+ kernel/irq/manage.c | 79 +++++++++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 77 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
+index 11bdb1e..838d4bd 100644
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -263,6 +263,7 @@ struct irq_affinity_notify {
+ unsigned int irq;
+ struct kref kref;
+ struct work_struct work;
++ struct list_head list;
+ void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+ void (*release)(struct kref *ref);
+ };
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index 0e34a98..5999a67 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -164,6 +164,62 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ return ret;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void _irq_affinity_notify(struct irq_affinity_notify *notify);
++static struct task_struct *set_affinity_helper;
++static LIST_HEAD(affinity_list);
++static DEFINE_RAW_SPINLOCK(affinity_list_lock);
++
++static int set_affinity_thread(void *unused)
++{
++ while (1) {
++ struct irq_affinity_notify *notify;
++ int empty;
++
++ set_current_state(TASK_INTERRUPTIBLE);
++
++ raw_spin_lock_irq(&affinity_list_lock);
++ empty = list_empty(&affinity_list);
++ raw_spin_unlock_irq(&affinity_list_lock);
++
++ if (empty)
++ schedule();
++ if (kthread_should_stop())
++ break;
++ set_current_state(TASK_RUNNING);
++try_next:
++ notify = NULL;
++
++ raw_spin_lock_irq(&affinity_list_lock);
++ if (!list_empty(&affinity_list)) {
++ notify = list_first_entry(&affinity_list,
++ struct irq_affinity_notify, list);
++ list_del(&notify->list);
++ }
++ raw_spin_unlock_irq(&affinity_list_lock);
++
++ if (!notify)
++ continue;
++ _irq_affinity_notify(notify);
++ goto try_next;
++ }
++ return 0;
++}
++
++static void init_helper_thread(void)
++{
++ if (set_affinity_helper)
++ return;
++ set_affinity_helper = kthread_run(set_affinity_thread, NULL,
++ "affinity-cb");
++ WARN_ON(IS_ERR(set_affinity_helper));
++}
++#else
++
++static inline void init_helper_thread(void) { }
++
++#endif
++
+ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+ {
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+@@ -182,7 +238,17 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+
+ if (desc->affinity_notify) {
+ kref_get(&desc->affinity_notify->kref);
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++ raw_spin_lock(&affinity_list_lock);
++ if (list_empty(&desc->affinity_notify->list))
++ list_add_tail(&affinity_list,
++ &desc->affinity_notify->list);
++ raw_spin_unlock(&affinity_list_lock);
++ wake_up_process(set_affinity_helper);
++#else
+ schedule_work(&desc->affinity_notify->work);
++#endif
+ }
+ irqd_set(data, IRQD_AFFINITY_SET);
+
+@@ -223,10 +289,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+ }
+ EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
+-static void irq_affinity_notify(struct work_struct *work)
++static void _irq_affinity_notify(struct irq_affinity_notify *notify)
+ {
+- struct irq_affinity_notify *notify =
+- container_of(work, struct irq_affinity_notify, work);
+ struct irq_desc *desc = irq_to_desc(notify->irq);
+ cpumask_var_t cpumask;
+ unsigned long flags;
+@@ -248,6 +312,13 @@ static void irq_affinity_notify(struct work_struct *work)
+ kref_put(&notify->kref, notify->release);
+ }
+
++static void irq_affinity_notify(struct work_struct *work)
++{
++ struct irq_affinity_notify *notify =
++ container_of(work, struct irq_affinity_notify, work);
++ _irq_affinity_notify(notify);
++}
++
+ /**
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes
+ * @irq: Interrupt for which to enable/disable notification
+@@ -277,6 +348,8 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+ notify->irq = irq;
+ kref_init(&notify->kref);
+ INIT_WORK(&notify->work, irq_affinity_notify);
++ INIT_LIST_HEAD(&notify->list);
++ init_helper_thread();
+ }
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+--
+1.8.4.rc3
+
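The pattern this patch introduces is more general than IRQ affinity
notification: when a callback must be kicked from under a raw spinlock,
the lock-holding side is limited to a list append plus
wake_up_process(), and the sleepable work runs in a dedicated kthread.
Below is a condensed sketch of that shape; struct deferred_item,
pending_list, helper_fn and "deferred-helper" are invented names for
illustration, not code from the patch.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct deferred_item {
	struct list_head list;	/* must be INIT_LIST_HEAD()ed by the owner */
	void (*func)(struct deferred_item *item);
};

static LIST_HEAD(pending_list);
static DEFINE_RAW_SPINLOCK(pending_lock);
static struct task_struct *helper_task;

/* Runs in process context; item->func() may sleep here. */
static int helper_fn(void *unused)
{
	while (!kthread_should_stop()) {
		struct deferred_item *item = NULL;

		/* Arm the sleep before checking, to avoid a lost wakeup. */
		set_current_state(TASK_INTERRUPTIBLE);

		raw_spin_lock_irq(&pending_lock);
		if (!list_empty(&pending_list)) {
			item = list_first_entry(&pending_list,
						struct deferred_item, list);
			list_del_init(&item->list);
		}
		raw_spin_unlock_irq(&pending_lock);

		if (!item) {
			schedule();
			continue;
		}

		__set_current_state(TASK_RUNNING);
		item->func(item);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

/*
 * Safe to call with a raw spinlock held and interrupts off, as in the
 * patch (the caller there holds desc->lock): nothing here sleeps. The
 * list_empty() test keeps an already-queued item from being queued
 * twice, matching the patch's behaviour.
 */
static void defer_item(struct deferred_item *item)
{
	raw_spin_lock(&pending_lock);
	if (list_empty(&item->list))
		list_add_tail(&item->list, &pending_list);
	raw_spin_unlock(&pending_lock);
	wake_up_process(helper_task);
}

static int __init helper_setup(void)
{
	helper_task = kthread_run(helper_fn, NULL, "deferred-helper");
	return IS_ERR(helper_task) ? PTR_ERR(helper_task) : 0;
}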
diff --git a/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch b/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
new file mode 100644
index 0000000..253b4dd
--- /dev/null
+++ b/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
@@ -0,0 +1,132 @@
+From 7a036d4dfcf3f2d3247ff7f739284f4b5056bdcb Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Mon, 19 Aug 2013 17:33:25 -0400
+Subject: [PATCH 1/3] hwlat-detector: Update hwlat_detector to add outer loop
+ detection
+
+The hwlat_detector reads two timestamps in a row, then reports any
+gap between those calls. The problem is, it misses everything between
+the second reading of the time stamp and the first reading of the time
+stamp in the next loop. That's where most of the time is spent, which
+means it is likely to miss all hardware latencies. This
+defeats the purpose.
+
+By also testing the first time stamp against the second time stamp of
+the previous loop (the outer loop), we are more likely to find a latency.
+
+Setting the threshold to 1, here's what the report now looks like:
+
+1347415723.0232202770 0 2
+1347415725.0234202822 0 2
+1347415727.0236202875 0 2
+1347415729.0238202928 0 2
+1347415731.0240202980 0 2
+1347415734.0243203061 0 2
+1347415736.0245203113 0 2
+1347415738.0247203166 2 0
+1347415740.0249203219 0 3
+1347415742.0251203272 0 3
+1347415743.0252203299 0 3
+1347415745.0254203351 0 2
+1347415747.0256203404 0 2
+1347415749.0258203457 0 2
+1347415751.0260203510 0 2
+1347415754.0263203589 0 2
+1347415756.0265203642 0 2
+1347415758.0267203695 0 2
+1347415760.0269203748 0 2
+1347415762.0271203801 0 2
+1347415764.0273203853 2 0
+
+There's some hardware latency that takes 2 microseconds to run.
+
+Signed-off-by: Steven Rostedt <srostedt@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/misc/hwlat_detector.c | 32 ++++++++++++++++++++++++++------
+ 1 file changed, 26 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c
+index b7b7c90..f93b8ef 100644
+--- a/drivers/misc/hwlat_detector.c
++++ b/drivers/misc/hwlat_detector.c
+@@ -143,6 +143,7 @@ static void detector_exit(void);
+ struct sample {
+ u64 seqnum; /* unique sequence */
+ u64 duration; /* ktime delta */
++ u64 outer_duration; /* ktime delta (outer loop) */
+ struct timespec timestamp; /* wall time */
+ unsigned long lost;
+ };
+@@ -219,11 +220,13 @@ static struct sample *buffer_get_sample(struct sample *sample)
+ */
+ static int get_sample(void *unused)
+ {
+- ktime_t start, t1, t2;
++ ktime_t start, t1, t2, last_t2;
+ s64 diff, total = 0;
+ u64 sample = 0;
++ u64 outer_sample = 0;
+ int ret = 1;
+
++ last_t2.tv64 = 0;
+ start = ktime_get(); /* start timestamp */
+
+ do {
+@@ -231,7 +234,22 @@ static int get_sample(void *unused)
+ t1 = ktime_get(); /* we'll look for a discontinuity */
+ t2 = ktime_get();
+
++ if (last_t2.tv64) {
++ /* Check the delta from the outer loop (t2 to next t1) */
++ diff = ktime_to_us(ktime_sub(t1, last_t2));
++ /* This shouldn't happen */
++ if (diff < 0) {
++ printk(KERN_ERR BANNER "time running backwards\n");
++ goto out;
++ }
++ if (diff > outer_sample)
++ outer_sample = diff;
++ }
++ last_t2 = t2;
++
+ total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
++
++ /* This checks the inner loop (t1 to t2) */
+ diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
+
+ /* This shouldn't happen */
+@@ -246,12 +264,13 @@ static int get_sample(void *unused)
+ } while (total <= data.sample_width);
+
+ /* If we exceed the threshold value, we have found a hardware latency */
+- if (sample > data.threshold) {
++ if (sample > data.threshold || outer_sample > data.threshold) {
+ struct sample s;
+
+ data.count++;
+ s.seqnum = data.count;
+ s.duration = sample;
++ s.outer_duration = outer_sample;
+ s.timestamp = CURRENT_TIME;
+ __buffer_add_sample(&s);
+
+@@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
+ }
+ }
+
+- len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
+- sample->timestamp.tv_sec,
+- sample->timestamp.tv_nsec,
+- sample->duration);
++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
++ sample->timestamp.tv_sec,
++ sample->timestamp.tv_nsec,
++ sample->duration,
++ sample->outer_duration);
+
+
+ /* handling partial reads is more trouble than it's worth */
+--
+1.8.4.rc3
+
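For intuition, the inner/outer sampling idea can be reproduced in user
space. The sketch below is an illustrative analogue, not the driver's
code: clock_gettime() stands in for ktime_get(), and the sample width
is arbitrary. The key point it demonstrates is that the gap between t2
of one iteration and t1 of the next (the outer loop) is where a latency
would otherwise go unnoticed.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
	const int64_t width_us = 500000;	/* sample for 0.5 s */
	int64_t start = now_us(), last_t2 = 0;
	int64_t sample = 0, outer_sample = 0;

	do {
		int64_t t1 = now_us();	/* look for a discontinuity */
		int64_t t2 = now_us();

		if (last_t2) {
			/* outer loop: previous t2 to this t1 */
			int64_t diff = t1 - last_t2;

			if (diff > outer_sample)
				outer_sample = diff;
		}
		last_t2 = t2;

		/* inner loop: t1 to t2 */
		if (t2 - t1 > sample)
			sample = t2 - t1;
	} while (now_us() - start <= width_us);

	printf("inner max: %lld us, outer max: %lld us\n",
	       (long long)sample, (long long)outer_sample);
	return 0;
}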
diff --git a/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch b/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch
new file mode 100644
index 0000000..f41beef
--- /dev/null
+++ b/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch
@@ -0,0 +1,188 @@
+From 42b3963c5d3dcdb54226fc6bbb6b5fbcf3f2ddee Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Mon, 19 Aug 2013 17:33:27 -0400
+Subject: [PATCH 3/3] hwlat-detector: Use thread instead of stop machine
+
+There's no reason to use stop machine to search for hardware latency.
+Simply disabling interrupts while running the loop is enough: anything
+that still slips in wasn't maskable by disabling interrupts, which is
+exactly the check stop machine was providing.
+
+Instead of using stop machine, just have the thread disable interrupts
+while it checks for hardware latency.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/misc/hwlat_detector.c | 59 ++++++++++++++++++-------------------------
+ 1 file changed, 25 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c
+index d80a857..0bfa40d 100644
+--- a/drivers/misc/hwlat_detector.c
++++ b/drivers/misc/hwlat_detector.c
+@@ -41,7 +41,6 @@
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/ring_buffer.h>
+-#include <linux/stop_machine.h>
+ #include <linux/time.h>
+ #include <linux/hrtimer.h>
+ #include <linux/kthread.h>
+@@ -107,7 +106,6 @@ struct data; /* Global state */
+ /* Sampling functions */
+ static int __buffer_add_sample(struct sample *sample);
+ static struct sample *buffer_get_sample(struct sample *sample);
+-static int get_sample(void *unused);
+
+ /* Threading and state */
+ static int kthread_fn(void *unused);
+@@ -149,7 +147,7 @@ struct sample {
+ unsigned long lost;
+ };
+
+-/* keep the global state somewhere. Mostly used under stop_machine. */
++/* keep the global state somewhere. */
+ static struct data {
+
+ struct mutex lock; /* protect changes */
+@@ -172,7 +170,7 @@ static struct data {
+ * @sample: The new latency sample value
+ *
+ * This receives a new latency sample and records it in a global ring buffer.
+- * No additional locking is used in this case - suited for stop_machine use.
++ * No additional locking is used in this case.
+ */
+ static int __buffer_add_sample(struct sample *sample)
+ {
+@@ -229,18 +227,17 @@ static struct sample *buffer_get_sample(struct sample *sample)
+ #endif
+ /**
+ * get_sample - sample the CPU TSC and look for likely hardware latencies
+- * @unused: This is not used but is a part of the stop_machine API
+ *
+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
+- * hardware-induced latency. Called under stop_machine, with data.lock held.
++ * hardware-induced latency. Called with interrupts disabled and with data.lock held.
+ */
+-static int get_sample(void *unused)
++static int get_sample(void)
+ {
+ time_type start, t1, t2, last_t2;
+ s64 diff, total = 0;
+ u64 sample = 0;
+ u64 outer_sample = 0;
+- int ret = 1;
++ int ret = -1;
+
+ init_time(last_t2, 0);
+ start = time_get(); /* start timestamp */
+@@ -279,10 +276,14 @@ static int get_sample(void *unused)
+
+ } while (total <= data.sample_width);
+
++ ret = 0;
++
+ /* If we exceed the threshold value, we have found a hardware latency */
+ if (sample > data.threshold || outer_sample > data.threshold) {
+ struct sample s;
+
++ ret = 1;
++
+ data.count++;
+ s.seqnum = data.count;
+ s.duration = sample;
+@@ -295,7 +296,6 @@ static int get_sample(void *unused)
+ data.max_sample = sample;
+ }
+
+- ret = 0;
+ out:
+ return ret;
+ }
+@@ -305,32 +305,30 @@ static int get_sample(void *unused)
+ * @unused: A required part of the kthread API.
+ *
+ * Used to periodically sample the CPU TSC via a call to get_sample. We
+- * use stop_machine, whith does (intentionally) introduce latency since we
++ * disable interrupts, which does (intentionally) introduce latency since we
+ * need to ensure nothing else might be running (and thus pre-empting).
+ * Obviously this should never be used in production environments.
+ *
+- * stop_machine will schedule us typically only on CPU0 which is fine for
+- * almost every real-world hardware latency situation - but we might later
+- * generalize this if we find there are any actualy systems with alternate
+- * SMI delivery or other non CPU0 hardware latencies.
++ * Currently this runs on whichever CPU it was scheduled on, but most
++ * real-world hardware latency situations occur across several CPUs,
++ * so we might later generalize this if we find there are any actual
++ * systems with alternate SMI delivery or other hardware latencies.
+ */
+ static int kthread_fn(void *unused)
+ {
+- int err = 0;
+- u64 interval = 0;
++ int ret;
++ u64 interval;
+
+ while (!kthread_should_stop()) {
+
+ mutex_lock(&data.lock);
+
+- err = stop_machine(get_sample, unused, 0);
+- if (err) {
+- /* Houston, we have a problem */
+- mutex_unlock(&data.lock);
+- goto err_out;
+- }
++ local_irq_disable();
++ ret = get_sample();
++ local_irq_enable();
+
+- wake_up(&data.wq); /* wake up reader(s) */
++ if (ret > 0)
++ wake_up(&data.wq); /* wake up reader(s) */
+
+ interval = data.sample_window - data.sample_width;
+ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
+@@ -338,15 +336,10 @@ static int kthread_fn(void *unused)
+ mutex_unlock(&data.lock);
+
+ if (msleep_interruptible(interval))
+- goto out;
++ break;
+ }
+- goto out;
+-err_out:
+- printk(KERN_ERR BANNER "could not call stop_machine, disabling\n");
+- enabled = 0;
+-out:
+- return err;
+
++ return 0;
+ }
+
+ /**
+@@ -442,8 +435,7 @@ static int init_stats(void)
+ * This function provides a generic read implementation for the global state
+ * "data" structure debugfs filesystem entries. It would be nice to use
+ * simple_attr_read directly, but we need to make sure that the data.lock
+- * spinlock is held during the actual read (even though we likely won't ever
+- * actually race here as the updater runs under a stop_machine context).
++ * is held during the actual read.
+ */
+ static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos, const u64 *entry)
+@@ -478,8 +470,7 @@ static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
+ * This function provides a generic write implementation for the global state
+ * "data" structure debugfs filesystem entries. It would be nice to use
+ * simple_attr_write directly, but we need to make sure that the data.lock
+- * spinlock is held during the actual write (even though we likely won't ever
+- * actually race here as the updater runs under a stop_machine context).
++ * is held during the actual write.
+ */
+ static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, u64 *entry)
+--
+1.8.4.rc3
+
diff --git a/patches/hwlat-detector-Use-trace_clock_local-if-available.patch b/patches/hwlat-detector-Use-trace_clock_local-if-available.patch
new file mode 100644
index 0000000..9d7d327
--- /dev/null
+++ b/patches/hwlat-detector-Use-trace_clock_local-if-available.patch
@@ -0,0 +1,98 @@
+From 4aaca90c0255caee9a55371afaecb32365123762 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Mon, 19 Aug 2013 17:33:26 -0400
+Subject: [PATCH 2/3] hwlat-detector: Use trace_clock_local if available
+
+As ktime_get() calls into the timing code, which does a read_seq(), it
+may be affected by other CPUs that touch that lock. To remove this
+dependency, use trace_clock_local(), which is already exported
+for module use. If CONFIG_TRACING is enabled, use that as the clock,
+otherwise use ktime_get().
+
+Signed-off-by: Steven Rostedt <srostedt@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/misc/hwlat_detector.c | 34 +++++++++++++++++++++++++---------
+ 1 file changed, 25 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c
+index f93b8ef..d80a857 100644
+--- a/drivers/misc/hwlat_detector.c
++++ b/drivers/misc/hwlat_detector.c
+@@ -51,6 +51,7 @@
+ #include <linux/version.h>
+ #include <linux/delay.h>
+ #include <linux/slab.h>
++#include <linux/trace_clock.h>
+
+ #define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
+ #define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
+@@ -211,6 +212,21 @@ static struct sample *buffer_get_sample(struct sample *sample)
+ return sample;
+ }
+
++#ifndef CONFIG_TRACING
++#define time_type ktime_t
++#define time_get() ktime_get()
++#define time_to_us(x) ktime_to_us(x)
++#define time_sub(a, b) ktime_sub(a, b)
++#define init_time(a, b) (a).tv64 = b
++#define time_u64(a) (a).tv64
++#else
++#define time_type u64
++#define time_get() trace_clock_local()
++#define time_to_us(x) ((x) / 1000)
++#define time_sub(a, b) ((a) - (b))
++#define init_time(a, b) a = b
++#define time_u64(a) a
++#endif
+ /**
+ * get_sample - sample the CPU TSC and look for likely hardware latencies
+ * @unused: This is not used but is a part of the stop_machine API
+@@ -220,23 +236,23 @@ static struct sample *buffer_get_sample(struct sample *sample)
+ */
+ static int get_sample(void *unused)
+ {
+- ktime_t start, t1, t2, last_t2;
++ time_type start, t1, t2, last_t2;
+ s64 diff, total = 0;
+ u64 sample = 0;
+ u64 outer_sample = 0;
+ int ret = 1;
+
+- last_t2.tv64 = 0;
+- start = ktime_get(); /* start timestamp */
++ init_time(last_t2, 0);
++ start = time_get(); /* start timestamp */
+
+ do {
+
+- t1 = ktime_get(); /* we'll look for a discontinuity */
+- t2 = ktime_get();
++ t1 = time_get(); /* we'll look for a discontinuity */
++ t2 = time_get();
+
+- if (last_t2.tv64) {
++ if (time_u64(last_t2)) {
+ /* Check the delta from the outer loop (t2 to next t1) */
+- diff = ktime_to_us(ktime_sub(t1, last_t2));
++ diff = time_to_us(time_sub(t1, last_t2));
+ /* This shouldn't happen */
+ if (diff < 0) {
+ printk(KERN_ERR BANNER "time running backwards\n");
+@@ -247,10 +263,10 @@ static int get_sample(void *unused)
+ }
+ last_t2 = t2;
+
+- total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
++ total = time_to_us(time_sub(t2, start)); /* sample width */
+
+ /* This checks the inner loop (t1 to t2) */
+- diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
++ diff = time_to_us(time_sub(t2, t1)); /* current diff */
+
+ /* This shouldn't happen */
+ if (diff < 0) {
+--
+1.8.4.rc3
+
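The macro shim is the portable part of this patch: the sampling loop is
written once against time_type/time_get()/time_to_us(), and the build
picks the clock. Here is a user-space stand-in for the same trick; both
back ends are invented for illustration (the kernel's are ktime_get()
and trace_clock_local()), and time_sub()+time_to_us() are collapsed
into a single time_sub_us() for brevity.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#ifdef USE_STRUCT_CLOCK
/* struct-valued clock, standing in for ktime_t/ktime_get() */
static inline struct timespec ts_now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts;
}
#define time_type		struct timespec
#define time_get()		ts_now()
#define time_to_us(a)		((int64_t)(a).tv_sec * 1000000 + (a).tv_nsec / 1000)
#define time_sub_us(a, b)	(time_to_us(a) - time_to_us(b))
#else
/* flat 64-bit nanosecond clock, standing in for trace_clock_local() */
static inline uint64_t ns_now(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}
#define time_type		uint64_t
#define time_get()		ns_now()
#define time_to_us(a)		((int64_t)((a) / 1000))
#define time_sub_us(a, b)	((int64_t)((a) - (b)) / 1000)
#endif

int main(void)
{
	time_type t1 = time_get();
	time_type t2 = time_get();

	/* the code below the macros never changes when the clock does */
	printf("gap: %lld us\n", (long long)time_sub_us(t2, t1));
	return 0;
}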
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index 2cc56fc..26a0b51 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -228,7 +228,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *start_site;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1409,6 +1409,12 @@ struct task_struct {
+@@ -1410,6 +1410,12 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 0e1394d..e5fcadf 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -12,4 +12,4 @@ Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt4
++-rt5
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch
index 4511ade..8f11f93 100644
--- a/patches/mips-disable-highmem-on-rt.patch
+++ b/patches/mips-disable-highmem-on-rt.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2114,7 +2114,7 @@ config CPU_R4400_WORKAROUNDS
+@@ -2115,7 +2115,7 @@ config CPU_R4400_WORKAROUNDS
#
config HIGHMEM
bool "High Memory Support"
diff --git a/patches/mm-prepare-pf-disable-discoupling.patch b/patches/mm-prepare-pf-disable-discoupling.patch
index f4c98b9..9bf7e52 100644
--- a/patches/mm-prepare-pf-disable-discoupling.patch
+++ b/patches/mm-prepare-pf-disable-discoupling.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1260,6 +1260,7 @@ struct task_struct {
+@@ -1261,6 +1261,7 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
p->curr_chain_key = 0;
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -3751,6 +3751,35 @@ unlock:
+@@ -3757,6 +3757,35 @@ unlock:
return 0;
}
diff --git a/patches/mm-remove-preempt-count-from-pf.patch b/patches/mm-remove-preempt-count-from-pf.patch
index b7beaf7..93bf166 100644
--- a/patches/mm-remove-preempt-count-from-pf.patch
+++ b/patches/mm-remove-preempt-count-from-pf.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -3754,7 +3754,6 @@ unlock:
+@@ -3760,7 +3760,6 @@ unlock:
#ifdef CONFIG_PREEMPT_RT_FULL
void pagefault_disable(void)
{
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
current->pagefault_disabled++;
/*
* make sure to have issued the store before a pagefault
-@@ -3772,12 +3771,6 @@ void pagefault_enable(void)
+@@ -3778,12 +3777,6 @@ void pagefault_enable(void)
*/
barrier();
current->pagefault_disabled--;
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 5c467f0..e1272e6 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -215,7 +215,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
-@@ -1449,6 +1450,12 @@ struct task_struct {
+@@ -1450,6 +1451,12 @@ struct task_struct {
struct rcu_head put_rcu;
int softirq_nestcnt;
#endif
@@ -256,7 +256,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
{
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -3754,6 +3754,7 @@ unlock:
+@@ -3760,6 +3760,7 @@ unlock:
#ifdef CONFIG_PREEMPT_RT_FULL
void pagefault_disable(void)
{
@@ -264,7 +264,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
current->pagefault_disabled++;
/*
* make sure to have issued the store before a pagefault
-@@ -3771,6 +3772,7 @@ void pagefault_enable(void)
+@@ -3777,6 +3778,7 @@ void pagefault_enable(void)
*/
barrier();
current->pagefault_disabled--;
diff --git a/patches/mm-shrink-the-page-frame-to-rt-size.patch b/patches/mm-shrink-the-page-frame-to-rt-size.patch
index 7654f94..a95cbf3 100644
--- a/patches/mm-shrink-the-page-frame-to-rt-size.patch
+++ b/patches/mm-shrink-the-page-frame-to-rt-size.patch
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct page *first_page; /* Compound tail pages */
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -4322,3 +4322,35 @@ void copy_user_huge_page(struct page *ds
+@@ -4328,3 +4328,35 @@ void copy_user_huge_page(struct page *ds
}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 271c516..a5ccd44 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1220,6 +1220,10 @@ struct task_struct {
+@@ -1221,6 +1221,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
diff --git a/patches/peter_zijlstra-frob-migrate_disable-2.patch b/patches/peter_zijlstra-frob-migrate_disable-2.patch
index 157a4fa..17f8071 100644
--- a/patches/peter_zijlstra-frob-migrate_disable-2.patch
+++ b/patches/peter_zijlstra-frob-migrate_disable-2.patch
@@ -63,7 +63,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
#ifdef CONFIG_PREEMPT_NOTIFIERS
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1074,7 +1074,9 @@ struct task_struct {
+@@ -1075,7 +1075,9 @@ struct task_struct {
#endif
unsigned int policy;
@@ -73,7 +73,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -2617,11 +2619,22 @@ static inline void set_task_cpu(struct t
+@@ -2618,11 +2620,22 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
@@ -152,7 +152,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
* Move (not current) task off this cpu, onto dest cpu. We're doing
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1450,7 +1450,7 @@ tracing_generic_entry_update(struct trac
+@@ -1494,7 +1494,7 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
diff --git a/patches/peter_zijlstra-frob-pagefault_disable.patch b/patches/peter_zijlstra-frob-pagefault_disable.patch
index d1070a1..355a340 100644
--- a/patches/peter_zijlstra-frob-pagefault_disable.patch
+++ b/patches/peter_zijlstra-frob-pagefault_disable.patch
@@ -288,7 +288,7 @@ Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org
#include <asm/processor.h>
-@@ -1260,7 +1261,9 @@ struct task_struct {
+@@ -1261,7 +1262,9 @@ struct task_struct {
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
@@ -298,7 +298,7 @@ Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
-@@ -1443,6 +1446,17 @@ static inline void set_numabalancing_sta
+@@ -1444,6 +1447,17 @@ static inline void set_numabalancing_sta
}
#endif
diff --git a/patches/peterz-raw_pagefault_disable.patch b/patches/peterz-raw_pagefault_disable.patch
index 0bda2da..ea4524d 100644
--- a/patches/peterz-raw_pagefault_disable.patch
+++ b/patches/peterz-raw_pagefault_disable.patch
@@ -129,7 +129,7 @@ Link: http://lkml.kernel.org/n/tip-31keae8mkjiv8esq4rl76cib@git.kernel.org
})
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -3751,6 +3751,7 @@ unlock:
+@@ -3757,6 +3757,7 @@ unlock:
return 0;
}
@@ -137,7 +137,7 @@ Link: http://lkml.kernel.org/n/tip-31keae8mkjiv8esq4rl76cib@git.kernel.org
void pagefault_disable(void)
{
inc_preempt_count();
-@@ -3779,6 +3780,7 @@ void pagefault_enable(void)
+@@ -3785,6 +3786,7 @@ void pagefault_enable(void)
preempt_check_resched();
}
EXPORT_SYMBOL(pagefault_enable);
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index e6dc56a..77f29e9 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1185,6 +1185,9 @@ struct task_struct {
+@@ -1186,6 +1186,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
diff --git a/patches/powerpc-52xx-provide-a-default-in-mpc52xx_irqhost_ma.patch b/patches/powerpc-52xx-provide-a-default-in-mpc52xx_irqhost_ma.patch
index 532494c..47f2878 100644
--- a/patches/powerpc-52xx-provide-a-default-in-mpc52xx_irqhost_ma.patch
+++ b/patches/powerpc-52xx-provide-a-default-in-mpc52xx_irqhost_ma.patch
@@ -21,14 +21,12 @@ the number that is evaluated.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/powerpc/platforms/52xx/mpc52xx_pic.c | 3 ++-
+ arch/powerpc/platforms/52xx/mpc52xx_pic.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
-diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
-index b89ef65..b69221b 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
-@@ -373,8 +373,9 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
+@@ -373,8 +373,9 @@ static int mpc52xx_irqhost_map(struct ir
case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
case MPC52xx_IRQ_L1_CRIT:
@@ -39,6 +37,3 @@ index b89ef65..b69221b 100644
irq_set_chip(virq, &no_irq_chip);
return 0;
}
---
-1.8.4.rc1
-
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index 32f9a8d..78b43ce 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -71,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
-@@ -162,6 +162,7 @@ int main(void)
+@@ -165,6 +165,7 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
@@ -180,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
.align 7
_GLOBAL(_switch)
-@@ -637,7 +637,7 @@ _GLOBAL(ret_from_except_lite)
+@@ -653,7 +653,7 @@ _GLOBAL(ret_from_except_lite)
andi. r0,r4,_TIF_USER_WORK_MASK
beq restore
@@ -189,7 +189,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq 1f
bl .restore_interrupts
SCHEDULE_USER
-@@ -687,10 +687,18 @@ resume_kernel:
+@@ -703,10 +703,18 @@ resume_kernel:
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
@@ -209,7 +209,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
-@@ -707,7 +715,7 @@ resume_kernel:
+@@ -723,7 +731,7 @@ resume_kernel:
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
@@ -218,7 +218,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne 1b
/*
-@@ -874,7 +882,7 @@ restore_check_irq_replay:
+@@ -890,7 +898,7 @@ restore_check_irq_replay:
bl .__check_irq_replay
cmpwi cr0,r3,0
beq restore_no_replay
@@ -227,7 +227,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We need to re-emit an interrupt. We do so by re-using our
* existing exception frame. We first change the trap value,
-@@ -916,7 +924,7 @@ restore_check_irq_replay:
+@@ -932,7 +940,7 @@ restore_check_irq_replay:
b .ret_from_except
#endif /* CONFIG_PPC_DOORBELL */
1: b .ret_from_except /* What else to do here ? */
@@ -236,7 +236,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unrecov_restore:
addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
-@@ -928,7 +936,7 @@ unrecov_restore:
+@@ -944,7 +952,7 @@ unrecov_restore:
* called with the MMU off.
*
* In addition, we need to be in 32b mode, at least for now.
@@ -245,7 +245,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Note: r3 is an input parameter to rtas, so don't trash it...
*/
_GLOBAL(enter_rtas)
-@@ -962,7 +970,7 @@ _GLOBAL(enter_rtas)
+@@ -978,7 +986,7 @@ _GLOBAL(enter_rtas)
li r0,0
mtcr r0
@@ -254,7 +254,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* There is no way it is acceptable to get here with interrupts enabled,
* check it with the asm equivalent of WARN_ON
*/
-@@ -970,7 +978,7 @@ _GLOBAL(enter_rtas)
+@@ -986,7 +994,7 @@ _GLOBAL(enter_rtas)
1: tdnei r0,0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Hard-disable interrupts */
mfmsr r6
rldicl r7,r6,48,1
-@@ -984,7 +992,7 @@ _GLOBAL(enter_rtas)
+@@ -1000,7 +1008,7 @@ _GLOBAL(enter_rtas)
std r1,PACAR1(r13)
std r6,PACASAVEDMSR(r13)
@@ -272,7 +272,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LOAD_REG_ADDR(r4,.rtas_return_loc)
clrldi r4,r4,2 /* convert to realmode address */
mtlr r4
-@@ -992,7 +1000,7 @@ _GLOBAL(enter_rtas)
+@@ -1008,7 +1016,7 @@ _GLOBAL(enter_rtas)
li r0,0
ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
andc r0,r6,r0
@@ -281,7 +281,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
li r9,1
rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
-@@ -1003,7 +1011,7 @@ _GLOBAL(enter_rtas)
+@@ -1019,7 +1027,7 @@ _GLOBAL(enter_rtas)
LOAD_REG_ADDR(r4, rtas)
ld r5,RTASENTRY(r4) /* get the rtas->entry value */
ld r4,RTASBASE(r4) /* get the rtas->base value */
@@ -290,7 +290,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mtspr SPRN_SRR0,r5
mtspr SPRN_SRR1,r6
rfid
-@@ -1021,9 +1029,9 @@ _STATIC(rtas_return_loc)
+@@ -1037,9 +1045,9 @@ _STATIC(rtas_return_loc)
mfmsr r6
li r0,MSR_RI
andc r6,r6,r0
@@ -302,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ld r1,PACAR1(r4) /* Restore our SP */
ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
-@@ -1121,7 +1129,7 @@ _GLOBAL(enter_prom)
+@@ -1137,7 +1145,7 @@ _GLOBAL(enter_prom)
REST_10GPRS(22, r1)
ld r4,_CCR(r1)
mtcr r4
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 896e80d..fcf683a 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define add_preempt_count_notrace(val) \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2454,6 +2454,52 @@ static inline int test_tsk_need_resched(
+@@ -2455,6 +2455,52 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -199,7 +199,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
-@@ -2485,11 +2531,6 @@ static inline int signal_pending_state(l
+@@ -2486,11 +2532,6 @@ static inline int signal_pending_state(l
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
@@ -364,7 +364,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -1991,7 +1991,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -1992,7 +1992,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -373,7 +373,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -2180,7 +2180,7 @@ static void __account_cfs_rq_runtime(str
+@@ -2181,7 +2181,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -382,7 +382,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -2765,7 +2765,7 @@ static void hrtick_start_fair(struct rq
+@@ -2766,7 +2766,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -391,7 +391,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
-@@ -3590,7 +3590,7 @@ static void check_preempt_wakeup(struct
+@@ -3591,7 +3591,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -400,7 +400,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -5795,7 +5795,7 @@ static void task_fork_fair(struct task_s
+@@ -5796,7 +5796,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -409,7 +409,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -5820,7 +5820,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -5821,7 +5821,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -450,7 +450,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1439,6 +1439,7 @@ tracing_generic_entry_update(struct trac
+@@ -1483,6 +1483,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -458,7 +458,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1448,7 +1449,8 @@ tracing_generic_entry_update(struct trac
+@@ -1492,7 +1493,8 @@ tracing_generic_entry_update(struct trac
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -468,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
}
-@@ -2349,15 +2351,17 @@ get_total_entries(struct trace_buffer *b
+@@ -2393,15 +2395,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
@@ -495,7 +495,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2381,13 +2385,16 @@ static void print_func_help_header(struc
+@@ -2425,13 +2429,16 @@ static void print_func_help_header(struc
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
print_event_info(buf, m);
diff --git a/patches/rcu-swait-Fix-RCU-conversion-of-wake_up_all-to-swait.patch b/patches/rcu-swait-Fix-RCU-conversion-of-wake_up_all-to-swait.patch
new file mode 100644
index 0000000..64ea078
--- /dev/null
+++ b/patches/rcu-swait-Fix-RCU-conversion-of-wake_up_all-to-swait.patch
@@ -0,0 +1,34 @@
+From 1c126aeea000d5acd67b3adb153daa43fbabb600 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Mon, 19 Aug 2013 11:35:31 -0400
+Subject: [PATCH] rcu/swait: Fix RCU conversion of wake_up_all() to
+ swait_wake()
+
+Reverting the rcu swait patches fixed the boot problem. Then when I was
+looking at the revert itself, this stood out like a sore thumb.
+
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) {
+- swait_wake(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
++ wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
+ }
+
+See the problem there?
+
+Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/rcutree_plugin.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/rcutree_plugin.h
++++ b/kernel/rcutree_plugin.h
+@@ -2036,7 +2036,7 @@ static int rcu_nocb_needs_gp(struct rcu_
+ */
+ static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
+ {
+- swait_wake(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
++ swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
+ }
+
+ /*
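The bug matters because the two calls have different fan-out:
swait_wake() wakes a single waiter, while wake_up_all() — and therefore
swait_wake_all() — must wake every waiter whose condition became true
(here, everyone waiting on the completed grace period). A hypothetical
sketch of the wake-N shape behind such wrappers follows; the types and
dequeue policy are invented for illustration and are not the
wait-simple implementation itself.

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct swaiter_sketch {
	struct task_struct *task;
	struct list_head node;
};

struct swait_head_sketch {
	raw_spinlock_t lock;
	struct list_head list;
};

/*
 * Wake up to @num waiters; a num of 0 effectively means wake-all,
 * since the decrement never hits zero before the list is drained.
 * A wake-one wrapper passes 1, a wake-all wrapper passes 0 — the
 * distinction swait_wake() vs. swait_wake_all() encodes.
 */
static unsigned int swait_wake_sketch(struct swait_head_sketch *h, int num)
{
	struct swaiter_sketch *w, *next;
	unsigned int woken = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry_safe(w, next, &h->list, node) {
		if (wake_up_state(w->task, TASK_NORMAL))
			woken++;
		list_del_init(&w->node);
		if (--num == 0)
			break;
	}
	raw_spin_unlock_irqrestore(&h->lock, flags);
	return woken;
}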
diff --git a/patches/sched-better-debug-output-for-might-sleep.patch b/patches/sched-better-debug-output-for-might-sleep.patch
index b00703f..57e5c04 100644
--- a/patches/sched-better-debug-output-for-might-sleep.patch
+++ b/patches/sched-better-debug-output-for-might-sleep.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1329,6 +1329,9 @@ struct task_struct {
+@@ -1330,6 +1330,9 @@ struct task_struct {
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index 2f23347..1446197 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -10,7 +10,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1438,6 +1438,9 @@ struct task_struct {
+@@ -1439,6 +1439,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
-@@ -1597,6 +1600,15 @@ extern struct pid *cad_pid;
+@@ -1598,6 +1601,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -1604,6 +1616,7 @@ static inline void put_task_struct(struc
+@@ -1605,6 +1617,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
diff --git a/patches/sched-migrate-disable.patch b/patches/sched-migrate-disable.patch
index d9a328a..4e3a8b4 100644
--- a/patches/sched-migrate-disable.patch
+++ b/patches/sched-migrate-disable.patch
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define preempt_enable_rt() preempt_enable()
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1074,6 +1074,7 @@ struct task_struct {
+@@ -1075,6 +1075,7 @@ struct task_struct {
#endif
unsigned int policy;
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1444,9 +1445,6 @@ struct task_struct {
+@@ -1445,9 +1446,6 @@ struct task_struct {
#endif
};
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int node, int pages, bool migrated);
extern void set_numabalancing_state(bool enabled);
-@@ -2619,6 +2617,15 @@ static inline void set_task_cpu(struct t
+@@ -2620,6 +2618,15 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 870e86f..2695d65 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* first nid will either be a valid NID or one of these values */
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2132,12 +2132,24 @@ extern struct mm_struct * mm_alloc(void)
+@@ -2133,12 +2133,24 @@ extern struct mm_struct * mm_alloc(void)
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index fa60771..3255b24 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1034,6 +1034,7 @@ enum perf_event_task_context {
+@@ -1035,6 +1035,7 @@ enum perf_event_task_context {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -2018,6 +2019,7 @@ extern void xtime_update(unsigned long t
+@@ -2019,6 +2020,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
diff --git a/patches/sched-teach-migrate_disable-about-atomic-contexts.patch b/patches/sched-teach-migrate_disable-about-atomic-contexts.patch
index afcc4bd..d27013f 100644
--- a/patches/sched-teach-migrate_disable-about-atomic-contexts.patch
+++ b/patches/sched-teach-migrate_disable-about-atomic-contexts.patch
@@ -39,7 +39,7 @@ Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1076,6 +1076,9 @@ struct task_struct {
+@@ -1077,6 +1077,9 @@ struct task_struct {
unsigned int policy;
#ifdef CONFIG_PREEMPT_RT_FULL
int migrate_disable;
diff --git a/patches/series b/patches/series
index 96f00cc..2267ed0 100644
--- a/patches/series
+++ b/patches/series
@@ -178,6 +178,9 @@ latency-hist.patch
# HW LATENCY DETECTOR - this really wants a rewrite
hwlatdetect.patch
+hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch
+hwlat-detector-Use-trace_clock_local-if-available.patch
+hwlat-detector-Use-thread-instead-of-stop-machine.patch
##################################################
# REAL RT STUFF starts here
@@ -607,10 +610,13 @@ drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
# SIMPLE WAITQUEUE
wait-simple-implementation.patch
wait-simple-rework-for-completions.patch
+swait-Add-memory-barrier-before-checking-list-empty.patch
+swait-Add-smp_mb-after-setting-h-list.patch
rcutiny-use-simple-waitqueue.patch
treercu-use-simple-waitqueue.patch
rcu-more-swait-conversions.patch
+rcu-swait-Fix-RCU-conversion-of-wake_up_all-to-swait.patch
completion-use-simple-wait-queues.patch
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index d16f297..62bdcab 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1215,6 +1215,7 @@ struct task_struct {
+@@ -1216,6 +1216,7 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
diff --git a/patches/softirq-local-lock.patch b/patches/softirq-local-lock.patch
index 3261641..6825a0a 100644
--- a/patches/softirq-local-lock.patch
+++ b/patches/softirq-local-lock.patch
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1447,6 +1447,7 @@ struct task_struct {
+@@ -1448,6 +1448,7 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
diff --git a/patches/softirq-make-serving-softirqs-a-task-flag.patch b/patches/softirq-make-serving-softirqs-a-task-flag.patch
index 6d75318..71d9425 100644
--- a/patches/softirq-make-serving-softirqs-a-task-flag.patch
+++ b/patches/softirq-make-serving-softirqs-a-task-flag.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1667,6 +1667,7 @@ extern void thread_group_cputime_adjuste
+@@ -1668,6 +1668,7 @@ extern void thread_group_cputime_adjuste
/*
* Per process flags
*/
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 66df3ab..db7ede3 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1449,6 +1449,7 @@ struct task_struct {
+@@ -1450,6 +1450,7 @@ struct task_struct {
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
int softirq_nestcnt;
diff --git a/patches/swait-Add-memory-barrier-before-checking-list-empty.patch b/patches/swait-Add-memory-barrier-before-checking-list-empty.patch
new file mode 100644
index 0000000..24ddbdd
--- /dev/null
+++ b/patches/swait-Add-memory-barrier-before-checking-list-empty.patch
@@ -0,0 +1,54 @@
+From 8c7c8225cf6bfcf8a6cdcc86bc8d1137e38dde56 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Mon, 19 Aug 2013 11:35:32 -0400
+Subject: [PATCH] swait: Add memory barrier before checking list empty
+
+There's a race condition between swait wakeups and adding to the list.
+__swait_wake() checks swait_head_has_waiters(), and if the list is
+empty it exits without doing any wake ups. The problem is that no
+memory barrier is executed before that check, so the decision to wake
+up or not can be made against a stale view of the list.
+
+   CPU0                                 CPU1
+   ----                                 ----
+
+                                        condition = 1
+
+                                        load h->list (is empty)
+   raw_spin_lock(hlist->lock)
+   hlist_add();
+   __set_current_state();
+   raw_spin_unlock(hlist->lock)
+                                        swait_wake()
+                                        swait_head_has_waiters()
+                                        (sees h->list as empty and returns)
+
+   check_condition (sees condition = 0)
+
+                                        store condition = 1
+
+   schedule()
+
+Now the task on CPU0 has just missed its wakeup. By adding a memory
+barrier before the list empty check, we fix both halves of the problem:
+the waker can no longer miss seeing the list as non-empty, and its
+store of the condition is pushed out for the waiting task to see.
+
+Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/wait-simple.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/wait-simple.c
++++ b/kernel/wait-simple.c
+@@ -27,6 +27,8 @@ static inline void __swait_dequeue(struc
+ /* Check whether a head has waiters enqueued */
+ static inline bool swait_head_has_waiters(struct swait_head *h)
+ {
++ /* Make sure the condition is visible before checking list_empty() */
++ smp_mb();
+ return !list_empty(&h->list);
+ }
+
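
To make the waker-side ordering concrete, here is a minimal userspace
analogue of swait_head_has_waiters() after this patch (a sketch, not
from the patch set: the names are invented and C11 seq_cst fences
stand in for smp_mb()):

  #include <stdatomic.h>
  #include <stdbool.h>

  static atomic_int condition;     /* the swait_event() condition  */
  static atomic_int list_nonempty; /* stand-in for !list_empty()   */

  /* Analogue of swait_head_has_waiters() with the added barrier. */
  static bool has_waiters(void)
  {
          /*
           * Push the caller's condition store out and keep the list
           * load from being hoisted above it (the early "load
           * h->list (is empty)" step in the diagram above).
           */
          atomic_thread_fence(memory_order_seq_cst);
          return atomic_load_explicit(&list_nonempty,
                                      memory_order_relaxed);
  }

  /* Analogue of "condition = 1; swait_wake()" on the waker CPU. */
  static bool wake_path(void)
  {
          atomic_store_explicit(&condition, 1, memory_order_relaxed);
          return has_waiters(); /* true: go do the wakeups */
  }

  int main(void)
  {
          return wake_path() ? 0 : 1; /* 1: no waiter was queued */
  }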
diff --git a/patches/swait-Add-smp_mb-after-setting-h-list.patch b/patches/swait-Add-smp_mb-after-setting-h-list.patch
new file mode 100644
index 0000000..f09ad2b
--- /dev/null
+++ b/patches/swait-Add-smp_mb-after-setting-h-list.patch
@@ -0,0 +1,65 @@
+From b82ab65560f7f21371d90f94d51b2e535574adeb Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Mon, 19 Aug 2013 11:35:33 -0400
+Subject: [PATCH] swait: Add smp_mb() after setting h->list
+
+The raw_spin_unlock() is not a full memory barrier. It only keeps
+accesses from leaking out of the critical section past it; it does not
+prevent accesses outside the section from leaking in. That is:
+
+ p = 1;
+
+ raw_spin_lock();
+ [...]
+ raw_spin_unlock();
+
+ y = x
+
+This can turn into:
+
+ p = 1;
+
+ raw_spin_lock();
+
+ load x
+
+ store p = 1
+
+ raw_spin_unlock();
+
+ y = x
+
+This means that the condition check in __swait_event() (and friends)
+can be performed before the store to h->list is visible:
+
+ raw_spin_lock();
+
+ load condition;
+
+ store h->list;
+
+ raw_spin_unlock();
+
+And the other CPU can see h->list as empty while this CPU sees the
+condition as not set, and the wake up can be missed.
+
+To prevent this from happening, add an smp_mb() after setting h->list.
+
+Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/wait-simple.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/wait-simple.c
++++ b/kernel/wait-simple.c
+@@ -16,6 +16,8 @@
+ static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w)
+ {
+ list_add(&w->node, &head->list);
++ /* We can't let the condition leak before the setting of head */
++ smp_mb();
+ }
+
+ /* Removes w from head->list. Must be called with head->lock locked. */
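
Taken together, the two swait barriers pair up: the smp_mb() added
here orders "add to h->list" before the waiter's condition check, and
the smp_mb() in swait_head_has_waiters() orders the waker's condition
store before its list check. This is the classic store-buffering
shape, sketched below as a self-contained userspace program (names
invented, C11 seq_cst fences standing in for smp_mb()):

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_int list_nonempty; /* analogue of h->list being set   */
  static atomic_int condition;     /* analogue of the event condition */

  /* Waiter: enqueue, fence (this patch), then check the condition. */
  static int waiter(void)
  {
          atomic_store_explicit(&list_nonempty, 1, memory_order_relaxed);
          atomic_thread_fence(memory_order_seq_cst);
          /* 0 would mean: call schedule() and sleep */
          return atomic_load_explicit(&condition, memory_order_relaxed);
  }

  /* Waker: set condition, fence (previous patch), check for waiters. */
  static int waker(void)
  {
          atomic_store_explicit(&condition, 1, memory_order_relaxed);
          atomic_thread_fence(memory_order_seq_cst);
          /* 0 would mean: skip the wakeup */
          return atomic_load_explicit(&list_nonempty, memory_order_relaxed);
  }

  int main(void)
  {
          /*
           * Run concurrently on two CPUs, the fences forbid the
           * outcome waiter() == 0 && waker() == 0: either the waiter
           * sees the condition or the waker sees the waiter, so the
           * wakeup cannot be lost.
           */
          printf("waiter=%d waker=%d\n", waiter(), waker());
          return 0;
  }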
diff --git a/patches/vtime-split-lock-and-seqcount.patch b/patches/vtime-split-lock-and-seqcount.patch
index 3932760..b8c9609 100644
--- a/patches/vtime-split-lock-and-seqcount.patch
+++ b/patches/vtime-split-lock-and-seqcount.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1167,7 +1167,8 @@ struct task_struct {
+@@ -1168,7 +1168,8 @@ struct task_struct {
struct cputime prev_cputime;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
diff --git a/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch b/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch
index 58c2f3c..54dbd46 100644
--- a/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch
+++ b/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch
@@ -54,8 +54,8 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
kthread_run()]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/x86/kernel/cpu/mcheck/mce.c | 75 ++++++++++++++++++++++++++++++++-------
- 1 file changed, 62 insertions(+), 13 deletions(-)
+ arch/x86/kernel/cpu/mcheck/mce.c | 73 ++++++++++++++++++++++++++++++++-------
+ 1 file changed, 61 insertions(+), 12 deletions(-)
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -67,24 +67,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
-@@ -1343,26 +1344,72 @@ static void mce_do_trigger(struct work_s
+@@ -1343,6 +1344,63 @@ static void mce_do_trigger(struct work_s
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
--/*
-- * Notify the user(s) about new machine check events.
-- * Can be called from interrupt context, but not from machine check/NMI
-- * context.
-- */
--int mce_notify_irq(void)
+static void __mce_notify_work(void)
- {
- /* Not more than two messages every minute */
- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
-
-- if (test_and_clear_bit(0, &mce_need_notify)) {
-- /* wake processes polling /dev/mcelog */
-- wake_up_interruptible(&mce_chrdev_wait);
++{
++ /* Not more than two messages every minute */
++ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
++
+ /* wake processes polling /dev/mcelog */
+ wake_up_interruptible(&mce_chrdev_wait);
+
@@ -121,14 +112,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ "mce-notify");
+ if (!mce_notify_helper)
+ return -ENOMEM;
-
-- if (mce_helper[0])
-- schedule_work(&mce_trigger_work);
++
+ return 0;
+}
-
-- if (__ratelimit(&ratelimit))
-- pr_info(HW_ERR "Machine check events logged\n");
++
+static void mce_notify_work(void)
+{
+ wake_up_process(mce_notify_helper);
@@ -140,15 +127,27 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
+static inline int mce_notify_work_init(void) { return 0; }
+#endif
-
-+/*
-+ * Notify the user(s) about new machine check events.
-+ * Can be called from interrupt context, but not from machine check/NMI
-+ * context.
-+ */
-+int mce_notify_irq(void)
-+{
-+ if (test_and_clear_bit(0, &mce_need_notify)) {
++
+ /*
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+@@ -1350,19 +1408,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+ */
+ int mce_notify_irq(void)
+ {
+- /* Not more than two messages every minute */
+- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+-
+ if (test_and_clear_bit(0, &mce_need_notify)) {
+- /* wake processes polling /dev/mcelog */
+- wake_up_interruptible(&mce_chrdev_wait);
+-
+- if (mce_helper[0])
+- schedule_work(&mce_trigger_work);
+-
+- if (__ratelimit(&ratelimit))
+- pr_info(HW_ERR "Machine check events logged\n");
+-
+ mce_notify_work();
return 1;
}
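
The shape of the mce change, reduced to its essentials: the work that
may take sleeping locks (waking /dev/mcelog pollers, running the
trigger) moves into a dedicated kernel thread, and the hot path only
calls wake_up_process(), which stays safe in atomic context on -rt. A
sketch of that pattern follows (illustrative only, not the patch's
code: the patch keys off mce_need_notify, while this sketch uses a
private "pending" flag and invented names):

  #include <linux/atomic.h>
  #include <linux/err.h>
  #include <linux/init.h>
  #include <linux/kthread.h>
  #include <linux/sched.h>

  static struct task_struct *helper;
  static atomic_t pending = ATOMIC_INIT(0);

  /* Stand-in for the deferred work; may sleep, take mutexes, etc. */
  static void do_deferred_work(void) { }

  static int helper_fn(void *unused)
  {
          for (;;) {
                  set_current_state(TASK_INTERRUPTIBLE);
                  if (kthread_should_stop()) {
                          __set_current_state(TASK_RUNNING);
                          break;
                  }
                  if (!atomic_xchg(&pending, 0)) {
                          schedule();     /* sleep until kicked */
                          continue;
                  }
                  __set_current_state(TASK_RUNNING);
                  do_deferred_work();
          }
          return 0;
  }

  static int __init helper_init(void)
  {
          helper = kthread_run(helper_fn, NULL, "example-notify");
          return IS_ERR(helper) ? PTR_ERR(helper) : 0;
  }

  /* Hot path: callable where sleeping locks are forbidden on -rt. */
  static void kick_helper(void)
  {
          atomic_set(&pending, 1);
          wake_up_process(helper);
  }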