author    Paul Gortmaker <paul.gortmaker@windriver.com>  2013-06-27 13:13:05 -0400
committer Paul Gortmaker <paul.gortmaker@windriver.com>  2013-06-27 13:13:05 -0400
commit    e001d2d4f5ed389fc588bffd3c579433e9a58d38 (patch)
tree      59309d211a80d8677da5d193e3855dee6606a393
parent    5a2bc049400ce178c213f4e4a9b0807fef491115 (diff)
patches-3.8.13-rt11.tar.xz (tag: v3.8.13-rt11)
md5sum: 7193dbbfbe5a5527340038ce097e0b98  patches-3.8.13-rt11.tar.xz

Announce:
--------------------
Dear RT Folks,

I'm pleased to announce the 3.8.13-rt11 release.

Changes since v3.8.13-rt10:

- use wakeup_timer_waiters() instead of a plain wake_up() so we do
  nothing on a nort kernel. Sent by Zhao Hongjiang.

- a fix for a CPU-down problem. If a kthread is pinned to the same CPU
  which is going down, we spin forever waiting for the kthread to
  leave the CPU. This does not trigger on v3.6-rt because the
  workqueue code there does not create a new process in the notifier
  callback. Reported by Qiang Huang.

- a check whether we lose PF_THREAD_BOUND in the workqueue code. It
  shouldn't happen, yet it seems to happen from time to time.

- save the CPU mask of the application which disables a CPU. Prior to
  this change, the application which put a CPU down was afterwards
  allowed to run on any CPU, even if it had been restricted to a
  specific one. Reported by Zhao Chenhui.

- the SLxB PowerPC e500 problem is removed from the list without a
  change. The problem triggers even on a v3.6 non-RT kernel after 1-2
  days of runtime on my MPC8572DS. I see no problem so far on the
  MPC8536, which is mostly the same HW except that it is UP, but the
  MPC8572DS crashes in UP mode as well, so I believe it is a HW
  problem.

Known issues:

- Steven reported a missing acpi from the v3.6 release.

- a "fix" for i915 leads to high latencies due to wbinvd(). Not sure
  what the best thing to do here is.

The delta patch against v3.8.13-rt10 is appended below and can be
found here:

  https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/incr/patch-3.8.13-rt10-rt11.patch.xz

The RT patch against 3.8.13 can be found here:

  https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/patch-3.8.13-rt11.patch.xz

The split quilt queue is available at:

  https://www.kernel.org/pub/linux/kernel/projects/rt/3.8/patches-3.8.13-rt11.tar.xz

Sebastian

[delta patch snipped]
--------------------

http://marc.info/?l=linux-rt-users&m=137123824305680&w=2

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--patches/cond-resched-lock-rt-tweak.patch2
-rw-r--r--patches/cond-resched-softirq-rt.patch2
-rw-r--r--patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch4
-rw-r--r--patches/genirq-Set-irq-thread-to-RT-priority-on-creation.patch66
-rw-r--r--patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch38
-rw-r--r--patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch4
-rw-r--r--patches/hrtimers-prepare-full-preemption.patch8
-rw-r--r--patches/i915_compile_fix.patch2
-rw-r--r--patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch2
-rw-r--r--patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch88
-rw-r--r--patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch59
-rw-r--r--patches/kernel-workqueue-Add-PF_THREAD_BOUND-after-set_cpu.patch30
-rw-r--r--patches/latency-hist.patch6
-rw-r--r--patches/localversion.patch2
-rw-r--r--patches/net-make-devnet_rename_seq-a-mutex.patch4
-rw-r--r--patches/net-netif-rx-ni-use-local-bh-disable.patch2
-rw-r--r--patches/net-netif_rx_ni-migrate-disable.patch2
-rw-r--r--patches/net-tx-action-avoid-livelock-on-rt.patch4
-rw-r--r--patches/perf-move-irq-work-to-softirq-in-rt.patch4
-rw-r--r--patches/peter_zijlstra-frob-migrate_disable-2.patch2
-rw-r--r--patches/powerpc-32bit-Store-temporary-result-in-r0-instead-o.patch7
-rw-r--r--patches/preempt-lazy-support.patch4
-rw-r--r--patches/random-make-it-work-on-rt.patch2
-rw-r--r--patches/sched-migrate-disable.patch2
-rw-r--r--patches/series8
-rw-r--r--patches/skbufhead-raw-lock.patch10
-rw-r--r--patches/softirq-preempt-fix-3-re.patch12
-rw-r--r--patches/softirq-thread-do-softirq.patch2
-rw-r--r--patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch2
-rw-r--r--patches/timer-handle-idle-trylock-in-get-next-timer-irq.patch4
-rw-r--r--patches/timers-avoid-the-base-null-otptimization-on-rt.patch4
-rw-r--r--patches/timers-mov-printk_tick-to-soft-interrupt.patch4
-rw-r--r--patches/timers-preempt-rt-support.patch8
-rw-r--r--patches/timers-prepare-for-full-preemption-improve.patch56
-rw-r--r--patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch4
-rw-r--r--patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch6
-rw-r--r--patches/x86-mce-fix-mce-timer-interval.patch11
37 files changed, 387 insertions, 90 deletions
diff --git a/patches/cond-resched-lock-rt-tweak.patch b/patches/cond-resched-lock-rt-tweak.patch
index 3e7164c..83f4a3b 100644
--- a/patches/cond-resched-lock-rt-tweak.patch
+++ b/patches/cond-resched-lock-rt-tweak.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2720,7 +2720,7 @@ extern int _cond_resched(void);
+@@ -2710,7 +2710,7 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index b30f4d5..f0cde79 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -10,7 +10,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2731,12 +2731,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -2721,12 +2721,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index d6228dc..5c3b0c0 100644
--- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -27,7 +27,7 @@ Cc: stable-rt@vger.kernel.org
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
-@@ -166,7 +166,7 @@ again:
+@@ -157,7 +157,7 @@ again:
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
spin_unlock(&p->d_lock);
@@ -64,7 +64,7 @@ Cc: stable-rt@vger.kernel.org
goto relock;
}
-@@ -2084,7 +2085,7 @@ again:
+@@ -2086,7 +2087,7 @@ again:
if (dentry->d_count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
diff --git a/patches/genirq-Set-irq-thread-to-RT-priority-on-creation.patch b/patches/genirq-Set-irq-thread-to-RT-priority-on-creation.patch
new file mode 100644
index 0000000..47573d7
--- /dev/null
+++ b/patches/genirq-Set-irq-thread-to-RT-priority-on-creation.patch
@@ -0,0 +1,66 @@
+From 0b4a953a0a014bee0bc3eaa5ae791f4b985f2c7a Mon Sep 17 00:00:00 2001
+From: Ivo Sieben <meltedpianoman@gmail.com>
+Date: Mon, 3 Jun 2013 10:12:02 +0000
+Subject: [PATCH] genirq: Set irq thread to RT priority on creation
+
+When a threaded irq handler is installed the irq thread is initially
+created at normal scheduling priority. Only after the irq thread is
+woken up does it set its own priority to SCHED_FIFO MAX_USER_RT_PRIO/2.
+
+This means that interrupts that occur directly after the irq handler
+is installed will be handled on a normal scheduling priority instead
+of the realtime priority that one would expect.
+
+Fix this by setting the RT priority on creation of the irq_thread.
+
+Signed-off-by: Ivo Sieben <meltedpianoman@gmail.com>
+Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/1370254322-17240-1-git-send-email-meltedpianoman@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/irq/manage.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -839,9 +839,6 @@ static void irq_thread_dtor(struct callb
+ static int irq_thread(void *data)
+ {
+ struct callback_head on_exit_work;
+- static const struct sched_param param = {
+- .sched_priority = MAX_USER_RT_PRIO/2,
+- };
+ struct irqaction *action = data;
+ struct irq_desc *desc = irq_to_desc(action->irq);
+ irqreturn_t (*handler_fn)(struct irq_desc *desc,
+@@ -853,8 +850,6 @@ static int irq_thread(void *data)
+ else
+ handler_fn = irq_thread_fn;
+
+- sched_setscheduler(current, SCHED_FIFO, &param);
+-
+ init_task_work(&on_exit_work, irq_thread_dtor);
+ task_work_add(current, &on_exit_work, false);
+
+@@ -949,6 +944,9 @@ __setup_irq(unsigned int irq, struct irq
+ */
+ if (new->thread_fn && !nested) {
+ struct task_struct *t;
++ static const struct sched_param param = {
++ .sched_priority = MAX_USER_RT_PRIO/2,
++ };
+
+ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+ new->name);
+@@ -956,6 +954,9 @@ __setup_irq(unsigned int irq, struct irq
+ ret = PTR_ERR(t);
+ goto out_mput;
+ }
++
++ sched_setscheduler(t, SCHED_FIFO, &param);
++
+ /*
+ * We keep the reference to the task struct even if
+ * the thread dies to avoid that the interrupt code
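
A userspace aside on the pattern above: give a thread its SCHED_FIFO
priority before it first runs, via creation attributes, instead of
letting the thread promote itself after it is already executing. This
is only a sketch, not kernel code; the priority value 50 and the file
names are arbitrary assumptions, and it needs CAP_SYS_NICE or root.

/* Sketch: RT priority fixed at thread creation, so there is no window
 * at default priority. Build: gcc -o rtthread rtthread.c -lpthread
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
        /* By the first instruction here we are already SCHED_FIFO. */
        int policy;
        struct sched_param sp;

        pthread_getschedparam(pthread_self(), &policy, &sp);
        printf("policy=%s prio=%d\n",
               policy == SCHED_FIFO ? "SCHED_FIFO" : "other",
               sp.sched_priority);
        return NULL;
}

int main(void)
{
        pthread_t t;
        pthread_attr_t attr;
        struct sched_param sp = { .sched_priority = 50 };

        pthread_attr_init(&attr);
        /* Without EXPLICIT_SCHED the attr policy is silently ignored
         * and the thread inherits the creator's scheduling class. */
        pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
        pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
        pthread_attr_setschedparam(&attr, &sp);

        if (pthread_create(&t, &attr, worker, NULL) != 0) {
                perror("pthread_create");
                return 1;
        }
        pthread_join(t, NULL);
        return 0;
}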
diff --git a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 800191d..abbe4d1 100644
--- a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -41,7 +41,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
ktime_t softirq_time;
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
-@@ -590,8 +590,7 @@ static int hrtimer_reprogram(struct hrti
+@@ -594,8 +594,7 @@ static int hrtimer_reprogram(struct hrti
* When the callback is running, we do not reprogram the clock event
* device. The timer callback is either running on a different CPU or
* the callback is executed in the hrtimer_interrupt context. The
@@ -51,7 +51,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
*/
if (hrtimer_callback_running(timer))
return 0;
-@@ -626,6 +625,9 @@ static int hrtimer_reprogram(struct hrti
+@@ -630,6 +629,9 @@ static int hrtimer_reprogram(struct hrti
return res;
}
@@ -61,7 +61,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
* Initialize the high resolution related parts of cpu_base
*/
-@@ -642,9 +644,18 @@ static inline void hrtimer_init_hres(str
+@@ -646,9 +648,18 @@ static inline void hrtimer_init_hres(str
* and expiry check is done in the hrtimer_interrupt or in the softirq.
*/
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
@@ -82,7 +82,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
-@@ -725,12 +736,18 @@ static inline int hrtimer_switch_to_hres
+@@ -729,12 +740,18 @@ static inline int hrtimer_switch_to_hres
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
@@ -102,7 +102,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
#endif /* CONFIG_HIGH_RES_TIMERS */
-@@ -862,9 +879,9 @@ void hrtimer_wait_for_timer(const struct
+@@ -866,9 +883,9 @@ void hrtimer_wait_for_timer(const struct
{
struct hrtimer_clock_base *base = timer->base;
@@ -114,7 +114,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
#else
-@@ -914,6 +931,11 @@ static void __remove_hrtimer(struct hrti
+@@ -918,6 +935,11 @@ static void __remove_hrtimer(struct hrti
if (!(timer->state & HRTIMER_STATE_ENQUEUED))
goto out;
@@ -126,7 +126,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
next_timer = timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
if (&timer->node == next_timer) {
-@@ -1021,9 +1043,19 @@ int __hrtimer_start_range_ns(struct hrti
+@@ -1025,9 +1047,19 @@ int __hrtimer_start_range_ns(struct hrti
*
* XXX send_remote_softirq() ?
*/
@@ -149,7 +149,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
* We need to drop cpu_base->lock to avoid a
* lock ordering issue vs. rq->lock.
-@@ -1031,9 +1063,7 @@ int __hrtimer_start_range_ns(struct hrti
+@@ -1035,9 +1067,7 @@ int __hrtimer_start_range_ns(struct hrti
raw_spin_unlock(&new_base->cpu_base->lock);
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
local_irq_restore(flags);
@@ -160,7 +160,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
}
-@@ -1200,6 +1230,7 @@ static void __hrtimer_init(struct hrtime
+@@ -1204,6 +1234,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@@ -168,7 +168,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
-@@ -1283,10 +1314,128 @@ static void __run_hrtimer(struct hrtimer
+@@ -1287,10 +1318,128 @@ static void __run_hrtimer(struct hrtimer
timer->state &= ~HRTIMER_STATE_CALLBACK;
}
@@ -299,7 +299,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
* High resolution timer interrupt
* Called with interrupts disabled
-@@ -1295,7 +1444,7 @@ void hrtimer_interrupt(struct clock_even
+@@ -1299,7 +1448,7 @@ void hrtimer_interrupt(struct clock_even
{
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
@@ -308,7 +308,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
BUG_ON(!cpu_base->hres_active);
cpu_base->nr_events++;
-@@ -1362,7 +1511,10 @@ retry:
+@@ -1368,7 +1517,10 @@ retry:
break;
}
@@ -320,7 +320,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
}
-@@ -1377,6 +1529,10 @@ retry:
+@@ -1383,6 +1535,10 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
@@ -331,7 +331,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
return;
}
-@@ -1457,24 +1613,26 @@ void hrtimer_peek_ahead_timers(void)
+@@ -1463,24 +1619,26 @@ void hrtimer_peek_ahead_timers(void)
local_irq_restore(flags);
}
@@ -365,7 +365,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/*
* Called from timer softirq every jiffy, expire hrtimers:
*
-@@ -1507,7 +1665,7 @@ void hrtimer_run_queues(void)
+@@ -1513,7 +1671,7 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
struct hrtimer_clock_base *base;
@@ -374,7 +374,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
if (hrtimer_hres_active())
return;
-@@ -1532,12 +1690,16 @@ void hrtimer_run_queues(void)
+@@ -1538,12 +1696,16 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
@@ -393,7 +393,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
/*
-@@ -1559,6 +1721,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1565,6 +1727,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@@ -401,7 +401,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1695,6 +1858,7 @@ static void __cpuinit init_hrtimers_cpu(
+@@ -1701,6 +1864,7 @@ static void __cpuinit init_hrtimers_cpu(
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -409,7 +409,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
hrtimer_init_hres(cpu_base);
-@@ -1813,9 +1977,7 @@ void __init hrtimers_init(void)
+@@ -1819,9 +1983,7 @@ void __init hrtimers_init(void)
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
diff --git a/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch b/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
index f1acc64..d249937 100644
--- a/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
+++ b/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
@@ -12,7 +12,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
-@@ -1529,11 +1529,7 @@ retry:
+@@ -1535,11 +1535,7 @@ retry:
if (expires_next.tv64 == KTIME_MAX ||
!tick_program_event(expires_next, 0)) {
cpu_base->hang_detected = 0;
@@ -25,7 +25,7 @@ Cc: stable-rt@vger.kernel.org
}
/*
-@@ -1577,6 +1573,9 @@ retry:
+@@ -1583,6 +1579,9 @@ retry:
tick_program_event(expires_next, 1);
printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
ktime_to_ns(delta));
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index 8208370..cf29e35 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
-@@ -845,6 +845,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -849,6 +849,32 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -1095,7 +1121,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1099,7 +1125,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1510,6 +1536,8 @@ void hrtimer_run_queues(void)
+@@ -1516,6 +1542,8 @@ void hrtimer_run_queues(void)
}
raw_spin_unlock(&cpu_base->lock);
}
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1670,6 +1698,9 @@ static void __cpuinit init_hrtimers_cpu(
+@@ -1676,6 +1704,9 @@ static void __cpuinit init_hrtimers_cpu(
}
hrtimer_init_hres(cpu_base);
diff --git a/patches/i915_compile_fix.patch b/patches/i915_compile_fix.patch
index 7911d78..d1c0c7c 100644
--- a/patches/i915_compile_fix.patch
+++ b/patches/i915_compile_fix.patch
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
-@@ -4366,7 +4360,7 @@ static bool mutex_is_locked_by(struct mu
+@@ -4384,7 +4378,7 @@ static bool mutex_is_locked_by(struct mu
if (!mutex_is_locked(mutex))
return false;
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index be85497..738179e 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -81,7 +81,7 @@ Cc: stable-rt@vger.kernel.org
return ret;
}
-@@ -1127,6 +1135,9 @@ __setup_irq(unsigned int irq, struct irq
+@@ -1128,6 +1136,9 @@ __setup_irq(unsigned int irq, struct irq
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
diff --git a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
new file mode 100644
index 0000000..8ff5acd
--- /dev/null
+++ b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -0,0 +1,88 @@
+From 24136a819693ae36039d6b4286bf1f775e062bcc Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 7 Jun 2013 22:37:06 +0200
+Subject: [PATCH] kernel/cpu: fix cpu down problem if kthread's cpu is
+ going down
+
+If a kthread is pinned to CPUx and CPUx is going down, then we get
+into trouble:
+- first the unplug thread is created
+- it will set itself to hp->unplug. As a result, every task that is
+  going to take a lock has to leave the CPU.
+- the CPU_DOWN_PREPARE notifiers are started. The worker thread will
+  start a new process for the "high priority worker".
+  Now the kthread would like to take a lock, but since it can't leave
+  the CPU it will never complete its task.
+
+We could fire the unplug thread after the notifiers, but then the CPU
+is no longer marked "online" and the unplug thread will run on CPU0,
+which was fixed before :)
+
+So instead the unplug thread is started and kept waiting until the
+notifiers complete their work.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cpu.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -83,6 +83,7 @@ struct hotplug_pcp {
+ int refcount;
+ int grab_lock;
+ struct completion synced;
++ struct completion unplug_wait;
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ spinlock_t lock;
+ #else
+@@ -180,6 +181,7 @@ static int sync_unplug_thread(void *data
+ {
+ struct hotplug_pcp *hp = data;
+
++ wait_for_completion(&hp->unplug_wait);
+ preempt_disable();
+ hp->unplug = current;
+ wait_for_pinned_cpus(hp);
+@@ -245,6 +247,14 @@ static void __cpu_unplug_sync(struct hot
+ wait_for_completion(&hp->synced);
+ }
+
++static void __cpu_unplug_wait(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ complete(&hp->unplug_wait);
++ wait_for_completion(&hp->synced);
++}
++
+ /*
+ * Start the sync_unplug_thread on the target cpu and wait for it to
+ * complete.
+@@ -268,6 +278,7 @@ static int cpu_unplug_begin(unsigned int
+ tell_sched_cpu_down_begin(cpu);
+
+ init_completion(&hp->synced);
++ init_completion(&hp->unplug_wait);
+
+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+ if (IS_ERR(hp->sync_tsk)) {
+@@ -283,8 +294,7 @@ static int cpu_unplug_begin(unsigned int
+ * wait for tasks that are going to enter these sections and
+ * we must not have them block.
+ */
+- __cpu_unplug_sync(hp);
+-
++ wake_up_process(hp->sync_tsk);
+ return 0;
+ }
+
+@@ -571,6 +581,8 @@ static int __ref _cpu_down(unsigned int
+ __func__, cpu);
+ goto out_release;
+ }
++
++ __cpu_unplug_wait(cpu);
+ smpboot_park_threads(cpu);
+
+ /* Notifiers are done. Don't let any more tasks pin this CPU. */
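
The ordering this patch establishes -- create the sync thread early,
hold it on hp->unplug_wait until the notifiers are done, then wait on
hp->synced -- can be modeled in userspace with two semaphores standing
in for the two completions. A minimal sketch under that assumption;
the sleep() merely stands in for the notifier work.

/* Sketch: two-completion handshake for ordered thread startup.
 * Build: gcc -o unplug unplug.c -lpthread
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t unplug_wait;   /* "notifiers are done, go ahead" */
static sem_t synced;        /* "worker finished its part" */

static void *sync_unplug_thread(void *arg)
{
        sem_wait(&unplug_wait);   /* wait_for_completion(&hp->unplug_wait) */
        puts("worker: pinning CPU, waiting for pinned tasks...");
        sem_post(&synced);        /* complete(&hp->synced) */
        return NULL;
}

int main(void)
{
        pthread_t t;

        sem_init(&unplug_wait, 0, 0);
        sem_init(&synced, 0, 0);
        pthread_create(&t, NULL, sync_unplug_thread, NULL);

        puts("main: running CPU_DOWN_PREPARE notifiers...");
        sleep(1);                 /* notifiers may spawn new kthreads */

        sem_post(&unplug_wait);   /* __cpu_unplug_wait(): release worker */
        sem_wait(&synced);        /* ...and wait until it has synced */
        puts("main: worker synced, CPU may go down");

        pthread_join(t, NULL);
        return 0;
}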
diff --git a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
new file mode 100644
index 0000000..45971cf
--- /dev/null
+++ b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -0,0 +1,59 @@
+From 4c6df3d78817c20a147c0291f6600d002c0910d3 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 14 Jun 2013 17:16:35 +0200
+Subject: [PATCH] kernel/hotplug: restore original cpu mask oncpu/down
+
+If a task which is allowed to run only on CPU X puts CPU Y down, then
+it will afterwards be allowed to run on all CPUs but CPU Y once it
+comes back from the kernel. This patch ensures that we don't lose the
+initial affinity setting unless the CPU the task is running on is
+going down.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/cpu.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -545,6 +545,7 @@ static int __ref _cpu_down(unsigned int
+ .hcpu = hcpu,
+ };
+ cpumask_var_t cpumask;
++ cpumask_var_t cpumask_org;
+
+ if (num_online_cpus() == 1)
+ return -EBUSY;
+@@ -555,6 +556,12 @@ static int __ref _cpu_down(unsigned int
+ /* Move the downtaker off the unplug cpu */
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
++ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
++ free_cpumask_var(cpumask);
++ return -ENOMEM;
++ }
++
++ cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
+ set_cpus_allowed_ptr(current, cpumask);
+ free_cpumask_var(cpumask);
+@@ -563,7 +570,8 @@ static int __ref _cpu_down(unsigned int
+ if (mycpu == cpu) {
+ printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
+ migrate_enable();
+- return -EBUSY;
++ err = -EBUSY;
++ goto restore_cpus;
+ }
+
+ cpu_hotplug_begin();
+@@ -622,6 +630,9 @@ out_cancel:
+ cpu_hotplug_done();
+ if (!err)
+ cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
++restore_cpus:
++ set_cpus_allowed_ptr(current, cpumask_org);
++ free_cpumask_var(cpumask_org);
+ return err;
+ }
+
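
The save/restrict/restore idea maps directly onto the userspace
affinity API. A minimal sketch, with sched_getaffinity() and
sched_setaffinity() standing in for tsk_cpus_allowed() and
set_cpus_allowed_ptr(); treating CPU 1 as the victim is an arbitrary
assumption.

/* Sketch: remember the original CPU mask, move off one CPU for the
 * duration of an operation, restore the mask afterwards.
 * Build: gcc -o affin affin.c
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t org, tmp;
        int victim = 1;            /* CPU we pretend to take down */

        /* cpumask_copy(cpumask_org, tsk_cpus_allowed(current)) */
        if (sched_getaffinity(0, sizeof(org), &org)) {
                perror("sched_getaffinity");
                return 1;
        }

        /* cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)) */
        tmp = org;
        CPU_CLR(victim, &tmp);
        if (CPU_COUNT(&tmp) == 0) {
                fprintf(stderr, "would leave no CPU to run on\n");
                return 1;
        }
        if (sched_setaffinity(0, sizeof(tmp), &tmp)) {
                perror("sched_setaffinity");
                return 1;
        }
        printf("running off CPU %d, doing the unplug work...\n", victim);

        /* restore_cpus: set_cpus_allowed_ptr(current, cpumask_org) */
        sched_setaffinity(0, sizeof(org), &org);
        puts("original affinity restored");
        return 0;
}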
diff --git a/patches/kernel-workqueue-Add-PF_THREAD_BOUND-after-set_cpu.patch b/patches/kernel-workqueue-Add-PF_THREAD_BOUND-after-set_cpu.patch
new file mode 100644
index 0000000..d3e2d02
--- /dev/null
+++ b/patches/kernel-workqueue-Add-PF_THREAD_BOUND-after-set_cpu.patch
@@ -0,0 +1,30 @@
+From f270f2b228eb2901edc44816d1ab8a511bac0383 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 7 Jun 2013 16:03:56 +0200
+Subject: [PATCH] kernel/workqueue: Add PF_THREAD_BOUND after set_cpu
+
+This is just a precaution for now. There are reports that the flag is
+getting lost. I could only notice this on workqueues due to another bug.
+So this WARN_ON should not trigger. If it does, investigations may
+begin.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/workqueue.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1637,8 +1637,11 @@ __acquires(&gcwq->lock)
+ * it races with cpu hotunplug operation. Verify
+ * against GCWQ_DISASSOCIATED.
+ */
+- if (!(gcwq->flags & GCWQ_DISASSOCIATED))
++ if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
+ set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
++ if (WARN_ON(!(task->flags & PF_THREAD_BOUND)))
++ task->flags |= PF_THREAD_BOUND;
++ }
+
+ spin_lock_irq(&gcwq->lock);
+ if (gcwq->flags & GCWQ_DISASSOCIATED)
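
The defensive check added here -- apply a constraint, then verify it
actually holds and warn instead of silently carrying on -- has a
simple userspace analogue using the non-portable affinity calls. A
sketch only; pinning to CPU 0 is an arbitrary choice.

/* Sketch: pin a thread, read the affinity back, warn and repair if
 * the setting was lost. Build: gcc -o pincheck pincheck.c -lpthread
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static int pin_and_verify(pthread_t t, int cpu)
{
        cpu_set_t want, got;

        CPU_ZERO(&want);
        CPU_SET(cpu, &want);
        if (pthread_setaffinity_np(t, sizeof(want), &want))
                return -1;

        /* WARN_ON analogue: the invariant should already hold */
        if (pthread_getaffinity_np(t, sizeof(got), &got))
                return -1;
        if (!CPU_EQUAL(&want, &got)) {
                fprintf(stderr, "WARN: affinity lost, repinning\n");
                return pthread_setaffinity_np(t, sizeof(want), &want);
        }
        return 0;
}

int main(void)
{
        if (pin_and_verify(pthread_self(), 0)) {
                perror("pin_and_verify");
                return 1;
        }
        puts("pinned to CPU 0, invariant verified");
        return 0;
}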
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index 25cdcfe..9c6a87d 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -355,7 +355,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The timer bases:
-@@ -971,6 +972,17 @@ int __hrtimer_start_range_ns(struct hrti
+@@ -975,6 +976,17 @@ int __hrtimer_start_range_ns(struct hrti
#endif
}
@@ -373,7 +373,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
hrtimer_set_expires_range_ns(timer, tim, delta_ns);
timer_stats_hrtimer_set_start_info(timer);
-@@ -1247,6 +1259,8 @@ static void __run_hrtimer(struct hrtimer
+@@ -1251,6 +1263,8 @@ static void __run_hrtimer(struct hrtimer
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -382,7 +382,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* High resolution timer interrupt
* Called with interrupts disabled
-@@ -1290,6 +1304,15 @@ retry:
+@@ -1294,6 +1308,15 @@ retry:
timer = container_of(node, struct hrtimer, node);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 458b434..4e59cb4 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -12,4 +12,4 @@ Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt10
++-rt11
diff --git a/patches/net-make-devnet_rename_seq-a-mutex.patch b/patches/net-make-devnet_rename_seq-a-mutex.patch
index b8bb512..601a01a 100644
--- a/patches/net-make-devnet_rename_seq-a-mutex.patch
+++ b/patches/net-make-devnet_rename_seq-a-mutex.patch
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
memcpy(dev->name, oldname, IFNAMSIZ);
goto rollback;
} else {
-@@ -4219,7 +4220,6 @@ static int dev_ifname(struct net *net, s
+@@ -4223,7 +4224,6 @@ static int dev_ifname(struct net *net, s
{
struct net_device *dev;
struct ifreq ifr;
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Fetch the caller's info block.
-@@ -4228,19 +4228,18 @@ static int dev_ifname(struct net *net, s
+@@ -4232,19 +4232,18 @@ static int dev_ifname(struct net *net, s
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
diff --git a/patches/net-netif-rx-ni-use-local-bh-disable.patch b/patches/net-netif-rx-ni-use-local-bh-disable.patch
index b83e64e..bfa369c 100644
--- a/patches/net-netif-rx-ni-use-local-bh-disable.patch
+++ b/patches/net-netif-rx-ni-use-local-bh-disable.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3117,11 +3117,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3121,11 +3121,9 @@ int netif_rx_ni(struct sk_buff *skb)
{
int err;
diff --git a/patches/net-netif_rx_ni-migrate-disable.patch b/patches/net-netif_rx_ni-migrate-disable.patch
index 0804b31..63d42cd 100644
--- a/patches/net-netif_rx_ni-migrate-disable.patch
+++ b/patches/net-netif_rx_ni-migrate-disable.patch
@@ -9,7 +9,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3114,11 +3114,11 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3118,11 +3118,11 @@ int netif_rx_ni(struct sk_buff *skb)
{
int err;
diff --git a/patches/net-tx-action-avoid-livelock-on-rt.patch b/patches/net-tx-action-avoid-livelock-on-rt.patch
index 976d039..e70ea8b 100644
--- a/patches/net-tx-action-avoid-livelock-on-rt.patch
+++ b/patches/net-tx-action-avoid-livelock-on-rt.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3124,6 +3124,36 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3128,6 +3128,36 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = &__get_cpu_var(softnet_data);
-@@ -3162,7 +3192,7 @@ static void net_tx_action(struct softirq
+@@ -3166,7 +3196,7 @@ static void net_tx_action(struct softirq
head = head->next_sched;
root_lock = qdisc_lock(q);
diff --git a/patches/perf-move-irq-work-to-softirq-in-rt.patch b/patches/perf-move-irq-work-to-softirq-in-rt.patch
index 157a8ba..60ccb0a 100644
--- a/patches/perf-move-irq-work-to-softirq-in-rt.patch
+++ b/patches/perf-move-irq-work-to-softirq-in-rt.patch
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (llnode != NULL) {
--- a/kernel/timer.c
+++ b/kernel/timer.c
-@@ -1419,7 +1419,7 @@ void update_process_times(int user_tick)
+@@ -1421,7 +1421,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (in_irq())
irq_work_run();
#endif
-@@ -1433,6 +1433,10 @@ static void run_timer_softirq(struct sof
+@@ -1435,6 +1435,10 @@ static void run_timer_softirq(struct sof
{
struct tvec_base *base = __this_cpu_read(tvec_bases);
diff --git a/patches/peter_zijlstra-frob-migrate_disable-2.patch b/patches/peter_zijlstra-frob-migrate_disable-2.patch
index f996570..35948f9 100644
--- a/patches/peter_zijlstra-frob-migrate_disable-2.patch
+++ b/patches/peter_zijlstra-frob-migrate_disable-2.patch
@@ -73,7 +73,7 @@ Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -2811,11 +2813,22 @@ static inline void set_task_cpu(struct t
+@@ -2801,11 +2803,22 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
diff --git a/patches/powerpc-32bit-Store-temporary-result-in-r0-instead-o.patch b/patches/powerpc-32bit-Store-temporary-result-in-r0-instead-o.patch
index 54cdde0..5598563 100644
--- a/patches/powerpc-32bit-Store-temporary-result-in-r0-instead-o.patch
+++ b/patches/powerpc-32bit-Store-temporary-result-in-r0-instead-o.patch
@@ -27,11 +27,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
arch/powerpc/kernel/entry_32.S | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
-diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
-index 95b884e..086dce7 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
-@@ -851,7 +851,7 @@ user_exc_return: /* r10 contains MSR_KERNEL here */
+@@ -851,7 +851,7 @@ resume_kernel:
/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
CURRENT_THREAD_INFO(r9, r1)
lwz r8,TI_FLAGS(r9)
@@ -40,6 +38,3 @@ index 95b884e..086dce7 100644
beq+ 1f
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
---
-1.7.10.4
-
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index b7755a9..76f9ea7 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define add_preempt_count_notrace(val) \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2692,6 +2692,52 @@ static inline int test_tsk_need_resched(
+@@ -2682,6 +2682,52 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -199,7 +199,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
-@@ -2723,11 +2769,6 @@ static inline int signal_pending_state(l
+@@ -2713,11 +2759,6 @@ static inline int signal_pending_state(l
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index 825b744..8e3303e 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -99,7 +99,7 @@ Cc: stable-rt@vger.kernel.org
note_interrupt(irq, desc, retval);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -879,6 +879,12 @@ static int irq_thread(void *data)
+@@ -874,6 +874,12 @@ static int irq_thread(void *data)
if (!noirqdebug)
note_interrupt(action->irq, desc, action_ret);
diff --git a/patches/sched-migrate-disable.patch b/patches/sched-migrate-disable.patch
index 35e76e3..de0739d 100644
--- a/patches/sched-migrate-disable.patch
+++ b/patches/sched-migrate-disable.patch
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int node, int pages, bool migrated);
extern void set_numabalancing_state(bool enabled);
-@@ -2813,6 +2811,15 @@ static inline void set_task_cpu(struct t
+@@ -2803,6 +2801,15 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
diff --git a/patches/series b/patches/series
index 51ee846..aa883c0 100644
--- a/patches/series
+++ b/patches/series
@@ -22,6 +22,8 @@ generic-cmpxchg-use-raw-local-irq.patch
0001-of-fix-recursive-locking-in-of_get_next_available_ch.patch
tcp-force-a-dst-refcount-when-prequeue-packet.patch
+genirq-Set-irq-thread-to-RT-priority-on-creation.patch
+
############################################################
# UPSTREAM FIXES, patches pending
############################################################
@@ -312,6 +314,7 @@ relay-fix-timer-madness.patch
# TIMERS
timers-prepare-for-full-preemption.patch
+timers-prepare-for-full-preemption-improve.patch
timers-preempt-rt-support.patch
timers-mov-printk_tick-to-soft-interrupt.patch
timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -564,6 +567,8 @@ lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
fs-jbd-pull-plug-when-waiting-for-space.patch
perf-make-swevent-hrtimer-irqsafe.patch
cpu-rt-rework-cpu-down.patch
+kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
# Stable-rt stuff: Fold back when Steve grabbed it
random-make-it-work-on-rt.patch
@@ -607,6 +612,9 @@ i915_compile_fix.patch
# XXX need feedback
drm-i915-move-i915_trace_irq_get-out-of-the-tracing-.patch
+# XXX most likely not required
+kernel-workqueue-Add-PF_THREAD_BOUND-after-set_cpu.patch
+
# Enable full RT
powerpc-preempt-lazy-support.patch
wait-simple-implementation.patch
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index b30db3f..afbbd2e 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -3533,7 +3533,7 @@ static void flush_backlog(void *arg)
+@@ -3537,7 +3537,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -3542,10 +3542,13 @@ static void flush_backlog(void *arg)
+@@ -3546,10 +3546,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int napi_gro_complete(struct sk_buff *skb)
-@@ -4050,10 +4053,17 @@ static void net_rx_action(struct softirq
+@@ -4054,10 +4057,17 @@ static void net_rx_action(struct softirq
struct softnet_data *sd = &__get_cpu_var(softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (!list_empty(&sd->poll_list)) {
struct napi_struct *n;
int work, weight;
-@@ -6535,6 +6545,9 @@ static int dev_cpu_callback(struct notif
+@@ -6539,6 +6549,9 @@ static int dev_cpu_callback(struct notif
netif_rx(skb);
input_queue_head_incr(oldsd);
}
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NOTIFY_OK;
}
-@@ -6807,8 +6820,9 @@ static int __init net_dev_init(void)
+@@ -6811,8 +6824,9 @@ static int __init net_dev_init(void)
struct softnet_data *sd = &per_cpu(softnet_data, i);
memset(sd, 0, sizeof(*sd));
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index b365fba..7f2f1cb 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -95,7 +95,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -1945,6 +1945,7 @@ static inline void __netif_reschedule(st
+@@ -1946,6 +1946,7 @@ static inline void __netif_reschedule(st
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -103,7 +103,7 @@ Cc: stable-rt@vger.kernel.org
}
void __netif_schedule(struct Qdisc *q)
-@@ -1966,6 +1967,7 @@ void dev_kfree_skb_irq(struct sk_buff *s
+@@ -1967,6 +1968,7 @@ void dev_kfree_skb_irq(struct sk_buff *s
sd->completion_queue = skb;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -111,7 +111,7 @@ Cc: stable-rt@vger.kernel.org
}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);
-@@ -3051,6 +3053,7 @@ enqueue:
+@@ -3055,6 +3057,7 @@ enqueue:
rps_unlock(sd);
local_irq_restore(flags);
@@ -119,7 +119,7 @@ Cc: stable-rt@vger.kernel.org
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -3937,6 +3940,7 @@ static void net_rps_action_and_irq_enabl
+@@ -3941,6 +3944,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
@@ -127,7 +127,7 @@ Cc: stable-rt@vger.kernel.org
}
static int process_backlog(struct napi_struct *napi, int quota)
-@@ -4009,6 +4013,7 @@ void __napi_schedule(struct napi_struct
+@@ -4013,6 +4017,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(&__get_cpu_var(softnet_data), n);
local_irq_restore(flags);
@@ -135,7 +135,7 @@ Cc: stable-rt@vger.kernel.org
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -6565,6 +6570,7 @@ static int dev_cpu_callback(struct notif
+@@ -6569,6 +6574,7 @@ static int dev_cpu_callback(struct notif
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/softirq-thread-do-softirq.patch b/patches/softirq-thread-do-softirq.patch
index cb9b485..af93113 100644
--- a/patches/softirq-thread-do-softirq.patch
+++ b/patches/softirq-thread-do-softirq.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __raise_softirq_irqoff(unsigned int nr);
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3117,7 +3117,7 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3121,7 +3121,7 @@ int netif_rx_ni(struct sk_buff *skb)
preempt_disable();
err = netif_rx(skb);
if (local_softirq_pending())
diff --git a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
index 9eb2d06..a33a1fd 100644
--- a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+++ b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/timer.c
+++ b/kernel/timer.c
-@@ -1387,13 +1387,13 @@ void update_process_times(int user_tick)
+@@ -1389,13 +1389,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
diff --git a/patches/timer-handle-idle-trylock-in-get-next-timer-irq.patch b/patches/timer-handle-idle-trylock-in-get-next-timer-irq.patch
index 729696c..b38ed4e 100644
--- a/patches/timer-handle-idle-trylock-in-get-next-timer-irq.patch
+++ b/patches/timer-handle-idle-trylock-in-get-next-timer-irq.patch
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(rt_spin_trylock);
--- a/kernel/timer.c
+++ b/kernel/timer.c
-@@ -1380,9 +1380,10 @@ unsigned long get_next_timer_interrupt(u
+@@ -1382,9 +1382,10 @@ unsigned long get_next_timer_interrupt(u
/*
* On PREEMPT_RT we cannot sleep here. If the trylock does not
* succeed then we return the worst-case 'expires in 1 tick'
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return now + 1;
#else
spin_lock(&base->lock);
-@@ -1392,7 +1393,11 @@ unsigned long get_next_timer_interrupt(u
+@@ -1394,7 +1395,11 @@ unsigned long get_next_timer_interrupt(u
base->next_timer = __next_timer_interrupt(base);
expires = base->next_timer;
}
diff --git a/patches/timers-avoid-the-base-null-otptimization-on-rt.patch b/patches/timers-avoid-the-base-null-otptimization-on-rt.patch
index aa2ff13..bdf6b32 100644
--- a/patches/timers-avoid-the-base-null-otptimization-on-rt.patch
+++ b/patches/timers-avoid-the-base-null-otptimization-on-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/timer.c
+++ b/kernel/timer.c
-@@ -717,6 +717,36 @@ static struct tvec_base *lock_timer_base
+@@ -719,6 +719,36 @@ static struct tvec_base *lock_timer_base
}
}
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
bool pending_only, int pinned)
-@@ -755,14 +785,8 @@ __mod_timer(struct timer_list *timer, un
+@@ -757,14 +787,8 @@ __mod_timer(struct timer_list *timer, un
* handler yet has not finished. This also guarantees that
* the timer is serialized wrt itself.
*/
diff --git a/patches/timers-mov-printk_tick-to-soft-interrupt.patch b/patches/timers-mov-printk_tick-to-soft-interrupt.patch
index 79c194a..6c4da0e 100644
--- a/patches/timers-mov-printk_tick-to-soft-interrupt.patch
+++ b/patches/timers-mov-printk_tick-to-soft-interrupt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/kernel/timer.c
+++ b/kernel/timer.c
-@@ -1389,7 +1389,6 @@ void update_process_times(int user_tick)
+@@ -1391,7 +1391,6 @@ void update_process_times(int user_tick)
account_process_tick(p, user_tick);
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
@@ -19,7 +19,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
#ifdef CONFIG_IRQ_WORK
if (in_irq())
irq_work_run();
-@@ -1405,6 +1404,7 @@ static void run_timer_softirq(struct sof
+@@ -1407,6 +1406,7 @@ static void run_timer_softirq(struct sof
{
struct tvec_base *base = __this_cpu_read(tvec_bases);
diff --git a/patches/timers-preempt-rt-support.patch b/patches/timers-preempt-rt-support.patch
index 436f8b3..46ad320 100644
--- a/patches/timers-preempt-rt-support.patch
+++ b/patches/timers-preempt-rt-support.patch
@@ -10,7 +10,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/timer.c
+++ b/kernel/timer.c
-@@ -1352,7 +1352,17 @@ unsigned long get_next_timer_interrupt(u
+@@ -1354,7 +1354,17 @@ unsigned long get_next_timer_interrupt(u
if (cpu_is_offline(smp_processor_id()))
return expires;
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (base->active_timers) {
if (time_before_eq(base->next_timer, base->timer_jiffies))
base->next_timer = __next_timer_interrupt(base);
-@@ -1362,7 +1372,6 @@ unsigned long get_next_timer_interrupt(u
+@@ -1364,7 +1374,6 @@ unsigned long get_next_timer_interrupt(u
if (time_before_eq(expires, now))
return now;
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return cmp_next_hrtimer_event(now, expires);
}
#endif
-@@ -1752,7 +1761,7 @@ static void __cpuinit migrate_timers(int
+@@ -1756,7 +1765,7 @@ static void __cpuinit migrate_timers(int
BUG_ON(cpu_online(cpu));
old_base = per_cpu(tvec_bases, cpu);
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
-@@ -1773,7 +1782,7 @@ static void __cpuinit migrate_timers(int
+@@ -1777,7 +1786,7 @@ static void __cpuinit migrate_timers(int
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
diff --git a/patches/timers-prepare-for-full-preemption-improve.patch b/patches/timers-prepare-for-full-preemption-improve.patch
new file mode 100644
index 0000000..54deabf
--- /dev/null
+++ b/patches/timers-prepare-for-full-preemption-improve.patch
@@ -0,0 +1,56 @@
+From a57194f115acfc967aa0907bc130e95b68723121 Mon Sep 17 00:00:00 2001
+From: Zhao Hongjiang <zhaohongjiang@huawei.com>
+Date: Wed, 17 Apr 2013 17:44:16 +0800
+Subject: [PATCH] timers: prepare for full preemption improve
+
+wake_up() should do nothing on a nort kernel, so we should use
+wakeup_timer_waiters(); also fix a spelling mistake.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Zhao Hongjiang <zhaohongjiang@huawei.com>
+[bigeasy: s/CONFIG_PREEMPT_RT_BASE/CONFIG_PREEMPT_RT_FULL/]
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/timer.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/kernel/timer.c
++++ b/kernel/timer.c
+@@ -76,7 +76,9 @@ struct tvec_root {
+ struct tvec_base {
+ spinlock_t lock;
+ struct timer_list *running_timer;
++#ifdef CONFIG_PREEMPT_RT_FULL
+ wait_queue_head_t wait_for_running_timer;
++#endif
+ unsigned long timer_jiffies;
+ unsigned long next_timer;
+ unsigned long active_timers;
+@@ -958,7 +960,7 @@ static void wait_for_running_timer(struc
+ base->running_timer != timer);
+ }
+
+-# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_tunning_timer)
++# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
+ #else
+ static inline void wait_for_running_timer(struct timer_list *timer)
+ {
+@@ -1212,7 +1214,7 @@ static inline void __run_timers(struct t
+ }
+ }
+ }
+- wake_up(&base->wait_for_running_timer);
++ wakeup_timer_waiters(base);
+ spin_unlock_irq(&base->lock);
+ }
+
+@@ -1713,7 +1715,9 @@ static int __cpuinit init_timers_cpu(int
+ }
+
+ spin_lock_init(&base->lock);
++#ifdef CONFIG_PREEMPT_RT_FULL
+ init_waitqueue_head(&base->wait_for_running_timer);
++#endif
+
+ for (j = 0; j < TVN_SIZE; j++) {
+ INIT_LIST_HEAD(base->tv5.vec + j);
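
The wakeup_timer_waiters() trick generalizes: keep the waiter
machinery behind the config option and let the wake macro compile to
nothing otherwise, so the single call site in __run_timers() stays
unconditional. A standalone sketch of that pattern; here
CONFIG_PREEMPT_RT_FULL is just a -D define, not the kernel Kconfig
machinery, and printf stands in for the real waitqueue wakeup.

/* Sketch: config-dependent no-op macro.
 * Build: gcc -DCONFIG_PREEMPT_RT_FULL demo.c   (or without -D)
 */
#include <stdio.h>

struct tvec_base {
        long timer_jiffies;
#ifdef CONFIG_PREEMPT_RT_FULL
        int wait_for_running_timer;   /* stands in for the waitqueue */
#endif
};

#ifdef CONFIG_PREEMPT_RT_FULL
# define wakeup_timer_waiters(b) \
        printf("wake_up(waiters of base %p)\n", (void *)(b))
#else
# define wakeup_timer_waiters(b) do { } while (0)   /* nort: no-op */
#endif

int main(void)
{
        struct tvec_base base = { .timer_jiffies = 0 };

        /* tail of __run_timers(): one call, zero cost on nort */
        wakeup_timer_waiters(&base);
        puts("timers expired");
        return 0;
}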
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 629164d..affb394 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3091,7 +3091,7 @@ int netif_rx(struct sk_buff *skb)
+@@ -3095,7 +3095,7 @@ int netif_rx(struct sk_buff *skb)
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3101,13 +3101,13 @@ int netif_rx(struct sk_buff *skb)
+@@ -3105,13 +3105,13 @@ int netif_rx(struct sk_buff *skb)
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch b/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch
index 691f937..02732e0 100644
--- a/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch
+++ b/patches/x86-mce-Defer-mce-wakeups-to-threads-for-PREEMPT_RT.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
-@@ -1345,6 +1346,63 @@ static void mce_do_trigger(struct work_s
+@@ -1346,6 +1347,63 @@ static void mce_do_trigger(struct work_s
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
-@@ -1352,24 +1410,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1353,24 +1411,8 @@ static DECLARE_WORK(mce_trigger_work, mc
*/
int mce_notify_irq(void)
{
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 1;
}
return 0;
-@@ -2431,6 +2473,8 @@ static __init int mcheck_init_device(voi
+@@ -2432,6 +2474,8 @@ static __init int mcheck_init_device(voi
/* register character device /dev/mcelog */
misc_register(&mce_chrdev_device);
diff --git a/patches/x86-mce-fix-mce-timer-interval.patch b/patches/x86-mce-fix-mce-timer-interval.patch
index 3151884..924b919 100644
--- a/patches/x86-mce-fix-mce-timer-interval.patch
+++ b/patches/x86-mce-fix-mce-timer-interval.patch
@@ -16,11 +16,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
arch/x86/kernel/cpu/mcheck/mce.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
-diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
-index 332e133..f54c5bf 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
-@@ -1295,7 +1295,8 @@ static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
+@@ -1294,7 +1294,8 @@ static enum hrtimer_restart mce_timer_fn
__this_cpu_write(mce_next_interval, iv);
/* Might have become 0 after CMCI storm subsided */
if (iv) {
@@ -30,7 +28,7 @@ index 332e133..f54c5bf 100644
return HRTIMER_RESTART;
}
return HRTIMER_NORESTART;
-@@ -1323,7 +1324,7 @@ void mce_timer_kick(unsigned long interval)
+@@ -1322,7 +1323,7 @@ void mce_timer_kick(unsigned long interv
}
} else {
hrtimer_start_range_ns(t,
@@ -39,7 +37,7 @@ index 332e133..f54c5bf 100644
0, HRTIMER_MODE_REL_PINNED);
}
if (interval < iv)
-@@ -1691,7 +1692,7 @@ static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
+@@ -1649,7 +1650,7 @@ static void mce_start_timer(unsigned int
if (mca_cfg.ignore_ce || !iv)
return;
@@ -48,6 +46,3 @@ index 332e133..f54c5bf 100644
0, HRTIMER_MODE_REL_PINNED);
}
---
-1.7.10.4
-