author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2017-09-05 16:16:34 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2017-09-05 16:16:34 +0200
commit    6820d5710a5845455e65c035d99b43bd223c3f9e (patch)
tree      e35ed381c50036e814fa599e085cba2831749a80
parent    d35d53ca33548fbb70fc5626e821454bd17a19db (diff)
download  4.12-rt-patches-6820d5710a5845455e65c035d99b43bd223c3f9e.tar.gz
[ANNOUNCE] v4.11.12-rt13
Dear RT folks!

I'm pleased to announce the v4.11.12-rt13 patch set.

Changes since v4.11.12-rt12:

  - Merging Anna-Maria's "hrtimer: Provide softirq context hrtimers"
    series. Merging this series enables the removal of almost all
    hrtimer related patches in the queue. What is left is the "force
    switch" of all timers to the softirq context (except a few special
    ones) and a wait-queue to wait until a timer has completed. The
    overall diffstat compared to -rt12 is 21 files changed, 573
    insertions(+), 651 deletions(-), which is amazing: RT-wise we have
    the same functionality with 78 fewer lines of code.

Known issues
  - There was a report regarding a deadlock within the rtmutex code.

The delta patch against v4.11.12-rt12 is huge and can be found here:

     https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/incr/patch-4.11.12-rt12-rt13.patch.xz
     https://git.kernel.org/rt/linux-rt-devel/d/v4.11.12-rt13/v4.11.12-rt12

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.11.12-rt13

The RT patch against v4.11.12 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patch-4.11.12-rt13.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.12-rt13.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch | 25
-rw-r--r--  patches/0002-hrtimer-Correct-blantanly-wrong-comment.patch | 36
-rw-r--r--  patches/0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch | 42
-rw-r--r--  patches/0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch | 61
-rw-r--r--  patches/0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch | 61
-rw-r--r--  patches/0006-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch | 189
-rw-r--r--  patches/0007-hrtimer-Reduce-conditional-code-hres_active.patch | 133
-rw-r--r--  patches/0008-hrtimer-Reduce-conditional-code-expires_next-next_ti.patch | 128
-rw-r--r--  patches/0009-hrtimer-Reduce-conditional-code-hrtimer_reprogram.patch | 201
-rw-r--r--  patches/0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch | 96
-rw-r--r--  patches/0011-hrtimer-Allow-remote-hrtimer-enqueue-with-expires_ne.patch | 31
-rw-r--r--  patches/0012-hrtimer-Simplify-hrtimer_reprogram-call.patch | 38
-rw-r--r--  patches/0013-hrtimer-Split-out-code-from-hrtimer_start_range_ns-f.patch | 76
-rw-r--r--  patches/0014-hrtimer-Split-out-code-from-__hrtimer_get_next_event.patch | 51
-rw-r--r--  patches/0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch | 119
-rw-r--r--  patches/0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch | 77
-rw-r--r--  patches/0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch | 303
-rw-r--r--  patches/0018-hrtimer-Enable-soft-and-hard-hrtimer.patch | 82
-rw-r--r--  patches/0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch | 295
-rw-r--r--  patches/0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch | 135
-rw-r--r--  patches/0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch | 131
-rw-r--r--  patches/0022-softirq-Remove-tasklet_hrtimer.patch | 109
-rw-r--r--  patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch | 25
-rw-r--r--  patches/completion-use-simple-wait-queues.patch | 4
-rw-r--r--  patches/cond-resched-softirq-rt.patch | 4
-rw-r--r--  patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch | 28
-rw-r--r--  patches/hotplug-light-get-online-cpus.patch | 4
-rw-r--r--  patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch | 4
-rw-r--r--  patches/hrtimer-Remove-hrtimer_peek_ahead_timers-leftovers.patch | 52
-rw-r--r--  patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch | 206
-rw-r--r--  patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch | 243
-rw-r--r--  patches/hrtimer-enfore-64byte-alignment.patch | 27
-rw-r--r--  patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch | 337
-rw-r--r--  patches/hrtimers-prepare-full-preemption.patch | 44
-rw-r--r--  patches/irqwork-push_most_work_into_softirq_context.patch | 2
-rw-r--r--  patches/kernel-hrtimer-don-t-wakeup-a-process-while-holding-.patch | 86
-rw-r--r--  patches/kernel-hrtimer-hotplug-don-t-wake-ktimersoftd-while-.patch | 69
-rw-r--r--  patches/kernel-hrtimer-migrate-deferred-timer-on-CPU-down.patch | 32
-rw-r--r--  patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch | 4
-rw-r--r--  patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch | 24
-rw-r--r--  patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch | 2
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/perf-make-swevent-hrtimer-irqsafe.patch | 68
-rw-r--r--  patches/preempt-lazy-support.patch | 20
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 6
-rw-r--r--  patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch | 6
-rw-r--r--  patches/rt-add-rt-locks.patch | 8
-rw-r--r--  patches/rt-introduce-cpu-chill.patch | 4
-rw-r--r--  patches/rtmutex-futex-prepare-rt.patch | 10
-rw-r--r--  patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch | 2
-rw-r--r--  patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch | 22
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch | 2
-rw-r--r--  patches/sched-mmdrop-delayed.patch | 8
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 4
-rw-r--r--  patches/sched-ttwu-ensure-success-return-is-correct.patch | 2
-rw-r--r--  patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch | 2
-rw-r--r--  patches/series | 44
-rw-r--r--  patches/softirq-split-locks.patch | 10
-rw-r--r--  patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch | 2
-rw-r--r--  patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch | 2
-rw-r--r--  patches/tick-broadcast--Make-hrtimer-irqsafe.patch | 57
-rw-r--r--  patches/timer-hrtimer-check-properly-for-a-running-timer.patch | 33
-rw-r--r--  patches/workqueue-distangle-from-rq-lock.patch | 12
-rw-r--r--  patches/workqueue-prevent-deadlock-stall.patch | 4
64 files changed, 3050 insertions, 896 deletions
diff --git a/patches/0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch b/patches/0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch
new file mode 100644
index 00000000000000..d47749b61d9833
--- /dev/null
+++ b/patches/0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch
@@ -0,0 +1,25 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:02 +0000
+Subject: [PATCH 01/25] hrtimer: Use predefined function for updating
+ next_timer
+
+There already exists a function for updating next_timer:
+hrtimer_update_next_timer(). Use it instead of open coding the assignment.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -629,7 +629,7 @@ static void hrtimer_reprogram(struct hrt
+ return;
+
+ /* Update the pointer to the next expiring timer */
+- cpu_base->next_timer = timer;
++ hrtimer_update_next_timer(cpu_base, timer);
+
+ /*
+ * If a hang was detected in the last timer interrupt then we
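For reference, the helper the hunk switches to is, at this point in the series, a thin wrapper around exactly this assignment; patch 0008 below shows it verbatim while removing its #ifdef. A sketch of its shape:

static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
					     struct hrtimer *timer)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Cache the first expiring timer to optimize __remove_hrtimer() */
	cpu_base->next_timer = timer;
#endif
}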
diff --git a/patches/0002-hrtimer-Correct-blantanly-wrong-comment.patch b/patches/0002-hrtimer-Correct-blantanly-wrong-comment.patch
new file mode 100644
index 00000000000000..6f81c31bc121fb
--- /dev/null
+++ b/patches/0002-hrtimer-Correct-blantanly-wrong-comment.patch
@@ -0,0 +1,36 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:03 +0000
+Subject: [PATCH 02/25] hrtimer: Correct blantanly wrong comment
+
+The protection of a hrtimer which runs its callback against migration to a
+different CPU has nothing to do with hard interrupt context.
+
+The protection against migration of a hrtimer running the expiry callback
+is the pointer in the cpu_base which holds a pointer to the currently
+running timer. This pointer is evaluated in the code which potentially
+switches the timer base and makes sure it's kept on the CPU on which the
+callback is running.
+
+Reported-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1203,9 +1203,9 @@ static void __run_hrtimer(struct hrtimer
+ timer->is_rel = false;
+
+ /*
+- * Because we run timers from hardirq context, there is no chance
+- * they get migrated to another cpu, therefore its safe to unlock
+- * the timer base.
++ * The timer is marked as running in the cpu base, so it is
++ * protected against migration to a different CPU even if the lock
++ * is dropped.
+ */
+ raw_spin_unlock(&cpu_base->lock);
+ trace_hrtimer_expire_entry(timer, now);
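The mechanism the corrected comment refers to can be condensed into the following sketch (hypothetical helper name; the real check sits in the timer-base switching code and uses hrtimer_callback_running(), shown in patch 0006 below):

static struct hrtimer_clock_base *
pick_target_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		 struct hrtimer_clock_base *new_base)
{
	/*
	 * The running pointer in the cpu base marks this timer as
	 * executing its callback, so it has to stay on the current
	 * base even though cpu_base->lock is dropped around the
	 * callback invocation.
	 */
	if (hrtimer_callback_running(timer))
		return base;

	/* Not running: migration to another CPU's base is allowed. */
	return new_base;
}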
diff --git a/patches/0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch b/patches/0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch
new file mode 100644
index 00000000000000..56c68b74fb4044
--- /dev/null
+++ b/patches/0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch
@@ -0,0 +1,42 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:03 +0000
+Subject: [PATCH 03/25] hrtimer: Fix kerneldoc for struct hrtimer_cpu_base
+
+The sequence '/**' marks the start of a struct description. Add the
+missing second asterisk. While at it, adapt the ordering of the struct
+members to match the struct definition and document the purpose of
+expires_next more precisely.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -144,7 +144,7 @@ enum hrtimer_base_type {
+ HRTIMER_MAX_CLOCK_BASES,
+ };
+
+-/*
++/**
+ * struct hrtimer_cpu_base - the per cpu clock bases
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+@@ -155,12 +155,12 @@ enum hrtimer_base_type {
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @migration_enabled: The migration of hrtimers to other cpus is enabled
+ * @nohz_active: The nohz functionality is enabled
+- * @expires_next: absolute time of the next event which was scheduled
+- * via clock_set_next_event()
+- * @next_timer: Pointer to the first expiring timer
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hres_active: State of high resolution mode
+ * @hang_detected: The last hrtimer interrupt detected a hang
++ * @expires_next: absolute time of the next event, is required for remote
++ * hrtimer enqueue
++ * @next_timer: Pointer to the first expiring timer
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
diff --git a/patches/0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch b/patches/0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch
new file mode 100644
index 00000000000000..4fa116ab52f447
--- /dev/null
+++ b/patches/0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch
@@ -0,0 +1,61 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:03 +0000
+Subject: [PATCH 04/25] hrtimer: Cleanup clock argument in
+ schedule_hrtimeout_range_clock()
+
+schedule_hrtimeout_range_clock() uses an integer for the clock id
+instead of the predefined type "clockid_t". The clock id is referred
+to throughout the hrtimer code as clock_id. Therefore rename the
+variable as well to make it consistent.
+
+While at it, clean up the description for the function parameters clock_id
+and mode. The clock modes and the clock ids are not restricted as the
+comment suggests.
+
+No functional change.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 2 +-
+ kernel/time/hrtimer.c | 8 ++++----
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -470,7 +470,7 @@ extern int schedule_hrtimeout_range(ktim
+ extern int schedule_hrtimeout_range_clock(ktime_t *expires,
+ u64 delta,
+ const enum hrtimer_mode mode,
+- int clock);
++ clockid_t clock_id);
+ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
+
+ /* Soft interrupt function to run the hrtimer queues: */
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1654,12 +1654,12 @@ void __init hrtimers_init(void)
+ * schedule_hrtimeout_range_clock - sleep until timeout
+ * @expires: timeout value (ktime_t)
+ * @delta: slack in expires timeout (ktime_t)
+- * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+- * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
++ * @mode: timer mode
++ * @clock_id: timer clock to be used
+ */
+ int __sched
+ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
+- const enum hrtimer_mode mode, int clock)
++ const enum hrtimer_mode mode, clockid_t clock_id)
+ {
+ struct hrtimer_sleeper t;
+
+@@ -1680,7 +1680,7 @@ schedule_hrtimeout_range_clock(ktime_t *
+ return -EINTR;
+ }
+
+- hrtimer_init_on_stack(&t.timer, clock, mode);
++ hrtimer_init_on_stack(&t.timer, clock_id, mode);
+ hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
+
+ hrtimer_init_sleeper(&t, current);
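A hedged usage sketch of the cleaned-up signature (caller name, timeout and slack values are made up; setting the task state before the call is the usual pattern for this API):

/* Sleep roughly 2ms with 100us slack on CLOCK_MONOTONIC. */
static int example_sleep(void)
{
	ktime_t to = ktime_set(0, 2 * NSEC_PER_MSEC);

	set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_hrtimeout_range_clock(&to, 100 * NSEC_PER_USEC,
					      HRTIMER_MODE_REL,
					      CLOCK_MONOTONIC);
}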
diff --git a/patches/0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch b/patches/0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch
new file mode 100644
index 00000000000000..ebabb389b32441
--- /dev/null
+++ b/patches/0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch
@@ -0,0 +1,61 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:04 +0000
+Subject: [PATCH 05/25] hrtimer: Switch for loop to _ffs() evaluation
+
+Looping over all clock bases to find active bits is suboptimal if not all
+bases are active.
+
+Avoid this by converting it to a __ffs() evaluation.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -464,17 +464,18 @@ static inline void hrtimer_update_next_t
+
+ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+ {
+- struct hrtimer_clock_base *base = cpu_base->clock_base;
+ unsigned int active = cpu_base->active_bases;
+ ktime_t expires, expires_next = KTIME_MAX;
+
+ hrtimer_update_next_timer(cpu_base, NULL);
+- for (; active; base++, active >>= 1) {
++ while (active) {
++ unsigned int id = __ffs(active);
++ struct hrtimer_clock_base *base;
+ struct timerqueue_node *next;
+ struct hrtimer *timer;
+
+- if (!(active & 0x01))
+- continue;
++ active &= ~(1U << id);
++ base = cpu_base->clock_base + id;
+
+ next = timerqueue_getnext(&base->active);
+ timer = container_of(next, struct hrtimer, node);
+@@ -1241,15 +1242,16 @@ static void __run_hrtimer(struct hrtimer
+
+ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+ {
+- struct hrtimer_clock_base *base = cpu_base->clock_base;
+ unsigned int active = cpu_base->active_bases;
+
+- for (; active; base++, active >>= 1) {
++ while (active) {
++ unsigned int id = __ffs(active);
++ struct hrtimer_clock_base *base;
+ struct timerqueue_node *node;
+ ktime_t basenow;
+
+- if (!(active & 0x01))
+- continue;
++ active &= ~(1U << id);
++ base = cpu_base->clock_base + id;
+
+ basenow = ktime_add(now, base->offset);
+
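A worked example of the new iteration pattern (mask value hypothetical): with active == 0x0a, the loop visits only base indices 1 and 3 instead of shifting through all four bases.

	unsigned int active = 0x0a;	/* bases 1 and 3 have armed timers */

	while (active) {
		unsigned int id = __ffs(active);	/* 1 on the first pass, then 3 */

		active &= ~(1U << id);
		/* ... process cpu_base->clock_base + id ... */
	}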
diff --git a/patches/0006-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch b/patches/0006-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch
new file mode 100644
index 00000000000000..dc484681ed6c80
--- /dev/null
+++ b/patches/0006-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch
@@ -0,0 +1,189 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:04 +0000
+Subject: [PATCH 06/25] hrtimer: Store running timer in hrtimer_clock_base
+
+The pointer to the currently running timer is stored in hrtimer_cpu_base
+before the base lock is dropped and the callback is invoked.
+
+This results in two levels of indirection, and the upcoming support for
+softirq based hrtimers requires splitting the "running" storage into soft
+and hard irq context expiry.
+
+Storing both in the cpu base would require conditionals in all code paths
+accessing that information.
+
+It's possible to have a per clock base sequence count and running pointer
+without changing the semantics of the related mechanisms because the timer
+base pointer cannot be changed while a timer is running the callback.
+
+Unfortunately this makes struct hrtimer_clock_base larger than 32 bytes
+on 32bit kernels. Instead of having huge gaps due to alignment, remove
+the alignment and let the compiler pack the clock bases for 32bit. The
+resulting cache access patterns are fortunately not really different
+from the current behaviour. On 64bit kernels the 64byte alignment stays
+and the behaviour is unchanged.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 20 +++++++++-----------
+ kernel/time/hrtimer.c | 28 +++++++++++++---------------
+ 2 files changed, 22 insertions(+), 26 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -112,9 +112,9 @@ struct hrtimer_sleeper {
+ };
+
+ #ifdef CONFIG_64BIT
+-# define HRTIMER_CLOCK_BASE_ALIGN 64
++# define __hrtimer_clock_base_align ____cacheline_aligned
+ #else
+-# define HRTIMER_CLOCK_BASE_ALIGN 32
++# define __hrtimer_clock_base_align
+ #endif
+
+ /**
+@@ -123,18 +123,22 @@ struct hrtimer_sleeper {
+ * @index: clock type index for per_cpu support when moving a
+ * timer to a base on another cpu.
+ * @clockid: clock id for per_cpu support
++ * @seq: seqcount around __run_hrtimer
++ * @running: pointer to the currently running hrtimer
+ * @active: red black tree root node for the active timers
+ * @get_time: function to retrieve the current time of the clock
+ * @offset: offset of this clock to the monotonic base
+ */
+ struct hrtimer_clock_base {
+ struct hrtimer_cpu_base *cpu_base;
+- int index;
++ unsigned int index;
+ clockid_t clockid;
++ seqcount_t seq;
++ struct hrtimer *running;
+ struct timerqueue_head active;
+ ktime_t (*get_time)(void);
+ ktime_t offset;
+-} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
++} __hrtimer_clock_base_align;
+
+ enum hrtimer_base_type {
+ HRTIMER_BASE_MONOTONIC,
+@@ -148,8 +152,6 @@ enum hrtimer_base_type {
+ * struct hrtimer_cpu_base - the per cpu clock bases
+ * @lock: lock protecting the base and associated clock bases
+ * and timers
+- * @seq: seqcount around __run_hrtimer
+- * @running: pointer to the currently running hrtimer
+ * @cpu: cpu number
+ * @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set_seq: Sequence counter of clock was set events
+@@ -173,8 +175,6 @@ enum hrtimer_base_type {
+ */
+ struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+- seqcount_t seq;
+- struct hrtimer *running;
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set_seq;
+@@ -196,8 +196,6 @@ struct hrtimer_cpu_base {
+
+ static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
+ {
+- BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
+-
+ timer->node.expires = time;
+ timer->_softexpires = time;
+ }
+@@ -426,7 +424,7 @@ static inline int hrtimer_is_queued(stru
+ */
+ static inline int hrtimer_callback_running(struct hrtimer *timer)
+ {
+- return timer->base->cpu_base->running == timer;
++ return timer->base->running == timer;
+ }
+
+ /* Forward a hrtimer so it expires after now: */
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -69,7 +69,6 @@
+ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
+ {
+ .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
+- .seq = SEQCNT_ZERO(hrtimer_bases.seq),
+ .clock_base =
+ {
+ {
+@@ -117,7 +116,6 @@ static const int hrtimer_clock_to_base_t
+ * timer->base->cpu_base
+ */
+ static struct hrtimer_cpu_base migration_cpu_base = {
+- .seq = SEQCNT_ZERO(migration_cpu_base),
+ .clock_base = { { .cpu_base = &migration_cpu_base, }, },
+ };
+
+@@ -1135,19 +1133,19 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
+ */
+ bool hrtimer_active(const struct hrtimer *timer)
+ {
+- struct hrtimer_cpu_base *cpu_base;
++ struct hrtimer_clock_base *base;
+ unsigned int seq;
+
+ do {
+- cpu_base = READ_ONCE(timer->base->cpu_base);
+- seq = raw_read_seqcount_begin(&cpu_base->seq);
++ base = READ_ONCE(timer->base);
++ seq = raw_read_seqcount_begin(&base->seq);
+
+ if (timer->state != HRTIMER_STATE_INACTIVE ||
+- cpu_base->running == timer)
++ base->running == timer)
+ return true;
+
+- } while (read_seqcount_retry(&cpu_base->seq, seq) ||
+- cpu_base != READ_ONCE(timer->base->cpu_base));
++ } while (read_seqcount_retry(&base->seq, seq) ||
++ base != READ_ONCE(timer->base));
+
+ return false;
+ }
+@@ -1181,16 +1179,16 @@ static void __run_hrtimer(struct hrtimer
+ lockdep_assert_held(&cpu_base->lock);
+
+ debug_deactivate(timer);
+- cpu_base->running = timer;
++ base->running = timer;
+
+ /*
+ * Separate the ->running assignment from the ->state assignment.
+ *
+ * As with a regular write barrier, this ensures the read side in
+- * hrtimer_active() cannot observe cpu_base->running == NULL &&
++ * hrtimer_active() cannot observe base->running == NULL &&
+ * timer->state == INACTIVE.
+ */
+- raw_write_seqcount_barrier(&cpu_base->seq);
++ raw_write_seqcount_barrier(&base->seq);
+
+ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
+ fn = timer->function;
+@@ -1231,13 +1229,13 @@ static void __run_hrtimer(struct hrtimer
+ * Separate the ->running assignment from the ->state assignment.
+ *
+ * As with a regular write barrier, this ensures the read side in
+- * hrtimer_active() cannot observe cpu_base->running == NULL &&
++ * hrtimer_active() cannot observe base->running == NULL &&
+ * timer->state == INACTIVE.
+ */
+- raw_write_seqcount_barrier(&cpu_base->seq);
++ raw_write_seqcount_barrier(&base->seq);
+
+- WARN_ON_ONCE(cpu_base->running != timer);
+- cpu_base->running = NULL;
++ WARN_ON_ONCE(base->running != timer);
++ base->running = NULL;
+ }
+
+ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
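Pulled together from the hunks above (assembled from the diff, not new code), the lockless read side now takes one READ_ONCE() snapshot of timer->base, which yields both the seqcount and the running pointer, and retries on a concurrent __run_hrtimer() or base switch:

bool hrtimer_active(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned int seq;

	do {
		base = READ_ONCE(timer->base);
		seq = raw_read_seqcount_begin(&base->seq);

		if (timer->state != HRTIMER_STATE_INACTIVE ||
		    base->running == timer)
			return true;

	} while (read_seqcount_retry(&base->seq, seq) ||
		 base != READ_ONCE(timer->base));

	return false;
}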
diff --git a/patches/0007-hrtimer-Reduce-conditional-code-hres_active.patch b/patches/0007-hrtimer-Reduce-conditional-code-hres_active.patch
new file mode 100644
index 00000000000000..ca9ba2ce52c94e
--- /dev/null
+++ b/patches/0007-hrtimer-Reduce-conditional-code-hres_active.patch
@@ -0,0 +1,133 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:05 +0000
+Subject: [PATCH 07/25] hrtimer: Reduce conditional code (hres_active)
+
+The hrtimer_cpu_base struct has the CONFIG_HIGH_RES_TIMERS conditional
+struct member hres_active. All functions related to this member are
+conditional as well.
+
+There is no functional change when the hres_active member and all
+related functions are made unconditional and hres_active is set to zero
+during initialization. This makes the code easier to read.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 17 ++++++-----------
+ kernel/time/hrtimer.c | 30 ++++++++++++++----------------
+ 2 files changed, 20 insertions(+), 27 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -180,9 +180,9 @@ struct hrtimer_cpu_base {
+ unsigned int clock_was_set_seq;
+ bool migration_enabled;
+ bool nohz_active;
++ unsigned int hres_active : 1;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int in_hrtirq : 1,
+- hres_active : 1,
+ hang_detected : 1;
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+@@ -264,16 +264,16 @@ static inline ktime_t hrtimer_cb_get_tim
+ return timer->base->get_time();
+ }
+
+-#ifdef CONFIG_HIGH_RES_TIMERS
+-struct clock_event_device;
+-
+-extern void hrtimer_interrupt(struct clock_event_device *dev);
+-
+ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+ {
+ return timer->base->cpu_base->hres_active;
+ }
+
++#ifdef CONFIG_HIGH_RES_TIMERS
++struct clock_event_device;
++
++extern void hrtimer_interrupt(struct clock_event_device *dev);
++
+ /*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+@@ -296,11 +296,6 @@ extern unsigned int hrtimer_resolution;
+
+ #define hrtimer_resolution (unsigned int)LOW_RES_NSEC
+
+-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+-{
+- return 0;
+-}
+-
+ static inline void clock_was_set_delayed(void) { }
+
+ #endif
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -504,6 +504,19 @@ static inline ktime_t hrtimer_update_bas
+ offs_real, offs_boot, offs_tai);
+ }
+
++/*
++ * Is the high resolution mode active ?
++ */
++static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
++{
++ return cpu_base->hres_active;
++}
++
++static inline int hrtimer_hres_active(void)
++{
++ return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
++}
++
+ /* High resolution timer related functions */
+ #ifdef CONFIG_HIGH_RES_TIMERS
+
+@@ -533,19 +546,6 @@ static inline int hrtimer_is_hres_enable
+ }
+
+ /*
+- * Is the high resolution mode active ?
+- */
+-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+-{
+- return cpu_base->hres_active;
+-}
+-
+-static inline int hrtimer_hres_active(void)
+-{
+- return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
+-}
+-
+-/*
+ * Reprogram the event source with checking both queues for the
+ * next event
+ * Called with interrupts disabled and base->lock held
+@@ -653,7 +653,6 @@ static void hrtimer_reprogram(struct hrt
+ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+ {
+ base->expires_next = KTIME_MAX;
+- base->hres_active = 0;
+ }
+
+ /*
+@@ -712,8 +711,6 @@ void clock_was_set_delayed(void)
+
+ #else
+
+-static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
+-static inline int hrtimer_hres_active(void) { return 0; }
+ static inline int hrtimer_is_hres_enabled(void) { return 0; }
+ static inline void hrtimer_switch_to_hres(void) { }
+ static inline void
+@@ -1572,6 +1569,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+ }
+
+ cpu_base->cpu = cpu;
++ cpu_base->hres_active = 0;
+ hrtimer_init_hres(cpu_base);
+ return 0;
+ }
diff --git a/patches/0008-hrtimer-Reduce-conditional-code-expires_next-next_ti.patch b/patches/0008-hrtimer-Reduce-conditional-code-expires_next-next_ti.patch
new file mode 100644
index 00000000000000..ed2a839925cb56
--- /dev/null
+++ b/patches/0008-hrtimer-Reduce-conditional-code-expires_next-next_ti.patch
@@ -0,0 +1,128 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:05 +0000
+Subject: [PATCH 08/25] hrtimer: Reduce conditional code (expires_next,
+ next_timer)
+
+The hrtimer_cpu_base struct members expires_next and next_timer are
+conditional (CONFIG_HIGH_RES_TIMERS). This makes the hrtimer code more
+complex and harder to understand than it needs to be.
+
+Reduce the conditionals related to those two struct members.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 10 +++++-----
+ kernel/time/hrtimer.c | 24 +++++-------------------
+ 2 files changed, 10 insertions(+), 24 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -160,13 +160,13 @@ enum hrtimer_base_type {
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hres_active: State of high resolution mode
+ * @hang_detected: The last hrtimer interrupt detected a hang
+- * @expires_next: absolute time of the next event, is required for remote
+- * hrtimer enqueue
+- * @next_timer: Pointer to the first expiring timer
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
++ * @expires_next: absolute time of the next event, is required for remote
++ * hrtimer enqueue
++ * @next_timer: Pointer to the first expiring timer
+ * @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+@@ -184,13 +184,13 @@ struct hrtimer_cpu_base {
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int in_hrtirq : 1,
+ hang_detected : 1;
+- ktime_t expires_next;
+- struct hrtimer *next_timer;
+ unsigned int nr_events;
+ unsigned int nr_retries;
+ unsigned int nr_hangs;
+ unsigned int max_hang_time;
+ #endif
++ ktime_t expires_next;
++ struct hrtimer *next_timer;
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ } ____cacheline_aligned;
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -153,16 +153,16 @@ struct hrtimer_clock_base *lock_hrtimer_
+ }
+
+ /*
+- * With HIGHRES=y we do not migrate the timer when it is expiring
+- * before the next event on the target cpu because we cannot reprogram
+- * the target cpu hardware and we would cause it to fire late.
++ * With high resolution timers enabled we do not migrate the timer
++ * when it is expiring before the next event on the target cpu because
++ * we cannot reprogram the target cpu hardware and we would cause it
++ * to fire late.
+ *
+ * Called with cpu_base->lock of target cpu held.
+ */
+ static int
+ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
+ {
+-#ifdef CONFIG_HIGH_RES_TIMERS
+ ktime_t expires;
+
+ if (!new_base->cpu_base->hres_active)
+@@ -170,9 +170,6 @@ hrtimer_check_target(struct hrtimer *tim
+
+ expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
+ return expires <= new_base->cpu_base->expires_next;
+-#else
+- return 0;
+-#endif
+ }
+
+ #ifdef CONFIG_NO_HZ_COMMON
+@@ -455,9 +452,7 @@ static inline void debug_deactivate(stru
+ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
+ struct hrtimer *timer)
+ {
+-#ifdef CONFIG_HIGH_RES_TIMERS
+ cpu_base->next_timer = timer;
+-#endif
+ }
+
+ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+@@ -648,14 +643,6 @@ static void hrtimer_reprogram(struct hrt
+ }
+
+ /*
+- * Initialize the high resolution related parts of cpu_base
+- */
+-static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
+-{
+- base->expires_next = KTIME_MAX;
+-}
+-
+-/*
+ * Retrigger next event is called after clock was set
+ *
+ * Called with interrupts disabled via on_each_cpu()
+@@ -720,7 +707,6 @@ static inline int hrtimer_reprogram(stru
+ {
+ return 0;
+ }
+-static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+ static inline void retrigger_next_event(void *arg) { }
+
+ #endif /* CONFIG_HIGH_RES_TIMERS */
+@@ -1570,7 +1556,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+
+ cpu_base->cpu = cpu;
+ cpu_base->hres_active = 0;
+- hrtimer_init_hres(cpu_base);
++ cpu_base->expires_next = KTIME_MAX;
+ return 0;
+ }
+
diff --git a/patches/0009-hrtimer-Reduce-conditional-code-hrtimer_reprogram.patch b/patches/0009-hrtimer-Reduce-conditional-code-hrtimer_reprogram.patch
new file mode 100644
index 00000000000000..384ac35b4c2c5f
--- /dev/null
+++ b/patches/0009-hrtimer-Reduce-conditional-code-hrtimer_reprogram.patch
@@ -0,0 +1,201 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:05 +0000
+Subject: [PATCH 09/25] hrtimer: Reduce conditional code (hrtimer_reprogram())
+
+hrtimer_reprogram() is currently required only when
+CONFIG_HIGH_RES_TIMERS is set. Additional bitfields of the
+hrtimer_cpu_base struct are high resolution timer specific as well.
+
+To simplify the hrtimer code, the behaviour of CONFIG_HIGH_RES_TIMERS and
+!CONFIG_HIGH_RES_TIMERS should be similar. As preparation for this, the
+function hrtimer_reprogram() and required hrtimer_cpu_base struct members
+are moved outside the conditional area.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 6 +-
+ kernel/time/hrtimer.c | 131 +++++++++++++++++++++++-------------------------
+ 2 files changed, 66 insertions(+), 71 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -180,10 +180,10 @@ struct hrtimer_cpu_base {
+ unsigned int clock_was_set_seq;
+ bool migration_enabled;
+ bool nohz_active;
+- unsigned int hres_active : 1;
+-#ifdef CONFIG_HIGH_RES_TIMERS
+- unsigned int in_hrtirq : 1,
++ unsigned int hres_active : 1,
++ in_hrtirq : 1,
+ hang_detected : 1;
++#ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int nr_events;
+ unsigned int nr_retries;
+ unsigned int nr_hangs;
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -448,13 +448,13 @@ static inline void debug_deactivate(stru
+ trace_hrtimer_cancel(timer);
+ }
+
+-#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
+ struct hrtimer *timer)
+ {
+ cpu_base->next_timer = timer;
+ }
+
++#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+ {
+ unsigned int active = cpu_base->active_bases;
+@@ -581,68 +581,6 @@ hrtimer_force_reprogram(struct hrtimer_c
+ }
+
+ /*
+- * When a timer is enqueued and expires earlier than the already enqueued
+- * timers, we have to check, whether it expires earlier than the timer for
+- * which the clock event device was armed.
+- *
+- * Called with interrupts disabled and base->cpu_base.lock held
+- */
+-static void hrtimer_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base)
+-{
+- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+- ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+-
+- WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
+-
+- /*
+- * If the timer is not on the current cpu, we cannot reprogram
+- * the other cpus clock event device.
+- */
+- if (base->cpu_base != cpu_base)
+- return;
+-
+- /*
+- * If the hrtimer interrupt is running, then it will
+- * reevaluate the clock bases and reprogram the clock event
+- * device. The callbacks are always executed in hard interrupt
+- * context so we don't need an extra check for a running
+- * callback.
+- */
+- if (cpu_base->in_hrtirq)
+- return;
+-
+- /*
+- * CLOCK_REALTIME timer might be requested with an absolute
+- * expiry time which is less than base->offset. Set it to 0.
+- */
+- if (expires < 0)
+- expires = 0;
+-
+- if (expires >= cpu_base->expires_next)
+- return;
+-
+- /* Update the pointer to the next expiring timer */
+- hrtimer_update_next_timer(cpu_base, timer);
+-
+- /*
+- * If a hang was detected in the last timer interrupt then we
+- * do not schedule a timer which is earlier than the expiry
+- * which we enforced in the hang detection. We want the system
+- * to make progress.
+- */
+- if (cpu_base->hang_detected)
+- return;
+-
+- /*
+- * Program the timer hardware. We enforce the expiry for
+- * events which are already in the past.
+- */
+- cpu_base->expires_next = expires;
+- tick_program_event(expires, 1);
+-}
+-
+-/*
+ * Retrigger next event is called after clock was set
+ *
+ * Called with interrupts disabled via on_each_cpu()
+@@ -702,16 +640,73 @@ static inline int hrtimer_is_hres_enable
+ static inline void hrtimer_switch_to_hres(void) { }
+ static inline void
+ hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+-static inline int hrtimer_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base)
+-{
+- return 0;
+-}
+ static inline void retrigger_next_event(void *arg) { }
+
+ #endif /* CONFIG_HIGH_RES_TIMERS */
+
+ /*
++ * When a timer is enqueued and expires earlier than the already enqueued
++ * timers, we have to check, whether it expires earlier than the timer for
++ * which the clock event device was armed.
++ *
++ * Called with interrupts disabled and base->cpu_base.lock held
++ */
++static void hrtimer_reprogram(struct hrtimer *timer,
++ struct hrtimer_clock_base *base)
++{
++ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
++ ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
++
++ WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
++
++ /*
++ * If the timer is not on the current cpu, we cannot reprogram
++ * the other cpus clock event device.
++ */
++ if (base->cpu_base != cpu_base)
++ return;
++
++ /*
++ * If the hrtimer interrupt is running, then it will
++ * reevaluate the clock bases and reprogram the clock event
++ * device. The callbacks are always executed in hard interrupt
++ * context so we don't need an extra check for a running
++ * callback.
++ */
++ if (cpu_base->in_hrtirq)
++ return;
++
++ /*
++ * CLOCK_REALTIME timer might be requested with an absolute
++ * expiry time which is less than base->offset. Set it to 0.
++ */
++ if (expires < 0)
++ expires = 0;
++
++ if (expires >= cpu_base->expires_next)
++ return;
++
++ /* Update the pointer to the next expiring timer */
++ hrtimer_update_next_timer(cpu_base, timer);
++
++ /*
++ * If a hang was detected in the last timer interrupt then we
++ * do not schedule a timer which is earlier than the expiry
++ * which we enforced in the hang detection. We want the system
++ * to make progress.
++ */
++ if (cpu_base->hang_detected)
++ return;
++
++ /*
++ * Program the timer hardware. We enforce the expiry for
++ * events which are already in the past.
++ */
++ cpu_base->expires_next = expires;
++ tick_program_event(expires, 1);
++}
++
++/*
+ * Clock realtime was set
+ *
+ * Change the offset of the realtime clock vs. the monotonic
diff --git a/patches/0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch b/patches/0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch
new file mode 100644
index 00000000000000..aafd35824187cc
--- /dev/null
+++ b/patches/0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch
@@ -0,0 +1,96 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:06 +0000
+Subject: [PATCH 10/25] hrtimer: Make handling of hrtimer reprogramming and
+ enqueuing not conditional
+
+Hrtimer reprogramming, remote timer enqueuing and the handling of the
+hrtimer_cpu_base struct member expires_next depend on whether high
+resolution timers are active. This makes the code harder to understand.
+
+To simplify the code, the hrtimer reprogramming path is now always
+executed, except for the actual hardware reprogramming part. expires_next
+now stores the expiry time of the first enqueued timer. Due to the
+adaptation of hrtimer_check_target(), remote enqueuing is now only
+possible when the expiry time is after the currently first expiry time,
+independent of whether high resolution timers are active.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 31 ++++++++++++-------------------
+ 1 file changed, 12 insertions(+), 19 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -153,10 +153,11 @@ struct hrtimer_clock_base *lock_hrtimer_
+ }
+
+ /*
+- * With high resolution timers enabled we do not migrate the timer
+- * when it is expiring before the next event on the target cpu because
+- * we cannot reprogram the target cpu hardware and we would cause it
+- * to fire late.
++ * We do not migrate the timer when it is expiring before the next
++ * event on the target cpu. When high resolution is enabled, we cannot
++ * reprogram the target cpu hardware and we would cause it to fire
++ * late. To keep it simple, we handle the high resolution enabled and
++ * disabled cases the same way.
+ *
+ * Called with cpu_base->lock of target cpu held.
+ */
+@@ -165,9 +166,6 @@ hrtimer_check_target(struct hrtimer *tim
+ {
+ ktime_t expires;
+
+- if (!new_base->cpu_base->hres_active)
+- return 0;
+-
+ expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
+ return expires <= new_base->cpu_base->expires_next;
+ }
+@@ -688,21 +686,24 @@ static void hrtimer_reprogram(struct hrt
+
+ /* Update the pointer to the next expiring timer */
+ hrtimer_update_next_timer(cpu_base, timer);
++ cpu_base->expires_next = expires;
+
+ /*
++ * If hres is not active, hardware does not have to be
++ * programmed yet.
++ *
+ * If a hang was detected in the last timer interrupt then we
+ * do not schedule a timer which is earlier than the expiry
+ * which we enforced in the hang detection. We want the system
+ * to make progress.
+ */
+- if (cpu_base->hang_detected)
++ if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+ return;
+
+ /*
+ * Program the timer hardware. We enforce the expiry for
+ * events which are already in the past.
+ */
+- cpu_base->expires_next = expires;
+ tick_program_event(expires, 1);
+ }
+
+@@ -942,16 +943,8 @@ void hrtimer_start_range_ns(struct hrtim
+ if (!leftmost)
+ goto unlock;
+
+- if (!hrtimer_is_hres_active(timer)) {
+- /*
+- * Kick to reschedule the next tick to handle the new timer
+- * on dynticks target.
+- */
+- if (new_base->cpu_base->nohz_active)
+- wake_up_nohz_cpu(new_base->cpu_base->cpu);
+- } else {
+- hrtimer_reprogram(timer, new_base);
+- }
++ hrtimer_reprogram(timer, new_base);
++
+ unlock:
+ unlock_hrtimer_base(timer, &flags);
+ }
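Condensed from the hunks above (a sketch assembled from this patch, not new code), the tail of hrtimer_reprogram() now always records the earliest expiry and only gates the actual hardware access:

	/* Always track the next expiring timer and its expiry time. */
	hrtimer_update_next_timer(cpu_base, timer);
	cpu_base->expires_next = expires;

	/*
	 * Touch the clock event hardware only in high resolution mode
	 * and when no hang was detected in the last timer interrupt.
	 */
	if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
		return;

	tick_program_event(expires, 1);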
diff --git a/patches/0011-hrtimer-Allow-remote-hrtimer-enqueue-with-expires_ne.patch b/patches/0011-hrtimer-Allow-remote-hrtimer-enqueue-with-expires_ne.patch
new file mode 100644
index 00000000000000..bb487542bcfe49
--- /dev/null
+++ b/patches/0011-hrtimer-Allow-remote-hrtimer-enqueue-with-expires_ne.patch
@@ -0,0 +1,31 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:06 +0000
+Subject: [PATCH 11/25] hrtimer: Allow remote hrtimer enqueue with
+ "expires_next" as expiry time
+
+When enqueuing a timer with expiry X into a timer queue where a timer
+with expiry X is already queued, the new timer is queued to the
+right-hand side of the already queued timer.
+
+Therefore it is no problem to enqueue a hrtimer on a remote CPU with
+the same expiry time as the programmed expiry time (expires_next) of
+that CPU, because the reprogramming path is not executed: the new
+timer is not the "leftmost" hrtimer.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -167,7 +167,7 @@ hrtimer_check_target(struct hrtimer *tim
+ ktime_t expires;
+
+ expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
+- return expires <= new_base->cpu_base->expires_next;
++ return expires < new_base->cpu_base->expires_next;
+ }
+
+ #ifdef CONFIG_NO_HZ_COMMON
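A worked example with hypothetical values illustrates why '<' is sufficient:

/*
 * Target CPU: expires_next == 100us, hardware already armed for it.
 *
 *   enqueue remote timer with expiry 100us
 *     -> queued right of the existing 100us timer, not leftmost
 *     -> no reprogramming needed, remote enqueue is safe
 *   enqueue remote timer with expiry 99us
 *     -> would become leftmost, must be refused
 *
 * hrtimer_check_target() therefore only refuses expiry times that are
 * strictly before expires_next.
 */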
diff --git a/patches/0012-hrtimer-Simplify-hrtimer_reprogram-call.patch b/patches/0012-hrtimer-Simplify-hrtimer_reprogram-call.patch
new file mode 100644
index 00000000000000..5a9e6afcef4c81
--- /dev/null
+++ b/patches/0012-hrtimer-Simplify-hrtimer_reprogram-call.patch
@@ -0,0 +1,38 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:07 +0000
+Subject: [PATCH 12/25] hrtimer: Simplify hrtimer_reprogram() call
+
+The hrtimer_reprogram() call can be simplified by dereferencing the
+hrtimer clock base inside the function. It is a preparatory change for
+softirq based hrtimers.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -649,10 +649,10 @@ static inline void retrigger_next_event(
+ *
+ * Called with interrupts disabled and base->cpu_base.lock held
+ */
+-static void hrtimer_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base)
++static void hrtimer_reprogram(struct hrtimer *timer)
+ {
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
++ struct hrtimer_clock_base *base = timer->base;
+ ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+
+ WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
+@@ -943,7 +943,7 @@ void hrtimer_start_range_ns(struct hrtim
+ if (!leftmost)
+ goto unlock;
+
+- hrtimer_reprogram(timer, new_base);
++ hrtimer_reprogram(timer);
+
+ unlock:
+ unlock_hrtimer_base(timer, &flags);
diff --git a/patches/0013-hrtimer-Split-out-code-from-hrtimer_start_range_ns-f.patch b/patches/0013-hrtimer-Split-out-code-from-hrtimer_start_range_ns-f.patch
new file mode 100644
index 00000000000000..bf58aaf3b34b29
--- /dev/null
+++ b/patches/0013-hrtimer-Split-out-code-from-hrtimer_start_range_ns-f.patch
@@ -0,0 +1,76 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:07 +0000
+Subject: [PATCH 13/25] hrtimer: Split out code from hrtimer_start_range_ns()
+ for reuse
+
+Preparatory patch for softirq based hrtimers. No functional change.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 44 ++++++++++++++++++++++++--------------------
+ 1 file changed, 24 insertions(+), 20 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -909,22 +909,11 @@ static inline ktime_t hrtimer_update_low
+ return tim;
+ }
+
+-/**
+- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+- * @timer: the timer to be added
+- * @tim: expiry time
+- * @delta_ns: "slack" range for the timer
+- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
+- * relative (HRTIMER_MODE_REL)
+- */
+-void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+- u64 delta_ns, const enum hrtimer_mode mode)
++static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
++ u64 delta_ns, const enum hrtimer_mode mode,
++ struct hrtimer_clock_base *base)
+ {
+- struct hrtimer_clock_base *base, *new_base;
+- unsigned long flags;
+- int leftmost;
+-
+- base = lock_hrtimer_base(timer, &flags);
++ struct hrtimer_clock_base *new_base;
+
+ /* Remove an active timer from the queue: */
+ remove_hrtimer(timer, base, true);
+@@ -939,13 +928,28 @@ void hrtimer_start_range_ns(struct hrtim
+ /* Switch the timer base, if necessary: */
+ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
+- leftmost = enqueue_hrtimer(timer, new_base);
+- if (!leftmost)
+- goto unlock;
++ return enqueue_hrtimer(timer, new_base);
++}
++
++/**
++ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
++ * @timer: the timer to be added
++ * @tim: expiry time
++ * @delta_ns: "slack" range for the timer
++ * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
++ * relative (HRTIMER_MODE_REL)
++ */
++void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
++ u64 delta_ns, const enum hrtimer_mode mode)
++{
++ struct hrtimer_clock_base *base;
++ unsigned long flags;
++
++ base = lock_hrtimer_base(timer, &flags);
+
+- hrtimer_reprogram(timer);
++ if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
++ hrtimer_reprogram(timer);
+
+-unlock:
+ unlock_hrtimer_base(timer, &flags);
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
diff --git a/patches/0014-hrtimer-Split-out-code-from-__hrtimer_get_next_event.patch b/patches/0014-hrtimer-Split-out-code-from-__hrtimer_get_next_event.patch
new file mode 100644
index 00000000000000..60d8443773a865
--- /dev/null
+++ b/patches/0014-hrtimer-Split-out-code-from-__hrtimer_get_next_event.patch
@@ -0,0 +1,51 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:08 +0000
+Subject: [PATCH 14/25] hrtimer: Split out code from __hrtimer_get_next_event()
+ for reuse
+
+Preparatory patch for softirq based hrtimers. No functional change.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -453,12 +453,12 @@ static inline void hrtimer_update_next_t
+ }
+
+ #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
++static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
++ unsigned int active,
++ ktime_t expires_next)
+ {
+- unsigned int active = cpu_base->active_bases;
+- ktime_t expires, expires_next = KTIME_MAX;
++ ktime_t expires;
+
+- hrtimer_update_next_timer(cpu_base, NULL);
+ while (active) {
+ unsigned int id = __ffs(active);
+ struct hrtimer_clock_base *base;
+@@ -485,6 +485,18 @@ static ktime_t __hrtimer_get_next_event(
+ expires_next = 0;
+ return expires_next;
+ }
++
++static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
++{
++ unsigned int active = cpu_base->active_bases;
++ ktime_t expires_next = KTIME_MAX;
++
++ hrtimer_update_next_timer(cpu_base, NULL);
++
++ expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
++
++ return expires_next;
++}
+ #endif
+
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
diff --git a/patches/0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch b/patches/0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch
new file mode 100644
index 00000000000000..d41f8866cb7979
--- /dev/null
+++ b/patches/0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch
@@ -0,0 +1,119 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:08 +0000
+Subject: [PATCH 15/25] hrtimer: Add clock bases for soft irq context
+
+hrtimer callback functions are always executed in hard interrupt
+context. Users of hrtimer which need their timer function to be
+executed in soft interrupt context make use of tasklets to get the
+proper context.
+
+Add additional clock bases for timers which must expire in softirq
+context, so the detour via the tasklet can be avoided. This is also
+required for RT, where the majority of hrtimers is moved into softirq
+context.
+
+Keep the new clockids internal to hrtimer for now, so they can't be
+accessed from other code until the rest of the changes are in place.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 4 +++
+ kernel/time/hrtimer.c | 56 ++++++++++++++++++++++++++++++++++++++++++------
+ 2 files changed, 54 insertions(+), 6 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -145,6 +145,10 @@ enum hrtimer_base_type {
+ HRTIMER_BASE_REALTIME,
+ HRTIMER_BASE_BOOTTIME,
+ HRTIMER_BASE_TAI,
++ HRTIMER_BASE_MONOTONIC_SOFT,
++ HRTIMER_BASE_REALTIME_SOFT,
++ HRTIMER_BASE_BOOTTIME_SOFT,
++ HRTIMER_BASE_TAI_SOFT,
+ HRTIMER_MAX_CLOCK_BASES,
+ };
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -59,6 +59,18 @@
+ #include "tick-internal.h"
+
+ /*
++ * Clock ids for timers which expire in softirq context. These clock ids
++ * are kernel internal and never exported to user space. Kept internal
++ * until the rest of the functionality is in place.
++ */
++#define HRTIMER_BASE_SOFT_MASK MAX_CLOCKS
++
++#define CLOCK_REALTIME_SOFT (CLOCK_REALTIME | HRTIMER_BASE_SOFT_MASK)
++#define CLOCK_MONOTONIC_SOFT (CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
++#define CLOCK_BOOTTIME_SOFT (CLOCK_BOOTTIME | HRTIMER_BASE_SOFT_MASK)
++#define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
++
++/*
+ * The timer bases:
+ *
+ * There are more clockids than hrtimer bases. Thus, we index
+@@ -91,17 +103,43 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base,
+ .clockid = CLOCK_TAI,
+ .get_time = &ktime_get_clocktai,
+ },
++ {
++ .index = HRTIMER_BASE_MONOTONIC_SOFT,
++ .clockid = CLOCK_MONOTONIC_SOFT,
++ .get_time = &ktime_get,
++ },
++ {
++ .index = HRTIMER_BASE_REALTIME_SOFT,
++ .clockid = CLOCK_REALTIME_SOFT,
++ .get_time = &ktime_get_real,
++ },
++ {
++ .index = HRTIMER_BASE_BOOTTIME_SOFT,
++ .clockid = CLOCK_BOOTTIME_SOFT,
++ .get_time = &ktime_get_boottime,
++ },
++ {
++ .index = HRTIMER_BASE_TAI_SOFT,
++ .clockid = CLOCK_TAI_SOFT,
++ .get_time = &ktime_get_clocktai,
++ },
+ }
+ };
+
+-static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
++#define MAX_CLOCKS_HRT (MAX_CLOCKS * 2)
++
++static const int hrtimer_clock_to_base_table[MAX_CLOCKS_HRT] = {
+ /* Make sure we catch unsupported clockids */
+- [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
++ [0 ... MAX_CLOCKS_HRT - 1] = HRTIMER_MAX_CLOCK_BASES,
+
+- [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
+- [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
+- [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
+- [CLOCK_TAI] = HRTIMER_BASE_TAI,
++ [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
++ [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
++ [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
++ [CLOCK_TAI] = HRTIMER_BASE_TAI,
++ [CLOCK_REALTIME_SOFT] = HRTIMER_BASE_REALTIME_SOFT,
++ [CLOCK_MONOTONIC_SOFT] = HRTIMER_BASE_MONOTONIC_SOFT,
++ [CLOCK_BOOTTIME_SOFT] = HRTIMER_BASE_BOOTTIME_SOFT,
++ [CLOCK_TAI_SOFT] = HRTIMER_BASE_TAI_SOFT,
+ };
+
+ /*
+@@ -1632,6 +1670,12 @@ int hrtimers_dead_cpu(unsigned int scpu)
+
+ void __init hrtimers_init(void)
+ {
++ /*
++	 * It is necessary that the soft base mask is a single
++ * bit.
++ */
++ BUILD_BUG_ON_NOT_POWER_OF_2(HRTIMER_BASE_SOFT_MASK);
++
+ hrtimers_prepare_cpu(smp_processor_id());
+ }
+
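Assuming the usual uapi value MAX_CLOCKS == 16 (not restated in the patch), the encoding works out as follows:

/*
 * HRTIMER_BASE_SOFT_MASK == MAX_CLOCKS == 16 == 0x10: a single bit.
 *
 *   CLOCK_REALTIME  (0)  | 0x10 -> CLOCK_REALTIME_SOFT  == 16
 *   CLOCK_MONOTONIC (1)  | 0x10 -> CLOCK_MONOTONIC_SOFT == 17
 *   CLOCK_BOOTTIME  (7)  | 0x10 -> CLOCK_BOOTTIME_SOFT  == 23
 *   CLOCK_TAI       (11) | 0x10 -> CLOCK_TAI_SOFT       == 27
 *
 * All soft ids stay below MAX_CLOCKS_HRT (32) and index the enlarged
 * hrtimer_clock_to_base_table directly; clearing the single mask bit
 * (enforced by the BUILD_BUG_ON_NOT_POWER_OF_2() above) recovers the
 * user visible clock id.
 */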
diff --git a/patches/0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch b/patches/0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch
new file mode 100644
index 00000000000000..cc38556ae8f45d
--- /dev/null
+++ b/patches/0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch
@@ -0,0 +1,77 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:08 +0000
+Subject: [PATCH 16/25] hrtimer: Allow function reuse for softirq based hrtimer
+
+The softirq based hrtimers can utilize most of the existing hrtimer
+functions, but need to operate on a different data set. Add an active_mask
+argument to various functions so the hard and soft bases can be
+selected. Fix up the existing callers and hand in the HRTIMER_ACTIVE_HARD
+mask.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -71,6 +71,14 @@
+ #define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
+
+ /*
++ * Masks for selecting the soft and hard context timers from
++ * cpu_base->active
++ */
++#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
++#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
++#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
++
++/*
+ * The timer bases:
+ *
+ * There are more clockids than hrtimer bases. Thus, we index
+@@ -526,11 +534,12 @@ static ktime_t __hrtimer_next_event_base
+
+ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+ {
+- unsigned int active = cpu_base->active_bases;
++ unsigned int active;
+ ktime_t expires_next = KTIME_MAX;
+
+ hrtimer_update_next_timer(cpu_base, NULL);
+
++ active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
+ expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+
+ return expires_next;
+@@ -1263,9 +1272,10 @@ static void __run_hrtimer(struct hrtimer
+ base->running = NULL;
+ }
+
+-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
++static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
++ unsigned int active_mask)
+ {
+- unsigned int active = cpu_base->active_bases;
++ unsigned int active = cpu_base->active_bases & active_mask;
+
+ while (active) {
+ unsigned int id = __ffs(active);
+@@ -1332,7 +1342,7 @@ void hrtimer_interrupt(struct clock_even
+ */
+ cpu_base->expires_next = KTIME_MAX;
+
+- __hrtimer_run_queues(cpu_base, now);
++ __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
+
+ /* Reevaluate the clock bases for the next expiry */
+ expires_next = __hrtimer_get_next_event(cpu_base);
+@@ -1437,7 +1447,7 @@ void hrtimer_run_queues(void)
+
+ raw_spin_lock(&cpu_base->lock);
+ now = hrtimer_update_base(cpu_base);
+- __hrtimer_run_queues(cpu_base, now);
++ __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
+ raw_spin_unlock(&cpu_base->lock);
+ }
+
diff --git a/patches/0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch b/patches/0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch
new file mode 100644
index 00000000000000..e9e131657936ae
--- /dev/null
+++ b/patches/0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch
@@ -0,0 +1,303 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:09 +0000
+Subject: [PATCH 17/25] hrtimer: Implementation of softirq hrtimer handling
+
+hrtimers are always executed in hard irq context. If a hrtimer callback
+function needs to be executed in softirq context, the detour via a tasklet
+is currently required. To avoid this detour, and with the real time
+specific handling of hrtimers in mind, new clock ids make hrtimers usable
+in softirq context directly.
+
+Every clock ID is available for soft and hard hrtimers, and both are
+handled the same way when they are enqueued. When the hrtimer interrupt
+fires, a check determines whether HRTIMER_SOFTIRQ has to be raised as
+well. While that softirq is pending, the soft hrtimers are not taken into
+account when, for example, __hrtimer_get_next_event() is called. At the
+end of the softirq, the relevant hrtimer_cpu_base struct members are
+updated, so that the soft hrtimers are taken into account again.
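+
+The check which raises the softirq boils down to this (quoted from the
+hrtimer_interrupt() hunk below):
+
+	if (!ktime_before(now, cpu_base->softirq_expires_next)) {
+		cpu_base->softirq_expires_next = KTIME_MAX;
+		cpu_base->softirq_activated = 1;
+		raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+	}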
+
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 8 ++-
+ kernel/time/hrtimer.c | 125 ++++++++++++++++++++++++++++++++++++++++++++----
+ 2 files changed, 122 insertions(+), 11 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -161,6 +161,8 @@ enum hrtimer_base_type {
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @migration_enabled: The migration of hrtimers to other cpus is enabled
+ * @nohz_active: The nohz functionality is enabled
++ * @softirq_activated: Set when the softirq was raised; updating the softirq
++ * related settings is not required then.
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hres_active: State of high resolution mode
+ * @hang_detected: The last hrtimer interrupt detected a hang
+@@ -169,8 +171,10 @@ enum hrtimer_base_type {
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @expires_next: absolute time of the next event, is required for remote
+- * hrtimer enqueue
++ * hrtimer enqueue; it is the total first expiry time (hard
++ *			hrtimer enqueue; it is the total first expiry time (hard
+ * @next_timer: Pointer to the first expiring timer
++ * @softirq_expires_next: Time at which the soft queues need to be checked for expiry
+ * @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+@@ -184,6 +188,7 @@ struct hrtimer_cpu_base {
+ unsigned int clock_was_set_seq;
+ bool migration_enabled;
+ bool nohz_active;
++ bool softirq_activated;
+ unsigned int hres_active : 1,
+ in_hrtirq : 1,
+ hang_detected : 1;
+@@ -195,6 +200,7 @@ struct hrtimer_cpu_base {
+ #endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
++ ktime_t softirq_expires_next;
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ } ____cacheline_aligned;
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -498,7 +498,6 @@ static inline void hrtimer_update_next_t
+ cpu_base->next_timer = timer;
+ }
+
+-#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
+ unsigned int active,
+ ktime_t expires_next)
+@@ -539,12 +538,23 @@ static ktime_t __hrtimer_get_next_event(
+
+ hrtimer_update_next_timer(cpu_base, NULL);
+
++ if (!cpu_base->softirq_activated) {
++ active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
++ expires_next = __hrtimer_next_event_base(cpu_base, active,
++ expires_next);
++ cpu_base->softirq_expires_next = expires_next;
++ }
++
+ active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
+ expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+
++ /*
++ * cpu_base->expires_next is not updated here. It is set only
++	 * in the hrtimer reprogramming path!
++ */
++
+ return expires_next;
+ }
+-#endif
+
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ {
+@@ -968,6 +978,49 @@ static inline ktime_t hrtimer_update_low
+ return tim;
+ }
+
++static void hrtimer_reprogram_softirq(struct hrtimer *timer)
++{
++ struct hrtimer_clock_base *base = timer->base;
++ struct hrtimer_cpu_base *cpu_base = base->cpu_base;
++ ktime_t expires;
++
++ /*
++	 * The softirq timer is not rearmed when the softirq was raised
++ * and has not yet run to completion.
++ */
++ if (cpu_base->softirq_activated)
++ return;
++
++ expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
++
++ if (!ktime_before(expires, cpu_base->softirq_expires_next))
++ return;
++
++ cpu_base->softirq_expires_next = expires;
++
++ if (!ktime_before(expires, cpu_base->expires_next))
++ return;
++ hrtimer_reprogram(timer);
++}
++
++static void hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base,
++ bool reprogram)
++{
++ ktime_t expires;
++
++ expires = __hrtimer_get_next_event(cpu_base);
++
++ if (!reprogram || !ktime_before(expires, cpu_base->expires_next))
++ return;
++ /*
++	 * next_timer can be used here, because
++	 * __hrtimer_get_next_event() above updated the next expiring
++	 * timer. expires_next is only set when the reprogramming
++	 * function is called.
++ */
++ hrtimer_reprogram(cpu_base->next_timer);
++}
++
+ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ u64 delta_ns, const enum hrtimer_mode mode,
+ struct hrtimer_clock_base *base)
+@@ -1006,9 +1059,12 @@ void hrtimer_start_range_ns(struct hrtim
+
+ base = lock_hrtimer_base(timer, &flags);
+
+- if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
+- hrtimer_reprogram(timer);
+-
++ if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) {
++ if (timer->base->index < HRTIMER_BASE_MONOTONIC_SOFT)
++ hrtimer_reprogram(timer);
++ else
++ hrtimer_reprogram_softirq(timer);
++ }
+ unlock_hrtimer_base(timer, &flags);
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
+@@ -1205,7 +1261,8 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
+
+ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
+ struct hrtimer_clock_base *base,
+- struct hrtimer *timer, ktime_t *now)
++ struct hrtimer *timer, ktime_t *now,
++ bool hardirq)
+ {
+ enum hrtimer_restart (*fn)(struct hrtimer *);
+ int restart;
+@@ -1240,11 +1297,19 @@ static void __run_hrtimer(struct hrtimer
+ * protected against migration to a different CPU even if the lock
+ * is dropped.
+ */
+- raw_spin_unlock(&cpu_base->lock);
++ if (hardirq)
++ raw_spin_unlock(&cpu_base->lock);
++ else
++ raw_spin_unlock_irq(&cpu_base->lock);
++
+ trace_hrtimer_expire_entry(timer, now);
+ restart = fn(timer);
+ trace_hrtimer_expire_exit(timer);
+- raw_spin_lock(&cpu_base->lock);
++
++ if (hardirq)
++ raw_spin_lock(&cpu_base->lock);
++ else
++ raw_spin_lock_irq(&cpu_base->lock);
+
+ /*
+ * Note: We clear the running state after enqueue_hrtimer and
+@@ -1308,11 +1373,28 @@ static void __hrtimer_run_queues(struct
+ if (basenow < hrtimer_get_softexpires_tv64(timer))
+ break;
+
+- __run_hrtimer(cpu_base, base, timer, &basenow);
++ __run_hrtimer(cpu_base, base, timer, &basenow,
++ active_mask == HRTIMER_ACTIVE_HARD);
+ }
+ }
+ }
+
++static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
++{
++ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
++ ktime_t now;
++
++ raw_spin_lock_irq(&cpu_base->lock);
++
++ now = hrtimer_update_base(cpu_base);
++ __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_SOFT);
++
++ cpu_base->softirq_activated = 0;
++ hrtimer_update_softirq_timer(cpu_base, true);
++
++ raw_spin_unlock_irq(&cpu_base->lock);
++}
++
+ #ifdef CONFIG_HIGH_RES_TIMERS
+
+ /*
+@@ -1342,9 +1424,15 @@ void hrtimer_interrupt(struct clock_even
+ */
+ cpu_base->expires_next = KTIME_MAX;
+
++ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
++ cpu_base->softirq_expires_next = KTIME_MAX;
++ cpu_base->softirq_activated = 1;
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ }
++
+ __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
+
+- /* Reevaluate the clock bases for the next expiry */
++ /* Reevaluate the hard interrupt clock bases for the next expiry */
+ expires_next = __hrtimer_get_next_event(cpu_base);
+ /*
+ * Store the new expiry value so the migration code can verify
+@@ -1447,6 +1535,13 @@ void hrtimer_run_queues(void)
+
+ raw_spin_lock(&cpu_base->lock);
+ now = hrtimer_update_base(cpu_base);
++
++ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
++ cpu_base->softirq_expires_next = KTIME_MAX;
++ cpu_base->softirq_activated = 1;
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ }
++
+ __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
+ raw_spin_unlock(&cpu_base->lock);
+ }
+@@ -1609,6 +1704,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+ cpu_base->cpu = cpu;
+ cpu_base->hres_active = 0;
+ cpu_base->expires_next = KTIME_MAX;
++ cpu_base->softirq_expires_next = KTIME_MAX;
+ return 0;
+ }
+
+@@ -1652,6 +1748,7 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ BUG_ON(cpu_online(scpu));
+ tick_cancel_sched_timer(scpu);
+
++ local_bh_disable();
+ local_irq_disable();
+ old_base = &per_cpu(hrtimer_bases, scpu);
+ new_base = this_cpu_ptr(&hrtimer_bases);
+@@ -1667,12 +1764,19 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ &new_base->clock_base[i]);
+ }
+
++ /*
++ * The migration might have changed the first expiring softirq
++ * timer on this CPU. Update it.
++ */
++ hrtimer_update_softirq_timer(new_base, false);
++
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
+
+ /* Check, if we got expired work to do */
+ __hrtimer_peek_ahead_timers();
+ local_irq_enable();
++ local_bh_enable();
+ return 0;
+ }
+
+@@ -1687,6 +1791,7 @@ void __init hrtimers_init(void)
+ BUILD_BUG_ON_NOT_POWER_OF_2(HRTIMER_BASE_SOFT_MASK);
+
+ hrtimers_prepare_cpu(smp_processor_id());
++ open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
+ }
+
+ /**
diff --git a/patches/0018-hrtimer-Enable-soft-and-hard-hrtimer.patch b/patches/0018-hrtimer-Enable-soft-and-hard-hrtimer.patch
new file mode 100644
index 00000000000000..dcdf0dbaec8d9a
--- /dev/null
+++ b/patches/0018-hrtimer-Enable-soft-and-hard-hrtimer.patch
@@ -0,0 +1,82 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:09 +0000
+Subject: [PATCH 18/25] hrtimer: Enable soft and hard hrtimer
+
+Move the definition of the soft clock ids into the header so that they
+are not only available internally. The translation between clock id and
+hrtimer base is extended by the soft hrtimer bases and the corresponding
+clock ids. Update all queries which were restricted to hard hrtimers to
+handle soft and hard hrtimers alike.
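+
+The soft clock ids are the regular ids with a kernel internal bit set, as
+defined in the hunk below:
+
+	#define HRTIMER_BASE_SOFT_MASK	MAX_CLOCKS
+	#define CLOCK_MONOTONIC_SOFT	(CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
+
+Consequently hrtimer_clockid_to_base() only needs a lookup table twice
+the previous size (MAX_CLOCKS_HRT) to map both variants to their bases.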
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 11 +++++++++++
+ kernel/time/hrtimer.c | 22 +++++++---------------
+ 2 files changed, 18 insertions(+), 15 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -23,6 +23,17 @@
+ #include <linux/timer.h>
+ #include <linux/timerqueue.h>
+
++/*
++ * Clock ids for hrtimers which expire in softirq context. These clock ids
++ * are kernel internal and never exported to user space.
++ */
++#define HRTIMER_BASE_SOFT_MASK MAX_CLOCKS
++
++#define CLOCK_REALTIME_SOFT (CLOCK_REALTIME | HRTIMER_BASE_SOFT_MASK)
++#define CLOCK_MONOTONIC_SOFT (CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
++#define CLOCK_BOOTTIME_SOFT (CLOCK_BOOTTIME | HRTIMER_BASE_SOFT_MASK)
++#define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
++
+ struct hrtimer_clock_base;
+ struct hrtimer_cpu_base;
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -59,18 +59,6 @@
+ #include "tick-internal.h"
+
+ /*
+- * Clock ids for timers which expire in softirq context. These clock ids
+- * are kernel internal and never exported to user space. Kept internal
+- * until the rest of the functionality is in place.
+- */
+-#define HRTIMER_BASE_SOFT_MASK MAX_CLOCKS
+-
+-#define CLOCK_REALTIME_SOFT (CLOCK_REALTIME | HRTIMER_BASE_SOFT_MASK)
+-#define CLOCK_MONOTONIC_SOFT (CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
+-#define CLOCK_BOOTTIME_SOFT (CLOCK_BOOTTIME | HRTIMER_BASE_SOFT_MASK)
+-#define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
+-
+-/*
+ * Masks for selecting the soft and hard context timers from
+ * cpu_base->active
+ */
+@@ -1172,7 +1160,7 @@ u64 hrtimer_get_next_event(void)
+
+ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
+ {
+- if (likely(clock_id < MAX_CLOCKS)) {
++ if (likely(clock_id < MAX_CLOCKS_HRT)) {
+ int base = hrtimer_clock_to_base_table[clock_id];
+
+ if (likely(base != HRTIMER_MAX_CLOCK_BASES))
+@@ -1192,8 +1180,12 @@ static void __hrtimer_init(struct hrtime
+
+ cpu_base = raw_cpu_ptr(&hrtimer_bases);
+
+- if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
+- clock_id = CLOCK_MONOTONIC;
++ if (mode != HRTIMER_MODE_ABS) {
++ if (clock_id == CLOCK_REALTIME)
++ clock_id = CLOCK_MONOTONIC;
++ else if (clock_id == CLOCK_REALTIME_SOFT)
++ clock_id = CLOCK_MONOTONIC_SOFT;
++ }
+
+ base = hrtimer_clockid_to_base(clock_id);
+ timer->base = &cpu_base->clock_base[base];
diff --git a/patches/0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch b/patches/0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch
new file mode 100644
index 00000000000000..6ca9ba3a6e5b78
--- /dev/null
+++ b/patches/0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch
@@ -0,0 +1,295 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:10 +0000
+Subject: [PATCH 19/25] can/bcm: Replace hrtimer_tasklet with softirq based
+ hrtimer
+
+Switch the timer to CLOCK_MONOTONIC_SOFT, which executes the timer
+callback in softirq context, and remove the hrtimer_tasklet.
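+
+The conversion follows the same pattern as the other tasklet_hrtimer
+removals; as a sketch (my_handler is a placeholder name):
+
+	static enum hrtimer_restart my_handler(struct hrtimer *t)
+	{
+		/* do the work the tasklet used to do */
+		return HRTIMER_NORESTART;	/* or HRTIMER_RESTART */
+	}
+
+	hrtimer_init(&op->timer, CLOCK_MONOTONIC_SOFT, HRTIMER_MODE_REL);
+	op->timer.function = my_handler;	/* runs in softirq context */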
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Cc: linux-can@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/can/bcm.c | 150 ++++++++++++++++++----------------------------------------
+ 1 file changed, 49 insertions(+), 101 deletions(-)
+
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -102,7 +102,6 @@ struct bcm_op {
+ unsigned long frames_abs, frames_filtered;
+ struct bcm_timeval ival1, ival2;
+ struct hrtimer timer, thrtimer;
+- struct tasklet_struct tsklet, thrtsklet;
+ ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
+ int rx_ifindex;
+ int cfsiz;
+@@ -363,25 +362,34 @@ static void bcm_send_to_user(struct bcm_
+ }
+ }
+
+-static void bcm_tx_start_timer(struct bcm_op *op)
++static bool bcm_tx_set_expiry(struct bcm_op *op, struct hrtimer *hrt)
+ {
++ ktime_t ival;
++
+ if (op->kt_ival1 && op->count)
+- hrtimer_start(&op->timer,
+- ktime_add(ktime_get(), op->kt_ival1),
+- HRTIMER_MODE_ABS);
++ ival = op->kt_ival1;
+ else if (op->kt_ival2)
+- hrtimer_start(&op->timer,
+- ktime_add(ktime_get(), op->kt_ival2),
+- HRTIMER_MODE_ABS);
++ ival = op->kt_ival2;
++ else
++ return false;
++
++ hrtimer_set_expires(hrt, ktime_add(ktime_get(), ival));
++ return true;
+ }
+
+-static void bcm_tx_timeout_tsklet(unsigned long data)
++static void bcm_tx_start_timer(struct bcm_op *op)
+ {
+- struct bcm_op *op = (struct bcm_op *)data;
++ if (bcm_tx_set_expiry(op, &op->timer))
++ hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS);
++}
++
++/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
++static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
++{
++ struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+ struct bcm_msg_head msg_head;
+
+ if (op->kt_ival1 && (op->count > 0)) {
+-
+ op->count--;
+ if (!op->count && (op->flags & TX_COUNTEVT)) {
+
+@@ -398,22 +406,12 @@ static void bcm_tx_timeout_tsklet(unsign
+ }
+ bcm_can_tx(op);
+
+- } else if (op->kt_ival2)
++ } else if (op->kt_ival2) {
+ bcm_can_tx(op);
++ }
+
+- bcm_tx_start_timer(op);
+-}
+-
+-/*
+- * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
+- */
+-static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+-{
+- struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+-
+- tasklet_schedule(&op->tsklet);
+-
+- return HRTIMER_NORESTART;
++ return bcm_tx_set_expiry(op, &op->timer) ?
++ HRTIMER_RESTART : HRTIMER_NORESTART;
+ }
+
+ /*
+@@ -541,11 +539,18 @@ static void bcm_rx_starttimer(struct bcm
+ hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
+ }
+
+-static void bcm_rx_timeout_tsklet(unsigned long data)
++/* bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out */
++static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+ {
+- struct bcm_op *op = (struct bcm_op *)data;
++ struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+ struct bcm_msg_head msg_head;
+
++ /* if user wants to be informed, when cyclic CAN-Messages come back */
++ if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
++ /* clear received CAN frames to indicate 'nothing received' */
++ memset(op->last_frames, 0, op->nframes * op->cfsiz);
++ }
++
+ /* create notification to user */
+ msg_head.opcode = RX_TIMEOUT;
+ msg_head.flags = op->flags;
+@@ -556,25 +561,6 @@ static void bcm_rx_timeout_tsklet(unsign
+ msg_head.nframes = 0;
+
+ bcm_send_to_user(op, &msg_head, NULL, 0);
+-}
+-
+-/*
+- * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
+- */
+-static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+-{
+- struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+-
+- /* schedule before NET_RX_SOFTIRQ */
+- tasklet_hi_schedule(&op->tsklet);
+-
+- /* no restart of the timer is done here! */
+-
+- /* if user wants to be informed, when cyclic CAN-Messages come back */
+- if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
+- /* clear received CAN frames to indicate 'nothing received' */
+- memset(op->last_frames, 0, op->nframes * op->cfsiz);
+- }
+
+ return HRTIMER_NORESTART;
+ }
+@@ -582,14 +568,12 @@ static enum hrtimer_restart bcm_rx_timeo
+ /*
+ * bcm_rx_do_flush - helper for bcm_rx_thr_flush
+ */
+-static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
+- unsigned int index)
++static inline int bcm_rx_do_flush(struct bcm_op *op, unsigned int index)
+ {
+ struct canfd_frame *lcf = op->last_frames + op->cfsiz * index;
+
+ if ((op->last_frames) && (lcf->flags & RX_THR)) {
+- if (update)
+- bcm_rx_changed(op, lcf);
++ bcm_rx_changed(op, lcf);
+ return 1;
+ }
+ return 0;
+@@ -597,11 +581,8 @@ static inline int bcm_rx_do_flush(struct
+
+ /*
+ * bcm_rx_thr_flush - Check for throttled data and send it to the userspace
+- *
+- * update == 0 : just check if throttled data is available (any irq context)
+- * update == 1 : check and send throttled data to userspace (soft_irq context)
+ */
+-static int bcm_rx_thr_flush(struct bcm_op *op, int update)
++static int bcm_rx_thr_flush(struct bcm_op *op)
+ {
+ int updated = 0;
+
+@@ -610,24 +591,16 @@ static int bcm_rx_thr_flush(struct bcm_o
+
+ /* for MUX filter we start at index 1 */
+ for (i = 1; i < op->nframes; i++)
+- updated += bcm_rx_do_flush(op, update, i);
++ updated += bcm_rx_do_flush(op, i);
+
+ } else {
+ /* for RX_FILTER_ID and simple filter */
+- updated += bcm_rx_do_flush(op, update, 0);
++ updated += bcm_rx_do_flush(op, 0);
+ }
+
+ return updated;
+ }
+
+-static void bcm_rx_thr_tsklet(unsigned long data)
+-{
+- struct bcm_op *op = (struct bcm_op *)data;
+-
+- /* push the changed data to the userspace */
+- bcm_rx_thr_flush(op, 1);
+-}
+-
+ /*
+ * bcm_rx_thr_handler - the time for blocked content updates is over now:
+ * Check for throttled data and send it to the userspace
+@@ -636,9 +609,7 @@ static enum hrtimer_restart bcm_rx_thr_h
+ {
+ struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
+
+- tasklet_schedule(&op->thrtsklet);
+-
+- if (bcm_rx_thr_flush(op, 0)) {
++ if (bcm_rx_thr_flush(op)) {
+ hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
+ return HRTIMER_RESTART;
+ } else {
+@@ -734,23 +705,8 @@ static struct bcm_op *bcm_find_op(struct
+
+ static void bcm_remove_op(struct bcm_op *op)
+ {
+- if (op->tsklet.func) {
+- while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
+- test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
+- hrtimer_active(&op->timer)) {
+- hrtimer_cancel(&op->timer);
+- tasklet_kill(&op->tsklet);
+- }
+- }
+-
+- if (op->thrtsklet.func) {
+- while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
+- test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
+- hrtimer_active(&op->thrtimer)) {
+- hrtimer_cancel(&op->thrtimer);
+- tasklet_kill(&op->thrtsklet);
+- }
+- }
++ hrtimer_cancel(&op->timer);
++ hrtimer_cancel(&op->thrtimer);
+
+ if ((op->frames) && (op->frames != &op->sframe))
+ kfree(op->frames);
+@@ -977,15 +933,13 @@ static int bcm_tx_setup(struct bcm_msg_h
+ op->ifindex = ifindex;
+
+ /* initialize uninitialized (kzalloc) structure */
+- hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&op->timer, CLOCK_MONOTONIC_SOFT,
++ HRTIMER_MODE_REL);
+ op->timer.function = bcm_tx_timeout_handler;
+
+- /* initialize tasklet for tx countevent notification */
+- tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
+- (unsigned long) op);
+-
+ /* currently unused in tx_ops */
+- hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC_SOFT,
++ HRTIMER_MODE_REL);
+
+ /* add this bcm_op to the list of the tx_ops */
+ list_add(&op->list, &bo->tx_ops);
+@@ -1148,20 +1102,14 @@ static int bcm_rx_setup(struct bcm_msg_h
+ op->rx_ifindex = ifindex;
+
+ /* initialize uninitialized (kzalloc) structure */
+- hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&op->timer, CLOCK_MONOTONIC_SOFT,
++ HRTIMER_MODE_REL);
+ op->timer.function = bcm_rx_timeout_handler;
+
+- /* initialize tasklet for rx timeout notification */
+- tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
+- (unsigned long) op);
+-
+- hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC_SOFT,
++ HRTIMER_MODE_REL);
+ op->thrtimer.function = bcm_rx_thr_handler;
+
+- /* initialize tasklet for rx throttle handling */
+- tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
+- (unsigned long) op);
+-
+ /* add this bcm_op to the list of the rx_ops */
+ list_add(&op->list, &bo->rx_ops);
+
+@@ -1207,7 +1155,7 @@ static int bcm_rx_setup(struct bcm_msg_h
+ */
+ op->kt_lastmsg = 0;
+ hrtimer_cancel(&op->thrtimer);
+- bcm_rx_thr_flush(op, 1);
++ bcm_rx_thr_flush(op);
+ }
+
+ if ((op->flags & STARTTIMER) && op->kt_ival1)
diff --git a/patches/0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch b/patches/0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch
new file mode 100644
index 00000000000000..f5fa41b66684f1
--- /dev/null
+++ b/patches/0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch
@@ -0,0 +1,135 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:10 +0000
+Subject: [PATCH 20/25] mac80211_hwsim: Replace hrtimer tasklet with softirq
+ hrtimer
+
+Switch the timer to CLOCK_MONOTONIC_SOFT, which executes the timer
+callback in softirq context, and remove the hrtimer_tasklet.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Cc: Johannes Berg <johannes@sipsolutions.net>
+Cc: Kalle Valo <kvalo@codeaurora.org>
+Cc: linux-wireless@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/wireless/mac80211_hwsim.c | 44 +++++++++++++++-------------------
+ 1 file changed, 20 insertions(+), 24 deletions(-)
+
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -531,7 +531,7 @@ struct mac80211_hwsim_data {
+ unsigned int rx_filter;
+ bool started, idle, scanning;
+ struct mutex mutex;
+- struct tasklet_hrtimer beacon_timer;
++ struct hrtimer beacon_timer;
+ enum ps_mode {
+ PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
+ } ps;
+@@ -1408,7 +1408,7 @@ static void mac80211_hwsim_stop(struct i
+ {
+ struct mac80211_hwsim_data *data = hw->priv;
+ data->started = false;
+- tasklet_hrtimer_cancel(&data->beacon_timer);
++ hrtimer_cancel(&data->beacon_timer);
+ wiphy_debug(hw->wiphy, "%s\n", __func__);
+ }
+
+@@ -1531,14 +1531,12 @@ static enum hrtimer_restart
+ mac80211_hwsim_beacon(struct hrtimer *timer)
+ {
+ struct mac80211_hwsim_data *data =
+- container_of(timer, struct mac80211_hwsim_data,
+- beacon_timer.timer);
++ container_of(timer, struct mac80211_hwsim_data, beacon_timer);
+ struct ieee80211_hw *hw = data->hw;
+ u64 bcn_int = data->beacon_int;
+- ktime_t next_bcn;
+
+ if (!data->started)
+- goto out;
++ return HRTIMER_NORESTART;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ hw, IEEE80211_IFACE_ITER_NORMAL,
+@@ -1550,11 +1548,9 @@ mac80211_hwsim_beacon(struct hrtimer *ti
+ data->bcn_delta = 0;
+ }
+
+- next_bcn = ktime_add(hrtimer_get_expires(timer),
+- ns_to_ktime(bcn_int * 1000));
+- tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS);
+-out:
+- return HRTIMER_NORESTART;
++ hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
++ ns_to_ktime(bcn_int * NSEC_PER_USEC));
++ return HRTIMER_RESTART;
+ }
+
+ static const char * const hwsim_chanwidths[] = {
+@@ -1604,15 +1600,15 @@ static int mac80211_hwsim_config(struct
+
+ data->power_level = conf->power_level;
+ if (!data->started || !data->beacon_int)
+- tasklet_hrtimer_cancel(&data->beacon_timer);
+- else if (!hrtimer_is_queued(&data->beacon_timer.timer)) {
++ hrtimer_cancel(&data->beacon_timer);
++ else if (!hrtimer_is_queued(&data->beacon_timer)) {
+ u64 tsf = mac80211_hwsim_get_tsf(hw, NULL);
+ u32 bcn_int = data->beacon_int;
+ u64 until_tbtt = bcn_int - do_div(tsf, bcn_int);
+
+- tasklet_hrtimer_start(&data->beacon_timer,
+- ns_to_ktime(until_tbtt * 1000),
+- HRTIMER_MODE_REL);
++ hrtimer_start(&data->beacon_timer,
++ ns_to_ktime(until_tbtt * 1000),
++ HRTIMER_MODE_REL);
+ }
+
+ return 0;
+@@ -1675,7 +1671,7 @@ static void mac80211_hwsim_bss_info_chan
+ info->enable_beacon, info->beacon_int);
+ vp->bcn_en = info->enable_beacon;
+ if (data->started &&
+- !hrtimer_is_queued(&data->beacon_timer.timer) &&
++ !hrtimer_is_queued(&data->beacon_timer) &&
+ info->enable_beacon) {
+ u64 tsf, until_tbtt;
+ u32 bcn_int;
+@@ -1683,9 +1679,9 @@ static void mac80211_hwsim_bss_info_chan
+ tsf = mac80211_hwsim_get_tsf(hw, vif);
+ bcn_int = data->beacon_int;
+ until_tbtt = bcn_int - do_div(tsf, bcn_int);
+- tasklet_hrtimer_start(&data->beacon_timer,
+- ns_to_ktime(until_tbtt * 1000),
+- HRTIMER_MODE_REL);
++ hrtimer_start(&data->beacon_timer,
++ ns_to_ktime(until_tbtt * 1000),
++ HRTIMER_MODE_REL);
+ } else if (!info->enable_beacon) {
+ unsigned int count = 0;
+ ieee80211_iterate_active_interfaces_atomic(
+@@ -1694,7 +1690,7 @@ static void mac80211_hwsim_bss_info_chan
+ wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
+ count);
+ if (count == 0) {
+- tasklet_hrtimer_cancel(&data->beacon_timer);
++ hrtimer_cancel(&data->beacon_timer);
+ data->beacon_int = 0;
+ }
+ }
+@@ -2669,9 +2665,9 @@ static int mac80211_hwsim_new_radio(stru
+ data->debugfs,
+ data, &hwsim_simulate_radar);
+
+- tasklet_hrtimer_init(&data->beacon_timer,
+- mac80211_hwsim_beacon,
+- CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC_SOFT,
++ HRTIMER_MODE_ABS);
++ data->beacon_timer.function = mac80211_hwsim_beacon;
+
+ spin_lock_bh(&hwsim_radio_lock);
+ list_add_tail(&data->list, &hwsim_radios);
diff --git a/patches/0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch b/patches/0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch
new file mode 100644
index 00000000000000..6a8ee1b97aa203
--- /dev/null
+++ b/patches/0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch
@@ -0,0 +1,131 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:10 +0000
+Subject: [PATCH 21/25] xfrm: Replace hrtimer tasklet with softirq hrtimer
+
+Switch the timer to CLOCK_MONOTONIC_SOFT, which executes the timer
+callback in softirq context, and remove the hrtimer_tasklet.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: netdev@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/net/xfrm.h | 2 +-
+ net/xfrm/xfrm_state.c | 29 +++++++++++++++++------------
+ 2 files changed, 18 insertions(+), 13 deletions(-)
+
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -205,7 +205,7 @@ struct xfrm_state {
+ struct xfrm_stats stats;
+
+ struct xfrm_lifetime_cur curlft;
+- struct tasklet_hrtimer mtimer;
++ struct hrtimer mtimer;
+
+ /* used to fix curlft->add_time when changing date */
+ long saved_tmo;
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -349,7 +349,7 @@ static void xfrm_put_mode(struct xfrm_mo
+
+ static void xfrm_state_gc_destroy(struct xfrm_state *x)
+ {
+- tasklet_hrtimer_cancel(&x->mtimer);
++ hrtimer_cancel(&x->mtimer);
+ del_timer_sync(&x->rtimer);
+ kfree(x->aead);
+ kfree(x->aalg);
+@@ -391,8 +391,8 @@ static void xfrm_state_gc_task(struct wo
+
+ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
+ {
+- struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer);
+- struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer);
++ struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
++ enum hrtimer_restart ret = HRTIMER_NORESTART;
+ unsigned long now = get_seconds();
+ long next = LONG_MAX;
+ int warn = 0;
+@@ -456,7 +456,8 @@ static enum hrtimer_restart xfrm_timer_h
+ km_state_expired(x, 0, 0);
+ resched:
+ if (next != LONG_MAX) {
+- tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL);
++ hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
++ ret = HRTIMER_RESTART;
+ }
+
+ goto out;
+@@ -473,7 +474,7 @@ static enum hrtimer_restart xfrm_timer_h
+
+ out:
+ spin_unlock(&x->lock);
+- return HRTIMER_NORESTART;
++ return ret;
+ }
+
+ static void xfrm_replay_timer_handler(unsigned long data);
+@@ -492,8 +493,8 @@ struct xfrm_state *xfrm_state_alloc(stru
+ INIT_HLIST_NODE(&x->bydst);
+ INIT_HLIST_NODE(&x->bysrc);
+ INIT_HLIST_NODE(&x->byspi);
+- tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
+- CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
++ hrtimer_init(&x->mtimer, CLOCK_BOOTTIME_SOFT, HRTIMER_MODE_ABS);
++ x->mtimer.function = xfrm_timer_handler;
+ setup_timer(&x->rtimer, xfrm_replay_timer_handler,
+ (unsigned long)x);
+ x->curlft.add_time = get_seconds();
+@@ -876,7 +877,9 @@ xfrm_state_find(const xfrm_address_t *da
+ hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
+ }
+ x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
+- tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
++ hrtimer_start(&x->mtimer,
++ ktime_set(net->xfrm.sysctl_acq_expires, 0),
++ HRTIMER_MODE_REL);
+ net->xfrm.state_num++;
+ xfrm_hash_grow_check(net, x->bydst.next != NULL);
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+@@ -987,7 +990,7 @@ static void __xfrm_state_insert(struct x
+ hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
+ }
+
+- tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
++ hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ if (x->replay_maxage)
+ mod_timer(&x->rtimer, jiffies + x->replay_maxage);
+
+@@ -1091,7 +1094,9 @@ static struct xfrm_state *__find_acq_cor
+ x->mark.m = m->m;
+ x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
+ xfrm_state_hold(x);
+- tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
++ hrtimer_start(&x->mtimer,
++ ktime_set(net->xfrm.sysctl_acq_expires, 0),
++ HRTIMER_MODE_REL);
+ list_add(&x->km.all, &net->xfrm.state_all);
+ hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
+ h = xfrm_src_hash(net, daddr, saddr, family);
+@@ -1380,7 +1385,7 @@ int xfrm_state_update(struct xfrm_state
+ memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
+ x1->km.dying = 0;
+
+- tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
++ hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
+ if (x1->curlft.use_time)
+ xfrm_state_check_expire(x1);
+
+@@ -1404,7 +1409,7 @@ int xfrm_state_check_expire(struct xfrm_
+ if (x->curlft.bytes >= x->lft.hard_byte_limit ||
+ x->curlft.packets >= x->lft.hard_packet_limit) {
+ x->km.state = XFRM_STATE_EXPIRED;
+- tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
++ hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
+ return -EINVAL;
+ }
+
diff --git a/patches/0022-softirq-Remove-tasklet_hrtimer.patch b/patches/0022-softirq-Remove-tasklet_hrtimer.patch
new file mode 100644
index 00000000000000..e3196f16257e14
--- /dev/null
+++ b/patches/0022-softirq-Remove-tasklet_hrtimer.patch
@@ -0,0 +1,109 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 31 Aug 2017 11:03:11 +0000
+Subject: [PATCH 22/25] softirq: Remove tasklet_hrtimer
+
+There are no more users of the tasklet_hrtimer interface.
+Remove it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/interrupt.h | 25 ----------------------
+ kernel/softirq.c | 51 ----------------------------------------------
+ 2 files changed, 76 deletions(-)
+
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -622,31 +622,6 @@ extern void tasklet_kill_immediate(struc
+ extern void tasklet_init(struct tasklet_struct *t,
+ void (*func)(unsigned long), unsigned long data);
+
+-struct tasklet_hrtimer {
+- struct hrtimer timer;
+- struct tasklet_struct tasklet;
+- enum hrtimer_restart (*function)(struct hrtimer *);
+-};
+-
+-extern void
+-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+- enum hrtimer_restart (*function)(struct hrtimer *),
+- clockid_t which_clock, enum hrtimer_mode mode);
+-
+-static inline
+-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+- const enum hrtimer_mode mode)
+-{
+- hrtimer_start(&ttimer->timer, time, mode);
+-}
+-
+-static inline
+-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
+-{
+- hrtimer_cancel(&ttimer->timer);
+- tasklet_kill(&ttimer->tasklet);
+-}
+-
+ /*
+ * Autoprobing for irqs:
+ *
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -594,57 +594,6 @@ void tasklet_kill(struct tasklet_struct
+ }
+ EXPORT_SYMBOL(tasklet_kill);
+
+-/*
+- * tasklet_hrtimer
+- */
+-
+-/*
+- * The trampoline is called when the hrtimer expires. It schedules a tasklet
+- * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
+- * hrtimer callback, but from softirq context.
+- */
+-static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
+-{
+- struct tasklet_hrtimer *ttimer =
+- container_of(timer, struct tasklet_hrtimer, timer);
+-
+- tasklet_hi_schedule(&ttimer->tasklet);
+- return HRTIMER_NORESTART;
+-}
+-
+-/*
+- * Helper function which calls the hrtimer callback from
+- * tasklet/softirq context
+- */
+-static void __tasklet_hrtimer_trampoline(unsigned long data)
+-{
+- struct tasklet_hrtimer *ttimer = (void *)data;
+- enum hrtimer_restart restart;
+-
+- restart = ttimer->function(&ttimer->timer);
+- if (restart != HRTIMER_NORESTART)
+- hrtimer_restart(&ttimer->timer);
+-}
+-
+-/**
+- * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
+- * @ttimer: tasklet_hrtimer which is initialized
+- * @function: hrtimer callback function which gets called from softirq context
+- * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
+- * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
+- */
+-void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
+- enum hrtimer_restart (*function)(struct hrtimer *),
+- clockid_t which_clock, enum hrtimer_mode mode)
+-{
+- hrtimer_init(&ttimer->timer, which_clock, mode);
+- ttimer->timer.function = __hrtimer_tasklet_trampoline;
+- tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
+- (unsigned long)ttimer);
+- ttimer->function = function;
+-}
+-EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
+-
+ void __init softirq_init(void)
+ {
+ int cpu;
diff --git a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
deleted file mode 100644
index 390ab95e637961..00000000000000
--- a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Marcelo Tosatti <mtosatti@redhat.com>
-Date: Wed, 8 Apr 2015 20:33:25 -0300
-Subject: KVM: lapic: mark LAPIC timer handler as irqsafe
-
-Since lapic timer handler only wakes up a simple waitqueue,
-it can be executed from hardirq context.
-
-Reduces average cyclictest latency by 3us.
-
-Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kvm/lapic.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -2062,6 +2062,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
- hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS_PINNED);
- apic->lapic_timer.timer.function = apic_timer_fn;
-+ apic->lapic_timer.timer.irqsafe = 1;
-
- /*
- * APIC is created enabled. This will prevent kvm_lapic_set_base from
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 3ce5296d8d20bd..0b08be768f4466 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -276,7 +276,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7528,7 +7528,10 @@ void migrate_disable(void)
+@@ -7527,7 +7527,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -288,7 +288,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -7558,7 +7561,10 @@ void migrate_enable(void)
+@@ -7557,7 +7560,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index dad5f040d7a5cc..032da7826e9ea8 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -5096,6 +5096,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -5095,6 +5095,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -5109,6 +5110,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5108,6 +5109,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index 95494c5e7a1961..0924cf5672fc8f 100644
--- a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -33,24 +33,22 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1606,12 +1606,13 @@ void hrtimer_init_sleeper(struct hrtimer
- }
- EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
+@@ -1664,10 +1664,11 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_o
+ #endif
+
-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode,
+ unsigned long state)
{
- hrtimer_init_sleeper(t, current);
-
do {
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(state);
hrtimer_start_expires(&t->timer, mode);
if (likely(t->task))
-@@ -1653,7 +1654,8 @@ long __sched hrtimer_nanosleep_restart(s
- HRTIMER_MODE_ABS);
+@@ -1709,7 +1710,8 @@ long __sched hrtimer_nanosleep_restart(s
+ HRTIMER_MODE_ABS, current);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
- if (do_nanosleep(&t, HRTIMER_MODE_ABS))
@@ -59,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
rmtp = restart->nanosleep.rmtp;
-@@ -1670,8 +1672,10 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1726,8 +1728,10 @@ long __sched hrtimer_nanosleep_restart(s
return ret;
}
@@ -72,16 +70,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct restart_block *restart;
struct hrtimer_sleeper t;
-@@ -1684,7 +1688,7 @@ long hrtimer_nanosleep(struct timespec *
-
- hrtimer_init_on_stack(&t.timer, clockid, mode);
+@@ -1741,7 +1745,7 @@ long hrtimer_nanosleep(struct timespec *
+ hrtimer_init_sleeper_on_stack(&t, clockid, mode, current);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
+
- if (do_nanosleep(&t, mode))
+ if (do_nanosleep(&t, mode, state))
goto out;
/* Absolute timers do not update the rmtp value and restart: */
-@@ -1711,6 +1715,12 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1768,6 +1772,12 @@ long hrtimer_nanosleep(struct timespec *
return ret;
}
@@ -94,12 +92,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
-@@ -1737,7 +1747,8 @@ void cpu_chill(void)
+@@ -1794,7 +1804,8 @@ void cpu_chill(void)
unsigned int freeze_flag = current->flags & PF_NOFREEZE;
current->flags |= PF_NOFREEZE;
-- hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
-+ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
+- hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC_HARD);
++ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC_HARD,
+ TASK_UNINTERRUPTIBLE);
if (!freeze_flag)
current->flags &= ~PF_NOFREEZE;
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 48a5697ada2d47..20c4c5b3c638b2 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7510,6 +7510,7 @@ void migrate_disable(void)
+@@ -7509,6 +7509,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
migrate_disable_update_cpus_allowed(p);
p->migrate_disable = 1;
-@@ -7572,12 +7573,15 @@ void migrate_enable(void)
+@@ -7571,12 +7572,15 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
diff --git a/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
index f9619243d11f15..5071e0d4e1fcb2 100644
--- a/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
+++ b/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -695,6 +695,29 @@ static void hrtimer_switch_to_hres(void)
+@@ -685,6 +685,29 @@ static void hrtimer_switch_to_hres(void)
retrigger_next_event(NULL);
}
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void clock_was_set_work(struct work_struct *work)
{
clock_was_set();
-@@ -710,6 +733,7 @@ void clock_was_set_delayed(void)
+@@ -700,6 +723,7 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
diff --git a/patches/hrtimer-Remove-hrtimer_peek_ahead_timers-leftovers.patch b/patches/hrtimer-Remove-hrtimer_peek_ahead_timers-leftovers.patch
new file mode 100644
index 00000000000000..453563cdcb3f17
--- /dev/null
+++ b/patches/hrtimer-Remove-hrtimer_peek_ahead_timers-leftovers.patch
@@ -0,0 +1,52 @@
+From: Stephen Boyd <sboyd@codeaurora.org>
+Date: Thu, 16 Mar 2017 18:08:13 -0700
+Subject: [PATCH] hrtimer: Remove hrtimer_peek_ahead_timers() leftovers
+
+This function was removed in commit c6eb3f70d448 (hrtimer: Get rid of
+hrtimer softirq, 2015-04-14) but the prototype wasn't ever deleted.
+
+Delete it now.
+
+Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
+Link: http://lkml.kernel.org/r/20170317010814.2591-1-sboyd@codeaurora.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 4 ----
+ kernel/time/hrtimer.c | 5 +----
+ 2 files changed, 1 insertion(+), 8 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -274,8 +274,6 @@ static inline int hrtimer_is_hres_active
+ return timer->base->cpu_base->hres_active;
+ }
+
+-extern void hrtimer_peek_ahead_timers(void);
+-
+ /*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+@@ -298,8 +296,6 @@ extern unsigned int hrtimer_resolution;
+
+ #define hrtimer_resolution (unsigned int)LOW_RES_NSEC
+
+-static inline void hrtimer_peek_ahead_timers(void) { }
+-
+ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+ {
+ return 0;
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1368,10 +1368,7 @@ void hrtimer_interrupt(struct clock_even
+ ktime_to_ns(delta));
+ }
+
+-/*
+- * local version of hrtimer_peek_ahead_timers() called with interrupts
+- * disabled.
+- */
++/* called with interrupts disabled */
+ static inline void __hrtimer_peek_ahead_timers(void)
+ {
+ struct tick_device *td;
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
new file mode 100644
index 00000000000000..75914c661e077a
--- /dev/null
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -0,0 +1,206 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:31 -0500
+Subject: hrtimer: move timers by default into the softirq context
+
+We can't have hrtimer callbacks running in hardirq context on RT. Therefore
+CLOCK_MONOTONIC (and so on) are mapped by default to CLOCK_MONOTONIC_SOFT
+behaviour (and their callbacks are invoked in softirq context).
+There are a few timers which need to run in hardirq context even on RT.
+Those are:
+- very short running timers where low latency is critical (kvm lapic)
+- timers which take raw locks and need to run in hardirq context (perf, sched)
+- wake up related timers (kernel side of clock_nanosleep() and so on)
+
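+Such timers select the hard bases explicitly, e.g. (taken from the lapic
+hunk below):
+
+	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC_HARD,
+		     HRTIMER_MODE_ABS_PINNED);
+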
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kvm/lapic.c | 2 +-
+ include/linux/hrtimer.h | 6 ++++++
+ kernel/events/core.c | 4 ++--
+ kernel/sched/core.c | 2 +-
+ kernel/sched/deadline.c | 2 +-
+ kernel/sched/rt.c | 4 ++--
+ kernel/time/hrtimer.c | 22 +++++++++++++++++++++-
+ kernel/time/tick-broadcast-hrtimer.c | 2 +-
+ kernel/time/tick-sched.c | 2 +-
+ kernel/watchdog.c | 2 +-
+ 10 files changed, 37 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2059,7 +2059,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+ }
+ apic->vcpu = vcpu;
+
+- hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
++ hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC_HARD,
+ HRTIMER_MODE_ABS_PINNED);
+ apic->lapic_timer.timer.function = apic_timer_fn;
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -29,12 +29,18 @@
+ * are kernel internal and never exported to user space.
+ */
+ #define HRTIMER_BASE_SOFT_MASK MAX_CLOCKS
++#define HRTIMER_BASE_HARD_MASK (MAX_CLOCKS << 1)
+
+ #define CLOCK_REALTIME_SOFT (CLOCK_REALTIME | HRTIMER_BASE_SOFT_MASK)
+ #define CLOCK_MONOTONIC_SOFT (CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
+ #define CLOCK_BOOTTIME_SOFT (CLOCK_BOOTTIME | HRTIMER_BASE_SOFT_MASK)
+ #define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
+
++#define CLOCK_REALTIME_HARD (CLOCK_REALTIME | HRTIMER_BASE_HARD_MASK)
++#define CLOCK_MONOTONIC_HARD (CLOCK_MONOTONIC | HRTIMER_BASE_HARD_MASK)
++#define CLOCK_BOOTTIME_HARD (CLOCK_BOOTTIME | HRTIMER_BASE_HARD_MASK)
++#define CLOCK_TAI_HARD (CLOCK_TAI | HRTIMER_BASE_HARD_MASK)
++
+ struct hrtimer_clock_base;
+ struct hrtimer_cpu_base;
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1042,7 +1042,7 @@ static void __perf_mux_hrtimer_init(stru
+ cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
+
+ raw_spin_lock_init(&cpuctx->hrtimer_lock);
+- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
++ hrtimer_init(timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_ABS_PINNED);
+ timer->function = perf_mux_hrtimer_handler;
+ }
+
+@@ -8485,7 +8485,7 @@ static void perf_swevent_init_hrtimer(st
+ if (!is_sampling_event(event))
+ return;
+
+- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swevent_hrtimer;
+
+ /*
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -350,7 +350,7 @@ static void init_rq_hrtick(struct rq *rq
+ rq->hrtick_csd.info = rq;
+ #endif
+
+- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
+ rq->hrtick_timer.function = hrtick;
+ }
+ #else /* CONFIG_SCHED_HRTICK */
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -691,7 +691,7 @@ void init_dl_task_timer(struct sched_dl_
+ {
+ struct hrtimer *timer = &dl_se->dl_timer;
+
+- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
+ timer->function = dl_task_timer;
+ }
+
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -46,8 +46,8 @@ void init_rt_bandwidth(struct rt_bandwid
+
+ raw_spin_lock_init(&rt_b->rt_runtime_lock);
+
+- hrtimer_init(&rt_b->rt_period_timer,
+- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC_HARD,
++ HRTIMER_MODE_REL);
+ rt_b->rt_period_timer.function = sched_rt_period_timer;
+ }
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -122,20 +122,32 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base,
+ }
+ };
+
+-#define MAX_CLOCKS_HRT (MAX_CLOCKS * 2)
++#define MAX_CLOCKS_HRT (MAX_CLOCKS * 3)
+
+ static const int hrtimer_clock_to_base_table[MAX_CLOCKS_HRT] = {
+ /* Make sure we catch unsupported clockids */
+ [0 ... MAX_CLOCKS_HRT - 1] = HRTIMER_MAX_CLOCK_BASES,
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME_SOFT,
++ [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC_SOFT,
++ [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME_SOFT,
++ [CLOCK_TAI] = HRTIMER_BASE_TAI_SOFT,
++#else
+ [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
+ [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
+ [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
+ [CLOCK_TAI] = HRTIMER_BASE_TAI,
++#endif
+ [CLOCK_REALTIME_SOFT] = HRTIMER_BASE_REALTIME_SOFT,
+ [CLOCK_MONOTONIC_SOFT] = HRTIMER_BASE_MONOTONIC_SOFT,
+ [CLOCK_BOOTTIME_SOFT] = HRTIMER_BASE_BOOTTIME_SOFT,
+ [CLOCK_TAI_SOFT] = HRTIMER_BASE_TAI_SOFT,
++
++ [CLOCK_REALTIME_HARD] = HRTIMER_BASE_REALTIME,
++ [CLOCK_MONOTONIC_HARD] = HRTIMER_BASE_MONOTONIC,
++ [CLOCK_BOOTTIME_HARD] = HRTIMER_BASE_BOOTTIME,
++ [CLOCK_TAI_HARD] = HRTIMER_BASE_TAI,
+ };
+
+ /*
+@@ -1194,7 +1206,11 @@ static inline int hrtimer_clockid_to_bas
+ return base;
+ }
+ WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ return HRTIMER_BASE_MONOTONIC_SOFT;
++#else
+ return HRTIMER_BASE_MONOTONIC;
++#endif
+ }
+
+ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+@@ -1587,6 +1603,10 @@ static void __hrtimer_init_sleeper(struc
+ enum hrtimer_mode mode,
+ struct task_struct *task)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (!(clock_id & HRTIMER_BASE_SOFT_MASK))
++ clock_id |= HRTIMER_BASE_HARD_MASK;
++#endif
+ __hrtimer_init(&sl->timer, clock_id, mode);
+ sl->timer.function = hrtimer_wakeup;
+ sl->task = task;
+--- a/kernel/time/tick-broadcast-hrtimer.c
++++ b/kernel/time/tick-broadcast-hrtimer.c
+@@ -105,7 +105,7 @@ static enum hrtimer_restart bc_handler(s
+
+ void tick_setup_hrtimer_broadcast(void)
+ {
+- hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ hrtimer_init(&bctimer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_ABS);
+ bctimer.function = bc_handler;
+ clockevents_register_device(&ce_broadcast_hrtimer);
+ }
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1196,7 +1196,7 @@ void tick_setup_sched_timer(void)
+ /*
+ * Emulate tick processing via per-CPU hrtimers:
+ */
+- hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_ABS);
+ ts->sched_timer.function = tick_sched_timer;
+
+ /* Get the next period (per-CPU) */
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -382,7 +382,7 @@ static void watchdog_enable(unsigned int
+ struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
+
+ /* kick off the timer for the hardlockup detector */
+- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(hrtimer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
+
+ /* Enable the perf event */
diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
new file mode 100644
index 00000000000000..3474b406ce5bf1
--- /dev/null
+++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -0,0 +1,243 @@
+From 3c3702c557d49df718523d04ae1393ce05769ef6 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 4 Sep 2017 18:31:50 +0200
+Subject: [PATCH] hrtimer: consolidate hrtimer_init() + hrtimer_init_sleeper()
+ calls
+
+hrtimer_init_sleeper() calls require a prior initialisation of the
+hrtimer object with hrtimer_init(). Let's make the initialisation of the
+hrtimer object part of hrtimer_init_sleeper().
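+
+Callers then collapse from two calls into one; the blk-mq hunk below is
+representative:
+
+	/* before */
+	hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
+	hrtimer_init_sleeper(&hs, current);
+
+	/* after */
+	hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current);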
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ block/blk-mq.c | 3 +--
+ include/linux/hrtimer.h | 19 ++++++++++++++++---
+ include/linux/wait.h | 5 ++---
+ kernel/futex.c | 19 ++++++++-----------
+ kernel/time/hrtimer.c | 47 ++++++++++++++++++++++++++++++++++++++---------
+ net/core/pktgen.c | 4 ++--
+ 6 files changed, 67 insertions(+), 30 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2848,10 +2848,9 @@ static bool blk_mq_poll_hybrid_sleep(str
+ kt = nsecs;
+
+ mode = HRTIMER_MODE_REL;
+- hrtimer_init_on_stack(&hs.timer, CLOCK_MONOTONIC, mode);
++ hrtimer_init_sleeper_on_stack(&hs, CLOCK_MONOTONIC, mode, current);
+ hrtimer_set_expires(&hs.timer, kt);
+
+- hrtimer_init_sleeper(&hs, current);
+ do {
+ if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
+ break;
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -358,10 +358,17 @@ DECLARE_PER_CPU(struct tick_device, tick
+ /* Initialize timers: */
+ extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
+ enum hrtimer_mode mode);
++extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
++ enum hrtimer_mode mode,
++ struct task_struct *task);
+
+ #ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+ extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
+ enum hrtimer_mode mode);
++extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
++ clockid_t clock_id,
++ enum hrtimer_mode mode,
++ struct task_struct *task);
+
+ extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
+ #else
+@@ -371,6 +378,15 @@ static inline void hrtimer_init_on_stack
+ {
+ hrtimer_init(timer, which_clock, mode);
+ }
++
++static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
++ clockid_t clock_id,
++ enum hrtimer_mode mode,
++ struct task_struct *task)
++{
++ hrtimer_init_sleeper(sl, clock_id, mode, task);
++}
++
+ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
+ #endif
+
+@@ -472,9 +488,6 @@ extern long hrtimer_nanosleep(struct tim
+ const clockid_t clockid);
+ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
+
+-extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
+- struct task_struct *tsk);
+-
+ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
+ const enum hrtimer_mode mode);
+ extern int schedule_hrtimeout_range_clock(ktime_t *expires,
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -508,9 +508,8 @@ do { \
+ int __ret = 0; \
+ struct hrtimer_sleeper __t; \
+ \
+- hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
+- HRTIMER_MODE_REL); \
+- hrtimer_init_sleeper(&__t, current); \
++ hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \
++ HRTIMER_MODE_REL, current); \
+ if ((timeout) != KTIME_MAX) \
+ hrtimer_start_range_ns(&__t.timer, timeout, \
+ current->timer_slack_ns, \
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2500,10 +2500,9 @@ static int futex_wait(u32 __user *uaddr,
+ if (abs_time) {
+ to = &timeout;
+
+- hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
+- CLOCK_REALTIME : CLOCK_MONOTONIC,
+- HRTIMER_MODE_ABS);
+- hrtimer_init_sleeper(to, current);
++ hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ?
++ CLOCK_REALTIME : CLOCK_MONOTONIC,
++ HRTIMER_MODE_ABS, current);
+ hrtimer_set_expires_range_ns(&to->timer, *abs_time,
+ current->timer_slack_ns);
+ }
+@@ -2599,9 +2598,8 @@ static int futex_lock_pi(u32 __user *uad
+
+ if (time) {
+ to = &timeout;
+- hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
+- HRTIMER_MODE_ABS);
+- hrtimer_init_sleeper(to, current);
++ hrtimer_init_sleeper_on_stack(to, CLOCK_REALTIME,
++ HRTIMER_MODE_ABS, current);
+ hrtimer_set_expires(&to->timer, *time);
+ }
+
+@@ -3011,10 +3009,9 @@ static int futex_wait_requeue_pi(u32 __u
+
+ if (abs_time) {
+ to = &timeout;
+- hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
+- CLOCK_REALTIME : CLOCK_MONOTONIC,
+- HRTIMER_MODE_ABS);
+- hrtimer_init_sleeper(to, current);
++ hrtimer_init_sleeper_on_stack(to, (flags & FLAGS_CLOCKRT) ?
++ CLOCK_REALTIME : CLOCK_MONOTONIC,
++ HRTIMER_MODE_ABS, current);
+ hrtimer_set_expires_range_ns(&to->timer, *abs_time,
+ current->timer_slack_ns);
+ }
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1554,17 +1554,46 @@ static enum hrtimer_restart hrtimer_wake
+ return HRTIMER_NORESTART;
+ }
+
+-void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
++static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
++ clockid_t clock_id,
++ enum hrtimer_mode mode,
++ struct task_struct *task)
+ {
++ __hrtimer_init(&sl->timer, clock_id, mode);
+ sl->timer.function = hrtimer_wakeup;
+ sl->task = task;
+ }
++
++/**
++ * hrtimer_init_sleeper - initialize sleeper to the given clock
++ * @sl:	the sleeper to be initialized
++ * @clock_id:	the clock to be used
++ * @mode:	timer mode abs/rel
++ * @task:	the task to wake up on timer expiry
++ */
++void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id,
++ enum hrtimer_mode mode, struct task_struct *task)
++{
++ debug_init(&sl->timer, clock_id, mode);
++ __hrtimer_init_sleeper(sl, clock_id, mode, task);
++}
+ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
+
+-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
++#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
++void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl,
++ clockid_t clock_id,
++ enum hrtimer_mode mode,
++ struct task_struct *task)
+ {
+- hrtimer_init_sleeper(t, current);
++ debug_object_init_on_stack(&sl->timer, &hrtimer_debug_descr);
++ __hrtimer_init_sleeper(sl, clock_id, mode, task);
++}
++EXPORT_SYMBOL_GPL(hrtimer_init_sleeper_on_stack);
++#endif
++
+
++static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
++{
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+ hrtimer_start_expires(&t->timer, mode);
+@@ -1604,8 +1633,8 @@ long __sched hrtimer_nanosleep_restart(s
+ struct timespec __user *rmtp;
+ int ret = 0;
+
+- hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
+- HRTIMER_MODE_ABS);
++ hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
++ HRTIMER_MODE_ABS, current);
+ hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
+
+ if (do_nanosleep(&t, HRTIMER_MODE_ABS))
+@@ -1637,8 +1666,9 @@ long hrtimer_nanosleep(struct timespec *
+ if (dl_task(current) || rt_task(current))
+ slack = 0;
+
+- hrtimer_init_on_stack(&t.timer, clockid, mode);
++ hrtimer_init_sleeper_on_stack(&t, clockid, mode, current);
+ hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
++
+ if (do_nanosleep(&t, mode))
+ goto out;
+
+@@ -1816,10 +1846,9 @@ schedule_hrtimeout_range_clock(ktime_t *
+ return -EINTR;
+ }
+
+- hrtimer_init_on_stack(&t.timer, clock_id, mode);
+- hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
++ hrtimer_init_sleeper_on_stack(&t, clock_id, mode, current);
+
+- hrtimer_init_sleeper(&t, current);
++ hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
+
+ hrtimer_start_expires(&t.timer, mode);
+
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2252,7 +2252,8 @@ static void spin(struct pktgen_dev *pkt_
+ s64 remaining;
+ struct hrtimer_sleeper t;
+
+- hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ hrtimer_init_sleeper_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS,
++ current);
+ hrtimer_set_expires(&t.timer, spin_until);
+
+ remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
+@@ -2267,7 +2268,6 @@ static void spin(struct pktgen_dev *pkt_
+ } while (ktime_compare(end_time, spin_until) < 0);
+ } else {
+ /* see do_nanosleep */
+- hrtimer_init_sleeper(&t, current);
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+ hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
diff --git a/patches/hrtimer-enfore-64byte-alignment.patch b/patches/hrtimer-enfore-64byte-alignment.patch
deleted file mode 100644
index fccb336bdee667..00000000000000
--- a/patches/hrtimer-enfore-64byte-alignment.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 23 Dec 2015 20:57:41 +0100
-Subject: hrtimer: enfore 64byte alignment
-
-The patch "hrtimer: Fixup hrtimer callback changes for preempt-rt" adds
-a list_head expired to struct hrtimer_clock_base and with it we run into
-BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/hrtimer.h | 4 ----
- 1 file changed, 4 deletions(-)
-
---- a/include/linux/hrtimer.h
-+++ b/include/linux/hrtimer.h
-@@ -112,11 +112,7 @@ struct hrtimer_sleeper {
- struct task_struct *task;
- };
-
--#ifdef CONFIG_64BIT
- # define HRTIMER_CLOCK_BASE_ALIGN 64
--#else
--# define HRTIMER_CLOCK_BASE_ALIGN 32
--#endif
-
- /**
- * struct hrtimer_clock_base - the timer base for a specific clock
diff --git a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
deleted file mode 100644
index f191a08096385c..00000000000000
--- a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ /dev/null
@@ -1,337 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 3 Jul 2009 08:44:31 -0500
-Subject: hrtimer: Fixup hrtimer callback changes for preempt-rt
-
-In preempt-rt we can not call the callbacks which take sleeping locks
-from the timer interrupt context.
-
-Bring back the softirq split for now, until we fixed the signal
-delivery problem for real.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
-
----
- include/linux/hrtimer.h | 7 ++
- kernel/sched/core.c | 1
- kernel/sched/rt.c | 1
- kernel/time/hrtimer.c | 143 ++++++++++++++++++++++++++++++++++++++++++++---
- kernel/time/tick-sched.c | 1
- kernel/watchdog.c | 1
- 6 files changed, 145 insertions(+), 9 deletions(-)
-
---- a/include/linux/hrtimer.h
-+++ b/include/linux/hrtimer.h
-@@ -86,6 +86,8 @@ enum hrtimer_restart {
- * was armed.
- * @function: timer expiry callback function
- * @base: pointer to the timer base (per cpu and per clock)
-+ * @cb_entry: list entry to defer timers from hardirq context
-+ * @irqsafe: timer can run in hardirq context
- * @state: state information (See bit values above)
- * @is_rel: Set if the timer was armed relative
- *
-@@ -96,6 +98,8 @@ struct hrtimer {
- ktime_t _softexpires;
- enum hrtimer_restart (*function)(struct hrtimer *);
- struct hrtimer_clock_base *base;
-+ struct list_head cb_entry;
-+ int irqsafe;
- u8 state;
- u8 is_rel;
- };
-@@ -121,6 +125,7 @@ struct hrtimer_sleeper {
- * timer to a base on another cpu.
- * @clockid: clock id for per_cpu support
- * @active: red black tree root node for the active timers
-+ * @expired: list head for deferred timers.
- * @get_time: function to retrieve the current time of the clock
- * @offset: offset of this clock to the monotonic base
- */
-@@ -129,6 +134,7 @@ struct hrtimer_clock_base {
- int index;
- clockid_t clockid;
- struct timerqueue_head active;
-+ struct list_head expired;
- ktime_t (*get_time)(void);
- ktime_t offset;
- } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
-@@ -172,6 +178,7 @@ struct hrtimer_cpu_base {
- raw_spinlock_t lock;
- seqcount_t seq;
- struct hrtimer *running;
-+ struct hrtimer *running_soft;
- unsigned int cpu;
- unsigned int active_bases;
- unsigned int clock_was_set_seq;
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -352,6 +352,7 @@ static void init_rq_hrtick(struct rq *rq
-
- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- rq->hrtick_timer.function = hrtick;
-+ rq->hrtick_timer.irqsafe = 1;
- }
- #else /* CONFIG_SCHED_HRTICK */
- static inline void hrtick_clear(struct rq *rq)
---- a/kernel/sched/rt.c
-+++ b/kernel/sched/rt.c
-@@ -48,6 +48,7 @@ void init_rt_bandwidth(struct rt_bandwid
-
- hrtimer_init(&rt_b->rt_period_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ rt_b->rt_period_timer.irqsafe = 1;
- rt_b->rt_period_timer.function = sched_rt_period_timer;
- }
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -719,11 +719,8 @@ static inline int hrtimer_is_hres_enable
- static inline void hrtimer_switch_to_hres(void) { }
- static inline void
- hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
--static inline int hrtimer_reprogram(struct hrtimer *timer,
-- struct hrtimer_clock_base *base)
--{
-- return 0;
--}
-+static inline void hrtimer_reprogram(struct hrtimer *timer,
-+ struct hrtimer_clock_base *base) { }
- static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
- static inline void retrigger_next_event(void *arg) { }
-
-@@ -844,7 +841,7 @@ void hrtimer_wait_for_timer(const struct
- {
- struct hrtimer_clock_base *base = timer->base;
-
-- if (base && base->cpu_base && !hrtimer_hres_active())
-+ if (base && base->cpu_base && !timer->irqsafe)
- wait_event(base->cpu_base->wait,
- !(hrtimer_callback_running(timer)));
- }
-@@ -894,6 +891,11 @@ static void __remove_hrtimer(struct hrti
- if (!(state & HRTIMER_STATE_ENQUEUED))
- return;
-
-+ if (unlikely(!list_empty(&timer->cb_entry))) {
-+ list_del_init(&timer->cb_entry);
-+ return;
-+ }
-+
- if (!timerqueue_del(&base->active, &timer->node))
- cpu_base->active_bases &= ~(1 << base->index);
-
-@@ -1134,6 +1136,7 @@ static void __hrtimer_init(struct hrtime
-
- base = hrtimer_clockid_to_base(clock_id);
- timer->base = &cpu_base->clock_base[base];
-+ INIT_LIST_HEAD(&timer->cb_entry);
- timerqueue_init(&timer->node);
- }
-
-@@ -1168,6 +1171,7 @@ bool hrtimer_active(const struct hrtimer
- seq = raw_read_seqcount_begin(&cpu_base->seq);
-
- if (timer->state != HRTIMER_STATE_INACTIVE ||
-+ cpu_base->running_soft == timer ||
- cpu_base->running == timer)
- return true;
-
-@@ -1265,10 +1269,109 @@ static void __run_hrtimer(struct hrtimer
- cpu_base->running = NULL;
- }
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
-+ struct hrtimer_clock_base *base)
-+{
-+ int leftmost;
-+
-+ if (restart != HRTIMER_NORESTART &&
-+ !(timer->state & HRTIMER_STATE_ENQUEUED)) {
-+
-+ leftmost = enqueue_hrtimer(timer, base);
-+ if (!leftmost)
-+ return;
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+ if (!hrtimer_is_hres_active(timer)) {
-+ /*
-+ * Kick to reschedule the next tick to handle the new timer
-+ * on dynticks target.
-+ */
-+ if (base->cpu_base->nohz_active)
-+ wake_up_nohz_cpu(base->cpu_base->cpu);
-+ } else {
-+
-+ hrtimer_reprogram(timer, base);
-+ }
-+#endif
-+ }
-+}
-+
-+/*
-+ * The changes in mainline which removed the callback modes from
-+ * hrtimer are not yet working with -rt. The non wakeup_process()
-+ * based callbacks which involve sleeping locks need to be treated
-+ * seperately.
-+ */
-+static void hrtimer_rt_run_pending(void)
-+{
-+ enum hrtimer_restart (*fn)(struct hrtimer *);
-+ struct hrtimer_cpu_base *cpu_base;
-+ struct hrtimer_clock_base *base;
-+ struct hrtimer *timer;
-+ int index, restart;
-+
-+ local_irq_disable();
-+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
-+
-+ raw_spin_lock(&cpu_base->lock);
-+
-+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
-+ base = &cpu_base->clock_base[index];
-+
-+ while (!list_empty(&base->expired)) {
-+ timer = list_first_entry(&base->expired,
-+ struct hrtimer, cb_entry);
-+
-+ /*
-+ * Same as the above __run_hrtimer function
-+ * just we run with interrupts enabled.
-+ */
-+ debug_deactivate(timer);
-+ cpu_base->running_soft = timer;
-+ raw_write_seqcount_barrier(&cpu_base->seq);
-+
-+ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
-+ fn = timer->function;
-+
-+ raw_spin_unlock_irq(&cpu_base->lock);
-+ restart = fn(timer);
-+ raw_spin_lock_irq(&cpu_base->lock);
-+
-+ hrtimer_rt_reprogram(restart, timer, base);
-+ raw_write_seqcount_barrier(&cpu_base->seq);
-+
-+ WARN_ON_ONCE(cpu_base->running_soft != timer);
-+ cpu_base->running_soft = NULL;
-+ }
-+ }
-+
-+ raw_spin_unlock_irq(&cpu_base->lock);
-+
-+ wake_up_timer_waiters(cpu_base);
-+}
-+
-+static int hrtimer_rt_defer(struct hrtimer *timer)
-+{
-+ if (timer->irqsafe)
-+ return 0;
-+
-+ __remove_hrtimer(timer, timer->base, timer->state, 0);
-+ list_add_tail(&timer->cb_entry, &timer->base->expired);
-+ return 1;
-+}
-+
-+#else
-+
-+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
-+
-+#endif
-+
- static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
- {
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
-+ int raise = 0;
-
- for (; active; base++, active >>= 1) {
- struct timerqueue_node *node;
-@@ -1299,9 +1402,14 @@ static void __hrtimer_run_queues(struct
- if (basenow < hrtimer_get_softexpires_tv64(timer))
- break;
-
-- __run_hrtimer(cpu_base, base, timer, &basenow);
-+ if (!hrtimer_rt_defer(timer))
-+ __run_hrtimer(cpu_base, base, timer, &basenow);
-+ else
-+ raise = 1;
- }
- }
-+ if (raise)
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- }
-
- #ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1443,8 +1551,6 @@ void hrtimer_run_queues(void)
- now = hrtimer_update_base(cpu_base);
- __hrtimer_run_queues(cpu_base, now);
- raw_spin_unlock(&cpu_base->lock);
--
-- wake_up_timer_waiters(cpu_base);
- }
-
- /*
-@@ -1466,6 +1572,7 @@ static enum hrtimer_restart hrtimer_wake
- void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
- {
- sl->timer.function = hrtimer_wakeup;
-+ sl->timer.irqsafe = 1;
- sl->task = task;
- }
- EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1600,6 +1707,7 @@ int hrtimers_prepare_cpu(unsigned int cp
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- cpu_base->clock_base[i].cpu_base = cpu_base;
- timerqueue_init_head(&cpu_base->clock_base[i].active);
-+ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
- }
-
- cpu_base->cpu = cpu;
-@@ -1676,9 +1784,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
-
- #endif /* CONFIG_HOTPLUG_CPU */
-
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+
-+static void run_hrtimer_softirq(struct softirq_action *h)
-+{
-+ hrtimer_rt_run_pending();
-+}
-+
-+static void hrtimers_open_softirq(void)
-+{
-+ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-+}
-+
-+#else
-+static void hrtimers_open_softirq(void) { }
-+#endif
-+
- void __init hrtimers_init(void)
- {
- hrtimers_prepare_cpu(smp_processor_id());
-+ hrtimers_open_softirq();
- }
-
- /**
---- a/kernel/time/tick-sched.c
-+++ b/kernel/time/tick-sched.c
-@@ -1197,6 +1197,7 @@ void tick_setup_sched_timer(void)
- * Emulate tick processing via per-CPU hrtimers:
- */
- hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-+ ts->sched_timer.irqsafe = 1;
- ts->sched_timer.function = tick_sched_timer;
-
- /* Get the next period (per-CPU) */
---- a/kernel/watchdog.c
-+++ b/kernel/watchdog.c
-@@ -384,6 +384,7 @@ static void watchdog_enable(unsigned int
- /* kick off the timer for the hardlockup detector */
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer->function = watchdog_timer_fn;
-+ hrtimer->irqsafe = 1;
-
- /* Enable the perf event */
- watchdog_nmi_enable(cpu);
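With this patch dropped, the per-timer irqsafe flag no longer exists;
hardirq expiry is instead selected via the clock id, as the
CLOCK_MONOTONIC_HARD conversions earlier in this diff (tick-broadcast,
tick-sched, watchdog) show. A rough before/after sketch, with my_timer
and my_handler as hypothetical names:

	static struct hrtimer my_timer;
	static enum hrtimer_restart my_handler(struct hrtimer *t);

	/*
	 * Old scheme (removed here): initialise on the normal clock and
	 * set the per-timer flag to keep the callback in hardirq context.
	 */
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.irqsafe = 1;
	my_timer.function = my_handler;

	/* New scheme: the clock id selects the hard base, no flag needed. */
	hrtimer_init(&my_timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
	my_timer.function = my_handler;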
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index 65cded2b0287c8..91b349cf821026 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -23,19 +23,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/timerqueue.h>
+#include <linux/wait.h>
- struct hrtimer_clock_base;
- struct hrtimer_cpu_base;
-@@ -191,6 +192,9 @@ struct hrtimer_cpu_base {
- unsigned int nr_hangs;
- unsigned int max_hang_time;
- #endif
+ /*
+ * Clock ids for hrtimers which expire in softirq context. These clock ids
+@@ -212,6 +213,9 @@ struct hrtimer_cpu_base {
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ ktime_t softirq_expires_next;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ wait_queue_head_t wait;
+#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
-@@ -400,6 +404,13 @@ static inline void hrtimer_restart(struc
+@@ -426,6 +430,13 @@ static inline void hrtimer_restart(struc
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
@@ -49,18 +49,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
-@@ -424,7 +435,7 @@ static inline int hrtimer_is_queued(stru
+@@ -450,7 +461,7 @@ static inline int hrtimer_is_queued(stru
* Helper function to check, whether the timer is running the callback
* function
*/
-static inline int hrtimer_callback_running(struct hrtimer *timer)
+static inline int hrtimer_callback_running(const struct hrtimer *timer)
{
- return timer->base->cpu_base->running == timer;
+ return timer->base->running == timer;
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -827,6 +827,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -862,6 +862,33 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -81,7 +81,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+{
+ struct hrtimer_clock_base *base = timer->base;
+
-+ if (base && base->cpu_base && !hrtimer_hres_active())
++ if (base && base->cpu_base &&
++ base->index >= HRTIMER_BASE_MONOTONIC_SOFT)
+ wait_event(base->cpu_base->wait,
+ !(hrtimer_callback_running(timer)));
+}
@@ -93,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -1032,7 +1058,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1109,7 +1136,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
@@ -102,19 +103,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1417,6 +1443,8 @@ void hrtimer_run_queues(void)
- now = hrtimer_update_base(cpu_base);
- __hrtimer_run_queues(cpu_base, now);
- raw_spin_unlock(&cpu_base->lock);
-+
+@@ -1385,6 +1412,7 @@ static __latent_entropy void hrtimer_run
+ hrtimer_update_softirq_timer(cpu_base, true);
+
+ raw_spin_unlock_irq(&cpu_base->lock);
+ wake_up_timer_waiters(cpu_base);
}
- /*
-@@ -1576,6 +1604,9 @@ int hrtimers_prepare_cpu(unsigned int cp
-
- cpu_base->cpu = cpu;
- hrtimer_init_hres(cpu_base);
+ #ifdef CONFIG_HIGH_RES_TIMERS
+@@ -1727,6 +1755,9 @@ int hrtimers_prepare_cpu(unsigned int cp
+ cpu_base->hres_active = 0;
+ cpu_base->expires_next = KTIME_MAX;
+ cpu_base->softirq_expires_next = KTIME_MAX;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ init_waitqueue_head(&cpu_base->wait);
+#endif
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index cdd39f20c11968..f3f9b28e938148 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -103,6 +103,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
+@@ -102,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
rt_rq->push_cpu = nr_cpu_ids;
raw_spin_lock_init(&rt_rq->push_lock);
init_irq_work(&rt_rq->push_work, push_irq_work_func);
diff --git a/patches/kernel-hrtimer-don-t-wakeup-a-process-while-holding-.patch b/patches/kernel-hrtimer-don-t-wakeup-a-process-while-holding-.patch
deleted file mode 100644
index e97b1ad0558a25..00000000000000
--- a/patches/kernel-hrtimer-don-t-wakeup-a-process-while-holding-.patch
+++ /dev/null
@@ -1,86 +0,0 @@
-From 9e5116d50ea95f2a2e420216bc5a01bd1bdd7e86 Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 31 Aug 2017 18:19:06 +0200
-Subject: [PATCH] kernel/hrtimer: don't wakeup a process while holding the
- hrtimer base lock
-
-We must not wake any process (and thus acquire the pi->lock) while
-holding the hrtimer's base lock. This does not happen usually because
-the hrtimer-callback is invoked in IRQ-context and so
-raise_softirq_irqoff() does not wakeup a process.
-However during CPU-hotplug it might get called from hrtimers_dead_cpu()
-which would wakeup the thread immediately.
-
-Reported-by: Mike Galbraith <efault@gmx.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 15 ++++++++++-----
- 1 file changed, 10 insertions(+), 5 deletions(-)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1367,7 +1367,7 @@ static inline int hrtimer_rt_defer(struc
-
- #endif
-
--static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
-+static int __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
- {
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
-@@ -1408,8 +1408,7 @@ static void __hrtimer_run_queues(struct
- raise = 1;
- }
- }
-- if (raise)
-- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+ return raise;
- }
-
- #ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1423,6 +1422,7 @@ void hrtimer_interrupt(struct clock_even
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- ktime_t expires_next, now, entry_time, delta;
- int retries = 0;
-+ int raise;
-
- BUG_ON(!cpu_base->hres_active);
- cpu_base->nr_events++;
-@@ -1441,7 +1441,7 @@ void hrtimer_interrupt(struct clock_even
- */
- cpu_base->expires_next = KTIME_MAX;
-
-- __hrtimer_run_queues(cpu_base, now);
-+ raise = __hrtimer_run_queues(cpu_base, now);
-
- /* Reevaluate the clock bases for the next expiry */
- expires_next = __hrtimer_get_next_event(cpu_base);
-@@ -1452,6 +1452,8 @@ void hrtimer_interrupt(struct clock_even
- cpu_base->expires_next = expires_next;
- cpu_base->in_hrtirq = 0;
- raw_spin_unlock(&cpu_base->lock);
-+ if (raise)
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-
- /* Reprogramming necessary ? */
- if (!tick_program_event(expires_next, 0)) {
-@@ -1531,6 +1533,7 @@ void hrtimer_run_queues(void)
- {
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- ktime_t now;
-+ int raise;
-
- if (__hrtimer_hres_active(cpu_base))
- return;
-@@ -1549,8 +1552,10 @@ void hrtimer_run_queues(void)
-
- raw_spin_lock(&cpu_base->lock);
- now = hrtimer_update_base(cpu_base);
-- __hrtimer_run_queues(cpu_base, now);
-+ raise = __hrtimer_run_queues(cpu_base, now);
- raw_spin_unlock(&cpu_base->lock);
-+ if (raise)
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- }
-
- /*
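Although this fix is removed here (the new softirq hrtimer
infrastructure makes it obsolete), the locking rule it encoded carries
over: raise_softirq_irqoff() may wake ktimersoftd and thus take a
task's pi_lock, so it must only run after cpu_base->lock has been
dropped. Schematically, following the hunk above:

	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	/* only records that a softirq raise is needed */
	raise = __hrtimer_run_queues(cpu_base, now);
	raw_spin_unlock(&cpu_base->lock);

	/* safe here: no hrtimer base lock held while a wakeup may occur */
	if (raise)
		raise_softirq_irqoff(HRTIMER_SOFTIRQ);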
diff --git a/patches/kernel-hrtimer-hotplug-don-t-wake-ktimersoftd-while-.patch b/patches/kernel-hrtimer-hotplug-don-t-wake-ktimersoftd-while-.patch
deleted file mode 100644
index 925c2af1942ebf..00000000000000
--- a/patches/kernel-hrtimer-hotplug-don-t-wake-ktimersoftd-while-.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From 4ca8658769126558b04a1d78b7d6bee389bfa24b Mon Sep 17 00:00:00 2001
-From: Mike Galbraith <efault@gmx.de>
-Date: Sun, 3 Sep 2017 04:48:10 +0200
-Subject: [PATCH] kernel/hrtimer/hotplug: don't wake ktimersoftd while holding
- the hrtimer base lock
-
-kernel/hrtimer: don't wakeup a process while holding the hrtimer base lock
-missed a path, namely hrtimers_dead_cpu() -> migrate_hrtimer_list(). Defer
-raising softirq until after base lock has been released there as well.
-
-Signed-off-by: Mike Galbraith <efault@gmx.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 19 +++++++++++++------
- 1 file changed, 13 insertions(+), 6 deletions(-)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1725,7 +1725,7 @@ int hrtimers_prepare_cpu(unsigned int cp
-
- #ifdef CONFIG_HOTPLUG_CPU
-
--static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-+static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
- struct hrtimer_clock_base *new_base)
- {
- struct hrtimer *timer;
-@@ -1755,15 +1755,19 @@ static void migrate_hrtimer_list(struct
- }
- #ifdef CONFIG_PREEMPT_RT_BASE
- list_splice_tail(&old_base->expired, &new_base->expired);
-- if (!list_empty(&new_base->expired))
-- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+ /*
-+ * Tell the caller to raise HRTIMER_SOFTIRQ. We can't safely
-+ * acquire ktimersoftd->pi_lock while the base lock is held.
-+ */
-+ return !list_empty(&new_base->expired);
- #endif
-+ return 0;
- }
-
- int hrtimers_dead_cpu(unsigned int scpu)
- {
- struct hrtimer_cpu_base *old_base, *new_base;
-- int i;
-+ int i, raise = 0;
-
- BUG_ON(cpu_online(scpu));
- tick_cancel_sched_timer(scpu);
-@@ -1779,13 +1783,16 @@ int hrtimers_dead_cpu(unsigned int scpu)
- raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-- migrate_hrtimer_list(&old_base->clock_base[i],
-- &new_base->clock_base[i]);
-+ raise |= migrate_hrtimer_list(&old_base->clock_base[i],
-+ &new_base->clock_base[i]);
- }
-
- raw_spin_unlock(&old_base->lock);
- raw_spin_unlock(&new_base->lock);
-
-+ if (raise)
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+
- /* Check, if we got expired work to do */
- __hrtimer_peek_ahead_timers();
- local_irq_enable();
diff --git a/patches/kernel-hrtimer-migrate-deferred-timer-on-CPU-down.patch b/patches/kernel-hrtimer-migrate-deferred-timer-on-CPU-down.patch
deleted file mode 100644
index 67e8c0e37ef343..00000000000000
--- a/patches/kernel-hrtimer-migrate-deferred-timer-on-CPU-down.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 18 Aug 2017 10:09:09 +0200
-Subject: [PATCH] kernel/hrtimer: migrate deferred timer on CPU down
-
-hrtimers, which were deferred to the softirq context, and expire between
-softirq shutdown and hrtimer migration are dangling around. If the CPU
-goes back up the list head will be initialized and this corrupts the
-timer's list. It will remain unnoticed until a hrtimer_cancel().
-This moves those timers so they will expire.
-
-Cc: stable-rt@vger.kernel.org
-Reported-by: Mike Galbraith <efault@gmx.de>
-Tested-by: Mike Galbraith <efault@gmx.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 5 +++++
- 1 file changed, 5 insertions(+)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1748,6 +1748,11 @@ static void migrate_hrtimer_list(struct
- */
- enqueue_hrtimer(timer, new_base);
- }
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ list_splice_tail(&old_base->expired, &new_base->expired);
-+ if (!list_empty(&new_base->expired))
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+#endif
- }
-
- int hrtimers_dead_cpu(unsigned int scpu)
diff --git a/patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch b/patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch
index 3474123a12ac4c..ff3f6be32617a1 100644
--- a/patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch
+++ b/patches/kernel-locking-use-an-exclusive-wait_q-for-sleeper.patch
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock(&current->pi_lock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -437,9 +437,15 @@ static bool set_nr_if_polling(struct tas
+@@ -436,9 +436,15 @@ static bool set_nr_if_polling(struct tas
#endif
#endif
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Atomically grab the task, if ->wake_q is !nil already it means
-@@ -468,12 +474,17 @@ void __wake_up_q(struct wake_q_head *hea
+@@ -467,12 +473,17 @@ void __wake_up_q(struct wake_q_head *hea
while (node != WAKE_Q_TAIL) {
struct task_struct *task;
diff --git a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
deleted file mode 100644
index a02027328c472a..00000000000000
--- a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 4 Feb 2016 16:38:10 +0100
-Subject: [PATCH] kernel/perf: mark perf_cpu_context's timer as irqsafe
-
-Otherwise we get a WARN_ON() backtrace and some events are reported as
-"not counted".
-
-Cc: stable-rt@vger.kernel.org
-Reported-by: Yang Shi <yang.shi@linaro.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/events/core.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -1044,6 +1044,7 @@ static void __perf_mux_hrtimer_init(stru
- raw_spin_lock_init(&cpuctx->hrtimer_lock);
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
- timer->function = perf_mux_hrtimer_handler;
-+ timer->irqsafe = 1;
- }
-
- static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index ae5699f012b75d..76175a23298b75 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
security_task_free(tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2795,15 +2795,6 @@ static struct rq *finish_task_switch(str
+@@ -2794,15 +2794,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 12bd473a33f5b0..25e5fadbaae8f1 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt12
++-rt13
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
deleted file mode 100644
index 1762d1cd82bb52..00000000000000
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-From: Yong Zhang <yong.zhang@windriver.com>
-Date: Wed, 11 Jul 2012 22:05:21 +0000
-Subject: perf: Make swevent hrtimer run in irq instead of softirq
-
-Otherwise we get a deadlock like below:
-
-[ 1044.042749] BUG: scheduling while atomic: ksoftirqd/21/141/0x00010003
-[ 1044.042752] INFO: lockdep is turned off.
-[ 1044.042754] Modules linked in:
-[ 1044.042757] Pid: 141, comm: ksoftirqd/21 Tainted: G W 3.4.0-rc2-rt3-23676-ga723175-dirty #29
-[ 1044.042759] Call Trace:
-[ 1044.042761] <IRQ> [<ffffffff8107d8e5>] __schedule_bug+0x65/0x80
-[ 1044.042770] [<ffffffff8168978c>] __schedule+0x83c/0xa70
-[ 1044.042775] [<ffffffff8106bdd2>] ? prepare_to_wait+0x32/0xb0
-[ 1044.042779] [<ffffffff81689a5e>] schedule+0x2e/0xa0
-[ 1044.042782] [<ffffffff81071ebd>] hrtimer_wait_for_timer+0x6d/0xb0
-[ 1044.042786] [<ffffffff8106bb30>] ? wake_up_bit+0x40/0x40
-[ 1044.042790] [<ffffffff81071f20>] hrtimer_cancel+0x20/0x40
-[ 1044.042794] [<ffffffff8111da0c>] perf_swevent_cancel_hrtimer+0x3c/0x50
-[ 1044.042798] [<ffffffff8111da31>] task_clock_event_stop+0x11/0x40
-[ 1044.042802] [<ffffffff8111da6e>] task_clock_event_del+0xe/0x10
-[ 1044.042805] [<ffffffff8111c568>] event_sched_out+0x118/0x1d0
-[ 1044.042809] [<ffffffff8111c649>] group_sched_out+0x29/0x90
-[ 1044.042813] [<ffffffff8111ed7e>] __perf_event_disable+0x18e/0x200
-[ 1044.042817] [<ffffffff8111c343>] remote_function+0x63/0x70
-[ 1044.042821] [<ffffffff810b0aae>] generic_smp_call_function_single_interrupt+0xce/0x120
-[ 1044.042826] [<ffffffff81022bc7>] smp_call_function_single_interrupt+0x27/0x40
-[ 1044.042831] [<ffffffff8168d50c>] call_function_single_interrupt+0x6c/0x80
-[ 1044.042833] <EOI> [<ffffffff811275b0>] ? perf_event_overflow+0x20/0x20
-[ 1044.042840] [<ffffffff8168b970>] ? _raw_spin_unlock_irq+0x30/0x70
-[ 1044.042844] [<ffffffff8168b976>] ? _raw_spin_unlock_irq+0x36/0x70
-[ 1044.042848] [<ffffffff810702e2>] run_hrtimer_softirq+0xc2/0x200
-[ 1044.042853] [<ffffffff811275b0>] ? perf_event_overflow+0x20/0x20
-[ 1044.042857] [<ffffffff81045265>] __do_softirq_common+0xf5/0x3a0
-[ 1044.042862] [<ffffffff81045c3d>] __thread_do_softirq+0x15d/0x200
-[ 1044.042865] [<ffffffff81045dda>] run_ksoftirqd+0xfa/0x210
-[ 1044.042869] [<ffffffff81045ce0>] ? __thread_do_softirq+0x200/0x200
-[ 1044.042873] [<ffffffff81045ce0>] ? __thread_do_softirq+0x200/0x200
-[ 1044.042877] [<ffffffff8106b596>] kthread+0xb6/0xc0
-[ 1044.042881] [<ffffffff8168b97b>] ? _raw_spin_unlock_irq+0x3b/0x70
-[ 1044.042886] [<ffffffff8168d994>] kernel_thread_helper+0x4/0x10
-[ 1044.042889] [<ffffffff8107d98c>] ? finish_task_switch+0x8c/0x110
-[ 1044.042894] [<ffffffff8168b97b>] ? _raw_spin_unlock_irq+0x3b/0x70
-[ 1044.042897] [<ffffffff8168bd5d>] ? retint_restore_args+0xe/0xe
-[ 1044.042900] [<ffffffff8106b4e0>] ? kthreadd+0x1e0/0x1e0
-[ 1044.042902] [<ffffffff8168d990>] ? gs_change+0xb/0xb
-
-Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
-Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Link: http://lkml.kernel.org/r/1341476476-5666-1-git-send-email-yong.zhang0@gmail.com
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-
----
- kernel/events/core.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -8487,6 +8487,7 @@ static void perf_swevent_init_hrtimer(st
-
- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hwc->hrtimer.function = perf_swevent_hrtimer;
-+ hwc->hrtimer.irqsafe = 1;
-
- /*
- * Since hrtimers have a fixed rate, we can do a static freq->period
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 225838f8b5e8cb..e98d84bc008d73 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -233,7 +233,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -528,6 +528,48 @@ void resched_curr(struct rq *rq)
+@@ -527,6 +527,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -282,7 +282,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2458,6 +2500,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2457,6 +2499,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3449,6 +3494,7 @@ static void __sched notrace __schedule(b
+@@ -3448,6 +3493,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3600,6 +3646,30 @@ static void __sched notrace preempt_sche
+@@ -3599,6 +3645,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3614,7 +3684,8 @@ asmlinkage __visible void __sched notrac
+@@ -3613,7 +3683,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3641,6 +3712,9 @@ asmlinkage __visible void __sched notrac
+@@ -3640,6 +3711,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5470,7 +5544,9 @@ void init_idle(struct task_struct *idle,
+@@ -5469,7 +5543,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7484,6 +7560,7 @@ void migrate_disable(void)
+@@ -7483,6 +7559,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
migrate_disable_update_cpus_allowed(p);
-@@ -7551,6 +7628,7 @@ void migrate_enable(void)
+@@ -7550,6 +7627,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -378,7 +378,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -7559,6 +7637,7 @@ void migrate_enable(void)
+@@ -7558,6 +7636,7 @@ void migrate_enable(void)
}
}
unpin_current_cpu();
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 9dda90ba1989ee..a464e280336c97 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1374,6 +1374,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1373,6 +1373,18 @@ int migrate_swap(struct task_struct *cur
return ret;
}
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1418,7 +1430,7 @@ unsigned long wait_task_inactive(struct
+@@ -1417,7 +1429,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1433,7 +1445,8 @@ unsigned long wait_task_inactive(struct
+@@ -1432,7 +1444,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
index a18dbc5b97fc49..3efe6bb93b74b2 100644
--- a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
+++ b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7449,6 +7449,47 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7448,6 +7448,47 @@ const u32 sched_prio_to_wmult[40] = {
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -7469,10 +7510,9 @@ void migrate_disable(void)
+@@ -7468,10 +7509,9 @@ void migrate_disable(void)
}
preempt_disable();
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
-@@ -7501,9 +7541,8 @@ void migrate_enable(void)
+@@ -7500,9 +7540,8 @@ void migrate_enable(void)
preempt_disable();
diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch
index aa886020d9ef28..e6d1fa49adb3c4 100644
--- a/patches/rt-add-rt-locks.patch
+++ b/patches/rt-add-rt-locks.patch
@@ -993,7 +993,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -2668,7 +2669,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2666,7 +2667,7 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -1002,7 +1002,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3035,7 +3036,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3032,7 +3033,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -2377,7 +2377,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -461,7 +461,7 @@ void wake_q_add(struct wake_q_head *head
+@@ -460,7 +460,7 @@ void wake_q_add(struct wake_q_head *head
head->lastp = &node->next;
}
@@ -2386,7 +2386,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
struct wake_q_node *node = head->first;
-@@ -478,7 +478,10 @@ void wake_up_q(struct wake_q_head *head)
+@@ -477,7 +477,10 @@ void wake_up_q(struct wake_q_head *head)
* wake_up_process() implies a wmb() to pair with the queueing
* in wake_q_add() so as not to miss wakeups.
*/
diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch
index 8e049dc5933504..b99bb40083f793 100644
--- a/patches/rt-introduce-cpu-chill.patch
+++ b/patches/rt-introduce-cpu-chill.patch
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* defined(_LINUX_DELAY_H) */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1725,6 +1725,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
+@@ -1782,6 +1782,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ unsigned int freeze_flag = current->flags & PF_NOFREEZE;
+
+ current->flags |= PF_NOFREEZE;
-+ hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
++ hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC_HARD);
+ if (!freeze_flag)
+ current->flags &= ~PF_NOFREEZE;
+}
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index 125274ddd9ee8d..ec7838e96bd337 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -2998,7 +3008,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2996,7 +3006,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -3054,20 +3064,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3051,20 +3061,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -3076,7 +3121,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3073,7 +3118,8 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
-@@ -3087,7 +3133,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3084,7 +3130,7 @@ static int futex_wait_requeue_pi(u32 __u
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
struct rt_mutex *pi_mutex;
-@@ -3101,7 +3147,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3098,7 +3144,8 @@ static int futex_wait_requeue_pi(u32 __u
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
diff --git a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
index f505d221ebde1b..3b023c4f240e87 100644
--- a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
+++ b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2206,7 +2206,7 @@ EXPORT_SYMBOL(wake_up_process);
+@@ -2205,7 +2205,7 @@ EXPORT_SYMBOL(wake_up_process);
*/
int wake_up_lock_sleeper(struct task_struct *p)
{
diff --git a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
deleted file mode 100644
index af69550adc1423..00000000000000
--- a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-From: Juri Lelli <juri.lelli@gmail.com>
-Date: Tue, 13 May 2014 15:30:20 +0200
-Subject: sched/deadline: dl_task_timer has to be irqsafe
-
-As for rt_period_timer, dl_task_timer has to be irqsafe.
-
-Signed-off-by: Juri Lelli <juri.lelli@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/sched/deadline.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -693,6 +693,7 @@ void init_dl_task_timer(struct sched_dl_
-
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- timer->function = dl_task_timer;
-+ timer->irqsafe = 1;
- }
-
- /*
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 8827d153e2a1ac..cfd979ca487249 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6271,7 +6271,7 @@ void __init sched_init(void)
+@@ -6270,7 +6270,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 57c74f27effa70..e95dd5aaf02e2c 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
VM_BUG_ON(atomic_read(&mm->mm_users));
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2785,8 +2785,12 @@ static struct rq *finish_task_switch(str
+@@ -2784,8 +2784,12 @@ static struct rq *finish_task_switch(str
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5612,6 +5616,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5611,6 +5615,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
-@@ -5626,7 +5632,12 @@ void idle_task_exit(void)
+@@ -5625,7 +5631,12 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5953,6 +5964,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5952,6 +5963,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(cpu);
hrtick_clear(rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index c9a28a1683d760..ad8c997cdb11c3 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2013,8 +2013,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2012,8 +2012,25 @@ try_to_wake_up(struct task_struct *p, un
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2180,6 +2197,18 @@ int wake_up_process(struct task_struct *
+@@ -2179,6 +2196,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
diff --git a/patches/sched-ttwu-ensure-success-return-is-correct.patch b/patches/sched-ttwu-ensure-success-return-is-correct.patch
index c413f66f597f93..e39bc8be623351 100644
--- a/patches/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/patches/sched-ttwu-ensure-success-return-is-correct.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2020,8 +2020,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2019,8 +2019,10 @@ try_to_wake_up(struct task_struct *p, un
* if the wakeup condition is true.
*/
if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index 2c6ec642e0af7d..95e46ed209a128 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3474,8 +3474,10 @@ static void __sched notrace __schedule(b
+@@ -3473,8 +3473,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/series b/patches/series
index 0985cc1292d05d..fb207e236f2e01 100644
--- a/patches/series
+++ b/patches/series
@@ -120,6 +120,31 @@ smp-hotplug-Move-unparking-of-percpu-threads-to-the-.patch
0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
###
+# soft hrtimer patches (v1)
+0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch
+0002-hrtimer-Correct-blantanly-wrong-comment.patch
+0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch
+0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch
+0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch
+0006-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch
+hrtimer-Remove-hrtimer_peek_ahead_timers-leftovers.patch
+0007-hrtimer-Reduce-conditional-code-hres_active.patch
+0008-hrtimer-Reduce-conditional-code-expires_next-next_ti.patch
+0009-hrtimer-Reduce-conditional-code-hrtimer_reprogram.patch
+0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch
+0011-hrtimer-Allow-remote-hrtimer-enqueue-with-expires_ne.patch
+0012-hrtimer-Simplify-hrtimer_reprogram-call.patch
+0013-hrtimer-Split-out-code-from-hrtimer_start_range_ns-f.patch
+0014-hrtimer-Split-out-code-from-__hrtimer_get_next_event.patch
+0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch
+0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch
+0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch
+0018-hrtimer-Enable-soft-and-hard-hrtimer.patch
+0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch
+0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch
+0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch
+0022-softirq-Remove-tasklet_hrtimer.patch
+
# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
@@ -387,17 +412,14 @@ panic-disable-random-on-rt.patch
timers-prepare-for-full-preemption.patch
timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+# KVM require constant freq TSC (smp function call -> cpufreq)
+x86-kvm-require-const-tsc-for-rt.patch
+
# HRTIMERS
+hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
hrtimers-prepare-full-preemption.patch
-hrtimer-enfore-64byte-alignment.patch
-hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
-sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+hrtimer-by-timers-by-default-into-the-softirq-context.patch
timer-fd-avoid-live-lock.patch
-tick-broadcast--Make-hrtimer-irqsafe.patch
-timer-hrtimer-check-properly-for-a-running-timer.patch
-kernel-hrtimer-migrate-deferred-timer-on-CPU-down.patch
-kernel-hrtimer-don-t-wakeup-a-process-while-holding-.patch
-kernel-hrtimer-hotplug-don-t-wake-ktimersoftd-while-.patch
# POSIX-CPU-TIMERS
posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -621,10 +643,6 @@ arm-enable-highmem-for-rt.patch
# SYSRQ
-# KVM require constant freq TSC (smp function call -> cpufreq)
-x86-kvm-require-const-tsc-for-rt.patch
-KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
-
# SCSI/FCOE
scsi-fcoe-rt-aware.patch
sas-ata-isci-dont-t-disable-interrupts-in-qc_issue-h.patch
@@ -669,8 +687,6 @@ lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
# PERF
-perf-make-swevent-hrtimer-irqsafe.patch
-kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
# RCU
rcu-disable-rcu-fast-no-hz-on-rt.patch
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 84be7bf11d2be0..95bea40b1505b1 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -117,9 +117,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-@@ -656,6 +661,12 @@ void tasklet_hrtimer_cancel(struct taskl
- tasklet_kill(&ttimer->tasklet);
- }
+@@ -631,6 +636,12 @@ extern void tasklet_kill_immediate(struc
+ extern void tasklet_init(struct tasklet_struct *t,
+ void (*func)(unsigned long), unsigned long data);
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
@@ -762,7 +762,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
softirq_vec[nr].action = action;
-@@ -747,23 +1097,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
+@@ -696,23 +1046,7 @@ EXPORT_SYMBOL(tasklet_unlock_wait);
static int ksoftirqd_should_run(unsigned int cpu)
{
@@ -787,7 +787,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#ifdef CONFIG_HOTPLUG_CPU
-@@ -830,6 +1164,8 @@ static int takeover_tasklets(unsigned in
+@@ -779,6 +1113,8 @@ static int takeover_tasklets(unsigned in
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
diff --git a/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
index 8428868af09ff9..4a1cfd52da3fc4 100644
--- a/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
+++ b/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_restore(flags);
#endif
}
-@@ -1176,18 +1225,30 @@ static int takeover_tasklets(unsigned in
+@@ -1125,18 +1174,30 @@ static int takeover_tasklets(unsigned in
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
.setup = ksoftirqd_set_sched_params,
diff --git a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index 6bc42d7508fc33..23674c93211df4 100644
--- a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -365,7 +365,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
-@@ -660,6 +728,23 @@ void __init softirq_init(void)
+@@ -609,6 +677,23 @@ void __init softirq_init(void)
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
diff --git a/patches/tick-broadcast--Make-hrtimer-irqsafe.patch b/patches/tick-broadcast--Make-hrtimer-irqsafe.patch
deleted file mode 100644
index ae524bc7ed7f56..00000000000000
--- a/patches/tick-broadcast--Make-hrtimer-irqsafe.patch
+++ /dev/null
@@ -1,57 +0,0 @@
-Subject: tick/broadcast: Make broadcast hrtimer irqsafe
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sat, 27 Feb 2016 10:47:10 +0100
-
-Otherwise we end up with the following:
-
-|=================================
-|[ INFO: inconsistent lock state ]
-|4.4.2-rt7+ #5 Not tainted
-|---------------------------------
-|inconsistent {IN-HARDIRQ-W} -> {HARDIRQ-ON-W} usage.
-|ktimersoftd/0/4 [HC0[0]:SC0[0]:HE1:SE1] takes:
-| (tick_broadcast_lock){?.....}, at: [<ffffffc000150db4>] tick_handle_oneshot_broadcast+0x58/0x27c
-|{IN-HARDIRQ-W} state was registered at:
-| [<ffffffc000118198>] mark_lock+0x19c/0x6a0
-| [<ffffffc000119728>] __lock_acquire+0xb1c/0x2100
-| [<ffffffc00011b560>] lock_acquire+0xf8/0x230
-| [<ffffffc00061bf08>] _raw_spin_lock_irqsave+0x50/0x68
-| [<ffffffc000152188>] tick_broadcast_switch_to_oneshot+0x20/0x60
-| [<ffffffc0001529f4>] tick_switch_to_oneshot+0x64/0xd8
-| [<ffffffc000152b00>] tick_init_highres+0x1c/0x24
-| [<ffffffc000141e58>] hrtimer_run_queues+0x78/0x100
-| [<ffffffc00013f804>] update_process_times+0x38/0x74
-| [<ffffffc00014fc5c>] tick_periodic+0x60/0x140
-| [<ffffffc00014fd68>] tick_handle_periodic+0x2c/0x94
-| [<ffffffc00052b878>] arch_timer_handler_phys+0x3c/0x48
-| [<ffffffc00012d078>] handle_percpu_devid_irq+0x100/0x390
-| [<ffffffc000127f34>] generic_handle_irq+0x34/0x4c
-| [<ffffffc000128300>] __handle_domain_irq+0x90/0xf8
-| [<ffffffc000082554>] gic_handle_irq+0x5c/0xa4
-| [<ffffffc0000855ac>] el1_irq+0x6c/0xec
-| [<ffffffc000112bec>] default_idle_call+0x2c/0x44
-| [<ffffffc000113058>] cpu_startup_entry+0x3cc/0x410
-| [<ffffffc0006169f8>] rest_init+0x158/0x168
-| [<ffffffc000888954>] start_kernel+0x3a0/0x3b4
-| [<0000000080621000>] 0x80621000
-|irq event stamp: 18723
-|hardirqs last enabled at (18723): [<ffffffc00061c188>] _raw_spin_unlock_irq+0x38/0x80
-|hardirqs last disabled at (18722): [<ffffffc000140a4c>] run_hrtimer_softirq+0x2c/0x2f4
-|softirqs last enabled at (0): [<ffffffc0000c4744>] copy_process.isra.50+0x300/0x16d4
-|softirqs last disabled at (0): [< (null)>] (null)
-
-Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/time/tick-broadcast-hrtimer.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/kernel/time/tick-broadcast-hrtimer.c
-+++ b/kernel/time/tick-broadcast-hrtimer.c
-@@ -107,5 +107,6 @@ void tick_setup_hrtimer_broadcast(void)
- {
- hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- bctimer.function = bc_handler;
-+ bctimer.irqsafe = true;
- clockevents_register_device(&ce_broadcast_hrtimer);
- }
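
The deletion above is possible because the soft/hard clock base split makes the per-timer workaround unnecessary: the broadcast hrtimer simply stays on a hard-interrupt base instead of being deferred to ktimersoftd, which is what provoked the lockdep splat. For reference, the RT-only field the dropped patch relied on (a sketch restating the hunk above; the irqsafe member existed only in the RT queue):

    hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    bctimer.function = bc_handler;
    /* RT-only flag: run the callback from hard interrupt context
     * rather than ktimersoftd; superseded by the soft/hard base split.
     */
    bctimer.irqsafe = true;
    clockevents_register_device(&ce_broadcast_hrtimer);
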
diff --git a/patches/timer-hrtimer-check-properly-for-a-running-timer.patch b/patches/timer-hrtimer-check-properly-for-a-running-timer.patch
deleted file mode 100644
index cc05b42923902f..00000000000000
--- a/patches/timer-hrtimer-check-properly-for-a-running-timer.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 1 Mar 2017 16:30:49 +0100
-Subject: [PATCH] timer/hrtimer: check properly for a running timer
-
-hrtimer_callback_running() checks only whether a timer is running on a
-CPU in hardirq context. This is okay for !RT. In an RT environment we
-move most timers to the timer softirq and therefore also need to check
-whether the timer is running in softirq context.
-
-Cc: stable-rt@vger.kernel.org
-Reported-by: Alexander Gerasiov <gq@cs.msu.su>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/hrtimer.h | 8 +++++++-
- 1 file changed, 7 insertions(+), 1 deletion(-)
-
---- a/include/linux/hrtimer.h
-+++ b/include/linux/hrtimer.h
-@@ -440,7 +440,13 @@ static inline int hrtimer_is_queued(stru
- */
- static inline int hrtimer_callback_running(const struct hrtimer *timer)
- {
-- return timer->base->cpu_base->running == timer;
-+ if (timer->base->cpu_base->running == timer)
-+ return 1;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ if (timer->base->cpu_base->running_soft == timer)
-+ return 1;
-+#endif
-+ return 0;
- }
-
- /* Forward a hrtimer so it expires after now: */
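
This file goes the same way: patch 0006 moves the running timer into the clock base, so hrtimer_callback_running() sees a callback executing on a soft base without an RT special case. The check matters for cancellation paths that must spin until a callback running on another CPU has finished; a sketch of such a caller (the pattern behind hrtimer_cancel(), not a quote of it):

    /* Must not return while the callback still runs anywhere -- on RT
     * that includes softirq context, which is what the dropped patch
     * taught hrtimer_callback_running() to see.
     */
    static void wait_for_callback(struct hrtimer *timer)
    {
            while (hrtimer_callback_running(timer))
                    cpu_relax();
    }
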
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 9b2ab733b968a6..f0366227c246ca 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1701,10 +1701,6 @@ static inline void ttwu_activate(struct
+@@ -1700,10 +1700,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2157,58 +2153,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2156,58 +2152,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3496,21 +3440,6 @@ static void __sched notrace __schedule(b
+@@ -3495,21 +3439,6 @@ static void __sched notrace __schedule(b
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3575,6 +3504,14 @@ static inline void sched_submit_work(str
+@@ -3574,6 +3503,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -138,7 +138,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3583,6 +3520,12 @@ static inline void sched_submit_work(str
+@@ -3582,6 +3519,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3593,6 +3536,7 @@ asmlinkage __visible void __sched schedu
+@@ -3592,6 +3535,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
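
The hunks above only track an upstream line shift; the patch still moves the workqueue wakeup hooks out of the runqueue lock and into sched_submit_work(), where the next file builds on them. A sketch of the combined result, assembled from the visible hunks rather than quoted from either patch:

    static inline void sched_submit_work(struct task_struct *tsk)
    {
            if (!tsk->state || tsk_is_pi_blocked(tsk))
                    return;

            /* A worker going to sleep notifies the workqueue core here,
             * in preemptible context, instead of from deep inside
             * __schedule() under the runqueue lock.
             */
            if (tsk->flags & PF_WQ_WORKER)
                    wq_worker_sleeping(tsk);

            /* Flush plugged block I/O before sleeping to avoid
             * deadlocks.
             */
            if (blk_needs_flush_plug(tsk))
                    blk_schedule_flush_plug(tsk);
    }
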
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 639ffd2d3e94de..c889fd1d09e344 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3548,9 +3548,8 @@ void __noreturn do_task_dead(void)
+@@ -3547,9 +3547,8 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3558,6 +3557,10 @@ static inline void sched_submit_work(str
+@@ -3557,6 +3556,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);