author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2016-01-12 16:23:00 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2016-01-12 16:23:00 +0100
commit     c5f0ba59007d49527ec8c89cd6ff721a8f5c294e (patch)
tree       affd020051f4f6c0c4d52128ce6e49f9901daeeb
parent     7bbd599b8bb8ca9e9b7cf0fdf77da61fc1eb7d11 (diff)
download   4.12-rt-patches-c5f0ba59007d49527ec8c89cd6ff721a8f5c294e.tar.gz
[ANNOUNCE] 4.4-rt2
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch | 12
-rw-r--r--  patches/block-blk-mq-use-swait.patch | 6
-rw-r--r--  patches/block-shorten-interrupt-disabled-regions.patch | 10
-rw-r--r--  patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch | 2
-rw-r--r--  patches/cond-resched-softirq-rt.patch | 2
-rw-r--r--  patches/cpu-rt-rework-cpu-down.patch | 4
-rw-r--r--  patches/fs-block-rt-support.patch | 2
-rw-r--r--  patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch | 2
-rw-r--r--  patches/introduce_migrate_disable_cpu_light.patch | 4
-rw-r--r--  patches/latency-hist.patch | 2
-rw-r--r--  patches/localversion.patch | 4
-rw-r--r--  patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch | 4
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch | 10
-rw-r--r--  patches/mm-rt-kmap-atomic-scheduling.patch | 2
-rw-r--r--  patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch | 2
-rw-r--r--  patches/oleg-signal-rt-fix.patch | 2
-rw-r--r--  patches/perf-make-swevent-hrtimer-irqsafe.patch | 2
-rw-r--r--  patches/posix-timers-thread-posix-cpu-timers-on-rt.patch | 2
-rw-r--r--  patches/preempt-lazy-support.patch | 2
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 2
-rw-r--r--  patches/sched-delay-put-task.patch | 6
-rw-r--r--  patches/sched-mmdrop-delayed.patch | 2
-rw-r--r--  patches/sched-reset-task-s-lockless-wake-queues-on-fork.patch | 34
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 2
-rw-r--r--  patches/series | 1
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 2
-rw-r--r--  patches/softirq-split-locks.patch | 4
-rw-r--r--  patches/vtime-split-lock-and-seqcount.patch | 2
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch | 2
-rw-r--r--  patches/x86-mce-timer-hrtimer.patch | 22
-rw-r--r--  patches/x86-mce-use-swait-queue-for-mce-wakeups.patch | 6
31 files changed, 63 insertions, 98 deletions
diff --git a/patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch b/patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch
index 94fddc6f575c6a..b461f6f3bb9e23 100644
--- a/patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch
+++ b/patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch
@@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
++vcpu->stat.halt_wakeup;
}
-@@ -701,8 +701,8 @@ int kvmppc_pseries_do_hcall(struct kvm_v
+@@ -707,8 +707,8 @@ int kvmppc_pseries_do_hcall(struct kvm_v
tvcpu->arch.prodded = 1;
smp_mb();
if (vcpu->arch.ceded) {
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
vcpu->stat.halt_wakeup++;
}
}
-@@ -1441,7 +1441,7 @@ static struct kvmppc_vcore *kvmppc_vcore
+@@ -1447,7 +1447,7 @@ static struct kvmppc_vcore *kvmppc_vcore
INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
spin_lock_init(&vcore->stoltb_lock);
@@ -146,7 +146,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
vcore->first_vcpuid = core * threads_per_subcore;
-@@ -2513,10 +2513,9 @@ static void kvmppc_vcore_blocked(struct
+@@ -2519,10 +2519,9 @@ static void kvmppc_vcore_blocked(struct
{
struct kvm_vcpu *vcpu;
int do_sleep = 1;
@@ -159,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Check one last time for pending exceptions and ceded state after
-@@ -2530,7 +2529,7 @@ static void kvmppc_vcore_blocked(struct
+@@ -2536,7 +2535,7 @@ static void kvmppc_vcore_blocked(struct
}
if (!do_sleep) {
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
}
-@@ -2538,7 +2537,7 @@ static void kvmppc_vcore_blocked(struct
+@@ -2544,7 +2543,7 @@ static void kvmppc_vcore_blocked(struct
trace_kvmppc_vcore_blocked(vc, 0);
spin_unlock(&vc->lock);
schedule();
@@ -177,7 +177,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
trace_kvmppc_vcore_blocked(vc, 1);
-@@ -2594,7 +2593,7 @@ static int kvmppc_run_vcpu(struct kvm_ru
+@@ -2600,7 +2599,7 @@ static int kvmppc_run_vcpu(struct kvm_ru
kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
diff --git a/patches/block-blk-mq-use-swait.patch b/patches/block-blk-mq-use-swait.patch
index d062d02a211dbe..d0c2e94918e603 100644
--- a/patches/block-blk-mq-use-swait.patch
+++ b/patches/block-blk-mq-use-swait.patch
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -644,7 +644,7 @@ int blk_queue_enter(struct request_queue
+@@ -660,7 +660,7 @@ int blk_queue_enter(struct request_queue
if (!gfpflags_allow_blocking(gfp))
return -EBUSY;
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!atomic_read(&q->mq_freeze_depth) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
-@@ -664,7 +664,7 @@ static void blk_queue_usage_counter_rele
+@@ -680,7 +680,7 @@ static void blk_queue_usage_counter_rele
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
-@@ -726,7 +726,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -742,7 +742,7 @@ struct request_queue *blk_alloc_queue_no
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
diff --git a/patches/block-shorten-interrupt-disabled-regions.patch b/patches/block-shorten-interrupt-disabled-regions.patch
index ef9a3567eece84..6422d01853a6a5 100644
--- a/patches/block-shorten-interrupt-disabled-regions.patch
+++ b/patches/block-shorten-interrupt-disabled-regions.patch
@@ -47,7 +47,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -3182,7 +3182,7 @@ static void queue_unplugged(struct reque
+@@ -3198,7 +3198,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -56,7 +56,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3230,7 +3230,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3246,7 +3246,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -64,7 +64,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3250,11 +3249,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3266,11 +3265,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
@@ -76,7 +76,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3267,7 +3261,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3283,7 +3277,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -85,7 +85,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
/*
-@@ -3294,8 +3288,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3310,8 +3304,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
diff --git a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index 38cce01941fafd..fef6f77b70d2f9 100644
--- a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1913,14 +1913,17 @@ static void drain_local_stock(struct wor
+@@ -1937,14 +1937,17 @@ static void drain_local_stock(struct wor
*/
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index fb2f3d7e89b193..fa4f589c5ae9b3 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2984,12 +2984,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -2986,12 +2986,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 39543b48345f3c..416ab1e2858650 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2282,6 +2282,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -2284,6 +2284,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -2294,6 +2298,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -2296,6 +2300,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
diff --git a/patches/fs-block-rt-support.patch b/patches/fs-block-rt-support.patch
index 9e0ba604887ee6..cd957c2c68a423 100644
--- a/patches/fs-block-rt-support.patch
+++ b/patches/fs-block-rt-support.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -217,7 +217,7 @@ EXPORT_SYMBOL(blk_delay_queue);
+@@ -233,7 +233,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
**/
void blk_start_queue(struct request_queue *q)
{
diff --git a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index c3a203e8eb5818..92064d31e96d0c 100644
--- a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -11366,7 +11366,7 @@ void intel_check_page_flip(struct drm_de
+@@ -11376,7 +11376,7 @@ void intel_check_page_flip(struct drm_de
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
index 5be4b994c05d13..8d5df970442520 100644
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ b/patches/introduce_migrate_disable_cpu_light.patch
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1836,9 +1842,6 @@ extern int arch_task_struct_size __read_
+@@ -1837,9 +1843,6 @@ extern int arch_task_struct_size __read_
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
-@@ -3114,6 +3117,26 @@ static inline void set_task_cpu(struct t
+@@ -3116,6 +3119,26 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index 557aaefe80aec2..f287de9c73f4f0 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -228,7 +228,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *start_site;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1794,6 +1794,12 @@ struct task_struct {
+@@ -1795,6 +1795,12 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 1e8ff31d716fd3..24f89a4b90f490 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -1,4 +1,4 @@
-Subject: v4.4-rc6-rt1
+Subject: v4.4-rt2
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt1
++-rt2
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index a1c70c329121ed..ce0cdf65dc84d9 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1933,7 +1933,7 @@ static void drain_all_stock(struct mem_c
+@@ -1957,7 +1957,7 @@ static void drain_all_stock(struct mem_c
return;
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -1950,7 +1950,7 @@ static void drain_all_stock(struct mem_c
+@@ -1974,7 +1974,7 @@ static void drain_all_stock(struct mem_c
schedule_work_on(cpu, &stock->work);
}
}
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 1ac923195c74ec..316c252fa81a64 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static const char * const mem_cgroup_stat_names[] = {
"cache",
"rss",
-@@ -4584,12 +4587,12 @@ static int mem_cgroup_move_account(struc
+@@ -4615,12 +4618,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5341,10 +5344,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5373,10 +5376,10 @@ void mem_cgroup_commit_charge(struct pag
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
}
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5400,14 +5403,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5432,14 +5435,14 @@ static void uncharge_batch(struct mem_cg
memcg_oom_recover(memcg);
}
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5599,6 +5602,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5631,6 +5634,7 @@ void mem_cgroup_swapout(struct page *pag
{
struct mem_cgroup *memcg;
unsigned short oldid;
@@ -112,7 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5627,9 +5631,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5659,9 +5663,13 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 43fa53f1d2e809..510f94c7397d17 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -229,7 +229,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#include <asm/page.h>
#include <asm/ptrace.h>
-@@ -1847,6 +1848,12 @@ struct task_struct {
+@@ -1848,6 +1849,12 @@ struct task_struct {
int softirq_nestcnt;
unsigned int softirqs_raised;
#endif
diff --git a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index 455a7e7f5af499..b90553dfa13624 100644
--- a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
-@@ -888,7 +888,7 @@ void dev_deactivate_many(struct list_hea
+@@ -890,7 +890,7 @@ void dev_deactivate_many(struct list_hea
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 7222dd3ca80d33..ada05998ed6a3f 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1577,6 +1577,10 @@ struct task_struct {
+@@ -1578,6 +1578,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
index da73668e16e816..7963bd868397ac 100644
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/patches/perf-make-swevent-hrtimer-irqsafe.patch
@@ -58,7 +58,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -7243,6 +7243,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -7228,6 +7228,7 @@ static void perf_swevent_init_hrtimer(st
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index b150e65775d6cd..427fd3174c1572 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1542,6 +1542,9 @@ struct task_struct {
+@@ -1543,6 +1543,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 0b15414f106cf7..6a11d6905eade3 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -165,7 +165,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2961,6 +2961,43 @@ static inline int test_tsk_need_resched(
+@@ -2963,6 +2963,43 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 3592ac0874320f..1da7a69886d280 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
-@@ -2981,6 +2978,51 @@ static inline int signal_pending_state(l
+@@ -2983,6 +2980,51 @@ static inline int signal_pending_state(l
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index b627092852028c..a9e4213403fb88 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1829,6 +1829,9 @@ struct task_struct {
+@@ -1830,6 +1830,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
-@@ -2037,6 +2040,15 @@ extern struct pid *cad_pid;
+@@ -2039,6 +2042,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -2044,6 +2056,7 @@ static inline void put_task_struct(struc
+@@ -2046,6 +2058,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 65b7088e30d288..ea0c9073b65a3f 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __user *bd_addr;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2603,12 +2603,24 @@ extern struct mm_struct * mm_alloc(void)
+@@ -2605,12 +2605,24 @@ extern struct mm_struct * mm_alloc(void)
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
diff --git a/patches/sched-reset-task-s-lockless-wake-queues-on-fork.patch b/patches/sched-reset-task-s-lockless-wake-queues-on-fork.patch
deleted file mode 100644
index 2a2c272b608e99..00000000000000
--- a/patches/sched-reset-task-s-lockless-wake-queues-on-fork.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From c4c38e7dcd4e925f624cc7fe18aeaad841fd7d6f Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 21 Dec 2015 18:17:10 +0100
-Subject: [PATCH] sched: reset task's lockless wake-queues on fork()
-
-In 7675104990ed ("sched: Implement lockless wake-queues") we gained
-lockless wake-queues. -RT managed to lock itself up with those: there
-could be multiple attempts to enqueue task X for a wakeup _even_ if
-task X is already running.
-The reason is that task X could be runnable but not yet on a CPU. If
-the task performing the wakeup did not leave the CPU, it could perform
-multiple wakeups.
-With the proper timing, task X could be running and still be enqueued
-for a wakeup. If this happens while X is performing a fork(), then its
-child will have a !NULL `wake_q` member copied.
-This is not a problem as long as the child task does not participate in
-lockless wakeups :)
-
-Fixes: 7675104990ed ("sched: Implement lockless wake-queues")
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/fork.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -380,6 +380,7 @@ static struct task_struct *dup_task_stru
- #endif
- tsk->splice_pipe = NULL;
- tsk->task_frag.page = NULL;
-+ tsk->wake_q.next = NULL;
-
- account_kernel_stack(ti, 1);
-
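The patch removed above explains why a freshly forked task needs its lockless
wake-queue node cleared. Below is a minimal userspace sketch (not kernel code)
of that gate: wake_q_add() only queues a task whose ->wake_q.next is NULL, so a
child that inherits a stale non-NULL node from its parent at fork() time is
silently skipped for wakeups until the node is reset. The names mirror the
kernel's wake_q API, but the types are simplified and the list plumbing is
omitted; treat it as an illustration of the race, not the actual fix.

/*
 * Userspace model of the wake_q "already queued" check.  Only the
 * cmpxchg gate of wake_q_add() is reproduced; list handling is omitted.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdatomic.h>

struct wake_q_node {
	struct wake_q_node *_Atomic next;
};

/* Marker meaning "this task sits at the tail of some wake queue". */
#define WAKE_Q_TAIL ((struct wake_q_node *)0x1)

struct task {
	const char *comm;
	struct wake_q_node wake_q;
};

/* Returns 1 if @t was queued, 0 if it already looked queued. */
static int wake_q_add(struct task *t)
{
	struct wake_q_node *expected = NULL;

	return atomic_compare_exchange_strong(&t->wake_q.next, &expected,
					      WAKE_Q_TAIL);
}

int main(void)
{
	struct task parent = { .comm = "parent" };
	struct task child;

	wake_q_add(&parent);	/* parent is now on someone's wake queue */

	child = parent;		/* dup_task_struct(): start from a copy   */
	child.comm = "child";

	/* Without the reset, the stale node makes the child unwakeable: */
	printf("child queued without reset: %d\n", wake_q_add(&child));

	/* The one-line reset from the dropped patch: */
	child.wake_q.next = NULL;
	printf("child queued after reset:   %d\n", wake_q_add(&child));

	return 0;
}

Built with any C11 compiler, the first wake_q_add() on the copied child
returns 0 (skipped) and the second, after the reset, returns 1.
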
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index 66f46e6896fbcc..164b3c5a9586f8 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -2480,6 +2481,7 @@ extern void xtime_update(unsigned long t
+@@ -2482,6 +2483,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
diff --git a/patches/series b/patches/series
index b980cbaccecbbe..9e9f18f91daccb 100644
--- a/patches/series
+++ b/patches/series
@@ -13,7 +13,6 @@
############################################################
# Stuff broken upstream, patches submitted
############################################################
-sched-reset-task-s-lockless-wake-queues-on-fork.patch
############################################################
# Stuff which needs addressing upstream, but requires more
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 96e8c4719d24c6..35338c41edd095 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1566,6 +1566,7 @@ struct task_struct {
+@@ -1567,6 +1567,7 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 00d527083e6f1b..31e499340ca4b1 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Are we in NMI context?
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1832,6 +1832,8 @@ struct task_struct {
+@@ -1833,6 +1833,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
@@ -181,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
-@@ -2096,6 +2098,7 @@ extern void thread_group_cputime_adjuste
+@@ -2098,6 +2100,7 @@ extern void thread_group_cputime_adjuste
/*
* Per process flags
*/
diff --git a/patches/vtime-split-lock-and-seqcount.patch b/patches/vtime-split-lock-and-seqcount.patch
index 1b792f26a42d92..31539c782c8e05 100644
--- a/patches/vtime-split-lock-and-seqcount.patch
+++ b/patches/vtime-split-lock-and-seqcount.patch
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1519,7 +1519,8 @@ struct task_struct {
+@@ -1520,7 +1520,8 @@ struct task_struct {
cputime_t gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 270702b4950408..0768469a0adcac 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5782,6 +5782,13 @@ int kvm_arch_init(void *opaque)
+@@ -5787,6 +5787,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-mce-timer-hrtimer.patch b/patches/x86-mce-timer-hrtimer.patch
index b3b27813d0fcfc..812ed9c329dc15 100644
--- a/patches/x86-mce-timer-hrtimer.patch
+++ b/patches/x86-mce-timer-hrtimer.patch
@@ -34,7 +34,7 @@ fold in:
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1225,7 +1226,7 @@ void mce_log_therm_throt_event(__u64 sta
+@@ -1236,7 +1237,7 @@ void mce_log_therm_throt_event(__u64 sta
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -43,7 +43,7 @@ fold in:
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1234,32 +1235,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1245,32 +1246,18 @@ static unsigned long mce_adjust_timer_de
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
@@ -82,7 +82,7 @@ fold in:
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1282,7 +1269,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1293,7 +1280,7 @@ static void mce_timer_fn(unsigned long d
done:
__this_cpu_write(mce_next_interval, iv);
@@ -91,7 +91,7 @@ fold in:
}
/*
-@@ -1290,7 +1277,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1301,7 +1288,7 @@ static void mce_timer_fn(unsigned long d
*/
void mce_timer_kick(unsigned long interval)
{
@@ -100,7 +100,7 @@ fold in:
unsigned long iv = __this_cpu_read(mce_next_interval);
__restart_timer(t, interval);
-@@ -1305,7 +1292,7 @@ static void mce_timer_delete_all(void)
+@@ -1316,7 +1303,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -109,7 +109,7 @@ fold in:
}
static void mce_do_trigger(struct work_struct *work)
-@@ -1628,7 +1615,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1639,7 +1626,7 @@ static void __mcheck_cpu_clear_vendor(st
}
}
@@ -118,7 +118,7 @@ fold in:
{
unsigned long iv = check_interval * HZ;
-@@ -1637,16 +1624,17 @@ static void mce_start_timer(unsigned int
+@@ -1648,16 +1635,17 @@ static void mce_start_timer(unsigned int
per_cpu(mce_next_interval, cpu) = iv;
@@ -140,7 +140,7 @@ fold in:
mce_start_timer(cpu, t);
}
-@@ -2365,6 +2353,8 @@ static void mce_disable_cpu(void *h)
+@@ -2376,6 +2364,8 @@ static void mce_disable_cpu(void *h)
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
@@ -149,7 +149,7 @@ fold in:
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
-@@ -2387,6 +2377,7 @@ static void mce_reenable_cpu(void *h)
+@@ -2398,6 +2388,7 @@ static void mce_reenable_cpu(void *h)
if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
}
@@ -157,7 +157,7 @@ fold in:
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2394,7 +2385,6 @@ static int
+@@ -2405,7 +2396,6 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -165,7 +165,7 @@ fold in:
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
-@@ -2414,11 +2404,9 @@ mce_cpu_callback(struct notifier_block *
+@@ -2425,11 +2415,9 @@ mce_cpu_callback(struct notifier_block *
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
diff --git a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
index e0b1abaaa2a0d5..7793973e85851e 100644
--- a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -68,7 +68,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1302,6 +1303,56 @@ static void mce_do_trigger(struct work_s
+@@ -1313,6 +1314,56 @@ static void mce_do_trigger(struct work_s
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -125,7 +125,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
-@@ -1309,19 +1360,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1320,19 +1371,8 @@ static DECLARE_WORK(mce_trigger_work, mc
*/
int mce_notify_irq(void)
{
@@ -146,7 +146,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
return 1;
}
return 0;
-@@ -2445,6 +2485,10 @@ static __init int mcheck_init_device(voi
+@@ -2456,6 +2496,10 @@ static __init int mcheck_init_device(voi
goto err_out;
}