| author | Paul Gortmaker <paul.gortmaker@windriver.com> | 2016-12-25 10:49:29 -0500 |
|---|---|---|
| committer | Paul Gortmaker <paul.gortmaker@windriver.com> | 2016-12-25 10:49:29 -0500 |
| commit | e9429124b4bfa3dc2e35aed53fa666bb3168cea0 (patch) | |
| tree | f17f2b692b738271935890b47ca788d29e2acbb7 | |
| parent | 34b54eb536dde6970e8b447806904d322fe5e74f (diff) | |
| download | 4.9-rt-patches-e9429124b4bfa3dc2e35aed53fa666bb3168cea0.tar.gz | |
cpu: interim refresh for cpu_hotplug.wq init change upstream (rt-v4.8-627-gaf79ad2b1f33)
-rw-r--r-- | patches/cpu-rt-rework-cpu-down.patch | 41 |
1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index fa4b8495d2dc2a..46053d660d9637 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -1,6 +1,7 @@
+From 6839e793e1c1e832d61ff4a3cea5650394bc2688 Mon Sep 17 00:00:00 2001
 From: Steven Rostedt <srostedt@redhat.com>
 Date: Mon, 16 Jul 2012 08:07:43 +0000
-Subject: cpu/rt: Rework cpu down for PREEMPT_RT
+Subject: [PATCH] cpu/rt: Rework cpu down for PREEMPT_RT
 
 Bringing a CPU down is a pain with the PREEMPT_RT kernel because
 tasks can be preempted in many more places than in non-RT. In
@@ -47,16 +48,17 @@ This helps fix issues with ksoftirqd and workqueue that
 unbind on CPU down.
 
 Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
 ---
- include/linux/sched.h |    7 +
- kernel/cpu.c          |  238 +++++++++++++++++++++++++++++++++++++++++---------
- kernel/sched/core.c   |   78 ++++++++++++++++
- 3 files changed, 281 insertions(+), 42 deletions(-)
+ include/linux/sched.h |    7 ++
+ kernel/cpu.c          |  236 +++++++++++++++++++++++++++++++++++++++++---------
+ kernel/sched/core.c   |   78 +++++++++++++++++
+ 3 files changed, 280 insertions(+), 41 deletions(-)
 
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index bdcb1276d4b8..7bf2fa0d2a2d 100644
 --- a/include/linux/sched.h
 +++ b/include/linux/sched.h
-@@ -2429,6 +2429,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -2442,6 +2442,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
  extern int set_cpus_allowed_ptr(struct task_struct *p,
  				const struct cpumask *new_mask);
@@ -67,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #else
  static inline void do_set_cpus_allowed(struct task_struct *p,
  				      const struct cpumask *new_mask)
-@@ -2441,6 +2445,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -2454,6 +2458,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
  	return -EINVAL;
  	return 0;
  }
@@ -77,6 +79,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
  #endif
 
  #ifdef CONFIG_NO_HZ_COMMON
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 0d8eb402a24f..e028d73ca077 100644
 --- a/kernel/cpu.c
 +++ b/kernel/cpu.c
@@ -137,16 +137,10 @@ static int cpu_hotplug_disabled;
 	/*
 	 * Also blocks the new readers during
 	 * an ongoing cpu hotplug operation.
-@@ -158,25 +152,13 @@ static struct {
- #endif
 } cpu_hotplug = {
 	.active_writer = NULL,
--	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
+	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
 -#ifdef CONFIG_PREEMPT_RT_FULL
 -	.lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
 -#else
 	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
 -#endif
-+	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	.dep_map = {.name = "cpu_hotplug.lock" },
 #endif
 
 static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
 
 /**
-@@ -203,18 +215,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -203,18 +215,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
 void pin_current_cpu(void)
 {
 	struct hotplug_pcp *hp;
 
 /*
  * Start the sync_unplug_thread on the target cpu and wait for it to
  * complete.
-@@ -262,23 +353,83 @@ static int sync_unplug_thread(void *data
+@@ -262,23 +353,83 @@ static int sync_unplug_thread(void *data)
 static int cpu_unplug_begin(unsigned int cpu)
 {
 	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
 
 	cpuhp_lock_release();
 }
-@@ -828,6 +979,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -828,6 +979,9 @@ static int takedown_cpu(unsigned int cpu)
 	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 	smpboot_park_threads(cpu);
 +
 	/*
 	 * Prevent irq alloc/free while the dying cpu reorganizes the
 	 * interrupt affinities.
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 5cef167aca3a..98dc382737f3 100644
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1129,6 +1129,84 @@ void do_set_cpus_allowed(struct task_str
-	enqueue_task(rq, p, ENQUEUE_RESTORE);
+@@ -1140,6 +1140,84 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+	set_curr_task(rq, p);
 }
 
 +static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
 
 /*
  * Change a given task's CPU affinity. Migrate the thread to a
  * proper CPU and schedule it away if the CPU it's executing on
+-- 
+2.10.1
+
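The mechanism the patch description outlines is a per-CPU reference count: tasks take it with pin_current_cpu() and drop it with unpin_current_cpu(), while the CPU-down path marks an unplug as pending and waits for the count to drain. The following user-space C sketch models that handshake, with a pthread mutex/condvar standing in for the RT "sleeping spinlock" the patch relies on; hotplug_pin, pin_cpu(), unpin_cpu() and unplug_thread() are invented names for illustration, not the kernel's API.

```c
/*
 * Minimal sketch of the pin/unpin refcount handshake described above.
 * Assumption: a pthread mutex/condvar plays the role of the kernel's
 * RT sleeping lock, so blocked "pinners" sleep instead of spinning.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct hotplug_pin {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int refcount;		/* tasks currently pinned to "this CPU" */
	int unplug_pending;	/* a CPU-down request is in progress */
};

static struct hotplug_pin pin = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
};

/* Analogue of pin_current_cpu(): new pins sleep while unplug is pending. */
static void pin_cpu(void)
{
	pthread_mutex_lock(&pin.lock);
	while (pin.unplug_pending)
		pthread_cond_wait(&pin.cond, &pin.lock);
	pin.refcount++;
	pthread_mutex_unlock(&pin.lock);
}

/* Analogue of unpin_current_cpu(): the last task out wakes the waiter. */
static void unpin_cpu(void)
{
	pthread_mutex_lock(&pin.lock);
	if (--pin.refcount == 0)
		pthread_cond_broadcast(&pin.cond);
	pthread_mutex_unlock(&pin.lock);
}

/* Stand-in for the sync-unplug side: block new pins, drain old ones. */
static void *unplug_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pin.lock);
	pin.unplug_pending = 1;
	while (pin.refcount > 0)
		pthread_cond_wait(&pin.cond, &pin.lock);
	printf("refcount drained; the CPU could go down here\n");
	pin.unplug_pending = 0;	/* "CPU back online": let pinners resume */
	pthread_cond_broadcast(&pin.cond);
	pthread_mutex_unlock(&pin.lock);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	pin_cpu();
	usleep(10 * 1000);	/* pretend to run per-CPU work */
	unpin_cpu();
	return NULL;
}

int main(void)
{
	pthread_t r[4], down;
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&r[i], NULL, reader, NULL);
	pthread_create(&down, NULL, unplug_thread, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(r[i], NULL);
	pthread_join(down, NULL);
	return 0;
}
```

One condition variable covers both state changes (count drained, unplug finished), which is why both sides broadcast after rechecking their predicate in a loop; that mirrors why the patch wants a lock a sleeping task can safely block on under PREEMPT_RT.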
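The kernel/sched/core.c hunk (the +static DEFINE_PER_CPU(struct cpumask, sched_cpumasks) addition visible above) supports migrate_me(): rather than blocking a preempted task on the dying CPU, the task moves itself to another CPU it is allowed on. Below is a rough user-space analogue of that idea using the standard sched_getaffinity()/sched_setaffinity() calls; move_off_cpu() is a hypothetical name and this is only a sketch of the concept, not the patch's implementation.

```c
/*
 * Sketch: drop one "dying" CPU from the caller's affinity mask and
 * reapply it, so the scheduler migrates us off that CPU - the same
 * effect migrate_me() has for the task running on a CPU going down.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static int move_off_cpu(int dying_cpu)
{
	cpu_set_t mask;

	if (sched_getaffinity(0, sizeof(mask), &mask))
		return -1;

	/* Nothing to do if we cannot run on that CPU anyway. */
	if (!CPU_ISSET(dying_cpu, &mask))
		return 0;

	CPU_CLR(dying_cpu, &mask);
	if (CPU_COUNT(&mask) == 0)
		return -1;	/* the dying CPU was our only allowed CPU */

	/* Reschedule onto the reduced mask, like the patch's migrate_me(). */
	return sched_setaffinity(0, sizeof(mask), &mask);
}

int main(void)
{
	printf("running on CPU %d\n", sched_getcpu());
	if (move_off_cpu(sched_getcpu()))
		perror("move_off_cpu");
	printf("now on CPU %d\n", sched_getcpu());
	return 0;
}
```

In the kernel patch the equivalent mask arithmetic is done with the per-CPU sched_cpumasks scratch space and the runqueue locks held, since a task cannot safely reallocate or sleep while deciding where to go; the user-space version can simply let the scheduler act on the new mask.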