author    Paul Gortmaker <paul.gortmaker@windriver.com>  2016-12-26 12:36:18 -0500
committer Paul Gortmaker <paul.gortmaker@windriver.com>  2016-12-26 12:39:50 -0500
commit 7e2081928f9cbc99a1d71dd9e3398cb0e2a8fe11
tree   9e965160a69df7112172588a2eaa9895dec4a521
parent 2723fee9a7c5d2976717f6862d11c045dad090cc
hotplug: import refresh with lockdep update (x2)  (tag: rt-v4.9-rc2)
-rw-r--r--  patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch | 14
-rw-r--r--  patches/cpu-rt-rework-cpu-down.patch                             | 47
2 files changed, 26 insertions(+), 35 deletions(-)
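
The lockdep half of this refresh is the same in both files: the open-coded
dep_map initializer, which set only a name, is replaced by the mainline
STATIC_LOCKDEP_MAP_INIT() macro, which also registers a static lock class
key. A minimal sketch of the change (the macro body is quoted from
include/linux/lockdep.h; the two initializer lines are lifted from the
hunks below):

    /* lockdep.h: initialize both the name and the class key, using
     * the map's own address as a unique static key. */
    #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
            { .name = (_name), .key = (void *)(_key), }

    /* before: name only, .key left NULL */
    .dep_map = { .name = "cpu_hotplug.lock" },

    /* after: name plus a real class key for the hotplug lock */
    .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map",
                                       &cpu_hotplug.dep_map),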
diff --git a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index ee85da7f3d159..8d3c7d0febf3b 100644
--- a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -137,10 +137,16 @@ static int cpu_hotplug_disabled;
+@@ -210,10 +210,16 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -153,12 +159,24 @@ static struct {
+@@ -226,12 +232,24 @@ static struct {
} cpu_hotplug = {
.active_writer = NULL,
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- .dep_map = {.name = "cpu_hotplug.lock" },
+ .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
#endif
};
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -195,8 +213,8 @@ void pin_current_cpu(void)
+@@ -268,8 +286,8 @@ void pin_current_cpu(void)
return;
}
preempt_enable();
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
goto retry;
}
-@@ -269,9 +287,9 @@ void get_online_cpus(void)
+@@ -342,9 +360,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -324,11 +342,11 @@ void cpu_hotplug_begin(void)
+@@ -397,11 +415,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -337,7 +355,7 @@ void cpu_hotplug_begin(void)
+@@ -410,7 +428,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
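
The patch above boils down to one type switch plus matching lock/unlock
helpers. A condensed sketch of the pattern (the helper names and the elided
struct fields are assumptions; only the #ifdef structure and the
initializers are visible in the hunks):

    static struct {
            struct task_struct      *active_writer;
            wait_queue_head_t       wq;     /* writer waits here */
    #ifdef CONFIG_PREEMPT_RT_FULL
            /* On RT, spinlock_t is rtmutex-based, i.e. it may sleep,
             * so hotplug readers no longer block with preemption off. */
            spinlock_t              lock;
    #else
            struct mutex            lock;
    #endif
            /* ... refcount, dep_map ... */
    } cpu_hotplug;

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define hotplug_lock()         spin_lock(&cpu_hotplug.lock)
    # define hotplug_unlock()       spin_unlock(&cpu_hotplug.lock)
    #else
    # define hotplug_lock()         mutex_lock(&cpu_hotplug.lock)
    # define hotplug_unlock()       mutex_unlock(&cpu_hotplug.lock)
    #endif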
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 46053d660d963..8a91b7f820671 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -1,7 +1,6 @@
-From 6839e793e1c1e832d61ff4a3cea5650394bc2688 Mon Sep 17 00:00:00 2001
From: Steven Rostedt <srostedt@redhat.com>
Date: Mon, 16 Jul 2012 08:07:43 +0000
-Subject: [PATCH] cpu/rt: Rework cpu down for PREEMPT_RT
+Subject: cpu/rt: Rework cpu down for PREEMPT_RT
Bringing a CPU down is a pain with the PREEMPT_RT kernel because
tasks can be preempted in many more places than in non-RT. In
@@ -48,17 +47,16 @@ This helps fix issues with ksoftirqd and workqueue that unbind on CPU down.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
---
- include/linux/sched.h | 7 ++
- kernel/cpu.c | 236 +++++++++++++++++++++++++++++++++++++++++---------
- kernel/sched/core.c | 78 +++++++++++++++++
+ include/linux/sched.h | 7 +
+ kernel/cpu.c | 236 +++++++++++++++++++++++++++++++++++++++++---------
+ kernel/sched/core.c | 78 ++++++++++++++++
3 files changed, 280 insertions(+), 41 deletions(-)
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index bdcb1276d4b8..7bf2fa0d2a2d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2442,6 +2442,10 @@ extern void do_set_cpus_allowed(struct task_struct *p,
+@@ -2473,6 +2473,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -69,7 +67,7 @@ index bdcb1276d4b8..7bf2fa0d2a2d 100644
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -2454,6 +2458,9 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
+@@ -2485,6 +2489,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
@@ -79,11 +77,9 @@ index bdcb1276d4b8..7bf2fa0d2a2d 100644
#endif
#ifdef CONFIG_NO_HZ_COMMON
-diff --git a/kernel/cpu.c b/kernel/cpu.c
-index 0d8eb402a24f..e028d73ca077 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -137,16 +137,10 @@ static int cpu_hotplug_disabled;
+@@ -210,16 +210,10 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
@@ -100,7 +96,7 @@ index 0d8eb402a24f..e028d73ca077 100644
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -159,24 +153,12 @@ static struct {
+@@ -232,24 +226,12 @@ static struct {
} cpu_hotplug = {
.active_writer = NULL,
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -110,7 +106,7 @@ index 0d8eb402a24f..e028d73ca077 100644
.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
- .dep_map = {.name = "cpu_hotplug.lock" },
+ .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
#endif
};
@@ -125,7 +121,7 @@ index 0d8eb402a24f..e028d73ca077 100644
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -184,12 +166,42 @@ static struct {
+@@ -257,12 +239,42 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -168,7 +164,7 @@ index 0d8eb402a24f..e028d73ca077 100644
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
-@@ -203,18 +215,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+@@ -276,18 +288,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
void pin_current_cpu(void)
{
struct hotplug_pcp *hp;
@@ -212,7 +208,7 @@ index 0d8eb402a24f..e028d73ca077 100644
preempt_disable();
goto retry;
}
-@@ -235,26 +268,84 @@ void unpin_current_cpu(void)
+@@ -308,26 +341,84 @@ void unpin_current_cpu(void)
wake_up_process(hp->unplug);
}
@@ -304,7 +300,7 @@ index 0d8eb402a24f..e028d73ca077 100644
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
-@@ -262,23 +353,83 @@ static int sync_unplug_thread(void *data)
+@@ -335,23 +426,83 @@ static int sync_unplug_thread(void *data
static int cpu_unplug_begin(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
@@ -395,7 +391,7 @@ index 0d8eb402a24f..e028d73ca077 100644
}
void get_online_cpus(void)
-@@ -287,9 +438,9 @@ void get_online_cpus(void)
+@@ -360,9 +511,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -407,7 +403,7 @@ index 0d8eb402a24f..e028d73ca077 100644
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -342,11 +493,11 @@ void cpu_hotplug_begin(void)
+@@ -415,11 +566,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -421,7 +417,7 @@ index 0d8eb402a24f..e028d73ca077 100644
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -355,7 +506,7 @@ void cpu_hotplug_begin(void)
+@@ -428,7 +579,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
@@ -430,7 +426,7 @@ index 0d8eb402a24f..e028d73ca077 100644
cpuhp_lock_release();
}
-@@ -828,6 +979,9 @@ static int takedown_cpu(unsigned int cpu)
+@@ -907,6 +1058,9 @@ static int takedown_cpu(unsigned int cpu
kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
smpboot_park_threads(cpu);
@@ -440,11 +436,9 @@ index 0d8eb402a24f..e028d73ca077 100644
/*
* Prevent irq alloc/free while the dying cpu reorganizes the
* interrupt affinities.
-diff --git a/kernel/sched/core.c b/kernel/sched/core.c
-index 5cef167aca3a..98dc382737f3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1140,6 +1140,84 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+@@ -1140,6 +1140,84 @@ void do_set_cpus_allowed(struct task_str
set_curr_task(rq, p);
}
@@ -529,6 +523,3 @@ index 5cef167aca3a..98dc382737f3 100644
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
---
-2.10.1
-
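
The rework patch's core mechanism, reconstructed from the visible hunks: each
CPU carries a hotplug_pcp structure, tasks pin themselves to their CPU with a
refcount, and the sync thread started by cpu_unplug_begin() waits until the
last pinned task drops its reference. A hedged sketch (only hotplug_pcp,
pin/unpin_current_cpu, the retry loop and the wake_up_process() call appear
in the hunks; the field layout and the blocking path are assumptions):

    struct hotplug_pcp {
            struct task_struct      *unplug;   /* sync thread, set while CPU goes down */
            int                     refcount;  /* tasks currently pinned here */
            /* ... grab/sync machinery elided ... */
    };
    static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);

    void pin_current_cpu(void)
    {
            struct hotplug_pcp *hp;

    retry:
            hp = this_cpu_ptr(&hotplug_pcp);
            if (!hp->unplug || hp->refcount || hp->unplug == current) {
                    hp->refcount++;         /* fast path: CPU not going down */
                    return;
            }
            /* CPU is being unplugged: block on the hotplug lock (or be
             * migrated away by the scheduler helper the patch adds to
             * kernel/sched/core.c), then retry on whichever CPU we
             * resume on. */
            preempt_enable();
            hotplug_lock();
            hotplug_unlock();
            preempt_disable();
            goto retry;
    }

    void unpin_current_cpu(void)
    {
            struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp);

            WARN_ON(hp->refcount <= 0);
            /* The last pinned task wakes the waiting unplug thread. */
            if (!--hp->refcount && hp->unplug && hp->unplug != current)
                    wake_up_process(hp->unplug);
    }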