author     Paul Gortmaker <paul.gortmaker@windriver.com>   2016-12-25 10:09:54 -0500
committer  Paul Gortmaker <paul.gortmaker@windriver.com>   2016-12-25 10:09:54 -0500
commit     130b61424cf4cc1773896be98704682dbeb1ddd7 (patch)
tree       bda40c96c4ad9d9110a4cff7c8f21457c444d630
parent     851c61bd076075bc185998116db10947a32443a8 (diff)
download   4.9-rt-patches-130b61424cf4cc1773896be98704682dbeb1ddd7.tar.gz

lglocks: drop patches ; core support gone upstream  (rt-v4.8-373-g00bcf5cdd6c0)
-rw-r--r--  patches/lglocks-rt.patch                                             | 199
-rw-r--r--  patches/lockinglglocks_Use_preempt_enabledisable_nort.patch         |  34
-rw-r--r--  patches/series                                                       |   8
-rw-r--r--  patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch  |  86
-rw-r--r--  patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch  |  73
5 files changed, 0 insertions, 400 deletions
diff --git a/patches/lglocks-rt.patch b/patches/lglocks-rt.patch
deleted file mode 100644
index e024ff3b5a6594..00000000000000
--- a/patches/lglocks-rt.patch
+++ /dev/null
@@ -1,199 +0,0 @@
-Subject: lglocks: Provide a RT safe variant
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed, 15 Jun 2011 11:02:21 +0200
-
-lglocks by themselves spin in order to get the lock. This ends up
-badly if a task with the highest priority keeps spinning while a task
-with the lowest priority owns the lock.
-
-Let's replace them with rt_mutex based locks so they can sleep, track
-the owner and boost it if needed.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/lglock.h | 18 +++++++++++++
- kernel/locking/lglock.c | 62 ++++++++++++++++++++++++++++++------------------
- 2 files changed, 58 insertions(+), 22 deletions(-)
-
---- a/include/linux/lglock.h
-+++ b/include/linux/lglock.h
-@@ -34,13 +34,30 @@
- #endif
-
- struct lglock {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ struct rt_mutex __percpu *lock;
-+#else
- arch_spinlock_t __percpu *lock;
-+#endif
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lock_class_key lock_key;
- struct lockdep_map lock_dep_map;
- #endif
- };
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+# define DEFINE_LGLOCK(name) \
-+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
-+ = __RT_MUTEX_INITIALIZER( name ## _lock); \
-+ struct lglock name = { .lock = &name ## _lock }
-+
-+# define DEFINE_STATIC_LGLOCK(name) \
-+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
-+ = __RT_MUTEX_INITIALIZER( name ## _lock); \
-+ static struct lglock name = { .lock = &name ## _lock }
-+
-+#else
-+
- #define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
-@@ -50,6 +67,7 @@ struct lglock {
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
- static struct lglock name = { .lock = &name ## _lock }
-+#endif
-
- void lg_lock_init(struct lglock *lg, char *name);
-
---- a/kernel/locking/lglock.c
-+++ b/kernel/locking/lglock.c
-@@ -4,6 +4,15 @@
- #include <linux/cpu.h>
- #include <linux/string.h>
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# define lg_lock_ptr arch_spinlock_t
-+# define lg_do_lock(l) arch_spin_lock(l)
-+# define lg_do_unlock(l) arch_spin_unlock(l)
-+#else
-+# define lg_lock_ptr struct rt_mutex
-+# define lg_do_lock(l) __rt_spin_lock__no_mg(l)
-+# define lg_do_unlock(l) __rt_spin_unlock(l)
-+#endif
- /*
- * Note there is no uninit, so lglocks cannot be defined in
- * modules (but it's fine to use them from there)
-@@ -12,51 +21,60 @@
-
- void lg_lock_init(struct lglock *lg, char *name)
- {
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int i;
-+
-+ for_each_possible_cpu(i) {
-+ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
-+
-+ rt_mutex_init(lock);
-+ }
-+#endif
- LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
- }
- EXPORT_SYMBOL(lg_lock_init);
-
- void lg_local_lock(struct lglock *lg)
- {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
-
-- preempt_disable();
-+ migrate_disable();
- lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- lock = this_cpu_ptr(lg->lock);
-- arch_spin_lock(lock);
-+ lg_do_lock(lock);
- }
- EXPORT_SYMBOL(lg_local_lock);
-
- void lg_local_unlock(struct lglock *lg)
- {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
-
- lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- lock = this_cpu_ptr(lg->lock);
-- arch_spin_unlock(lock);
-- preempt_enable();
-+ lg_do_unlock(lock);
-+ migrate_enable();
- }
- EXPORT_SYMBOL(lg_local_unlock);
-
- void lg_local_lock_cpu(struct lglock *lg, int cpu)
- {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
-
-- preempt_disable();
-+ preempt_disable_nort();
- lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- lock = per_cpu_ptr(lg->lock, cpu);
-- arch_spin_lock(lock);
-+ lg_do_lock(lock);
- }
- EXPORT_SYMBOL(lg_local_lock_cpu);
-
- void lg_local_unlock_cpu(struct lglock *lg, int cpu)
- {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
-
- lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- lock = per_cpu_ptr(lg->lock, cpu);
-- arch_spin_unlock(lock);
-- preempt_enable();
-+ lg_do_unlock(lock);
-+ preempt_enable_nort();
- }
- EXPORT_SYMBOL(lg_local_unlock_cpu);
-
-@@ -70,15 +88,15 @@ void lg_double_lock(struct lglock *lg, i
-
- preempt_disable();
- lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
-- arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
-- arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
-+ lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
-+ lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
- }
-
- void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
- {
- lock_release(&lg->lock_dep_map, 1, _RET_IP_);
-- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
-- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
-+ lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
-+ lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
- preempt_enable();
- }
-
-@@ -86,12 +104,12 @@ void lg_global_lock(struct lglock *lg)
- {
- int i;
-
-- preempt_disable();
-+ preempt_disable_nort();
- lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- for_each_possible_cpu(i) {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
- lock = per_cpu_ptr(lg->lock, i);
-- arch_spin_lock(lock);
-+ lg_do_lock(lock);
- }
- }
- EXPORT_SYMBOL(lg_global_lock);
-@@ -102,10 +120,10 @@ void lg_global_unlock(struct lglock *lg)
-
- lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- for_each_possible_cpu(i) {
-- arch_spinlock_t *lock;
-+ lg_lock_ptr *lock;
- lock = per_cpu_ptr(lg->lock, i);
-- arch_spin_unlock(lock);
-+ lg_do_unlock(lock);
- }
-- preempt_enable();
-+ preempt_enable_nort();
- }
- EXPORT_SYMBOL(lg_global_unlock);
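
For readers without an RT tree handy, the locking shape the deleted patch provided can be modelled outside the kernel: one sleepable lock per "CPU", a local pair that only touches the caller's slot, and a global pair that walks the whole set. On PREEMPT_RT_FULL the per-CPU lock could sleep, so a high-priority spinner no longer starved a low-priority owner and the rt_mutex could boost it. The sketch below is a plain userspace analogue with pthread mutexes standing in for rt_mutex; the names mirror the lglock API but none of this is kernel code.

/*
 * Userspace analogue of the lglock scheme removed above.  Illustrative
 * only -- pthread_mutex_t stands in for the sleepable per-CPU rt_mutex.
 * Build with: cc -pthread lglock_demo.c
 */
#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

struct lglock_demo {
	pthread_mutex_t lock[NCPUS];	/* one "per-CPU" lock */
};

static void lg_lock_init(struct lglock_demo *lg)
{
	for (int i = 0; i < NCPUS; i++)
		pthread_mutex_init(&lg->lock[i], NULL);
}

static void lg_local_lock(struct lglock_demo *lg, int cpu)
{
	pthread_mutex_lock(&lg->lock[cpu]);	/* may sleep, like an rt_mutex */
}

static void lg_local_unlock(struct lglock_demo *lg, int cpu)
{
	pthread_mutex_unlock(&lg->lock[cpu]);
}

static void lg_global_lock(struct lglock_demo *lg)
{
	for (int i = 0; i < NCPUS; i++)		/* fixed order avoids deadlock */
		pthread_mutex_lock(&lg->lock[i]);
}

static void lg_global_unlock(struct lglock_demo *lg)
{
	for (int i = 0; i < NCPUS; i++)
		pthread_mutex_unlock(&lg->lock[i]);
}

int main(void)
{
	struct lglock_demo lg;

	lg_lock_init(&lg);

	lg_local_lock(&lg, 0);			/* cheap: only CPU 0's lock */
	puts("local section on cpu 0");
	lg_local_unlock(&lg, 0);

	lg_global_lock(&lg);			/* expensive: the whole herd */
	puts("global section");
	lg_global_unlock(&lg);

	return 0;
}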
diff --git a/patches/lockinglglocks_Use_preempt_enabledisable_nort.patch b/patches/lockinglglocks_Use_preempt_enabledisable_nort.patch
deleted file mode 100644
index 8c13841b08f44a..00000000000000
--- a/patches/lockinglglocks_Use_preempt_enabledisable_nort.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-Subject: locking/lglocks: Use preempt_enable/disable_nort() in lg_double_[un]lock
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Sat, 27 Feb 2016 08:34:43 +0100
-
-Let's not do that when snagging an rtmutex.
-
-Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: linux-rt-users <linux-rt-users@vger.kernel.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- kernel/locking/lglock.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/kernel/locking/lglock.c
-+++ b/kernel/locking/lglock.c
-@@ -86,7 +86,7 @@ void lg_double_lock(struct lglock *lg, i
- if (cpu2 < cpu1)
- swap(cpu1, cpu2);
-
-- preempt_disable();
-+ preempt_disable_nort();
- lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
- lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
- lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
-@@ -97,7 +97,7 @@ void lg_double_unlock(struct lglock *lg,
- lock_release(&lg->lock_dep_map, 1, _RET_IP_);
- lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
- lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
-- preempt_enable();
-+ preempt_enable_nort();
- }
-
- void lg_global_lock(struct lglock *lg)
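
The _nort() helpers used here are RT-tree conventions rather than mainline API: roughly, on a non-RT kernel they behave like plain preempt_disable()/preempt_enable(), while on PREEMPT_RT_FULL they do nothing, because the per-CPU lock is now a sleepable rt_mutex and preemption must stay enabled around it. A toggleable userspace sketch of that convention (stub macros, illustrative only, not the kernel definitions):

/*
 * Sketch of the preempt_*_nort() convention relied on above.  Toggle the
 * define to model an RT kernel.  The puts() stubs only show which path a
 * real kernel would take; nothing here is kernel API.
 */
#include <stdio.h>

/* #define CONFIG_PREEMPT_RT_FULL */	/* uncomment to model PREEMPT_RT_FULL */

#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_nort()	((void)0)	/* lock can sleep: stay preemptible */
# define preempt_enable_nort()	((void)0)
#else
# define preempt_disable_nort()	puts("preempt_disable()")	/* raw spinlock path */
# define preempt_enable_nort()	puts("preempt_enable()")
#endif

int main(void)
{
	preempt_disable_nort();		/* what lg_double_lock() does first */
	puts("... lock both per-CPU locks, lowest CPU number first ...");
	puts("... critical section ...");
	preempt_enable_nort();		/* what lg_double_unlock() does last */
	return 0;
}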
diff --git a/patches/series b/patches/series
index a09de6cf9135cf..061f61f77f22f7 100644
--- a/patches/series
+++ b/patches/series
@@ -346,14 +346,6 @@ rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
-# LGLOCKS - lovely
-lglocks-rt.patch
-lockinglglocks_Use_preempt_enabledisable_nort.patch
-
-# STOP machine (depend on lglock & rtmutex)
-stomp-machine-create-lg_global_trylock_relax-primiti.patch
-stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
-
# DRIVERS SERIAL
drivers-tty-fix-omap-lock-crap.patch
drivers-tty-pl011-irq-disable-madness.patch
diff --git a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch b/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
deleted file mode 100644
index ce3ee6fea457b0..00000000000000
--- a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
+++ /dev/null
@@ -1,86 +0,0 @@
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Fri, 2 May 2014 13:13:22 +0200
-Subject: stomp-machine: create lg_global_trylock_relax() primitive
-
-Create lg_global_trylock_relax() for use by stopper thread when it cannot
-schedule, to deal with stop_cpus_lock, which is now an lglock.
-
-Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/lglock.h | 6 ++++++
- include/linux/spinlock_rt.h | 1 +
- kernel/locking/lglock.c | 25 +++++++++++++++++++++++++
- kernel/locking/rtmutex.c | 5 +++++
- 4 files changed, 37 insertions(+)
-
---- a/include/linux/lglock.h
-+++ b/include/linux/lglock.h
-@@ -82,6 +82,12 @@ void lg_double_unlock(struct lglock *lg,
- void lg_global_lock(struct lglock *lg);
- void lg_global_unlock(struct lglock *lg);
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+#define lg_global_trylock_relax(name) lg_global_lock(name)
-+#else
-+void lg_global_trylock_relax(struct lglock *lg);
-+#endif
-+
- #else
- /* When !CONFIG_SMP, map lglock to spinlock */
- #define lglock spinlock
---- a/include/linux/spinlock_rt.h
-+++ b/include/linux/spinlock_rt.h
-@@ -40,6 +40,7 @@ extern int atomic_dec_and_spin_lock(atom
- extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
- extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
- extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
-+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
-
- #define spin_lock(lock) rt_spin_lock(lock)
-
---- a/kernel/locking/lglock.c
-+++ b/kernel/locking/lglock.c
-@@ -127,3 +127,28 @@ void lg_global_unlock(struct lglock *lg)
- preempt_enable_nort();
- }
- EXPORT_SYMBOL(lg_global_unlock);
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * HACK: If you use this, you get to keep the pieces.
-+ * Used in queue_stop_cpus_work() when stop machinery
-+ * is called from inactive CPU, so we can't schedule.
-+ */
-+# define lg_do_trylock_relax(l) \
-+ do { \
-+ while (!__rt_spin_trylock(l)) \
-+ cpu_relax(); \
-+ } while (0)
-+
-+void lg_global_trylock_relax(struct lglock *lg)
-+{
-+ int i;
-+
-+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
-+ for_each_possible_cpu(i) {
-+ lg_lock_ptr *lock;
-+ lock = per_cpu_ptr(lg->lock, i);
-+ lg_do_trylock_relax(lock);
-+ }
-+}
-+#endif
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1279,6 +1279,11 @@ void __lockfunc rt_spin_unlock_wait(spin
- }
- EXPORT_SYMBOL(rt_spin_unlock_wait);
-
-+int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
-+{
-+ return rt_mutex_trylock(lock);
-+}
-+
- int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
- {
- int ret;
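
The primitive being deleted is a busy-wait variant of lg_global_lock(): it walks every per-CPU lock and spins on trylock + cpu_relax() instead of blocking, for the one caller that cannot schedule. A self-contained userspace sketch of the same loop, with pthread mutexes and sched_yield() standing in for rt_mutex and cpu_relax(); illustrative only:

/*
 * Userspace analogue of lg_global_trylock_relax(): take every per-CPU
 * lock without ever blocking, by spinning on trylock with a relax hint.
 * Build with: cc -pthread trylock_relax_demo.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t cpu_lock[NCPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void lg_global_trylock_relax_demo(void)
{
	for (int i = 0; i < NCPUS; i++) {
		/* never block: retry until this slot's lock is free */
		while (pthread_mutex_trylock(&cpu_lock[i]) != 0)
			sched_yield();		/* stands in for cpu_relax() */
	}
}

int main(void)
{
	lg_global_trylock_relax_demo();
	printf("took all %d per-CPU locks without blocking\n", NCPUS);
	for (int i = 0; i < NCPUS; i++)
		pthread_mutex_unlock(&cpu_lock[i]);
	return 0;
}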
diff --git a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch b/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
deleted file mode 100644
index dc0ba8d4cd4ff5..00000000000000
--- a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Fri, 2 May 2014 13:13:34 +0200
-Subject: stomp-machine: use lg_global_trylock_relax() to deal with stop_cpus_lock lglock
-
-If the stop machinery is called from inactive CPU we cannot use
-lg_global_lock(), because some other stomp machine invocation might be
-in progress and the lock can be contended. We cannot schedule from this
-context, so use the lovely new lg_global_trylock_relax() primitive to
-do what we used to do via one mutex_trylock()/cpu_relax() loop. We
-now do that trylock()/relax() across an entire herd of locks. Joy.
-
-Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/stop_machine.c | 19 ++++++++++++-------
- 1 file changed, 12 insertions(+), 7 deletions(-)
-
---- a/kernel/stop_machine.c
-+++ b/kernel/stop_machine.c
-@@ -321,18 +321,21 @@ static DEFINE_MUTEX(stop_cpus_mutex);
-
- static bool queue_stop_cpus_work(const struct cpumask *cpumask,
- cpu_stop_fn_t fn, void *arg,
-- struct cpu_stop_done *done)
-+ struct cpu_stop_done *done, bool inactive)
- {
- struct cpu_stop_work *work;
- unsigned int cpu;
- bool queued = false;
-
- /*
-- * Disable preemption while queueing to avoid getting
-- * preempted by a stopper which might wait for other stoppers
-- * to enter @fn which can lead to deadlock.
-+ * Make sure that all work is queued on all cpus before
-+ * any of the cpus can execute it.
- */
-- lg_global_lock(&stop_cpus_lock);
-+ if (!inactive)
-+ lg_global_lock(&stop_cpus_lock);
-+ else
-+ lg_global_trylock_relax(&stop_cpus_lock);
-+
- for_each_cpu(cpu, cpumask) {
- work = &per_cpu(cpu_stopper.stop_work, cpu);
- work->fn = fn;
-@@ -352,7 +355,7 @@ static int __stop_cpus(const struct cpum
- struct cpu_stop_done done;
-
- cpu_stop_init_done(&done, cpumask_weight(cpumask));
-- if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
-+ if (!queue_stop_cpus_work(cpumask, fn, arg, &done, false))
- return -ENOENT;
- wait_for_completion(&done.completion);
- return done.ret;
-@@ -540,6 +543,8 @@ static int __init cpu_stop_init(void)
- INIT_LIST_HEAD(&stopper->works);
- }
-
-+ lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");
-+
- BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
- stop_machine_unpark(raw_smp_processor_id());
- stop_machine_initialized = true;
-@@ -634,7 +639,7 @@ int stop_machine_from_inactive_cpu(cpu_s
- set_state(&msdata, MULTI_STOP_PREPARE);
- cpu_stop_init_done(&done, num_active_cpus());
- queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
-- &done);
-+ &done, true);
- ret = multi_cpu_stop(&msdata);
-
- /* Busy wait for completion. */
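
The changelog's point can be modelled in userspace: another stop-machine invocation may already hold the whole herd of per-CPU locks, and a caller running on a not-yet-active CPU must not sleep waiting for it, so it spins with trylock + relax while ordinary callers block. The sketch below is illustrative only; pthread mutexes stand in for rt_mutex and the function names mimic, but are not, the kernel API.

/*
 * Userspace model of why queue_stop_cpus_work() grew the 'inactive' flag.
 * Build with: cc -pthread stop_cpus_demo.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static pthread_mutex_t cpu_lock[NCPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static void lg_global_lock(void)		/* blocking flavour: may sleep */
{
	for (int i = 0; i < NCPUS; i++)
		pthread_mutex_lock(&cpu_lock[i]);
}

static void lg_global_trylock_relax(void)	/* non-blocking flavour */
{
	for (int i = 0; i < NCPUS; i++)
		while (pthread_mutex_trylock(&cpu_lock[i]) != 0)
			sched_yield();		/* stands in for cpu_relax() */
}

static void lg_global_unlock(void)
{
	for (int i = 0; i < NCPUS; i++)
		pthread_mutex_unlock(&cpu_lock[i]);
}

static void queue_stop_cpus_work_demo(int inactive)
{
	if (!inactive)
		lg_global_lock();		/* ordinary callers may sleep */
	else
		lg_global_trylock_relax();	/* inactive-CPU caller must not */
	puts("work queued on all cpus");
	lg_global_unlock();
}

static void *other_invocation(void *arg)
{
	(void)arg;
	lg_global_lock();			/* a competing stop-machine run */
	usleep(50 * 1000);			/* ... holds the herd for a while */
	lg_global_unlock();
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, other_invocation, NULL);
	usleep(10 * 1000);			/* let the competitor grab the herd */

	queue_stop_cpus_work_demo(1);		/* inactive-CPU path: spins, never blocks in a lock */

	pthread_join(t, NULL);
	return 0;
}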