From 3292f30d2598f7ad726dbcd269dbbb0e87a54d07 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Mon, 2 Sep 2013 09:30:31 -0400 Subject: patches-3.10.10-rt7.tar.xz md5sum: f962e0301086b610da801f1816d38862 patches-3.10.10-rt7.tar.xz Announce: -------------- Dear RT folks! I'm pleased to announce the v3.10.10-rt7 patch set. Changes since v3.10.10-rt6 - hwlat compiles on 32bit (again). Reported by Fernando Lopez-Lezcano. - bcache is disabled due to usage of anon semaphores. Anyone interrested in using it? - "genirq affinity callback" are fixed. Reported by Joe Korty. There are two nics using this feature. - Paul Gortmaker's swait rename patch has been included. Known issues: - SLAB support not working - The cpsw network driver shows some issues. - bcache is disabled. - an ancient race (since we got sleeping spinlocks) where the TASK_TRACED state is temporary replaced while waiting on a rw lock and the task can't be traced. The delta patch against v3.10.10-rt6 is appended below and can be found here: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/incr/patch-3.10.10-rt6-rt7.patch.xz The RT patch against 3.10.10 can be found here: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patch-3.10.10-rt7.patch.xz The split quilt queue is available at: https://www.kernel.org/pub/linux/kernel/projects/rt/3.10/patches-3.10.10-rt7.tar.xz Sebastian [delta diff snipped] -------------- http://marc.info/?l=linux-rt-users&m=137780177007595&w=2 Signed-off-by: Paul Gortmaker --- ...ot-invoke-the-affinity-callback-via-a-wor.patch | 25 +++----- ...tor-Update-hwlat_detector-to-add-outer-lo.patch | 11 +--- ...tector-Use-thread-instead-of-stop-machine.patch | 15 ++--- ...tector-Use-trace_clock_local-if-available.patch | 13 ++-- ...-softirq-processing-in-irq-thread-context.patch | 4 +- patches/localversion.patch | 2 +- patches/md-disable-bcache.patch | 35 +++++++++++ .../ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 69 ++++++++++++++++++++++ patches/random-make-it-work-on-rt.patch | 2 +- patches/rcu-more-swait-conversions.patch | 15 ++++- patches/series | 10 +++- ...-rename-and-export-the-equivalent-of-wait.patch | 63 ++++++++++++++++++++ .../softirq-disable-softirq-stacks-for-rt.patch | 2 +- patches/softirq-local-lock.patch | 4 +- patches/softirq-sanitize-softirq-pending.patch | 2 +- ...klets-from-going-into-infinite-spin-in-rt.patch | 6 +- patches/wait-simple-implementation.patch | 12 +++- patches/wait-simple-rework-for-completions.patch | 22 ++++--- 18 files changed, 243 insertions(+), 69 deletions(-) create mode 100644 patches/md-disable-bcache.patch create mode 100644 patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch create mode 100644 patches/simple-wait-rename-and-export-the-equivalent-of-wait.patch diff --git a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch index 5dbd27d..ba6001b 100644 --- a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch +++ b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch @@ -11,15 +11,13 @@ wakeup() a process while holding the lock. 
Signed-off-by: Sebastian Andrzej Siewior --- - include/linux/interrupt.h | 1 + - kernel/irq/manage.c | 79 +++++++++++++++++++++++++++++++++++++++++++++-- + include/linux/interrupt.h | 1 + kernel/irq/manage.c | 79 ++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 77 insertions(+), 3 deletions(-) -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index 11bdb1e..838d4bd 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -263,6 +263,7 @@ struct irq_affinity_notify { +@@ -261,6 +261,7 @@ struct irq_affinity_notify { unsigned int irq; struct kref kref; struct work_struct work; @@ -27,11 +25,9 @@ index 11bdb1e..838d4bd 100644 void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); void (*release)(struct kref *ref); }; -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 0e34a98..5999a67 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -164,6 +164,62 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, +@@ -164,6 +164,62 @@ int irq_do_set_affinity(struct irq_data return ret; } @@ -65,7 +61,7 @@ index 0e34a98..5999a67 100644 + if (!list_empty(&affinity_list)) { + notify = list_first_entry(&affinity_list, + struct irq_affinity_notify, list); -+ list_del(¬ify->list); ++ list_del_init(¬ify->list); + } + raw_spin_unlock_irq(&affinity_list_lock); + @@ -94,7 +90,7 @@ index 0e34a98..5999a67 100644 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) { struct irq_chip *chip = irq_data_get_irq_chip(data); -@@ -182,7 +238,17 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) +@@ -182,7 +238,17 @@ int __irq_set_affinity_locked(struct irq if (desc->affinity_notify) { kref_get(&desc->affinity_notify->kref); @@ -112,7 +108,7 @@ index 0e34a98..5999a67 100644 } irqd_set(data, IRQD_AFFINITY_SET); -@@ -223,10 +289,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) +@@ -223,10 +289,8 @@ int irq_set_affinity_hint(unsigned int i } EXPORT_SYMBOL_GPL(irq_set_affinity_hint); @@ -124,7 +120,7 @@ index 0e34a98..5999a67 100644 struct irq_desc *desc = irq_to_desc(notify->irq); cpumask_var_t cpumask; unsigned long flags; -@@ -248,6 +312,13 @@ static void irq_affinity_notify(struct work_struct *work) +@@ -248,6 +312,13 @@ out: kref_put(¬ify->kref, notify->release); } @@ -138,7 +134,7 @@ index 0e34a98..5999a67 100644 /** * irq_set_affinity_notifier - control notification of IRQ affinity changes * @irq: Interrupt for which to enable/disable notification -@@ -277,6 +348,8 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) +@@ -277,6 +348,8 @@ irq_set_affinity_notifier(unsigned int i notify->irq = irq; kref_init(¬ify->kref); INIT_WORK(¬ify->work, irq_affinity_notify); @@ -147,6 +143,3 @@ index 0e34a98..5999a67 100644 } raw_spin_lock_irqsave(&desc->lock, flags); --- -1.8.4.rc3 - diff --git a/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch b/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch index 253b4dd..15fb8ed 100644 --- a/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch +++ b/patches/hwlat-detector-Update-hwlat_detector-to-add-outer-lo.patch @@ -43,11 +43,9 @@ There's some hardware latency that takes 2 microseconds to run. 
Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior --- - drivers/misc/hwlat_detector.c | 32 ++++++++++++++++++++++++++------ + drivers/misc/hwlat_detector.c | 32 ++++++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 6 deletions(-) -diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c -index b7b7c90..f93b8ef 100644 --- a/drivers/misc/hwlat_detector.c +++ b/drivers/misc/hwlat_detector.c @@ -143,6 +143,7 @@ static void detector_exit(void); @@ -58,7 +56,7 @@ index b7b7c90..f93b8ef 100644 struct timespec timestamp; /* wall time */ unsigned long lost; }; -@@ -219,11 +220,13 @@ static struct sample *buffer_get_sample(struct sample *sample) +@@ -219,11 +220,13 @@ static struct sample *buffer_get_sample( */ static int get_sample(void *unused) { @@ -111,7 +109,7 @@ index b7b7c90..f93b8ef 100644 s.timestamp = CURRENT_TIME; __buffer_add_sample(&s); -@@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf, +@@ -738,10 +757,11 @@ static ssize_t debug_sample_fread(struct } } @@ -127,6 +125,3 @@ index b7b7c90..f93b8ef 100644 /* handling partial reads is more trouble than it's worth */ --- -1.8.4.rc3 - diff --git a/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch b/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch index f41beef..9f6e72b 100644 --- a/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch +++ b/patches/hwlat-detector-Use-thread-instead-of-stop-machine.patch @@ -14,11 +14,9 @@ while it checks for hardware latency. Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior --- - drivers/misc/hwlat_detector.c | 59 ++++++++++++++++++------------------------- + drivers/misc/hwlat_detector.c | 59 +++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 34 deletions(-) -diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c -index d80a857..0bfa40d 100644 --- a/drivers/misc/hwlat_detector.c +++ b/drivers/misc/hwlat_detector.c @@ -41,7 +41,6 @@ @@ -55,7 +53,7 @@ index d80a857..0bfa40d 100644 */ static int __buffer_add_sample(struct sample *sample) { -@@ -229,18 +227,17 @@ static struct sample *buffer_get_sample(struct sample *sample) +@@ -229,18 +227,17 @@ static struct sample *buffer_get_sample( #endif /** * get_sample - sample the CPU TSC and look for likely hardware latencies @@ -100,7 +98,7 @@ index d80a857..0bfa40d 100644 out: return ret; } -@@ -305,32 +305,30 @@ static int get_sample(void *unused) +@@ -305,32 +305,30 @@ out: * @unused: A required part of the kthread API. * * Used to periodically sample the CPU TSC via a call to get_sample. We @@ -163,7 +161,7 @@ index d80a857..0bfa40d 100644 } /** -@@ -442,8 +435,7 @@ static int init_stats(void) +@@ -442,8 +435,7 @@ out: * This function provides a generic read implementation for the global state * "data" structure debugfs filesystem entries. It would be nice to use * simple_attr_read directly, but we need to make sure that the data.lock @@ -173,7 +171,7 @@ index d80a857..0bfa40d 100644 */ static ssize_t simple_data_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, const u64 *entry) -@@ -478,8 +470,7 @@ static ssize_t simple_data_read(struct file *filp, char __user *ubuf, +@@ -478,8 +470,7 @@ static ssize_t simple_data_read(struct f * This function provides a generic write implementation for the global state * "data" structure debugfs filesystem entries. 
It would be nice to use * simple_attr_write directly, but we need to make sure that the data.lock @@ -183,6 +181,3 @@ index d80a857..0bfa40d 100644 */ static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos, u64 *entry) --- -1.8.4.rc3 - diff --git a/patches/hwlat-detector-Use-trace_clock_local-if-available.patch b/patches/hwlat-detector-Use-trace_clock_local-if-available.patch index 9d7d327..bef079f 100644 --- a/patches/hwlat-detector-Use-trace_clock_local-if-available.patch +++ b/patches/hwlat-detector-Use-trace_clock_local-if-available.patch @@ -12,11 +12,9 @@ otherwise use ktime_get(). Signed-off-by: Steven Rostedt Signed-off-by: Sebastian Andrzej Siewior --- - drivers/misc/hwlat_detector.c | 34 +++++++++++++++++++++++++--------- + drivers/misc/hwlat_detector.c | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) -diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c -index f93b8ef..d80a857 100644 --- a/drivers/misc/hwlat_detector.c +++ b/drivers/misc/hwlat_detector.c @@ -51,6 +51,7 @@ @@ -27,7 +25,7 @@ index f93b8ef..d80a857 100644 #define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ #define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ -@@ -211,6 +212,21 @@ static struct sample *buffer_get_sample(struct sample *sample) +@@ -211,6 +212,21 @@ static struct sample *buffer_get_sample( return sample; } @@ -41,7 +39,7 @@ index f93b8ef..d80a857 100644 +#else +#define time_type u64 +#define time_get() trace_clock_local() -+#define time_to_us(x) ((x) / 1000) ++#define time_to_us(x) div_u64(x, 1000) +#define time_sub(a, b) ((a) - (b)) +#define init_time(a, b) a = b +#define time_u64(a) a @@ -49,7 +47,7 @@ index f93b8ef..d80a857 100644 /** * get_sample - sample the CPU TSC and look for likely hardware latencies * @unused: This is not used but is a part of the stop_machine API -@@ -220,23 +236,23 @@ static struct sample *buffer_get_sample(struct sample *sample) +@@ -220,23 +236,23 @@ static struct sample *buffer_get_sample( */ static int get_sample(void *unused) { @@ -93,6 +91,3 @@ index f93b8ef..d80a857 100644 /* This shouldn't happen */ if (diff < 0) { --- -1.8.4.rc3 - diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch index 2d6f039..cb916f7 100644 --- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch +++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch @@ -64,7 +64,7 @@ Cc: stable-rt@vger.kernel.org --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -783,7 +783,15 @@ irq_forced_thread_fn(struct irq_desc *de +@@ -856,7 +856,15 @@ irq_forced_thread_fn(struct irq_desc *de local_bh_disable(); ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); @@ -81,7 +81,7 @@ Cc: stable-rt@vger.kernel.org return ret; } -@@ -1128,6 +1136,9 @@ __setup_irq(unsigned int irq, struct irq +@@ -1201,6 +1209,9 @@ __setup_irq(unsigned int irq, struct irq irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } diff --git a/patches/localversion.patch b/patches/localversion.patch index 9dfd443..5dacefd 100644 --- a/patches/localversion.patch +++ b/patches/localversion.patch @@ -12,4 +12,4 @@ Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ -+-rt6 ++-rt7 diff --git a/patches/md-disable-bcache.patch 
b/patches/md-disable-bcache.patch new file mode 100644 index 0000000..7b47e0d --- /dev/null +++ b/patches/md-disable-bcache.patch @@ -0,0 +1,35 @@ +From a94d9b765f54e3ab9d11156c7a899c71a9185f1c Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 29 Aug 2013 11:48:57 +0200 +Subject: [PATCH] md: disable bcache +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +It uses anon semaphores +|drivers/md/bcache/request.c: In function ‘cached_dev_write_complete’: +|drivers/md/bcache/request.c:1007:2: error: implicit declaration of function ‘up_read_non_owner’ [-Werror=implicit-function-declaration] +| up_read_non_owner(&dc->writeback_lock); +| ^ +|drivers/md/bcache/request.c: In function ‘request_write’: +|drivers/md/bcache/request.c:1033:2: error: implicit declaration of function ‘down_read_non_owner’ [-Werror=implicit-function-declaration] +| down_read_non_owner(&dc->writeback_lock); +| ^ + +either we get rid of those or we have to introduce them… + +Signed-off-by: Sebastian Andrzej Siewior +--- + drivers/md/bcache/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/md/bcache/Kconfig ++++ b/drivers/md/bcache/Kconfig +@@ -1,6 +1,7 @@ + + config BCACHE + tristate "Block device as cache" ++ depends on !PREEMPT_RT_FULL + ---help--- + Allows a block device to be used as cache for other devices; uses + a btree for indexing and the layout is optimized for SSDs. diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch new file mode 100644 index 0000000..f940c45 --- /dev/null +++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch @@ -0,0 +1,69 @@ +From 1bd263cbd3951f8f36ee6dcfe9160dafcfdd91fe Mon Sep 17 00:00:00 2001 +From: Sebastian Andrzej Siewior +Date: Thu, 29 Aug 2013 18:21:04 +0200 +Subject: [PATCH] ptrace: fix ptrace vs tasklist_lock race + +As explained by Alexander Fyodorov : + +|read_lock(&tasklist_lock) in ptrace_stop() is converted to mutex on RT kernel, +|and it can remove __TASK_TRACED from task->state (by moving it to +|task->saved_state). If parent does wait() on child followed by a sys_ptrace +|call, the following race can happen: +| +|- child sets __TASK_TRACED in ptrace_stop() +|- parent does wait() which eventually calls wait_task_stopped() and returns +| child's pid +|- child blocks on read_lock(&tasklist_lock) in ptrace_stop() and moves +| __TASK_TRACED flag to saved_state +|- parent calls sys_ptrace, which calls ptrace_check_attach() and wait_task_inactive() + +The patch is based on his initial patch where an additional check is +added in case the __TASK_TRACED moved to ->saved_state. The pi_lock is +taken in case the caller is interrupted between looking into ->state and +->saved_state. 
+ +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/sched.h | 19 ++++++++++++++++++- + 1 file changed, 18 insertions(+), 1 deletion(-) + +diff --git a/include/linux/sched.h b/include/linux/sched.h +index e0a05de..bd60b9d 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -175,7 +175,6 @@ extern char ___assert_task_state[1 - 2*!!( + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED) + +-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) + #define task_is_dead(task) ((task)->exit_state != 0) + #define task_is_stopped_or_traced(task) \ +@@ -2532,6 +2531,24 @@ static inline int signal_pending_state(long state, struct task_struct *p) + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); + } + ++static inline bool task_is_traced(struct task_struct *task) ++{ ++ bool traced = false; ++ ++ if (task->state & __TASK_TRACED) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* in case the task is sleeping on tasklist_lock */ ++ raw_spin_lock_irq(&task->pi_lock); ++ if (task->state & __TASK_TRACED) ++ traced = true; ++ else if (task->saved_state & __TASK_TRACED) ++ traced = true; ++ raw_spin_unlock_irq(&task->pi_lock); ++#endif ++ return traced; ++} ++ + /* + * cond_resched() and cond_resched_lock(): latency reduction via + * explicit rescheduling in places that are safe. The return +-- +1.8.4.rc3 + diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch index 2cf70ab..09accb1 100644 --- a/patches/random-make-it-work-on-rt.patch +++ b/patches/random-make-it-work-on-rt.patch @@ -99,7 +99,7 @@ Cc: stable-rt@vger.kernel.org note_interrupt(irq, desc, retval); --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -880,6 +880,12 @@ static int irq_thread(void *data) +@@ -953,6 +953,12 @@ static int irq_thread(void *data) if (!noirqdebug) note_interrupt(action->irq, desc, action_ret); diff --git a/patches/rcu-more-swait-conversions.patch b/patches/rcu-more-swait-conversions.patch index adb85c1..426e0a4 100644 --- a/patches/rcu-more-swait-conversions.patch +++ b/patches/rcu-more-swait-conversions.patch @@ -1,8 +1,19 @@ -Subject: rcu-more-swait-conversions.patch +From eddcd14571497d3d5d6ce7df0ee1bf2ecec72292 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 31 Jul 2013 19:00:35 +0200 +Subject: [PATCH] rcu-more-swait-conversions.patch Signed-off-by: Thomas Gleixner + +Merged Steven's + + static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) { +- swait_wake(&rnp->nocb_gp_wq[rnp->completed & 0x1]); ++ wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); + } + +Signed-off-by: Steven Rostedt +Signed-off-by: Sebastian Andrzej Siewior --- kernel/rcutree.h | 5 +++-- kernel/rcutree_plugin.h | 14 +++++++------- @@ -43,7 +54,7 @@ Signed-off-by: Thomas Gleixner static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) { - wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); -+ swait_wake(&rnp->nocb_gp_wq[rnp->completed & 0x1]); ++ swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); } /* diff --git a/patches/series b/patches/series index 2267ed0..317eeea 100644 --- a/patches/series +++ b/patches/series @@ -96,6 +96,8 @@ tracing-account-for-preempt-off-in-preempt_schedule.patch # PTRACE/SIGNAL crap signal-revert-ptrace-preempt-magic.patch +# wait for feedback +# ptrace-fix-ptrace-vs-tasklist_lock-race.patch # ARM lock annotation arm-convert-boot-lock-to-raw.patch @@ -232,6 +234,7 @@ 
list_bl.h-make-list-head-locking-RT-safe.patch genirq-nodebug-shirq.patch genirq-disable-irqpoll-on-rt.patch genirq-force-threading.patch +genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch # DRIVERS NET drivers-net-fix-livelock-issues.patch @@ -610,13 +613,11 @@ drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch # SIMPLE WAITQUEUE wait-simple-implementation.patch wait-simple-rework-for-completions.patch -swait-Add-memory-barrier-before-checking-list-empty.patch -swait-Add-smp_mb-after-setting-h-list.patch +simple-wait-rename-and-export-the-equivalent-of-wait.patch rcutiny-use-simple-waitqueue.patch treercu-use-simple-waitqueue.patch rcu-more-swait-conversions.patch -rcu-swait-Fix-RCU-conversion-of-wake_up_all-to-swait.patch completion-use-simple-wait-queues.patch @@ -624,6 +625,9 @@ completion-use-simple-wait-queues.patch # Revisit: We need this in other places as well move_sched_delayed_work_to_helper.patch +# bcache disabled +md-disable-bcache.patch + # Enable full RT kconfig-disable-a-few-options-rt.patch kconfig-preempt-rt-full.patch diff --git a/patches/simple-wait-rename-and-export-the-equivalent-of-wait.patch b/patches/simple-wait-rename-and-export-the-equivalent-of-wait.patch new file mode 100644 index 0000000..c140d82 --- /dev/null +++ b/patches/simple-wait-rename-and-export-the-equivalent-of-wait.patch @@ -0,0 +1,63 @@ +From 069b715a6b4f86a4a09a0be1d7156c7b388eaf2d Mon Sep 17 00:00:00 2001 +From: Paul Gortmaker +Date: Tue, 27 Aug 2013 14:20:26 -0400 +Subject: [PATCH] simple-wait: rename and export the equivalent of + waitqueue_active() + +The function "swait_head_has_waiters()" was internalized into +wait-simple.c but it parallels the waitqueue_active of normal +waitqueue support. Given that there are over 150 waitqueue_active +users in drivers/ fs/ kernel/ and the like, lets make it globally +visible, and rename it to parallel the waitqueue_active accordingly. +We'll need to do this if we expect to expand its usage beyond RT. 
+ +Signed-off-by: Paul Gortmaker +Signed-off-by: Sebastian Andrzej Siewior +--- + include/linux/wait-simple.h | 8 ++++++++ + kernel/wait-simple.c | 10 +--------- + 2 files changed, 9 insertions(+), 9 deletions(-) + +--- a/include/linux/wait-simple.h ++++ b/include/linux/wait-simple.h +@@ -47,6 +47,14 @@ extern void swait_prepare(struct swait_h + extern void swait_finish_locked(struct swait_head *head, struct swaiter *w); + extern void swait_finish(struct swait_head *head, struct swaiter *w); + ++/* Check whether a head has waiters enqueued */ ++static inline bool swaitqueue_active(struct swait_head *h) ++{ ++ /* Make sure the condition is visible before checking list_empty() */ ++ smp_mb(); ++ return !list_empty(&h->list); ++} ++ + /* + * Wakeup functions + */ +--- a/kernel/wait-simple.c ++++ b/kernel/wait-simple.c +@@ -26,14 +26,6 @@ static inline void __swait_dequeue(struc + list_del_init(&w->node); + } + +-/* Check whether a head has waiters enqueued */ +-static inline bool swait_head_has_waiters(struct swait_head *h) +-{ +- /* Make sure the condition is visible before checking list_empty() */ +- smp_mb(); +- return !list_empty(&h->list); +-} +- + void __init_swait_head(struct swait_head *head, struct lock_class_key *key) + { + raw_spin_lock_init(&head->lock); +@@ -112,7 +104,7 @@ __swait_wake(struct swait_head *head, un + unsigned long flags; + int woken; + +- if (!swait_head_has_waiters(head)) ++ if (!swaitqueue_active(head)) + return 0; + + raw_spin_lock_irqsave(&head->lock, flags); diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch index 0d2cbe5..65c1bf9 100644 --- a/patches/softirq-disable-softirq-stacks-for-rt.patch +++ b/patches/softirq-disable-softirq-stacks-for-rt.patch @@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner +#endif --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -445,10 +445,9 @@ struct softirq_action +@@ -446,10 +446,9 @@ struct softirq_action void (*action)(struct softirq_action *); }; diff --git a/patches/softirq-local-lock.patch b/patches/softirq-local-lock.patch index 6825a0a..5c496a8 100644 --- a/patches/softirq-local-lock.patch +++ b/patches/softirq-local-lock.patch @@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner * Are we in NMI context? 
--- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -447,7 +447,13 @@ struct softirq_action +@@ -448,7 +448,13 @@ struct softirq_action asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); @@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); -@@ -634,6 +640,12 @@ void tasklet_hrtimer_cancel(struct taskl +@@ -635,6 +641,12 @@ void tasklet_hrtimer_cancel(struct taskl tasklet_kill(&ttimer->tasklet); } diff --git a/patches/softirq-sanitize-softirq-pending.patch b/patches/softirq-sanitize-softirq-pending.patch index 22906fd..1e9ff82 100644 --- a/patches/softirq-sanitize-softirq-pending.patch +++ b/patches/softirq-sanitize-softirq-pending.patch @@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -455,6 +455,8 @@ extern void __raise_softirq_irqoff(unsig +@@ -456,6 +456,8 @@ extern void __raise_softirq_irqoff(unsig extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); diff --git a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch index 23fe479..5dbdae3 100644 --- a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch +++ b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch @@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -502,8 +502,9 @@ extern void __send_remote_softirq(struct +@@ -503,8 +503,9 @@ extern void __send_remote_softirq(struct to be executed on some cpu at least once after this. * If the tasklet is already scheduled, but its execution is still not started, it will be executed only once. @@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner * Tasklet is strictly serialized wrt itself, but not wrt another tasklets. If client needs some intertask synchronization, he makes it with spinlocks. -@@ -528,27 +529,36 @@ struct tasklet_struct name = { NULL, 0, +@@ -529,27 +530,36 @@ struct tasklet_struct name = { NULL, 0, enum { TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ @@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner #define tasklet_unlock_wait(t) do { } while (0) #define tasklet_unlock(t) do { } while (0) #endif -@@ -597,17 +607,8 @@ static inline void tasklet_disable(struc +@@ -598,17 +608,8 @@ static inline void tasklet_disable(struc smp_mb(); } diff --git a/patches/wait-simple-implementation.patch b/patches/wait-simple-implementation.patch index 5e500d6..2ebd713 100644 --- a/patches/wait-simple-implementation.patch +++ b/patches/wait-simple-implementation.patch @@ -13,15 +13,17 @@ runtime overhead. Signed-off-by: Thomas Gleixner +smp_mb() added by Steven Rostedt to fix a race condition with swait +wakeups vs adding items to the list. 
--- - include/linux/wait-simple.h | 231 ++++++++++++++++++++++++++++++++++++++++++++ + include/linux/wait-simple.h | 235 ++++++++++++++++++++++++++++++++++++++++++++ kernel/Makefile | 2 kernel/wait-simple.c | 68 ++++++++++++ - 3 files changed, 300 insertions(+), 1 deletion(-) + 3 files changed, 304 insertions(+), 1 deletion(-) --- /dev/null +++ b/include/linux/wait-simple.h -@@ -0,0 +1,231 @@ +@@ -0,0 +1,235 @@ +#ifndef _LINUX_WAIT_SIMPLE_H +#define _LINUX_WAIT_SIMPLE_H + @@ -78,6 +80,8 @@ Signed-off-by: Thomas Gleixner +static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) +{ + list_add(&w->node, &head->list); ++ /* We can't let the condition leak before the setting of head */ ++ smp_mb(); +} + +/* @@ -93,6 +97,8 @@ Signed-off-by: Thomas Gleixner + */ +static inline bool swait_head_has_waiters(struct swait_head *h) +{ ++ /* Make sure the condition is visible before checking list_empty() */ ++ smp_mb(); + return !list_empty(&h->list); +} + diff --git a/patches/wait-simple-rework-for-completions.patch b/patches/wait-simple-rework-for-completions.patch index ecf8adb..98f16ad 100644 --- a/patches/wait-simple-rework-for-completions.patch +++ b/patches/wait-simple-rework-for-completions.patch @@ -4,9 +4,9 @@ Date: Thu, 10 Jan 2013 11:47:35 +0100 Signed-off-by: Thomas Gleixner --- - include/linux/wait-simple.h | 56 +++++++---------------------------- - kernel/wait-simple.c | 69 ++++++++++++++++++++++++++++++++++++++------ - 2 files changed, 72 insertions(+), 53 deletions(-) + include/linux/wait-simple.h | 60 +++++++----------------------------- + kernel/wait-simple.c | 73 ++++++++++++++++++++++++++++++++++++++------ + 2 files changed, 76 insertions(+), 57 deletions(-) --- a/include/linux/wait-simple.h +++ b/include/linux/wait-simple.h @@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key); #define init_swait_head(swh) \ -@@ -40,59 +42,25 @@ extern void __init_swait_head(struct swa +@@ -40,63 +42,25 @@ extern void __init_swait_head(struct swa /* * Waiter functions */ @@ -47,6 +47,8 @@ Signed-off-by: Thomas Gleixner -static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) -{ - list_add(&w->node, &head->list); +- /* We can't let the condition leak before the setting of head */ +- smp_mb(); -} - -/* @@ -62,6 +64,8 @@ Signed-off-by: Thomas Gleixner - */ -static inline bool swait_head_has_waiters(struct swait_head *h) -{ +- /* Make sure the condition is visible before checking list_empty() */ +- smp_mb(); - return !list_empty(&h->list); -} - @@ -97,7 +101,7 @@ Signed-off-by: Thomas Gleixner DEFINE_SWAITER(__wait); \ --- a/kernel/wait-simple.c +++ b/kernel/wait-simple.c -@@ -12,6 +12,24 @@ +@@ -12,6 +12,28 @@ #include #include @@ -105,6 +109,8 @@ Signed-off-by: Thomas Gleixner +static inline void __swait_enqueue(struct swait_head *head, struct swaiter *w) +{ + list_add(&w->node, &head->list); ++ /* We can't let the condition leak before the setting of head */ ++ smp_mb(); +} + +/* Removes w from head->list. Must be called with head->lock locked. 
*/ @@ -116,13 +122,15 @@ Signed-off-by: Thomas Gleixner +/* Check whether a head has waiters enqueued */ +static inline bool swait_head_has_waiters(struct swait_head *h) +{ ++ /* Make sure the condition is visible before checking list_empty() */ ++ smp_mb(); + return !list_empty(&h->list); +} + void __init_swait_head(struct swait_head *head, struct lock_class_key *key) { raw_spin_lock_init(&head->lock); -@@ -20,19 +38,31 @@ void __init_swait_head(struct swait_head +@@ -20,19 +42,31 @@ void __init_swait_head(struct swait_head } EXPORT_SYMBOL(__init_swait_head); @@ -158,7 +166,7 @@ Signed-off-by: Thomas Gleixner void swait_finish(struct swait_head *head, struct swaiter *w) { unsigned long flags; -@@ -46,22 +76,43 @@ void swait_finish(struct swait_head *hea +@@ -46,22 +80,43 @@ void swait_finish(struct swait_head *hea } EXPORT_SYMBOL(swait_finish); -- cgit 1.2.3-korg
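
A note on the intended use of the swaitqueue_active() helper exported by the simple-wait rename patch above: it mirrors the classic waitqueue_active() idiom on the waker side, which is why the smp_mb() lives inside the helper. The sketch below is illustrative only and is not part of the patch set; the my_dev structure and its done flag are invented for the example, while struct swait_head, init_swait_head(), swaitqueue_active() and swait_wake() are the wait-simple.h interfaces shown in the diffs above.

	#include <linux/wait-simple.h>

	/* Hypothetical device state, for illustration only. */
	struct my_dev {
		struct swait_head	wait;	/* set up with init_swait_head() */
		int			done;	/* condition the waiter sleeps on */
	};

	/* Waker side: publish the condition, then wake only if someone waits. */
	static void my_dev_complete(struct my_dev *dev)
	{
		dev->done = 1;
		/*
		 * swaitqueue_active() issues smp_mb() before list_empty(), so
		 * the dev->done store above is visible before the emptiness
		 * check, pairing with the barrier added to __swait_enqueue()
		 * on the waiter side.
		 */
		if (swaitqueue_active(&dev->wait))
			swait_wake(&dev->wait);
	}

The waiter side would pair this with the swait_prepare()/swait_finish() calls declared in wait-simple.h, re-checking dev->done in between, just as the completion and RCU conversions in this queue do.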