author    Paul Gortmaker <paul.gortmaker@windriver.com>    2012-10-09 19:23:14 -0400
committer Paul Gortmaker <paul.gortmaker@windriver.com>    2012-10-09 19:23:14 -0400
commit    19aa1730ceb5605bf3cf6e24aeb990643a925cfc (patch)
tree      60f0a998dd87adc1a6ba8f26cc8c1efabeced165
parent    bc3382f01f4d9321b635d5da7c199c8160c0a86e (diff)
download  3.6-rt-patches-19aa1730ceb5605bf3cf6e24aeb990643a925cfc.tar.gz
RT: 3.6.1 baseline patch set (tag: v3.6.1-rt1)
Corresponding to an untar of the following md5:

    46bfe597e6ed926a6bec34366eeefb6b  patches-3.6.1-rt1.tar.xz

Current location:

    http://www.kernel.org/pub/linux/kernel/projects/rt/3.6/patches-3.6.1-rt1.tar.xz

If the above doesn't work anymore, try:

    http://www.kernel.org/pub/linux/kernel/projects/rt/3.6/older/

as it follows the naming convention of the previous release(s).

Also note the lwn post from Thomas, describing the release:

    http://lwn.net/Articles/518993/

Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
-rw-r--r--acpi-use-local-irq-nort.patch27
-rw-r--r--arch-use-pagefault-disabled.patch330
-rw-r--r--arm-allow-irq-threading.patch24
-rw-r--r--arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch58
-rw-r--r--arm-at91-tclib-default-to-tclib-timer-for-rt.patch34
-rw-r--r--arm-convert-boot-lock-to-raw.patch248
-rw-r--r--arm-disable-highmem-on-rt.patch22
-rw-r--r--arm-mark-pmu-interupt-no-thread.patch25
-rw-r--r--arm-omap-make-wakeupgen_lock-raw.patch64
-rw-r--r--ata-disable-interrupts-if-non-rt.patch66
-rw-r--r--block-shorten-interrupt-disabled-regions.patch117
-rw-r--r--bug-rt-dependend-variants.patch36
-rw-r--r--clocksource-tclib-allow-higher-clockrates.patch163
-rw-r--r--cond-resched-lock-rt-tweak.patch22
-rw-r--r--cond-resched-softirq-rt.patch51
-rw-r--r--cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch123
-rw-r--r--cpu-rt-rework-cpu-down.patch550
-rw-r--r--cpu-rt-variants.patch28
-rw-r--r--cpumask-disable-offstack-on-rt.patch38
-rw-r--r--debugobjects-rt.patch25
-rw-r--r--dm-make-rt-aware.patch36
-rw-r--r--drivers-net-8139-disable-irq-nosync.patch27
-rw-r--r--drivers-net-at91-make-mdio-protection-rt-safe.patch54
-rw-r--r--drivers-net-ehea-mark-rx-irq-no-thread.patch53
-rw-r--r--drivers-net-fix-livelock-issues.patch140
-rw-r--r--drivers-net-gianfar-make-rt-aware.patch57
-rw-r--r--drivers-net-tulip-add-missing-pci-disable.patch25
-rw-r--r--drivers-net-vortex-fix-locking-issues.patch50
-rw-r--r--drivers-random-reduce-preempt-disabled-region.patch41
-rw-r--r--drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch49
-rw-r--r--drivers-serial-cleanup-locking-for-rt.patch44
-rw-r--r--drivers-tty-fix-omap-lock-crap.patch40
-rw-r--r--early-printk-consolidate.patch498
-rw-r--r--epoll-use-get-cpu-light.patch28
-rw-r--r--filemap-fix-up.patch24
-rw-r--r--fix-rt-int3-x86_32-3.2-rt.patch114
-rw-r--r--fs-block-rt-support.patch44
-rw-r--r--fs-dcache-use-cpu-chill-in-trylock-loops.patch102
-rw-r--r--fs-jbd-pull-plug-when-waiting-for-space.patch31
-rw-r--r--fs-jbd-replace-bh_state-lock.patch104
-rw-r--r--fs-namespace-preemption-fix.patch32
-rw-r--r--fs-ntfs-disable-interrupt-non-rt.patch61
-rw-r--r--fs-replace-bh_uptodate_lock-for-rt.patch167
-rw-r--r--ftrace-crap.patch92
-rw-r--r--ftrace-migrate-disable-tracing.patch81
-rw-r--r--futex-requeue-pi-fix.patch118
-rw-r--r--generic-cmpxchg-use-raw-local-irq.patch49
-rw-r--r--genirq-add-default-mask-cmdline-option.patch70
-rw-r--r--genirq-disable-irqpoll-on-rt.patch41
-rw-r--r--genirq-force-threading.patch50
-rw-r--r--genirq-nodebug-shirq.patch22
-rw-r--r--harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch37
-rw-r--r--hotplug-call-cpu_unplug_begin-a-little-early.patch61
-rw-r--r--hotplug-light-get-online-cpus.patch212
-rw-r--r--hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch26
-rw-r--r--hotplug-use-migrate-disable.patch38
-rw-r--r--hrtimer-add-missing-debug_activate-aid-was-re-announce-3-0-6-rt17.patch70
-rw-r--r--hrtimer-fix-reprogram-madness.patch42
-rw-r--r--hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch430
-rw-r--r--hrtimers-prepare-full-preemption.patch203
-rw-r--r--hwlatdetect.patch1352
-rw-r--r--ide-use-nort-local-irq-variants.patch183
-rw-r--r--infiniband-mellanox-ib-use-nort-irq.patch42
-rw-r--r--inpt-gameport-use-local-irq-nort.patch46
-rw-r--r--intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch69
-rw-r--r--ipc-make-rt-aware.patch89
-rw-r--r--ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch66
-rw-r--r--ipc-sem-rework-semaphore-wakeups.patch75
-rw-r--r--irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch154
-rw-r--r--jump-label-rt.patch23
-rw-r--r--kconfig-disable-a-few-options-rt.patch50
-rw-r--r--kconfig-preempt-rt-full.patch61
-rw-r--r--kgb-serial-hackaround.patch106
-rw-r--r--latency-hist.patch1825
-rw-r--r--lglocks-rt.patch168
-rw-r--r--list-add-list-last-entry.patch31
-rw-r--r--local-irq-rt-depending-variants.patch56
-rw-r--r--local-var.patch25
-rw-r--r--local-vars-migrate-disable.patch48
-rw-r--r--localversion.patch17
-rw-r--r--lockdep-no-softirq-accounting-on-rt.patch60
-rw-r--r--lockdep-selftest-convert-spinlock-to-raw-spinlock.patch92
-rw-r--r--lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch58
-rw-r--r--locking-various-init-fixes.patch96
-rw-r--r--md-raid5-percpu-handling-rt-aware.patch65
-rw-r--r--migrate-disable-rt-variant.patch29
-rw-r--r--mips-disable-highmem-on-rt.patch22
-rw-r--r--mips-enable-interrupts-in-signal.patch21
-rw-r--r--mm-allow-slab-rt.patch31
-rw-r--r--mm-cgroup-page-bit-spinlock.patch95
-rw-r--r--mm-convert-swap-to-percpu-locked.patch124
-rw-r--r--mm-make-vmstat-rt-aware.patch88
-rw-r--r--mm-page-alloc-fix.patch24
-rw-r--r--mm-page-alloc-use-list-last-entry.patch22
-rw-r--r--mm-page-alloc-use-local-lock-on-target-cpu.patch59
-rw-r--r--mm-page_alloc-reduce-lock-sections-further.patch197
-rw-r--r--mm-page_alloc-rt-friendly-per-cpu-pages.patch211
-rw-r--r--mm-prepare-pf-disable-discoupling.patch126
-rw-r--r--mm-protect-activate-switch-mm.patch47
-rw-r--r--mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch80
-rw-r--r--mm-remove-preempt-count-from-pf.patch36
-rw-r--r--mm-rt-kmap-atomic-scheduling.patch119
-rw-r--r--mm-scatterlist-dont-disable-irqs-on-RT.patch40
-rw-r--r--mm-shrink-the-page-frame-to-rt-size.patch147
-rw-r--r--mm-slab-fix-potential-deadlock.patch122
-rw-r--r--mm-slab-more-lock-breaks.patch269
-rw-r--r--mm-slab-move-debug-out.patch39
-rw-r--r--mm-slab-wrap-functions.patch451
-rw-r--r--mm-vmalloc-use-get-cpu-light.patch66
-rw-r--r--mutex-no-spin-on-rt.patch19
-rw-r--r--net-another-local-irq-disable-alloc-atomic-headache.patch49
-rw-r--r--net-flip-lock-dep-thingy.patch113
-rw-r--r--net-netif_rx_ni-migrate-disable.patch27
-rw-r--r--net-tx-action-avoid-livelock-on-rt.patch94
-rw-r--r--net-use-cpu-chill.patch66
-rw-r--r--net-use-cpu-light-in-ip-send-unicast-reply.patch32
-rw-r--r--net-wireless-warn-nort.patch22
-rw-r--r--ntp-make-ntp-lock-raw-sigh.patch127
-rw-r--r--of-convert-devtree-lock.patch392
-rw-r--r--of-fixup-recursive-locking.patch195
-rw-r--r--oleg-signal-rt-fix.patch150
-rw-r--r--panic-disable-random-on-rt.patch22
-rw-r--r--patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch166
-rw-r--r--pci-access-use-__wake_up_all_locked.patch27
-rw-r--r--perf-make-swevent-hrtimer-irqsafe.patch70
-rw-r--r--perf-move-irq-work-to-softirq-in-rt.patch67
-rw-r--r--peter_zijlstra-frob-hrtimer.patch106
-rw-r--r--peter_zijlstra-frob-migrate_disable-2.patch186
-rw-r--r--peter_zijlstra-frob-migrate_disable.patch69
-rw-r--r--peter_zijlstra-frob-pagefault_disable.patch388
-rw-r--r--peter_zijlstra-frob-rcu.patch168
-rw-r--r--peterz-raw_pagefault_disable.patch151
-rw-r--r--peterz-srcu-crypto-chain.patch196
-rw-r--r--pid-h-include-atomic-h.patch21
-rw-r--r--ping-sysrq.patch129
-rw-r--r--posix-timers-avoid-wakeups-when-no-timers-are-active.patch59
-rw-r--r--posix-timers-no-broadcast.patch35
-rw-r--r--posix-timers-shorten-cpu-timers-thread.patch28
-rw-r--r--posix-timers-thread-posix-cpu-timers-on-rt.patch313
-rw-r--r--power-disable-highmem-on-rt.patch22
-rw-r--r--power-use-generic-rwsem-on-rt.patch23
-rw-r--r--ppc-mark-low-level-handlers-no-thread.patch39
-rw-r--r--preempt-nort-rt-variants.patch54
-rw-r--r--printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch37
-rw-r--r--printk-kill.patch121
-rw-r--r--printk-rt-aware.patch103
-rw-r--r--radix-tree-rt-aware.patch72
-rw-r--r--random-make-it-work-on-rt.patch124
-rw-r--r--rcu-fix-build-break.patch60
-rw-r--r--rcu-force-preempt-rcu-for-rt.patch28
-rw-r--r--rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch266
-rw-r--r--rcu-tiny-merge-bh.patch26
-rw-r--r--re-migrate_disable-race-with-cpu-hotplug-3f.patch36
-rw-r--r--re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch71
-rw-r--r--relay-fix-timer-madness.patch54
-rw-r--r--resource-counters-use-localirq-nort.patch86
-rw-r--r--rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch47
-rw-r--r--rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch48
-rw-r--r--rt-add-rt-locks.patch910
-rw-r--r--rt-add-rt-spinlock-to-headers.patch124
-rw-r--r--rt-add-rt-to-mutex-headers.patch145
-rw-r--r--rt-disable-rt-group-sched.patch29
-rw-r--r--rt-introduce-cpu-chill.patch30
-rw-r--r--rt-local-irq-lock.patch244
-rw-r--r--rt-mutex-add-sleeping-spinlocks-support.patch627
-rw-r--r--rt-preempt-base-config.patch51
-rw-r--r--rt-rcutree-warn-fix.patch44
-rw-r--r--rt-rw-lockdep-annotations.patch121
-rw-r--r--rt-sched-do-not-compare-cpu-masks-in-scheduler.patch40
-rw-r--r--rt-sched-have-migrate_disable-ignore-bounded-threads.patch70
-rw-r--r--rt-sched-postpone-actual-migration-disalbe-to-schedule.patch307
-rw-r--r--rt-serial-warn-fix.patch54
-rw-r--r--rt-tracing-show-padding-as-unsigned-short.patch48
-rw-r--r--rtmutex-avoid-include-hell.patch22
-rw-r--r--rtmutex-futex-prepare-rt.patch221
-rw-r--r--rtmutex-lock-killable.patch84
-rw-r--r--rwsem-add-rt-variant.patch159
-rw-r--r--sched-better-debug-output-for-might-sleep.patch76
-rw-r--r--sched-clear-pf-thread-bound-on-fallback-rq.patch26
-rw-r--r--sched-cond-resched.patch34
-rw-r--r--sched-delay-put-task.patch71
-rw-r--r--sched-disable-rt-group-sched-on-rt.patch30
-rw-r--r--sched-disable-ttwu-queue.patch29
-rw-r--r--sched-limit-nr-migrate.patch25
-rw-r--r--sched-might-sleep-do-not-account-rcu-depth.patch49
-rw-r--r--sched-migrate-disable.patch200
-rw-r--r--sched-mmdrop-delayed.patch153
-rw-r--r--sched-rt-fix-migrate_enable-thinko.patch67
-rw-r--r--sched-rt-mutex-wakeup.patch88
-rw-r--r--sched-teach-migrate_disable-about-atomic-contexts.patch92
-rw-r--r--sched-ttwu-ensure-success-return-is-correct.patch36
-rw-r--r--scsi-fcoe-rt-aware.patch117
-rw-r--r--scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch49
-rw-r--r--seqlock-prevent-rt-starvation.patch168
-rw-r--r--seqlock-remove-unused-functions.patch46
-rw-r--r--seqlock-use-seqcount.patch220
-rw-r--r--series598
-rw-r--r--signal-fix-up-rcu-wreckage.patch37
-rw-r--r--signal-revert-ptrace-preempt-magic.patch29
-rw-r--r--signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch215
-rw-r--r--signals-do-not-wake-self.patch27
-rw-r--r--skbufhead-raw-lock.patch133
-rw-r--r--softirq-disable-softirq-stacks-for-rt.patch191
-rw-r--r--softirq-export-in-serving-softirq.patch30
-rw-r--r--softirq-fix-unplug-deadlock.patch68
-rw-r--r--softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch135
-rw-r--r--softirq-local-lock.patch325
-rw-r--r--softirq-make-fifo.patch60
-rw-r--r--softirq-make-serving-softirqs-a-task-flag.patch78
-rw-r--r--softirq-preempt-fix-3-re.txt153
-rw-r--r--softirq-sanitize-softirq-pending.patch116
-rw-r--r--softirq-split-handling-function.patch70
-rw-r--r--softirq-split-locks.patch449
-rw-r--r--softirq-split-out-code.patch155
-rw-r--r--softirq-thread-do-softirq.patch35
-rw-r--r--spinlock-types-separate-raw.patch212
-rw-r--r--stomp-machine-deal-clever-with-stopper-lock.patch60
-rw-r--r--stomp-machine-mark-stomper-thread.patch34
-rw-r--r--stomp-machine-raw-lock.patch176
-rw-r--r--stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch64
-rw-r--r--suspend-prevernt-might-sleep-splats.patch112
-rw-r--r--sysctl-include-atomic-h.patch21
-rw-r--r--sysfs-realtime-entry.patch49
-rw-r--r--tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch406
-rw-r--r--tasklist-lock-fix-section-conflict.patch61
-rw-r--r--timekeeping-split-xtime-lock.patch527
-rw-r--r--timer-delay-waking-softirqs-from-the-jiffy-tick.patch77
-rw-r--r--timer-fd-avoid-live-lock.patch29
-rw-r--r--timer-handle-idle-trylock-in-get-next-timer-irq.patch81
-rw-r--r--timers-avoid-the-base-null-otptimization-on-rt.patch70
-rw-r--r--timers-mov-printk_tick-to-soft-interrupt.patch31
-rw-r--r--timers-preempt-rt-support.patch58
-rw-r--r--timers-prepare-for-full-preemption.patch126
-rw-r--r--tracing-account-for-preempt-off-in-preempt_schedule.patch48
-rw-r--r--tty-use-local-irq-nort.patch49
-rw-r--r--upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch67
-rw-r--r--usb-fix-mouse-problem-copying-large-data.patch38
-rw-r--r--usb-hcd-use-local-irq-nort.patch36
-rw-r--r--user-use-local-irq-nort.patch31
-rw-r--r--workqueue-use-get-cpu-light.patch24
-rw-r--r--x86-crypto-reduce-preempt-disabled-regions.patch114
-rw-r--r--x86-disable-debug-stack.patch108
-rw-r--r--x86-highmem-warn.patch27
-rw-r--r--x86-hpet-disable-msi-on-lenovo-w510.patch66
-rw-r--r--x86-io-apic-migra-no-unmask.patch28
-rw-r--r--x86-kprobes-remove-bogus-preempt-enable.patch29
-rw-r--r--x86-kvm-require-const-tsc-for-rt.patch27
-rw-r--r--x86-mce-timer-hrtimer.patch134
-rw-r--r--x86-perf-uncore-deal-with-kfree.patch76
-rw-r--r--x86-stackprot-no-random-on-rt.patch49
-rw-r--r--x86-use-gen-rwsem-spinlocks-rt.patch30
251 files changed, 28625 insertions, 0 deletions
diff --git a/acpi-use-local-irq-nort.patch b/acpi-use-local-irq-nort.patch
new file mode 100644
index 0000000..4581836
--- /dev/null
+++ b/acpi-use-local-irq-nort.patch
@@ -0,0 +1,27 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 21 Jul 2009 22:54:51 +0200
+Subject: acpi: Do not disable interrupts on PREEMPT_RT
+
+Use the local_irq_*_nort() variants.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/x86/include/asm/acpi.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/arch/x86/include/asm/acpi.h
+===================================================================
+--- linux-stable.orig/arch/x86/include/asm/acpi.h
++++ linux-stable/arch/x86/include/asm/acpi.h
+@@ -51,8 +51,8 @@
+
+ #define ACPI_ASM_MACROS
+ #define BREAKPOINT3
+-#define ACPI_DISABLE_IRQS() local_irq_disable()
+-#define ACPI_ENABLE_IRQS() local_irq_enable()
++#define ACPI_DISABLE_IRQS() local_irq_disable_nort()
++#define ACPI_ENABLE_IRQS() local_irq_enable_nort()
+ #define ACPI_FLUSH_CPU_CACHE() wbinvd()
+
+ int __acpi_acquire_global_lock(unsigned int *lock);
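The local_irq_*_nort() helpers used in this patch are introduced by local-irq-rt-depending-variants.patch later in the series. As a rough sketch of the idea (the exact definitions in that patch may differ slightly), they fall back to the normal IRQ primitives on a non-RT kernel and become no-ops on PREEMPT_RT_FULL, so sections that only need hardirq protection on mainline stay preemptible on RT:

    /* Sketch of the _nort() helpers; see local-irq-rt-depending-variants.patch
     * for the definitions actually used by this series. */
    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_disable_nort()       do { } while (0)
    # define local_irq_enable_nort()        do { } while (0)
    # define local_irq_save_nort(flags)     local_save_flags(flags)
    # define local_irq_restore_nort(flags)  (void)(flags)
    #else
    # define local_irq_disable_nort()       local_irq_disable()
    # define local_irq_enable_nort()        local_irq_enable()
    # define local_irq_save_nort(flags)     local_irq_save(flags)
    # define local_irq_restore_nort(flags)  local_irq_restore(flags)
    #endif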
diff --git a/arch-use-pagefault-disabled.patch b/arch-use-pagefault-disabled.patch
new file mode 100644
index 0000000..537c4ac
--- /dev/null
+++ b/arch-use-pagefault-disabled.patch
@@ -0,0 +1,330 @@
+Subject: mm: Fixup all fault handlers to check current->pagefault_disable
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 17 Mar 2011 11:32:28 +0100
+
+Necessary for decoupling pagefault disable from preempt count.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/alpha/mm/fault.c | 2 +-
+ arch/arm/mm/fault.c | 2 +-
+ arch/avr32/mm/fault.c | 3 ++-
+ arch/cris/mm/fault.c | 2 +-
+ arch/frv/mm/fault.c | 2 +-
+ arch/ia64/mm/fault.c | 2 +-
+ arch/m32r/mm/fault.c | 2 +-
+ arch/m68k/mm/fault.c | 2 +-
+ arch/microblaze/mm/fault.c | 2 +-
+ arch/mips/mm/fault.c | 2 +-
+ arch/mn10300/mm/fault.c | 2 +-
+ arch/parisc/mm/fault.c | 2 +-
+ arch/powerpc/mm/fault.c | 2 +-
+ arch/s390/mm/fault.c | 6 ++++--
+ arch/score/mm/fault.c | 2 +-
+ arch/sh/mm/fault.c | 2 +-
+ arch/sparc/mm/fault_32.c | 2 +-
+ arch/sparc/mm/fault_64.c | 2 +-
+ arch/tile/mm/fault.c | 2 +-
+ arch/um/kernel/trap.c | 2 +-
+ arch/x86/mm/fault.c | 2 +-
+ arch/xtensa/mm/fault.c | 2 +-
+ 22 files changed, 26 insertions(+), 23 deletions(-)
+
+Index: linux-stable/arch/alpha/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/alpha/mm/fault.c
++++ linux-stable/arch/alpha/mm/fault.c
+@@ -108,7 +108,7 @@ do_page_fault(unsigned long address, uns
+
+ /* If we're in an interrupt context, or have no user context,
+ we must not take the fault. */
+- if (!mm || in_atomic())
++ if (!mm || in_atomic() || current->pagefault_disabled)
+ goto no_context;
+
+ #ifdef CONFIG_ALPHA_LARGE_VMALLOC
+Index: linux-stable/arch/arm/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/arm/mm/fault.c
++++ linux-stable/arch/arm/mm/fault.c
+@@ -279,7 +279,7 @@ do_page_fault(unsigned long addr, unsign
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto no_context;
+
+ /*
+Index: linux-stable/arch/avr32/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/avr32/mm/fault.c
++++ linux-stable/arch/avr32/mm/fault.c
+@@ -81,7 +81,8 @@ asmlinkage void do_page_fault(unsigned l
+ * If we're in an interrupt or have no user context, we must
+ * not take the fault...
+ */
+- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
++ if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) ||
++ current->pagefault_disabled)
+ goto no_context;
+
+ local_irq_enable();
+Index: linux-stable/arch/cris/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/cris/mm/fault.c
++++ linux-stable/arch/cris/mm/fault.c
+@@ -114,7 +114,7 @@ do_page_fault(unsigned long address, str
+ * user context, we must not take the fault.
+ */
+
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto no_context;
+
+ retry:
+Index: linux-stable/arch/frv/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/frv/mm/fault.c
++++ linux-stable/arch/frv/mm/fault.c
+@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/ia64/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/ia64/mm/fault.c
++++ linux-stable/arch/ia64/mm/fault.c
+@@ -98,7 +98,7 @@ ia64_do_page_fault (unsigned long addres
+ /*
+ * If we're in an interrupt or have no user context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto no_context;
+
+ #ifdef CONFIG_VIRTUAL_MEM_MAP
+Index: linux-stable/arch/m32r/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/m32r/mm/fault.c
++++ linux-stable/arch/m32r/mm/fault.c
+@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user context or are running in an
+ * atomic region then we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto bad_area_nosemaphore;
+
+ /* When running in the kernel we expect faults to occur only to
+Index: linux-stable/arch/m68k/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/m68k/mm/fault.c
++++ linux-stable/arch/m68k/mm/fault.c
+@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs,
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto no_context;
+
+ retry:
+Index: linux-stable/arch/microblaze/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/microblaze/mm/fault.c
++++ linux-stable/arch/microblaze/mm/fault.c
+@@ -108,7 +108,7 @@ void do_page_fault(struct pt_regs *regs,
+ if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
+ is_write = 0;
+
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
+ if (kernel_mode(regs))
+ goto bad_area_nosemaphore;
+
+Index: linux-stable/arch/mips/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/mips/mm/fault.c
++++ linux-stable/arch/mips/mm/fault.c
+@@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault(
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto bad_area_nosemaphore;
+
+ retry:
+Index: linux-stable/arch/mn10300/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/mn10300/mm/fault.c
++++ linux-stable/arch/mn10300/mm/fault.c
+@@ -167,7 +167,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/parisc/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/parisc/mm/fault.c
++++ linux-stable/arch/parisc/mm/fault.c
+@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs,
+ unsigned long acc_type;
+ int fault;
+
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/powerpc/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/powerpc/mm/fault.c
++++ linux-stable/arch/powerpc/mm/fault.c
+@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_re
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
+- if (in_atomic() || mm == NULL) {
++ if (in_atomic() || mm == NULL || current->pagefault_disabled) {
+ if (!user_mode(regs))
+ return SIGSEGV;
+ /* in_atomic() in user mode is really bad,
+Index: linux-stable/arch/s390/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/s390/mm/fault.c
++++ linux-stable/arch/s390/mm/fault.c
+@@ -286,7 +286,8 @@ static inline int do_exception(struct pt
+ * user context.
+ */
+ fault = VM_FAULT_BADCONTEXT;
+- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
++ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm ||
++ tsk->pagefault_disabled))
+ goto out;
+
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+@@ -423,7 +424,8 @@ void __kprobes do_asce_exception(struct
+ unsigned long trans_exc_code;
+
+ trans_exc_code = regs->int_parm_long;
+- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
++ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm ||
++ current->pagefault_disabled))
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/score/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/score/mm/fault.c
++++ linux-stable/arch/score/mm/fault.c
+@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto bad_area_nosemaphore;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/sh/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/sh/mm/fault.c
++++ linux-stable/arch/sh/mm/fault.c
+@@ -445,7 +445,7 @@ asmlinkage void __kprobes do_page_fault(
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+Index: linux-stable/arch/sparc/mm/fault_32.c
+===================================================================
+--- linux-stable.orig/arch/sparc/mm/fault_32.c
++++ linux-stable/arch/sparc/mm/fault_32.c
+@@ -200,7 +200,7 @@ asmlinkage void do_sparc_fault(struct pt
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto no_context;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+Index: linux-stable/arch/sparc/mm/fault_64.c
+===================================================================
+--- linux-stable.orig/arch/sparc/mm/fault_64.c
++++ linux-stable/arch/sparc/mm/fault_64.c
+@@ -323,7 +323,7 @@ asmlinkage void __kprobes do_sparc64_fau
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm)
++ if (in_atomic() || !mm || current->pagefault_disabled)
+ goto intr_or_no_mm;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+Index: linux-stable/arch/tile/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/tile/mm/fault.c
++++ linux-stable/arch/tile/mm/fault.c
+@@ -359,7 +359,7 @@ static int handle_page_fault(struct pt_r
+ * If we're in an interrupt, have no user context or are running in an
+ * atomic region then we must not take the fault.
+ */
+- if (in_atomic() || !mm) {
++ if (in_atomic() || !mm || current->pagefault_disabled) {
+ vma = NULL; /* happy compiler */
+ goto bad_area_nosemaphore;
+ }
+Index: linux-stable/arch/um/kernel/trap.c
+===================================================================
+--- linux-stable.orig/arch/um/kernel/trap.c
++++ linux-stable/arch/um/kernel/trap.c
+@@ -39,7 +39,7 @@ int handle_page_fault(unsigned long addr
+ * If the fault was during atomic operation, don't take the fault, just
+ * fail.
+ */
+- if (in_atomic())
++ if (in_atomic() || current->pagefault_disabled)
+ goto out_nosemaphore;
+
+ retry:
+Index: linux-stable/arch/x86/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/x86/mm/fault.c
++++ linux-stable/arch/x86/mm/fault.c
+@@ -1094,7 +1094,7 @@ do_page_fault(struct pt_regs *regs, unsi
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (unlikely(in_atomic() || !mm)) {
++ if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+Index: linux-stable/arch/xtensa/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/xtensa/mm/fault.c
++++ linux-stable/arch/xtensa/mm/fault.c
+@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
+ /* If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm) {
++ if (in_atomic() || !mm || current->pagefault_disabled) {
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+ }
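The current->pagefault_disabled flag tested above is added to task_struct by mm-prepare-pf-disable-discoupling.patch later in the series. A minimal sketch of the mechanism, assuming that patch's field name (barrier placement and the interaction with the preempt count are simplified here): pagefault_disable()/pagefault_enable() maintain a per-task counter, which is what allows pagefault disabling to be decoupled from the preempt count as the changelog states.

    /* Sketch only; see mm-prepare-pf-disable-discoupling.patch for the
     * real implementation used by this series. */
    static inline void pagefault_disable(void)
    {
            current->pagefault_disabled++;
            /* order the store against any fault that may follow */
            barrier();
    }

    static inline void pagefault_enable(void)
    {
            /* order preceding accesses against re-enabling faults */
            barrier();
            current->pagefault_disabled--;
    }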
diff --git a/arm-allow-irq-threading.patch b/arm-allow-irq-threading.patch
new file mode 100644
index 0000000..57573a0
--- /dev/null
+++ b/arm-allow-irq-threading.patch
@@ -0,0 +1,24 @@
+Subject: arm: Allow forced irq threading
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Jul 2011 13:15:20 +0200
+
+All timer interrupts and the perf interrupt are marked NO_THREAD, so
+it's safe to allow forced interrupt threading.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/arch/arm/Kconfig
+===================================================================
+--- linux-stable.orig/arch/arm/Kconfig
++++ linux-stable/arch/arm/Kconfig
+@@ -40,6 +40,7 @@ config ARM
+ select GENERIC_IRQ_SHOW
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select HARDIRQS_SW_RESEND
++ select IRQ_FORCED_THREADING
+ select CPU_PM if (SUSPEND || CPU_IDLE)
+ select GENERIC_PCI_IOMAP
+ select HAVE_BPF_JIT
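Selecting IRQ_FORCED_THREADING only makes forced threading available; on a non-RT kernel it is still opted into with the threadirqs boot parameter, while genirq-force-threading.patch in this series turns it on unconditionally for PREEMPT_RT_FULL. Handlers that must stay in hard interrupt context opt out with IRQF_NO_THREAD, as arm-mark-pmu-interupt-no-thread.patch below does for the PMU. A hypothetical driver fragment (names invented for illustration):

    /* Hypothetical example, not taken from a patch in this series. */
    static irqreturn_t mydev_interrupt(int irq, void *dev_id)
    {
            /* runs in hard interrupt context even with threadirqs */
            return IRQ_HANDLED;
    }

    static int mydev_setup_irq(int irq, void *dev)
    {
            return request_irq(irq, mydev_interrupt, IRQF_NO_THREAD,
                               "mydev", dev);
    }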
diff --git a/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
new file mode 100644
index 0000000..ac3700b
--- /dev/null
+++ b/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
@@ -0,0 +1,58 @@
+From: Benedikt Spranger <b.spranger@linutronix.de>
+Date: Sat, 6 Mar 2010 17:47:10 +0100
+Subject: ARM: AT91: PIT: Remove irq handler when clock event is unused
+
+Setup and remove the interrupt handler in clock event mode selection.
+This avoids calling the (shared) interrupt handler when the device is
+not used.
+
+Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/arm/mach-at91/at91rm9200_time.c | 1 +
+ arch/arm/mach-at91/at91sam926x_time.c | 5 ++++-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+Index: linux-stable/arch/arm/mach-at91/at91rm9200_time.c
+===================================================================
+--- linux-stable.orig/arch/arm/mach-at91/at91rm9200_time.c
++++ linux-stable/arch/arm/mach-at91/at91rm9200_time.c
+@@ -130,6 +130,7 @@ clkevt32k_mode(enum clock_event_mode mod
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
++ remove_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ case CLOCK_EVT_MODE_RESUME:
+ irqmask = 0;
+ break;
+Index: linux-stable/arch/arm/mach-at91/at91sam926x_time.c
+===================================================================
+--- linux-stable.orig/arch/arm/mach-at91/at91sam926x_time.c
++++ linux-stable/arch/arm/mach-at91/at91sam926x_time.c
+@@ -67,7 +67,7 @@ static struct clocksource pit_clk = {
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+
+-
++static struct irqaction at91sam926x_pit_irq;
+ /*
+ * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
+ */
+@@ -76,6 +76,8 @@ pit_clkevt_mode(enum clock_event_mode mo
+ {
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
++ /* Set up irq handler */
++ setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
+ /* update clocksource counter */
+ pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR));
+ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN
+@@ -88,6 +90,7 @@ pit_clkevt_mode(enum clock_event_mode mo
+ case CLOCK_EVT_MODE_UNUSED:
+ /* disable irq, leaving the clocksource active */
+ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN);
++ remove_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
diff --git a/arm-at91-tclib-default-to-tclib-timer-for-rt.patch b/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
new file mode 100644
index 0000000..2b7a409
--- /dev/null
+++ b/arm-at91-tclib-default-to-tclib-timer-for-rt.patch
@@ -0,0 +1,34 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 1 May 2010 18:29:35 +0200
+Subject: ARM: at91: tclib: Default to tclib timer for RT
+
+RT is not too happy about the shared timer interrupt in AT91
+devices. Default to tclib timer for RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/misc/Kconfig | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: linux-stable/drivers/misc/Kconfig
+===================================================================
+--- linux-stable.orig/drivers/misc/Kconfig
++++ linux-stable/drivers/misc/Kconfig
+@@ -73,6 +73,7 @@ config AB8500_PWM
+ config ATMEL_TCLIB
+ bool "Atmel AT32/AT91 Timer/Counter Library"
+ depends on (AVR32 || ARCH_AT91)
++ default y if PREEMPT_RT_FULL
+ help
+ Select this if you want a library to allocate the Timer/Counter
+ blocks found on many Atmel processors. This facilitates using
+@@ -105,7 +106,7 @@ config ATMEL_TCB_CLKSRC_BLOCK
+ config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ bool "TC Block use 32 KiHz clock"
+ depends on ATMEL_TCB_CLKSRC
+- default y
++ default y if !PREEMPT_RT_FULL
+ help
+ Select this to use 32 KiHz base clock rate as TC block clock
+ source for clock events.
diff --git a/arm-convert-boot-lock-to-raw.patch b/arm-convert-boot-lock-to-raw.patch
new file mode 100644
index 0000000..5310942
--- /dev/null
+++ b/arm-convert-boot-lock-to-raw.patch
@@ -0,0 +1,248 @@
+Subject: preempt-rt: Convert arm boot_lock to raw
+From: Frank Rowand <frank.rowand@am.sony.com>
+Date: Mon, 19 Sep 2011 14:51:14 -0700
+
+
+The arm boot_lock is used by the secondary processor startup code. The locking
+task is the idle thread, which has idle->sched_class == &idle_sched_class.
+idle_sched_class->enqueue_task == NULL, so if the idle task blocks on the
+lock, the attempt to wake it when the lock becomes available will fail:
+
+try_to_wake_up()
+ ...
+ activate_task()
+ enqueue_task()
+ p->sched_class->enqueue_task(rq, p, flags)
+
+Fix by converting boot_lock to a raw spin lock.
+
+Signed-off-by: Frank Rowand <frank.rowand@am.sony.com>
+Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/mach-exynos/platsmp.c | 12 ++++++------
+ arch/arm/mach-msm/platsmp.c | 10 +++++-----
+ arch/arm/mach-omap2/omap-smp.c | 10 +++++-----
+ arch/arm/mach-ux500/platsmp.c | 10 +++++-----
+ arch/arm/plat-versatile/platsmp.c | 10 +++++-----
+ 5 files changed, 26 insertions(+), 26 deletions(-)
+
+Index: linux-stable/arch/arm/mach-exynos/platsmp.c
+===================================================================
+--- linux-stable.orig/arch/arm/mach-exynos/platsmp.c
++++ linux-stable/arch/arm/mach-exynos/platsmp.c
+@@ -62,7 +62,7 @@ static void __iomem *scu_base_addr(void)
+ return (void __iomem *)(S5P_VA_SCU);
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -82,8 +82,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -94,7 +94,7 @@ int __cpuinit boot_secondary(unsigned in
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -123,7 +123,7 @@ int __cpuinit boot_secondary(unsigned in
+
+ if (timeout == 0) {
+ printk(KERN_ERR "cpu1 power enable failed");
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ return -ETIMEDOUT;
+ }
+ }
+@@ -151,7 +151,7 @@ int __cpuinit boot_secondary(unsigned in
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+Index: linux-stable/arch/arm/mach-msm/platsmp.c
+===================================================================
+--- linux-stable.orig/arch/arm/mach-msm/platsmp.c
++++ linux-stable/arch/arm/mach-msm/platsmp.c
+@@ -40,7 +40,7 @@ extern void msm_secondary_startup(void);
+ */
+ volatile int pen_release = -1;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ static inline int get_core_count(void)
+ {
+@@ -70,8 +70,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ static __cpuinit void prepare_cold_cpu(unsigned int cpu)
+@@ -108,7 +108,7 @@ int __cpuinit boot_secondary(unsigned in
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -142,7 +142,7 @@ int __cpuinit boot_secondary(unsigned in
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+Index: linux-stable/arch/arm/mach-omap2/omap-smp.c
+===================================================================
+--- linux-stable.orig/arch/arm/mach-omap2/omap-smp.c
++++ linux-stable/arch/arm/mach-omap2/omap-smp.c
+@@ -42,7 +42,7 @@
+ /* SCU base address */
+ static void __iomem *scu_base;
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __iomem *omap4_get_scu_base(void)
+ {
+@@ -73,8 +73,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -87,7 +87,7 @@ int __cpuinit boot_secondary(unsigned in
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the AuxCoreBoot0 with boot state for secondary core.
+@@ -131,7 +131,7 @@ int __cpuinit boot_secondary(unsigned in
+ * Now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return 0;
+ }
+Index: linux-stable/arch/arm/mach-ux500/platsmp.c
+===================================================================
+--- linux-stable.orig/arch/arm/mach-ux500/platsmp.c
++++ linux-stable/arch/arm/mach-ux500/platsmp.c
+@@ -56,7 +56,7 @@ static void __iomem *scu_base_addr(void)
+ return NULL;
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -76,8 +76,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -88,7 +88,7 @@ int __cpuinit boot_secondary(unsigned in
+ * set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * The secondary processor is waiting to be released from
+@@ -109,7 +109,7 @@ int __cpuinit boot_secondary(unsigned in
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
+Index: linux-stable/arch/arm/plat-versatile/platsmp.c
+===================================================================
+--- linux-stable.orig/arch/arm/plat-versatile/platsmp.c
++++ linux-stable/arch/arm/plat-versatile/platsmp.c
+@@ -38,7 +38,7 @@ static void __cpuinit write_pen_release(
+ outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
+ }
+
+-static DEFINE_SPINLOCK(boot_lock);
++static DEFINE_RAW_SPINLOCK(boot_lock);
+
+ void __cpuinit platform_secondary_init(unsigned int cpu)
+ {
+@@ -58,8 +58,8 @@ void __cpuinit platform_secondary_init(u
+ /*
+ * Synchronise with the boot thread.
+ */
+- spin_lock(&boot_lock);
+- spin_unlock(&boot_lock);
++ raw_spin_lock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ }
+
+ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+@@ -70,7 +70,7 @@ int __cpuinit boot_secondary(unsigned in
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+- spin_lock(&boot_lock);
++ raw_spin_lock(&boot_lock);
+
+ /*
+ * This is really belt and braces; we hold unintended secondary
+@@ -100,7 +100,7 @@ int __cpuinit boot_secondary(unsigned in
+ * now the secondary core is starting up let it run its
+ * calibrations, then wait for it to finish
+ */
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+
+ return pen_release != -1 ? -ENOSYS : 0;
+ }
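The conversion works because, once the RT locking patches later in this series (rt-add-rt-locks.patch and related) are applied, a plain spinlock_t becomes a sleeping rtmutex-based lock on PREEMPT_RT_FULL, whereas raw_spinlock_t keeps the original busy-waiting semantics. A minimal sketch of the distinction, with invented lock names, assuming that substitution:

    /* Sketch: relative behaviour of the two lock types on PREEMPT_RT_FULL. */
    static DEFINE_RAW_SPINLOCK(hw_lock);   /* true spinlock: never sleeps */
    static DEFINE_SPINLOCK(data_lock);     /* rtmutex-backed: may sleep   */

    static void example_paths(void)
    {
            raw_spin_lock(&hw_lock);   /* fine for idle/secondary bring-up */
            /* ... poke hardware ... */
            raw_spin_unlock(&hw_lock);

            spin_lock(&data_lock);     /* may block on RT; the waker must be
                                        * able to enqueue us (see changelog) */
            /* ... */
            spin_unlock(&data_lock);
    }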
diff --git a/arm-disable-highmem-on-rt.patch b/arm-disable-highmem-on-rt.patch
new file mode 100644
index 0000000..2c88ad0
--- /dev/null
+++ b/arm-disable-highmem-on-rt.patch
@@ -0,0 +1,22 @@
+Subject: arm-disable-highmem-on-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Jul 2011 17:09:28 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/arch/arm/Kconfig
+===================================================================
+--- linux-stable.orig/arch/arm/Kconfig
++++ linux-stable/arch/arm/Kconfig
+@@ -1737,7 +1737,7 @@ config HAVE_ARCH_PFN_VALID
+
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on MMU
++ depends on MMU && !PREEMPT_RT_FULL
+ help
+ The address space of ARM processors is only 4 Gigabytes large
+ and it has to accommodate user address space, kernel address
diff --git a/arm-mark-pmu-interupt-no-thread.patch b/arm-mark-pmu-interupt-no-thread.patch
new file mode 100644
index 0000000..65a9900
--- /dev/null
+++ b/arm-mark-pmu-interupt-no-thread.patch
@@ -0,0 +1,25 @@
+Subject: arm: Mark pmu interrupt IRQF_NO_THREAD
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 16 Mar 2011 14:45:31 +0100
+
+PMU interrupt must not be threaded. Remove IRQF_DISABLED while at it
+as we run all handlers with interrupts disabled anyway.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/kernel/perf_event.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/arch/arm/kernel/perf_event.c
+===================================================================
+--- linux-stable.orig/arch/arm/kernel/perf_event.c
++++ linux-stable/arch/arm/kernel/perf_event.c
+@@ -430,7 +430,7 @@ armpmu_reserve_hardware(struct arm_pmu *
+ }
+
+ err = request_irq(irq, handle_irq,
+- IRQF_DISABLED | IRQF_NOBALANCING,
++ IRQF_NOBALANCING | IRQF_NO_THREAD,
+ "arm-pmu", armpmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
diff --git a/arm-omap-make-wakeupgen_lock-raw.patch b/arm-omap-make-wakeupgen_lock-raw.patch
new file mode 100644
index 0000000..7faf47b
--- /dev/null
+++ b/arm-omap-make-wakeupgen_lock-raw.patch
@@ -0,0 +1,64 @@
+Subject: arm-omap-make-wakeupgen_lock-raw.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 11 Apr 2012 11:26:38 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/mach-omap2/omap-wakeupgen.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+Index: linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c
+===================================================================
+--- linux-stable.orig/arch/arm/mach-omap2/omap-wakeupgen.c
++++ linux-stable/arch/arm/mach-omap2/omap-wakeupgen.c
+@@ -45,7 +45,7 @@
+
+ static void __iomem *wakeupgen_base;
+ static void __iomem *sar_base;
+-static DEFINE_SPINLOCK(wakeupgen_lock);
++static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
+ static unsigned int irq_target_cpu[MAX_IRQS];
+ static unsigned int irq_banks = MAX_NR_REG_BANKS;
+ static unsigned int max_irqs = MAX_IRQS;
+@@ -133,9 +133,9 @@ static void wakeupgen_mask(struct irq_da
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&wakeupgen_lock, flags);
++ raw_spin_lock_irqsave(&wakeupgen_lock, flags);
+ _wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
+- spin_unlock_irqrestore(&wakeupgen_lock, flags);
++ raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
+ }
+
+ /*
+@@ -145,9 +145,9 @@ static void wakeupgen_unmask(struct irq_
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&wakeupgen_lock, flags);
++ raw_spin_lock_irqsave(&wakeupgen_lock, flags);
+ _wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
+- spin_unlock_irqrestore(&wakeupgen_lock, flags);
++ raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -188,7 +188,7 @@ static void wakeupgen_irqmask_all(unsign
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&wakeupgen_lock, flags);
++ raw_spin_lock_irqsave(&wakeupgen_lock, flags);
+ if (set) {
+ _wakeupgen_save_masks(cpu);
+ _wakeupgen_set_all(cpu, WKG_MASK_ALL);
+@@ -196,7 +196,7 @@ static void wakeupgen_irqmask_all(unsign
+ _wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
+ _wakeupgen_restore_masks(cpu);
+ }
+- spin_unlock_irqrestore(&wakeupgen_lock, flags);
++ raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
+ }
+ #endif
+
diff --git a/ata-disable-interrupts-if-non-rt.patch b/ata-disable-interrupts-if-non-rt.patch
new file mode 100644
index 0000000..6436e2c
--- /dev/null
+++ b/ata-disable-interrupts-if-non-rt.patch
@@ -0,0 +1,66 @@
+From: Steven Rostedt <srostedt@redhat.com>
+Date: Fri, 3 Jul 2009 08:44:29 -0500
+Subject: ata: Do not disable interrupts in ide code for preempt-rt
+
+Use the local_irq_*_nort variants.
+
+Signed-off-by: Steven Rostedt <srostedt@redhat.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/ata/libata-sff.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+Index: linux-stable/drivers/ata/libata-sff.c
+===================================================================
+--- linux-stable.orig/drivers/ata/libata-sff.c
++++ linux-stable/drivers/ata/libata-sff.c
+@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str
+ unsigned long flags;
+ unsigned int consumed;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ return consumed;
+ }
+@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu
+ unsigned long flags;
+
+ /* FIXME: use a bounce buffer */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ buf = kmap_atomic(page);
+
+ /* do the actual data transfer */
+@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu
+ do_write);
+
+ kunmap_atomic(buf);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ } else {
+ buf = page_address(page);
+ ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
+@@ -864,7 +864,7 @@ next_sg:
+ unsigned long flags;
+
+ /* FIXME: use bounce buffer */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ buf = kmap_atomic(page);
+
+ /* do the actual data transfer */
+@@ -872,7 +872,7 @@ next_sg:
+ count, rw);
+
+ kunmap_atomic(buf);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ } else {
+ buf = page_address(page);
+ consumed = ap->ops->sff_data_xfer(dev, buf + offset,
diff --git a/block-shorten-interrupt-disabled-regions.patch b/block-shorten-interrupt-disabled-regions.patch
new file mode 100644
index 0000000..082a86d
--- /dev/null
+++ b/block-shorten-interrupt-disabled-regions.patch
@@ -0,0 +1,117 @@
+Subject: block: Shorten interrupt disabled regions
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 22 Jun 2011 19:47:02 +0200
+
+Moving the blk_sched_flush_plug() call out of the interrupt/preempt
+disabled region in the scheduler allows us to replace
+local_irq_save/restore(flags) by local_irq_disable/enable() in
+blk_flush_plug().
+
+Now instead of doing this we disable interrupts explicitly when we
+lock the request_queue and reenable them when we drop the lock. That
+allows interrupts to be handled when the plug list contains requests
+for more than one queue.
+
+Aside from that, this change makes the scope of the irq disabled region
+more obvious. The current code confused the hell out of me when
+looking at:
+
+ local_irq_save(flags);
+ spin_lock(q->queue_lock);
+ ...
+ queue_unplugged(q...);
+ scsi_request_fn();
+ spin_unlock(q->queue_lock);
+ spin_lock(shost->host_lock);
+ spin_unlock_irq(shost->host_lock);
+
+-------------------^^^ ????
+
+ spin_lock_irq(q->queue_lock);
+ spin_unlock(q->lock);
+ local_irq_restore(flags);
+
+Also add a comment to __blk_run_queue() documenting that
+q->request_fn() can drop q->queue_lock and reenable interrupts, but
+must return with q->queue_lock held and interrupts disabled.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ block/blk-core.c | 20 ++++++++------------
+ 1 file changed, 8 insertions(+), 12 deletions(-)
+
+Index: linux-stable/block/blk-core.c
+===================================================================
+--- linux-stable.orig/block/blk-core.c
++++ linux-stable/block/blk-core.c
+@@ -304,7 +304,11 @@ void __blk_run_queue(struct request_queu
+ {
+ if (unlikely(blk_queue_stopped(q)))
+ return;
+-
++ /*
++ * q->request_fn() can drop q->queue_lock and reenable
++ * interrupts, but must return with q->queue_lock held and
++ * interrupts disabled.
++ */
+ q->request_fn(q);
+ }
+ EXPORT_SYMBOL(__blk_run_queue);
+@@ -2902,11 +2906,11 @@ static void queue_unplugged(struct reque
+ * this lock).
+ */
+ if (from_schedule) {
+- spin_unlock(q->queue_lock);
++ spin_unlock_irq(q->queue_lock);
+ blk_run_queue_async(q);
+ } else {
+ __blk_run_queue(q);
+- spin_unlock(q->queue_lock);
++ spin_unlock_irq(q->queue_lock);
+ }
+
+ }
+@@ -2956,7 +2960,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ {
+ struct request_queue *q;
+- unsigned long flags;
+ struct request *rq;
+ LIST_HEAD(list);
+ unsigned int depth;
+@@ -2977,11 +2980,6 @@ void blk_flush_plug_list(struct blk_plug
+ q = NULL;
+ depth = 0;
+
+- /*
+- * Save and disable interrupts here, to avoid doing it for every
+- * queue lock we have to take.
+- */
+- local_irq_save(flags);
+ while (!list_empty(&list)) {
+ rq = list_entry_rq(list.next);
+ list_del_init(&rq->queuelist);
+@@ -2994,7 +2992,7 @@ void blk_flush_plug_list(struct blk_plug
+ queue_unplugged(q, depth, from_schedule);
+ q = rq->q;
+ depth = 0;
+- spin_lock(q->queue_lock);
++ spin_lock_irq(q->queue_lock);
+ }
+
+ /*
+@@ -3021,8 +3019,6 @@ void blk_flush_plug_list(struct blk_plug
+ */
+ if (q)
+ queue_unplugged(q, depth, from_schedule);
+-
+- local_irq_restore(flags);
+ }
+
+ void blk_finish_plug(struct blk_plug *plug)
diff --git a/bug-rt-dependend-variants.patch b/bug-rt-dependend-variants.patch
new file mode 100644
index 0000000..35a80e1
--- /dev/null
+++ b/bug-rt-dependend-variants.patch
@@ -0,0 +1,36 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:58 -0500
+Subject: bug: BUG_ON/WARN_ON variants dependent on RT/!RT
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/asm-generic/bug.h | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+Index: linux-stable/include/asm-generic/bug.h
+===================================================================
+--- linux-stable.orig/include/asm-generic/bug.h
++++ linux-stable/include/asm-generic/bug.h
+@@ -202,6 +202,20 @@ extern void warn_slowpath_null(const cha
+ # define WARN_ON_SMP(x) ({0;})
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define BUG_ON_RT(c) BUG_ON(c)
++# define BUG_ON_NONRT(c) do { } while (0)
++# define WARN_ON_RT(condition) WARN_ON(condition)
++# define WARN_ON_NONRT(condition) do { } while (0)
++# define WARN_ON_ONCE_NONRT(condition) do { } while (0)
++#else
++# define BUG_ON_RT(c) do { } while (0)
++# define BUG_ON_NONRT(c) BUG_ON(c)
++# define WARN_ON_RT(condition) do { } while (0)
++# define WARN_ON_NONRT(condition) WARN_ON(condition)
++# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition)
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
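A hypothetical use of the new macros (function and struct names invented for illustration): an assertion that interrupts are disabled is correct on mainline, where the caller holds a spinlock with irqs off, but would trigger falsely on RT, where the same lock is a sleeping lock taken with interrupts enabled.

    /* Hypothetical example, not taken from a patch in this series. */
    struct stats { unsigned long count; };

    static void stats_update(struct stats *st)
    {
            WARN_ON_NONRT(!irqs_disabled());   /* only meaningful on !RT */
            st->count++;
    }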
diff --git a/clocksource-tclib-allow-higher-clockrates.patch b/clocksource-tclib-allow-higher-clockrates.patch
new file mode 100644
index 0000000..98561a4
--- /dev/null
+++ b/clocksource-tclib-allow-higher-clockrates.patch
@@ -0,0 +1,163 @@
+From: Benedikt Spranger <b.spranger@linutronix.de>
+Date: Mon, 8 Mar 2010 18:57:04 +0100
+Subject: clocksource: TCLIB: Allow higher clock rates for clock events
+
+By default the TCLIB uses the 32 KiHz base clock rate for clock events.
+Add a compile-time selection to allow higher clock resolution.
+
+Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/clocksource/tcb_clksrc.c | 44 +++++++++++++++++++++++----------------
+ drivers/misc/Kconfig | 11 +++++++--
+ 2 files changed, 35 insertions(+), 20 deletions(-)
+
+Index: linux-stable/drivers/clocksource/tcb_clksrc.c
+===================================================================
+--- linux-stable.orig/drivers/clocksource/tcb_clksrc.c
++++ linux-stable/drivers/clocksource/tcb_clksrc.c
+@@ -23,8 +23,7 @@
+ * this 32 bit free-running counter. the second channel is not used.
+ *
+ * - The third channel may be used to provide a 16-bit clockevent
+- * source, used in either periodic or oneshot mode. This runs
+- * at 32 KiHZ, and can handle delays of up to two seconds.
++ * source, used in either periodic or oneshot mode.
+ *
+ * A boot clocksource and clockevent source are also currently needed,
+ * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
+@@ -74,6 +73,7 @@ static struct clocksource clksrc = {
+ struct tc_clkevt_device {
+ struct clock_event_device clkevt;
+ struct clk *clk;
++ u32 freq;
+ void __iomem *regs;
+ };
+
+@@ -82,13 +82,6 @@ static struct tc_clkevt_device *to_tc_cl
+ return container_of(clkevt, struct tc_clkevt_device, clkevt);
+ }
+
+-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+- * because using one of the divided clocks would usually mean the
+- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+- *
+- * A divided clock could be good for high resolution timers, since
+- * 30.5 usec resolution can seem "low".
+- */
+ static u32 timer_clock;
+
+ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
+@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mod
+ case CLOCK_EVT_MODE_PERIODIC:
+ clk_enable(tcd->clk);
+
+- /* slow clock, count up to RC, then irq and restart */
++ /* count up to RC, then irq and restart */
+ __raw_writel(timer_clock
+ | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
++ __raw_writel((tcd->freq + HZ/2)/HZ,
++ tcaddr + ATMEL_TC_REG(2, RC));
+
+ /* Enable clock and interrupts on RC compare */
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mod
+ case CLOCK_EVT_MODE_ONESHOT:
+ clk_enable(tcd->clk);
+
+- /* slow clock, count up to RC, then irq and stop */
++ /* count up to RC, then irq and stop */
+ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
+ | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+@@ -158,8 +152,12 @@ static struct tc_clkevt_device clkevt =
+ .features = CLOCK_EVT_FEAT_PERIODIC
+ | CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 32,
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ /* Should be lower than at91rm9200's system timer */
+ .rating = 125,
++#else
++ .rating = 200,
++#endif
+ .set_next_event = tc_next_event,
+ .set_mode = tc_mode,
+ },
+@@ -185,8 +183,9 @@ static struct irqaction tc_irqaction = {
+ .handler = ch2_irq,
+ };
+
+-static void __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
++static void __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
+ {
++ unsigned divisor = atmel_tc_divisors[divisor_idx];
+ struct clk *t2_clk = tc->clk[2];
+ int irq = tc->irq[2];
+
+@@ -194,11 +193,17 @@ static void __init setup_clkevents(struc
+ clkevt.clk = t2_clk;
+ tc_irqaction.dev_id = &clkevt;
+
+- timer_clock = clk32k_divisor_idx;
++ timer_clock = divisor_idx;
+
+- clkevt.clkevt.mult = div_sc(32768, NSEC_PER_SEC, clkevt.clkevt.shift);
+- clkevt.clkevt.max_delta_ns
+- = clockevent_delta2ns(0xffff, &clkevt.clkevt);
++ if (!divisor)
++ clkevt.freq = 32768;
++ else
++ clkevt.freq = clk_get_rate(t2_clk)/divisor;
++
++ clkevt.clkevt.mult = div_sc(clkevt.freq, NSEC_PER_SEC,
++ clkevt.clkevt.shift);
++ clkevt.clkevt.max_delta_ns =
++ clockevent_delta2ns(0xffff, &clkevt.clkevt);
+ clkevt.clkevt.min_delta_ns = clockevent_delta2ns(1, &clkevt.clkevt) + 1;
+ clkevt.clkevt.cpumask = cpumask_of(0);
+
+@@ -327,8 +332,11 @@ static int __init tcb_clksrc_init(void)
+ clocksource_register_hz(&clksrc, divided_rate);
+
+ /* channel 2: periodic and oneshot timer support */
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ setup_clkevents(tc, clk32k_divisor_idx);
+-
++#else
++ setup_clkevents(tc, best_divisor_idx);
++#endif
+ return 0;
+ }
+ arch_initcall(tcb_clksrc_init);
+Index: linux-stable/drivers/misc/Kconfig
+===================================================================
+--- linux-stable.orig/drivers/misc/Kconfig
++++ linux-stable/drivers/misc/Kconfig
+@@ -88,8 +88,7 @@ config ATMEL_TCB_CLKSRC
+ are combined to make a single 32-bit timer.
+
+ When GENERIC_CLOCKEVENTS is defined, the third timer channel
+- may be used as a clock event device supporting oneshot mode
+- (delays of up to two seconds) based on the 32 KiHz clock.
++ may be used as a clock event device supporting oneshot mode.
+
+ config ATMEL_TCB_CLKSRC_BLOCK
+ int
+@@ -103,6 +102,14 @@ config ATMEL_TCB_CLKSRC_BLOCK
+ TC can be used for other purposes, such as PWM generation and
+ interval timing.
+
++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
++ bool "TC Block use 32 KiHz clock"
++ depends on ATMEL_TCB_CLKSRC
++ default y
++ help
++ Select this to use 32 KiHz base clock rate as TC block clock
++ source for clock events.
++
+ config IBM_ASM
+ tristate "Device driver for IBM RSA service processor"
+ depends on X86 && PCI && INPUT && EXPERIMENTAL
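
A quick standalone sketch of the rate arithmetic used in the PERIODIC case above; the helper name is made up for illustration, the driver simply writes the same expression into the RC register:

    #include <stdio.h>
    #include <stdint.h>

    #define HZ 100    /* assumed tick rate, for illustration only */

    /* Round-to-nearest counts per tick: (freq + HZ/2) / HZ, the value the
     * patch programs into ATMEL_TC_REG(2, RC). */
    static uint32_t tc_rc_for_tick(uint32_t freq)
    {
            return (freq + HZ / 2) / HZ;
    }

    int main(void)
    {
            printf("32 KiHz slow clock : RC = %u\n", tc_rc_for_tick(32768));   /* 328 */
            printf("5 MHz divided clock: RC = %u\n", tc_rc_for_tick(5000000)); /* 50000 */
            return 0;
    }
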
diff --git a/cond-resched-lock-rt-tweak.patch b/cond-resched-lock-rt-tweak.patch
new file mode 100644
index 0000000..e99b6ba
--- /dev/null
+++ b/cond-resched-lock-rt-tweak.patch
@@ -0,0 +1,22 @@
+Subject: cond-resched-lock-rt-tweak.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 22:51:33 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -2689,7 +2689,7 @@ extern int _cond_resched(void);
+
+ extern int __cond_resched_lock(spinlock_t *lock);
+
+-#ifdef CONFIG_PREEMPT_COUNT
++#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
+ #define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
+ #else
+ #define PREEMPT_LOCK_OFFSET 0
diff --git a/cond-resched-softirq-rt.patch b/cond-resched-softirq-rt.patch
new file mode 100644
index 0000000..f3905a2
--- /dev/null
+++ b/cond-resched-softirq-rt.patch
@@ -0,0 +1,51 @@
+Subject: cond-resched-softirq-fix.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 14 Jul 2011 09:56:44 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 4 ++++
+ kernel/sched/core.c | 2 ++
+ 2 files changed, 6 insertions(+)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -2700,12 +2700,16 @@ extern int __cond_resched_lock(spinlock_
+ __cond_resched_lock(lock); \
+ })
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern int __cond_resched_softirq(void);
+
+ #define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
+ })
++#else
++# define cond_resched_softirq() cond_resched()
++#endif
+
+ /*
+ * Does a critical section need to be broken due to another
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -4832,6 +4832,7 @@ int __cond_resched_lock(spinlock_t *lock
+ }
+ EXPORT_SYMBOL(__cond_resched_lock);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int __sched __cond_resched_softirq(void)
+ {
+ BUG_ON(!in_softirq());
+@@ -4845,6 +4846,7 @@ int __sched __cond_resched_softirq(void)
+ return 0;
+ }
+ EXPORT_SYMBOL(__cond_resched_softirq);
++#endif
+
+ /**
+ * yield - yield the current processor to other threads.
diff --git a/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
new file mode 100644
index 0000000..297791e
--- /dev/null
+++ b/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -0,0 +1,123 @@
+Subject: cpu: Make hotplug.lock a "sleeping" spinlock on RT
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Fri, 02 Mar 2012 10:36:57 -0500
+
+Tasks can block on hotplug.lock in pin_current_cpu(), but their state
+might be != RUNNING. So the mutex wakeup will set the state
+unconditionally to RUNNING. That might cause spurious unexpected
+wakeups. We could provide a state-preserving mutex_lock() function,
+but this is semantically backwards. So instead we convert
+hotplug.lock to a spinlock for RT, which already has the
+state-preserving semantics.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Carsten Emde <C.Emde@osadl.org>
+Cc: John Kacur <jkacur@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Clark Williams <clark.williams@gmail.com>
+Cc: stable-rt@vger.kernel.org
+Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/cpu.c | 35 ++++++++++++++++++++++++++---------
+ 1 file changed, 26 insertions(+), 9 deletions(-)
+
+Index: linux-stable/kernel/cpu.c
+===================================================================
+--- linux-stable.orig/kernel/cpu.c
++++ linux-stable/kernel/cpu.c
+@@ -51,7 +51,12 @@ static int cpu_hotplug_disabled;
+
+ static struct {
+ struct task_struct *active_writer;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /* Makes the lock keep the task's state */
++ spinlock_t lock;
++#else
+ struct mutex lock; /* Synchronizes accesses to refcount, */
++#endif
+ /*
+ * Also blocks the new readers during
+ * an ongoing cpu hotplug operation.
+@@ -59,10 +64,22 @@ static struct {
+ int refcount;
+ } cpu_hotplug = {
+ .active_writer = NULL,
++#ifdef CONFIG_PREEMPT_RT_FULL
++ .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
++#else
+ .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
++#endif
+ .refcount = 0,
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
++# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
++#else
++# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
++# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
++#endif
++
+ struct hotplug_pcp {
+ struct task_struct *unplug;
+ int refcount;
+@@ -92,8 +109,8 @@ retry:
+ return;
+ }
+ preempt_enable();
+- mutex_lock(&cpu_hotplug.lock);
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_lock();
++ hotplug_unlock();
+ preempt_disable();
+ goto retry;
+ }
+@@ -166,9 +183,9 @@ void get_online_cpus(void)
+ might_sleep();
+ if (cpu_hotplug.active_writer == current)
+ return;
+- mutex_lock(&cpu_hotplug.lock);
++ hotplug_lock();
+ cpu_hotplug.refcount++;
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+
+ }
+ EXPORT_SYMBOL_GPL(get_online_cpus);
+@@ -177,10 +194,10 @@ void put_online_cpus(void)
+ {
+ if (cpu_hotplug.active_writer == current)
+ return;
+- mutex_lock(&cpu_hotplug.lock);
++ hotplug_lock();
+ if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+ wake_up_process(cpu_hotplug.active_writer);
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+
+ }
+ EXPORT_SYMBOL_GPL(put_online_cpus);
+@@ -212,11 +229,11 @@ static void cpu_hotplug_begin(void)
+ cpu_hotplug.active_writer = current;
+
+ for (;;) {
+- mutex_lock(&cpu_hotplug.lock);
++ hotplug_lock();
+ if (likely(!cpu_hotplug.refcount))
+ break;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+ schedule();
+ }
+ }
+@@ -224,7 +241,7 @@ static void cpu_hotplug_begin(void)
+ static void cpu_hotplug_done(void)
+ {
+ cpu_hotplug.active_writer = NULL;
+- mutex_unlock(&cpu_hotplug.lock);
++ hotplug_unlock();
+ }
+
+ #else /* #if CONFIG_HOTPLUG_CPU */
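
A kernel-style schematic (not a standalone program) of the wakeup problem the changelog describes: a task that has already set a non-running state and then blocks on the hotplug mutex gets that state overwritten.

    /* Task is preparing to sleep ... */
    set_current_state(TASK_UNINTERRUPTIBLE);

    /* ... but before it reaches schedule() it ends up in pin_current_cpu()
     * and blocks on hotplug.lock. The mutex wakeup path does an
     * unconditional wake_up_process(), which forces the state back to
     * TASK_RUNNING ... */
    mutex_lock(&cpu_hotplug.lock);
    mutex_unlock(&cpu_hotplug.lock);

    /* ... so this schedule() can now return immediately instead of
     * sleeping. The rt_spin_lock() used on RT preserves the caller's
     * state and avoids the spurious wakeup. */
    schedule();
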
diff --git a/cpu-rt-rework-cpu-down.patch b/cpu-rt-rework-cpu-down.patch
new file mode 100644
index 0000000..de4f8a5
--- /dev/null
+++ b/cpu-rt-rework-cpu-down.patch
@@ -0,0 +1,550 @@
+From: Steven Rostedt <srostedt@redhat.com>
+Date: Mon, 16 Jul 2012 08:07:43 +0000
+Subject: cpu/rt: Rework cpu down for PREEMPT_RT
+
+Bringing a CPU down is a pain with the PREEMPT_RT kernel because
+tasks can be preempted in many more places than in non-RT. In
+order to handle per_cpu variables, tasks may be pinned to a CPU
+for a while, and even sleep. But these tasks need to be off the CPU
+if that CPU is going down.
+
+Several synchronization methods have been tried, but when stressed
+they failed. This is a new approach.
+
+A sync_tsk thread is still created and tasks may still block on a
+lock when the CPU is going down, but how that works is a bit different.
+When cpu_down() starts, it will create the sync_tsk and wait on it
+to report that the tasks currently pinned on the CPU are no longer
+pinned. But new tasks that are about to be pinned will still be allowed
+to do so at this time.
+
+Then the notifiers are called. Several notifiers will bring down tasks
+that will enter these locations. Some of these tasks will take locks
+of other tasks that are on the CPU. If we don't let those other tasks
+continue, but make them block until CPU down is done, the tasks that
+the notifiers are waiting on will never complete as they are waiting
+for the locks held by the tasks that are blocked.
+
+Thus we still let the task pin the CPU until the notifiers are done.
+After the notifiers run, we then make new tasks entering the pinned
+CPU sections grab a mutex and wait. This mutex is now a per CPU mutex
+in the hotplug_pcp descriptor.
+
+To help things along, a new function in the scheduler code is created
+called migrate_me(). This function will try to migrate the current task
+off the CPU that is going down, if possible. When the sync_tsk is created,
+all tasks will then try to migrate off the CPU going down. There are
+several cases where this won't work, but it helps in most cases.
+
+After the notifiers are called, if a task can't migrate off but enters
+the pinned CPU sections, it will be forced to wait on the hotplug_pcp mutex
+until the CPU down is complete. Then the scheduler will force the migration
+anyway.
+
+Also, I found that THREAD_BOUND tasks also need to be accounted for in the
+pinned CPU count, and migrate_disable() no longer treats them specially.
+This helps fix issues with ksoftirqd and workqueues that unbind on CPU down.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/sched.h | 7 +
+ kernel/cpu.c | 240 +++++++++++++++++++++++++++++++++++++++++---------
+ kernel/sched/core.c | 82 ++++++++++++++++-
+ 3 files changed, 285 insertions(+), 44 deletions(-)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1952,6 +1952,10 @@ extern void do_set_cpus_allowed(struct t
+
+ extern int set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask);
++int migrate_me(void);
++void tell_sched_cpu_down_begin(int cpu);
++void tell_sched_cpu_down_done(int cpu);
++
+ #else
+ static inline void do_set_cpus_allowed(struct task_struct *p,
+ const struct cpumask *new_mask)
+@@ -1964,6 +1968,9 @@ static inline int set_cpus_allowed_ptr(s
+ return -EINVAL;
+ return 0;
+ }
++static inline int migrate_me(void) { return 0; }
++static inline void tell_sched_cpu_down_begin(int cpu) { }
++static inline void tell_sched_cpu_down_done(int cpu) { }
+ #endif
+
+ #ifdef CONFIG_NO_HZ
+Index: linux-stable/kernel/cpu.c
+===================================================================
+--- linux-stable.orig/kernel/cpu.c
++++ linux-stable/kernel/cpu.c
+@@ -51,12 +51,7 @@ static int cpu_hotplug_disabled;
+
+ static struct {
+ struct task_struct *active_writer;
+-#ifdef CONFIG_PREEMPT_RT_FULL
+- /* Makes the lock keep the task's state */
+- spinlock_t lock;
+-#else
+ struct mutex lock; /* Synchronizes accesses to refcount, */
+-#endif
+ /*
+ * Also blocks the new readers during
+ * an ongoing cpu hotplug operation.
+@@ -64,28 +59,46 @@ static struct {
+ int refcount;
+ } cpu_hotplug = {
+ .active_writer = NULL,
+-#ifdef CONFIG_PREEMPT_RT_FULL
+- .lock = __SPIN_LOCK_UNLOCKED(cpu_hotplug.lock),
+-#else
+ .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+-#endif
+ .refcount = 0,
+ };
+
+-#ifdef CONFIG_PREEMPT_RT_FULL
+-# define hotplug_lock() rt_spin_lock(&cpu_hotplug.lock)
+-# define hotplug_unlock() rt_spin_unlock(&cpu_hotplug.lock)
+-#else
+-# define hotplug_lock() mutex_lock(&cpu_hotplug.lock)
+-# define hotplug_unlock() mutex_unlock(&cpu_hotplug.lock)
+-#endif
+-
++/**
++ * hotplug_pcp - per cpu hotplug descriptor
++ * @unplug: set when pin_current_cpu() needs to sync tasks
++ * @sync_tsk: the task that waits for tasks to finish pinned sections
++ * @refcount: counter of tasks in pinned sections
++ * @grab_lock: set when the tasks entering pinned sections should wait
++ * @synced: notifier for @sync_tsk to tell cpu_down it's finished
++ * @mutex: the mutex to make tasks wait (used when @grab_lock is true)
++ * @mutex_init: zero if the mutex hasn't been initialized yet.
++ *
++ * Although @unplug and @sync_tsk may point to the same task, the @unplug
++ * is used as a flag and still exists after @sync_tsk has exited and
++ * @sync_tsk set to NULL.
++ */
+ struct hotplug_pcp {
+ struct task_struct *unplug;
++ struct task_struct *sync_tsk;
+ int refcount;
++ int grab_lock;
+ struct completion synced;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ spinlock_t lock;
++#else
++ struct mutex mutex;
++#endif
++ int mutex_init;
+ };
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
++# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
++#else
++# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
++# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
++#endif
++
+ static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+
+ /**
+@@ -99,18 +112,40 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+ void pin_current_cpu(void)
+ {
+ struct hotplug_pcp *hp;
++ int force = 0;
+
+ retry:
+ hp = &__get_cpu_var(hotplug_pcp);
+
+- if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
++ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
+ hp->unplug == current || (current->flags & PF_STOMPER)) {
+ hp->refcount++;
+ return;
+ }
+- preempt_enable();
+- hotplug_lock();
+- hotplug_unlock();
++
++ if (hp->grab_lock) {
++ preempt_enable();
++ hotplug_lock(hp);
++ hotplug_unlock(hp);
++ } else {
++ preempt_enable();
++ /*
++ * Try to push this task off of this CPU.
++ */
++ if (!migrate_me()) {
++ preempt_disable();
++ hp = &__get_cpu_var(hotplug_pcp);
++ if (!hp->grab_lock) {
++ /*
++ * Just let it continue it's already pinned
++ * or about to sleep.
++ */
++ force = 1;
++ goto retry;
++ }
++ preempt_enable();
++ }
++ }
+ preempt_disable();
+ goto retry;
+ }
+@@ -132,26 +167,84 @@ void unpin_current_cpu(void)
+ wake_up_process(hp->unplug);
+ }
+
+-/*
+- * FIXME: Is this really correct under all circumstances ?
+- */
++static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
++{
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (hp->refcount) {
++ schedule_preempt_disabled();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++}
++
+ static int sync_unplug_thread(void *data)
+ {
+ struct hotplug_pcp *hp = data;
+
+ preempt_disable();
+ hp->unplug = current;
++ wait_for_pinned_cpus(hp);
++
++ /*
++ * This thread will synchronize the cpu_down() with threads
++ * that have pinned the CPU. When the pinned CPU count reaches
++ * zero, we inform the cpu_down code to continue to the next step.
++ */
+ set_current_state(TASK_UNINTERRUPTIBLE);
+- while (hp->refcount) {
+- schedule_preempt_disabled();
++ preempt_enable();
++ complete(&hp->synced);
++
++ /*
++ * If all succeeds, the next step will need tasks to wait till
++ * the CPU is offline before continuing. To do this, the grab_lock
++ * is set and tasks going into pin_current_cpu() will block on the
++ * mutex. But we still need to wait for those that are already in
++ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop()
++ * will kick this thread out.
++ */
++ while (!hp->grab_lock && !kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++
++ /* Make sure grab_lock is seen before we see a stale completion */
++ smp_mb();
++
++ /*
++ * Now just before cpu_down() enters stop machine, we need to make
++ * sure all tasks that are in pinned CPU sections are out, and new
++ * tasks will now grab the lock, keeping them from entering pinned
++ * CPU sections.
++ */
++ if (!kthread_should_stop()) {
++ preempt_disable();
++ wait_for_pinned_cpus(hp);
++ preempt_enable();
++ complete(&hp->synced);
++ }
++
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ }
+ set_current_state(TASK_RUNNING);
+- preempt_enable();
+- complete(&hp->synced);
++
++ /*
++ * Force this thread off this CPU as it's going down and
++ * we don't want any more work on this CPU.
++ */
++ current->flags &= ~PF_THREAD_BOUND;
++ do_set_cpus_allowed(current, cpu_present_mask);
++ migrate_me();
+ return 0;
+ }
+
++static void __cpu_unplug_sync(struct hotplug_pcp *hp)
++{
++ wake_up_process(hp->sync_tsk);
++ wait_for_completion(&hp->synced);
++}
++
+ /*
+ * Start the sync_unplug_thread on the target cpu and wait for it to
+ * complete.
+@@ -159,23 +252,83 @@ static int sync_unplug_thread(void *data
+ static int cpu_unplug_begin(unsigned int cpu)
+ {
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+- struct task_struct *tsk;
++ int err;
++
++ /* Protected by cpu_hotplug.lock */
++ if (!hp->mutex_init) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ spin_lock_init(&hp->lock);
++#else
++ mutex_init(&hp->mutex);
++#endif
++ hp->mutex_init = 1;
++ }
++
++ /* Inform the scheduler to migrate tasks off this CPU */
++ tell_sched_cpu_down_begin(cpu);
+
+ init_completion(&hp->synced);
+- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+- if (IS_ERR(tsk))
+- return (PTR_ERR(tsk));
+- kthread_bind(tsk, cpu);
+- wake_up_process(tsk);
+- wait_for_completion(&hp->synced);
++
++ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
++ if (IS_ERR(hp->sync_tsk)) {
++ err = PTR_ERR(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++ return err;
++ }
++ kthread_bind(hp->sync_tsk, cpu);
++
++ /*
++ * Wait for tasks to get out of the pinned sections,
++ * it's still OK if new tasks enter. Some CPU notifiers will
++ * wait for tasks that are going to enter these sections and
++ * we must not have them block.
++ */
++ __cpu_unplug_sync(hp);
++
+ return 0;
+ }
+
++static void cpu_unplug_sync(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ init_completion(&hp->synced);
++	/* The completion needs to be initialized before setting grab_lock */
++ smp_wmb();
++
++ /* Grab the mutex before setting grab_lock */
++ hotplug_lock(hp);
++ hp->grab_lock = 1;
++
++ /*
++ * The CPU notifiers have been completed.
++ * Wait for tasks to get out of pinned CPU sections and have new
++ * tasks block until the CPU is completely down.
++ */
++ __cpu_unplug_sync(hp);
++
++ /* All done with the sync thread */
++ kthread_stop(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++}
++
+ static void cpu_unplug_done(unsigned int cpu)
+ {
+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+ hp->unplug = NULL;
++ /* Let all tasks know cpu unplug is finished before cleaning up */
++ smp_wmb();
++
++ if (hp->sync_tsk)
++ kthread_stop(hp->sync_tsk);
++
++ if (hp->grab_lock) {
++ hotplug_unlock(hp);
++ /* protected by cpu_hotplug.lock */
++ hp->grab_lock = 0;
++ }
++ tell_sched_cpu_down_done(cpu);
+ }
+
+ void get_online_cpus(void)
+@@ -183,9 +336,9 @@ void get_online_cpus(void)
+ might_sleep();
+ if (cpu_hotplug.active_writer == current)
+ return;
+- hotplug_lock();
++ mutex_lock(&cpu_hotplug.lock);
+ cpu_hotplug.refcount++;
+- hotplug_unlock();
++ mutex_unlock(&cpu_hotplug.lock);
+
+ }
+ EXPORT_SYMBOL_GPL(get_online_cpus);
+@@ -194,10 +347,10 @@ void put_online_cpus(void)
+ {
+ if (cpu_hotplug.active_writer == current)
+ return;
+- hotplug_lock();
++ mutex_lock(&cpu_hotplug.lock);
+ if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+ wake_up_process(cpu_hotplug.active_writer);
+- hotplug_unlock();
++ mutex_unlock(&cpu_hotplug.lock);
+
+ }
+ EXPORT_SYMBOL_GPL(put_online_cpus);
+@@ -229,11 +382,11 @@ static void cpu_hotplug_begin(void)
+ cpu_hotplug.active_writer = current;
+
+ for (;;) {
+- hotplug_lock();
++ mutex_lock(&cpu_hotplug.lock);
+ if (likely(!cpu_hotplug.refcount))
+ break;
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+- hotplug_unlock();
++ mutex_unlock(&cpu_hotplug.lock);
+ schedule();
+ }
+ }
+@@ -241,7 +394,7 @@ static void cpu_hotplug_begin(void)
+ static void cpu_hotplug_done(void)
+ {
+ cpu_hotplug.active_writer = NULL;
+- hotplug_unlock();
++ mutex_unlock(&cpu_hotplug.lock);
+ }
+
+ #else /* #if CONFIG_HOTPLUG_CPU */
+@@ -416,6 +569,9 @@ static int __ref _cpu_down(unsigned int
+ goto out_release;
+ }
+
++ /* Notifiers are done. Don't let any more tasks pin this CPU. */
++ cpu_unplug_sync(cpu);
++
+ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+ if (err) {
+ /* CPU didn't die: tell everyone. Can't complain. */
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -3418,7 +3418,7 @@ void migrate_disable(void)
+ {
+ struct task_struct *p = current;
+
+- if (in_atomic() || p->flags & PF_THREAD_BOUND) {
++ if (in_atomic()) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+ #endif
+@@ -3449,7 +3449,7 @@ void migrate_enable(void)
+ unsigned long flags;
+ struct rq *rq;
+
+- if (in_atomic() || p->flags & PF_THREAD_BOUND) {
++ if (in_atomic()) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+ #endif
+@@ -5341,6 +5341,84 @@ void do_set_cpus_allowed(struct task_str
+ cpumask_copy(&p->cpus_allowed, new_mask);
+ }
+
++static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
++static DEFINE_MUTEX(sched_down_mutex);
++static cpumask_t sched_down_cpumask;
++
++void tell_sched_cpu_down_begin(int cpu)
++{
++ mutex_lock(&sched_down_mutex);
++ cpumask_set_cpu(cpu, &sched_down_cpumask);
++ mutex_unlock(&sched_down_mutex);
++}
++
++void tell_sched_cpu_down_done(int cpu)
++{
++ mutex_lock(&sched_down_mutex);
++ cpumask_clear_cpu(cpu, &sched_down_cpumask);
++ mutex_unlock(&sched_down_mutex);
++}
++
++/**
++ * migrate_me - try to move the current task off this cpu
++ *
++ * Used by the pin_current_cpu() code to try to get tasks
++ * to move off the current CPU as it is going down.
++ * It will only move the task if the task isn't pinned to
++ * the CPU (with migrate_disable, affinity or THREAD_BOUND)
++ * and the task has to be in a RUNNING state. Otherwise the
++ * movement of the task will wake it up (change its state
++ * to running) when the task did not expect it.
++ *
++ * Returns 1 if it succeeded in moving the current task
++ * 0 otherwise.
++ */
++int migrate_me(void)
++{
++ struct task_struct *p = current;
++ struct migration_arg arg;
++ struct cpumask *cpumask;
++ struct cpumask *mask;
++ unsigned long flags;
++ unsigned int dest_cpu;
++ struct rq *rq;
++
++ /*
++	 * We cannot migrate tasks bound to a CPU or tasks not
++ * running. The movement of the task will wake it up.
++ */
++ if (p->flags & PF_THREAD_BOUND || p->state)
++ return 0;
++
++ mutex_lock(&sched_down_mutex);
++ rq = task_rq_lock(p, &flags);
++
++ cpumask = &__get_cpu_var(sched_cpumasks);
++ mask = &p->cpus_allowed;
++
++ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
++
++ if (!cpumask_weight(cpumask)) {
++ /* It's only on this CPU? */
++ task_rq_unlock(rq, p, &flags);
++ mutex_unlock(&sched_down_mutex);
++ return 0;
++ }
++
++ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask);
++
++ arg.task = p;
++ arg.dest_cpu = dest_cpu;
++
++ task_rq_unlock(rq, p, &flags);
++
++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
++ tlb_migrate_finish(p->mm);
++ mutex_unlock(&sched_down_mutex);
++
++ return 1;
++}
++
+ /*
+ * This is how migration works:
+ *
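
The heart of migrate_me() is the mask arithmetic: candidate CPUs are the task's affinity minus the CPUs being taken down. A standalone sketch of that arithmetic with plain bitmasks (the kernel code uses struct cpumask, cpumask_andnot() and cpumask_any_and()):

    #include <stdio.h>
    #include <stdint.h>

    /* Returns a destination CPU, or -1 if the task can only run on CPUs
     * that are going down (in which case migrate_me() gives up). */
    static int pick_dest_cpu(uint64_t affinity, uint64_t going_down)
    {
            uint64_t candidates = affinity & ~going_down;

            if (!candidates)
                    return -1;
            return __builtin_ctzll(candidates);   /* any remaining CPU */
    }

    int main(void)
    {
            /* allowed on CPUs 0-3, CPU 2 going down -> moves to CPU 0 */
            printf("dest = %d\n", pick_dest_cpu(0xf, 1ULL << 2));
            /* pinned to CPU 2 only -> cannot be moved */
            printf("dest = %d\n", pick_dest_cpu(1ULL << 2, 1ULL << 2));
            return 0;
    }
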
diff --git a/cpu-rt-variants.patch b/cpu-rt-variants.patch
new file mode 100644
index 0000000..e6eb3b8
--- /dev/null
+++ b/cpu-rt-variants.patch
@@ -0,0 +1,28 @@
+Subject: cpu-rt-variants.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 17 Jun 2011 15:42:38 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/smp.h | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+Index: linux-stable/include/linux/smp.h
+===================================================================
+--- linux-stable.orig/include/linux/smp.h
++++ linux-stable/include/linux/smp.h
+@@ -218,6 +218,14 @@ static inline void kick_all_cpus_sync(vo
+ #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu() preempt_enable()
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define get_cpu_light() get_cpu()
++# define put_cpu_light() put_cpu()
++#else
++# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
++# define put_cpu_light() migrate_enable()
++#endif
++
+ /*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
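
Usage mirrors get_cpu()/put_cpu(); a schematic caller (the helper name is hypothetical) looks like:

    int cpu;

    cpu = get_cpu_light();      /* non-RT: preempt_disable();
                                   RT: migrate_disable(), the task stays on
                                   this CPU but remains preemptible */
    do_work_on_cpu(cpu);        /* hypothetical helper using this CPU's data */
    put_cpu_light();
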
diff --git a/cpumask-disable-offstack-on-rt.patch b/cpumask-disable-offstack-on-rt.patch
new file mode 100644
index 0000000..9c819c5
--- /dev/null
+++ b/cpumask-disable-offstack-on-rt.patch
@@ -0,0 +1,38 @@
+Subject: cpumask: Disable CONFIG_CPUMASK_OFFSTACK for RT
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Dec 2011 01:03:49 +0100
+
+We can't deal with the cpumask allocations which happen in atomic
+context (see arch/x86/kernel/apic/io_apic.c) on RT right now.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/Kconfig | 2 +-
+ lib/Kconfig | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+Index: linux-stable/arch/x86/Kconfig
+===================================================================
+--- linux-stable.orig/arch/x86/Kconfig
++++ linux-stable/arch/x86/Kconfig
+@@ -757,7 +757,7 @@ config IOMMU_HELPER
+ config MAXSMP
+ bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+ depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+- select CPUMASK_OFFSTACK
++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
+ ---help---
+ Enable maximum number of CPUS and NUMA Nodes for this architecture.
+ If unsure, say N.
+Index: linux-stable/lib/Kconfig
+===================================================================
+--- linux-stable.orig/lib/Kconfig
++++ linux-stable/lib/Kconfig
+@@ -312,6 +312,7 @@ config CHECK_SIGNATURE
+
+ config CPUMASK_OFFSTACK
+ bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
++ depends on !PREEMPT_RT_FULL
+ help
+ Use dynamic allocation for cpumask_var_t, instead of putting
+ them on the stack. This is a bit more expensive, but avoids
diff --git a/debugobjects-rt.patch b/debugobjects-rt.patch
new file mode 100644
index 0000000..bc3cba6
--- /dev/null
+++ b/debugobjects-rt.patch
@@ -0,0 +1,25 @@
+Subject: debugobjects-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 21:41:35 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ lib/debugobjects.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+Index: linux-stable/lib/debugobjects.c
+===================================================================
+--- linux-stable.orig/lib/debugobjects.c
++++ linux-stable/lib/debugobjects.c
+@@ -309,7 +309,10 @@ __debug_object_init(void *addr, struct d
+ struct debug_obj *obj;
+ unsigned long flags;
+
+- fill_pool();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (preempt_count() == 0 && !irqs_disabled())
++#endif
++ fill_pool();
+
+ db = get_bucket((unsigned long) addr);
+
diff --git a/dm-make-rt-aware.patch b/dm-make-rt-aware.patch
new file mode 100644
index 0000000..2e11eb5
--- /dev/null
+++ b/dm-make-rt-aware.patch
@@ -0,0 +1,36 @@
+Subject: dm: Make rt aware
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 14 Nov 2011 23:06:09 +0100
+
+Use the BUG_ON_NORT variant for the irq_disabled() checks. RT has
+interrupts legitimately enabled here as we cant deadlock against the
+irq thread due to the "sleeping spinlocks" conversion.
+
+Reported-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/md/dm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/drivers/md/dm.c
+===================================================================
+--- linux-stable.orig/drivers/md/dm.c
++++ linux-stable/drivers/md/dm.c
+@@ -1692,14 +1692,14 @@ static void dm_request_fn(struct request
+ if (map_request(ti, clone, md))
+ goto requeued;
+
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ spin_lock(q->queue_lock);
+ }
+
+ goto out;
+
+ requeued:
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ spin_lock(q->queue_lock);
+
+ delay_and_out:
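
BUG_ON_NONRT() is supplied by bug-rt-dependend-variants.patch elsewhere in this series; a sketch of the intended semantics (the exact definition lives in that patch):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define BUG_ON_NONRT(c)        do { } while (0)  /* irqs are legitimately on */
    #else
    # define BUG_ON_NONRT(c)        BUG_ON(c)         /* keep the check on mainline */
    #endif
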
diff --git a/drivers-net-8139-disable-irq-nosync.patch b/drivers-net-8139-disable-irq-nosync.patch
new file mode 100644
index 0000000..e17452f
--- /dev/null
+++ b/drivers-net-8139-disable-irq-nosync.patch
@@ -0,0 +1,27 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:24 -0500
+Subject: drivers/net: Use disable_irq_nosync() in 8139too
+
+Use disable_irq_nosync() instead of disable_irq() as this might be
+called in atomic context with netpoll.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/net/ethernet/realtek/8139too.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/drivers/net/ethernet/realtek/8139too.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/realtek/8139too.c
++++ linux-stable/drivers/net/ethernet/realtek/8139too.c
+@@ -2216,7 +2216,7 @@ static void rtl8139_poll_controller(stru
+ struct rtl8139_private *tp = netdev_priv(dev);
+ const int irq = tp->pci_dev->irq;
+
+- disable_irq(irq);
++ disable_irq_nosync(irq);
+ rtl8139_interrupt(irq, dev);
+ enable_irq(irq);
+ }
diff --git a/drivers-net-at91-make-mdio-protection-rt-safe.patch b/drivers-net-at91-make-mdio-protection-rt-safe.patch
new file mode 100644
index 0000000..33e850e
--- /dev/null
+++ b/drivers-net-at91-make-mdio-protection-rt-safe.patch
@@ -0,0 +1,54 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 17 Nov 2009 12:02:43 +0100
+Subject: drivers: net: at91_ether: Make mdio protection -rt safe
+
+Neither the phy interrupt nor the timer callback which updates the
+link status in absence of a phy interrupt takes lp->lock, which
+serializes the MDIO access. This works on mainline as at91 is a UP
+machine. On preempt-rt the timer callback can run even in the
+spin_lock_irq(&lp->lock) protected code paths because spin_lock_irq
+is neither disabling interrupts nor disabling preemption.
+
+Fix this by adding proper locking to at91ether_phy_interrupt() and
+at91ether_check_link(), which serializes the access on -rt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/net/ethernet/cadence/at91_ether.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+Index: linux-stable/drivers/net/ethernet/cadence/at91_ether.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/cadence/at91_ether.c
++++ linux-stable/drivers/net/ethernet/cadence/at91_ether.c
+@@ -199,7 +199,9 @@ static irqreturn_t at91ether_phy_interru
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct at91_private *lp = netdev_priv(dev);
+ unsigned int phy;
++ unsigned long flags;
+
++ spin_lock_irqsave(&lp->lock, flags);
+ /*
+ * This hander is triggered on both edges, but the PHY chips expect
+ * level-triggering. We therefore have to check if the PHY actually has
+@@ -241,6 +243,7 @@ static irqreturn_t at91ether_phy_interru
+
+ done:
+ disable_mdi(lp);
++ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return IRQ_HANDLED;
+ }
+@@ -397,9 +400,11 @@ static void at91ether_check_link(unsigne
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct at91_private *lp = netdev_priv(dev);
+
++ spin_lock_irq(&lp->lock);
+ enable_mdi(lp);
+ update_linkspeed(dev, 1);
+ disable_mdi(lp);
++ spin_unlock_irq(&lp->lock);
+
+ mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
+ }
diff --git a/drivers-net-ehea-mark-rx-irq-no-thread.patch b/drivers-net-ehea-mark-rx-irq-no-thread.patch
new file mode 100644
index 0000000..942a767
--- /dev/null
+++ b/drivers-net-ehea-mark-rx-irq-no-thread.patch
@@ -0,0 +1,53 @@
+From: Darren Hart <dvhltc@us.ibm.com>
+Date: Tue, 18 May 2010 14:33:07 -0700
+Subject: drivers: net: ehea: Make rx irq handler non-threaded (IRQF_NO_THREAD)
+
+The underlying hardware is edge triggered but presented by XICS as level
+triggered. The edge triggered interrupts are not reissued after masking. This
+is not a problem in mainline which does not mask the interrupt (relying on the
+EOI mechanism instead). The threaded interrupts in PREEMPT_RT do mask the
+interrupt, and can lose interrupts that occurred while masked, resulting in a
+hung ethernet interface.
+
+The receive handler simply calls napi_schedule(); as such, there is no
+significant additional overhead in making it non-threaded, since we either
+wake up the threaded irq handler to call napi_schedule(), or just call
+napi_schedule() directly to wake up the softirqs.
+lockless, there is no need to convert any of the ehea spinlock_t's to
+raw_spinlock_t's.
+
+Without this patch, a simple scp file copy loop would fail quickly (usually
+seconds). We have over two hours of sustained scp activity with the patch
+applied.
+
+Credit goes to Will Schmidt for lots of instrumentation and tracing which
+clarified the scenario and to Thomas Gleixner for the incredibly simple
+solution.
+
+Signed-off-by: Darren Hart <dvhltc@us.ibm.com>
+Acked-by: Will Schmidt <will_schmidt@vnet.ibm.com>
+Cc: Jan-Bernd Themann <themann@de.ibm.com>
+Cc: Nivedita Singhvi <niv@us.ibm.com>
+Cc: Brian King <bjking1@us.ibm.com>
+Cc: Michael Ellerman <ellerman@au1.ibm.com>
+Cc: Doug Maxey <doug.maxey@us.ibm.com>
+LKML-Reference: <4BF30793.5070300@us.ibm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/net/ethernet/ibm/ehea/ehea_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/drivers/net/ethernet/ibm/ehea/ehea_main.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ linux-stable/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -1308,7 +1308,7 @@ static int ehea_reg_interrupts(struct ne
+ "%s-queue%d", dev->name, i);
+ ret = ibmebus_request_irq(pr->eq->attr.ist1,
+ ehea_recv_irq_handler,
+- IRQF_DISABLED, pr->int_send_name,
++ IRQF_NO_THREAD, pr->int_send_name,
+ pr);
+ if (ret) {
+ netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
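
The rx handler shape the changelog refers to, roughly (structure and names abbreviated): all real work is deferred to the NAPI poll loop in softirq context, so the hard-irq part is trivial and safe to run non-threaded.

    static irqreturn_t rx_irq_handler(int irq, void *dev_id)
    {
            struct napi_struct *napi = dev_id;   /* ehea actually passes its
                                                    port-resource struct here */

            napi_schedule(napi);                 /* just kick the poll loop */
            return IRQ_HANDLED;
    }
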
diff --git a/drivers-net-fix-livelock-issues.patch b/drivers-net-fix-livelock-issues.patch
new file mode 100644
index 0000000..f4946e2
--- /dev/null
+++ b/drivers-net-fix-livelock-issues.patch
@@ -0,0 +1,140 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 20 Jun 2009 11:36:54 +0200
+Subject: drivers/net: fix livelock issues
+
+Preempt-RT runs into a livelock issue with the NETDEV_TX_LOCKED micro
+optimization. The reason is that the softirq thread is rescheduling
+itself on that return value. Depending on priorities it starts to
+monopolize the CPU and livelock on UP systems.
+
+Remove it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 6 +-----
+ drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 3 +--
+ drivers/net/ethernet/chelsio/cxgb/sge.c | 3 +--
+ drivers/net/ethernet/neterion/s2io.c | 7 +------
+ drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 6 ++----
+ drivers/net/ethernet/tehuti/tehuti.c | 9 ++-------
+ drivers/net/rionet.c | 6 +-----
+ 7 files changed, 9 insertions(+), 31 deletions(-)
+
+Index: linux-stable/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ linux-stable/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -2122,11 +2122,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
+ }
+
+ tpd_req = atl1c_cal_tpd_req(skb);
+- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
+- if (netif_msg_pktdata(adapter))
+- dev_info(&adapter->pdev->dev, "tx locked\n");
+- return NETDEV_TX_LOCKED;
+- }
++ spin_lock_irqsave(&adapter->tx_lock, flags);
+
+ if (atl1c_tpd_avail(adapter, type) < tpd_req) {
+ /* no enough descriptor, just stop queue */
+Index: linux-stable/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
++++ linux-stable/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+@@ -1803,8 +1803,7 @@ static netdev_tx_t atl1e_xmit_frame(stru
+ return NETDEV_TX_OK;
+ }
+ tpd_req = atl1e_cal_tdp_req(skb);
+- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
+- return NETDEV_TX_LOCKED;
++ spin_lock_irqsave(&adapter->tx_lock, flags);
+
+ if (atl1e_tpd_avail(adapter) < tpd_req) {
+ /* no enough descriptor, just stop queue */
+Index: linux-stable/drivers/net/ethernet/chelsio/cxgb/sge.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/chelsio/cxgb/sge.c
++++ linux-stable/drivers/net/ethernet/chelsio/cxgb/sge.c
+@@ -1678,8 +1678,7 @@ static int t1_sge_tx(struct sk_buff *skb
+ struct cmdQ *q = &sge->cmdQ[qid];
+ unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
+
+- if (!spin_trylock(&q->lock))
+- return NETDEV_TX_LOCKED;
++ spin_lock(&q->lock);
+
+ reclaim_completed_tx(sge, q);
+
+Index: linux-stable/drivers/net/ethernet/neterion/s2io.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/neterion/s2io.c
++++ linux-stable/drivers/net/ethernet/neterion/s2io.c
+@@ -4088,12 +4088,7 @@ static netdev_tx_t s2io_xmit(struct sk_b
+ [skb->priority & (MAX_TX_FIFOS - 1)];
+ fifo = &mac_control->fifos[queue];
+
+- if (do_spin_lock)
+- spin_lock_irqsave(&fifo->tx_lock, flags);
+- else {
+- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
+- return NETDEV_TX_LOCKED;
+- }
++ spin_lock_irqsave(&fifo->tx_lock, flags);
+
+ if (sp->config.multiq) {
+ if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
+Index: linux-stable/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
++++ linux-stable/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+@@ -2159,10 +2159,8 @@ static int pch_gbe_xmit_frame(struct sk_
+ struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
+ unsigned long flags;
+
+- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
+- /* Collision - tell upper layer to requeue */
+- return NETDEV_TX_LOCKED;
+- }
++ spin_lock_irqsave(&tx_ring->tx_lock, flags);
++
+ if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
+Index: linux-stable/drivers/net/ethernet/tehuti/tehuti.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/tehuti/tehuti.c
++++ linux-stable/drivers/net/ethernet/tehuti/tehuti.c
+@@ -1630,13 +1630,8 @@ static netdev_tx_t bdx_tx_transmit(struc
+ unsigned long flags;
+
+ ENTER;
+- local_irq_save(flags);
+- if (!spin_trylock(&priv->tx_lock)) {
+- local_irq_restore(flags);
+- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
+- BDX_DRV_NAME, ndev->name);
+- return NETDEV_TX_LOCKED;
+- }
++
++ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ /* build tx descriptor */
+ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
+Index: linux-stable/drivers/net/rionet.c
+===================================================================
+--- linux-stable.orig/drivers/net/rionet.c
++++ linux-stable/drivers/net/rionet.c
+@@ -176,11 +176,7 @@ static int rionet_start_xmit(struct sk_b
+ u16 destid;
+ unsigned long flags;
+
+- local_irq_save(flags);
+- if (!spin_trylock(&rnet->tx_lock)) {
+- local_irq_restore(flags);
+- return NETDEV_TX_LOCKED;
+- }
++ spin_lock_irqsave(&rnet->tx_lock, flags);
+
+ if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
+ netif_stop_queue(ndev);
diff --git a/drivers-net-gianfar-make-rt-aware.patch b/drivers-net-gianfar-make-rt-aware.patch
new file mode 100644
index 0000000..cac7b0b
--- /dev/null
+++ b/drivers-net-gianfar-make-rt-aware.patch
@@ -0,0 +1,57 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 1 Apr 2010 20:20:57 +0200
+Subject: drivers: net: gianfar: Make RT aware
+
+adjust_link() disables interrupts before taking the queue
+locks. On RT those locks are converted to "sleeping" locks and
+therefore the local_irq_save/restore must be converted to
+local_irq_save/restore_nort.
+
+Reported-by: Xianghua Xiao <xiaoxianghua@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Xianghua Xiao <xiaoxianghua@gmail.com>
+
+---
+ drivers/net/ethernet/freescale/gianfar.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+Index: linux-stable/drivers/net/ethernet/freescale/gianfar.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/freescale/gianfar.c
++++ linux-stable/drivers/net/ethernet/freescale/gianfar.c
+@@ -1652,7 +1652,7 @@ void stop_gfar(struct net_device *dev)
+
+
+ /* Lock it down */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ lock_tx_qs(priv);
+ lock_rx_qs(priv);
+
+@@ -1660,7 +1660,7 @@ void stop_gfar(struct net_device *dev)
+
+ unlock_rx_qs(priv);
+ unlock_tx_qs(priv);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ /* Free the IRQs */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+@@ -2957,7 +2957,7 @@ static void adjust_link(struct net_devic
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ lock_tx_qs(priv);
+
+ if (phydev->link) {
+@@ -3026,7 +3026,7 @@ static void adjust_link(struct net_devic
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+ unlock_tx_qs(priv);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ /* Update the hash table based on the current list of multicast
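
The _nort variants come from the RT locking patches earlier in this series; their intended behaviour, roughly (the exact definitions live in those patches):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define local_irq_save_nort(flags)     local_save_flags(flags)
    # define local_irq_restore_nort(flags)  (void)(flags)
    #else
    # define local_irq_save_nort(flags)     local_irq_save(flags)
    # define local_irq_restore_nort(flags)  local_irq_restore(flags)
    #endif
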
diff --git a/drivers-net-tulip-add-missing-pci-disable.patch b/drivers-net-tulip-add-missing-pci-disable.patch
new file mode 100644
index 0000000..b83d282
--- /dev/null
+++ b/drivers-net-tulip-add-missing-pci-disable.patch
@@ -0,0 +1,25 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:18 -0500
+Subject: drivers/net: tulip_remove_one needs to call pci_disable_device()
+
+Otherwise the device is not completely shut down.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/net/ethernet/dec/tulip/tulip_core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/drivers/net/ethernet/dec/tulip/tulip_core.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/dec/tulip/tulip_core.c
++++ linux-stable/drivers/net/ethernet/dec/tulip/tulip_core.c
+@@ -1948,6 +1948,7 @@ static void __devexit tulip_remove_one (
+ pci_iounmap(pdev, tp->base_addr);
+ free_netdev (dev);
+ pci_release_regions (pdev);
++ pci_disable_device (pdev);
+ pci_set_drvdata (pdev, NULL);
+
+ /* pci_power_off (pdev, -1); */
diff --git a/drivers-net-vortex-fix-locking-issues.patch b/drivers-net-vortex-fix-locking-issues.patch
new file mode 100644
index 0000000..6f8b3e4
--- /dev/null
+++ b/drivers-net-vortex-fix-locking-issues.patch
@@ -0,0 +1,50 @@
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Fri, 3 Jul 2009 08:30:00 -0500
+Subject: drivers/net: vortex fix locking issues
+
+Argh, cut and paste wasn't enough...
+
+Use this patch instead. It needs an irq disable. But, believe it or not,
+on SMP this is actually better. If the irq is shared (as it is in Mark's
+case), we don't stop the irq of other devices from being handled on
+another CPU (unfortunately for Mark, he pinned all interrupts to one CPU).
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+ drivers/net/ethernet/3com/3c59x.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+Index: linux-stable/drivers/net/ethernet/3com/3c59x.c
+===================================================================
+--- linux-stable.orig/drivers/net/ethernet/3com/3c59x.c
++++ linux-stable/drivers/net/ethernet/3com/3c59x.c
+@@ -843,9 +843,9 @@ static void poll_vortex(struct net_devic
+ {
+ struct vortex_private *vp = netdev_priv(dev);
+ unsigned long flags;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ #endif
+
+@@ -1920,12 +1920,12 @@ static void vortex_tx_timeout(struct net
+ * Block interrupts because vortex_interrupt does a bare spin_lock()
+ */
+ unsigned long flags;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (vp->full_bus_master_tx)
+ boomerang_interrupt(dev->irq, dev);
+ else
+ vortex_interrupt(dev->irq, dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ }
+
diff --git a/drivers-random-reduce-preempt-disabled-region.patch b/drivers-random-reduce-preempt-disabled-region.patch
new file mode 100644
index 0000000..e76754c
--- /dev/null
+++ b/drivers-random-reduce-preempt-disabled-region.patch
@@ -0,0 +1,41 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:30 -0500
+Subject: drivers: random: Reduce preempt disabled region
+
+No need to keep preemption disabled across the whole function.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/char/random.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+Index: linux-stable/drivers/char/random.c
+===================================================================
+--- linux-stable.orig/drivers/char/random.c
++++ linux-stable/drivers/char/random.c
+@@ -679,9 +679,12 @@ static void add_timer_randomness(struct
+ preempt_disable();
+ /* if over the trickle threshold, use only 1 in 4096 samples */
+ if (input_pool.entropy_count > trickle_thresh &&
+- ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
+- goto out;
++ ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff)) {
++ preempt_enable();
++ return;
++ }
+
++ preempt_enable();
+ sample.jiffies = jiffies;
+ sample.cycles = get_cycles();
+ sample.num = num;
+@@ -722,8 +725,6 @@ static void add_timer_randomness(struct
+ credit_entropy_bits(&input_pool,
+ min_t(int, fls(delta>>1), 11));
+ }
+-out:
+- preempt_enable();
+ }
+
+ void add_input_randomness(unsigned int type, unsigned int code,
diff --git a/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch b/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch
new file mode 100644
index 0000000..a6ddc99
--- /dev/null
+++ b/drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch
@@ -0,0 +1,49 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:01 -0500
+Subject: serial: 8250: Call flush_to_ldisc when the irq is threaded
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+---
+ drivers/tty/serial/8250/8250.c | 2 ++
+ drivers/tty/tty_buffer.c | 4 ++++
+ 2 files changed, 6 insertions(+)
+
+Index: linux-stable/drivers/tty/serial/8250/8250.c
+===================================================================
+--- linux-stable.orig/drivers/tty/serial/8250/8250.c
++++ linux-stable/drivers/tty/serial/8250/8250.c
+@@ -1549,12 +1549,14 @@ static irqreturn_t serial8250_interrupt(
+
+ l = l->next;
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (l == i->head && pass_counter++ > PASS_LIMIT) {
+ /* If we hit this, we're dead. */
+ printk_ratelimited(KERN_ERR
+ "serial8250: too much work for irq%d\n", irq);
+ break;
+ }
++#endif
+ } while (l != end);
+
+ spin_unlock(&i->lock);
+Index: linux-stable/drivers/tty/tty_buffer.c
+===================================================================
+--- linux-stable.orig/drivers/tty/tty_buffer.c
++++ linux-stable/drivers/tty/tty_buffer.c
+@@ -538,10 +538,14 @@ void tty_flip_buffer_push(struct tty_str
+ tty->buf.tail->commit = tty->buf.tail->used;
+ spin_unlock_irqrestore(&tty->buf.lock, flags);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (tty->low_latency)
+ flush_to_ldisc(&tty->buf.work);
+ else
+ schedule_work(&tty->buf.work);
++#else
++ flush_to_ldisc(&tty->buf.work);
++#endif
+ }
+ EXPORT_SYMBOL(tty_flip_buffer_push);
+
diff --git a/drivers-serial-cleanup-locking-for-rt.patch b/drivers-serial-cleanup-locking-for-rt.patch
new file mode 100644
index 0000000..3a63633
--- /dev/null
+++ b/drivers-serial-cleanup-locking-for-rt.patch
@@ -0,0 +1,44 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:01 -0500
+Subject: serial: 8250: Clean up the locking for -rt
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/tty/serial/8250/8250.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+Index: linux-stable/drivers/tty/serial/8250/8250.c
+===================================================================
+--- linux-stable.orig/drivers/tty/serial/8250/8250.c
++++ linux-stable/drivers/tty/serial/8250/8250.c
+@@ -2773,14 +2773,10 @@ serial8250_console_write(struct console
+
+ touch_nmi_watchdog();
+
+- local_irq_save(flags);
+- if (port->sysrq) {
+- /* serial8250_handle_irq() already took the lock */
+- locked = 0;
+- } else if (oops_in_progress) {
+- locked = spin_trylock(&port->lock);
+- } else
+- spin_lock(&port->lock);
++ if (port->sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&port->lock, flags);
++ else
++ spin_lock_irqsave(&port->lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -2812,8 +2808,7 @@ serial8250_console_write(struct console
+ serial8250_modem_status(up);
+
+ if (locked)
+- spin_unlock(&port->lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ static int __init serial8250_console_setup(struct console *co, char *options)
diff --git a/drivers-tty-fix-omap-lock-crap.patch b/drivers-tty-fix-omap-lock-crap.patch
new file mode 100644
index 0000000..382de0a
--- /dev/null
+++ b/drivers-tty-fix-omap-lock-crap.patch
@@ -0,0 +1,40 @@
+Subject: drivers-tty-fix-omap-lock-crap.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 28 Jul 2011 13:32:57 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/tty/serial/omap-serial.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+Index: linux-stable/drivers/tty/serial/omap-serial.c
+===================================================================
+--- linux-stable.orig/drivers/tty/serial/omap-serial.c
++++ linux-stable/drivers/tty/serial/omap-serial.c
+@@ -1082,13 +1082,10 @@ serial_omap_console_write(struct console
+
+ pm_runtime_get_sync(&up->pdev->dev);
+
+- local_irq_save(flags);
+- if (up->port.sysrq)
+- locked = 0;
+- else if (oops_in_progress)
+- locked = spin_trylock(&up->port.lock);
++ if (up->port.sysrq || oops_in_progress)
++ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+- spin_lock(&up->port.lock);
++ spin_lock_irqsave(&up->port.lock, flags);
+
+ /*
+ * First save the IER then disable the interrupts
+@@ -1117,8 +1114,7 @@ serial_omap_console_write(struct console
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
+ if (locked)
+- spin_unlock(&up->port.lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&up->port.lock, flags);
+ }
+
+ static int __init
diff --git a/early-printk-consolidate.patch b/early-printk-consolidate.patch
new file mode 100644
index 0000000..6738e79
--- /dev/null
+++ b/early-printk-consolidate.patch
@@ -0,0 +1,498 @@
+Subject: early-printk-consolidate.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 23 Jul 2011 11:04:08 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/kernel/early_printk.c | 17 +++--------------
+ arch/blackfin/kernel/early_printk.c | 2 --
+ arch/microblaze/kernel/early_printk.c | 26 ++++----------------------
+ arch/mips/kernel/early_printk.c | 10 ++++------
+ arch/powerpc/kernel/udbg.c | 6 ++----
+ arch/sh/kernel/sh_bios.c | 2 --
+ arch/sparc/kernel/setup_32.c | 1 +
+ arch/sparc/kernel/setup_64.c | 8 +++++++-
+ arch/tile/kernel/early_printk.c | 26 ++++----------------------
+ arch/um/kernel/early_printk.c | 8 +++++---
+ arch/unicore32/kernel/early_printk.c | 12 ++++--------
+ arch/x86/kernel/early_printk.c | 21 ++-------------------
+ include/linux/console.h | 1 +
+ include/linux/printk.h | 5 +++++
+ kernel/printk.c | 30 +++++++++++++++++++++++-------
+ 15 files changed, 65 insertions(+), 110 deletions(-)
+
+Index: linux-stable/arch/arm/kernel/early_printk.c
+===================================================================
+--- linux-stable.orig/arch/arm/kernel/early_printk.c
++++ linux-stable/arch/arm/kernel/early_printk.c
+@@ -29,28 +29,17 @@ static void early_console_write(struct c
+ early_write(s, n);
+ }
+
+-static struct console early_console = {
++static struct console early_console_dev = {
+ .name = "earlycon",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+ };
+
+-asmlinkage void early_printk(const char *fmt, ...)
+-{
+- char buf[512];
+- int n;
+- va_list ap;
+-
+- va_start(ap, fmt);
+- n = vscnprintf(buf, sizeof(buf), fmt, ap);
+- early_write(buf, n);
+- va_end(ap);
+-}
+-
+ static int __init setup_early_printk(char *buf)
+ {
+- register_console(&early_console);
++ early_console = &early_console_dev;
++ register_console(&early_console_dev);
+ return 0;
+ }
+
+Index: linux-stable/arch/blackfin/kernel/early_printk.c
+===================================================================
+--- linux-stable.orig/arch/blackfin/kernel/early_printk.c
++++ linux-stable/arch/blackfin/kernel/early_printk.c
+@@ -25,8 +25,6 @@ extern struct console *bfin_earlyserial_
+ extern struct console *bfin_jc_early_init(void);
+ #endif
+
+-static struct console *early_console;
+-
+ /* Default console */
+ #define DEFAULT_PORT 0
+ #define DEFAULT_CFLAG CS8|B57600
+Index: linux-stable/arch/microblaze/kernel/early_printk.c
+===================================================================
+--- linux-stable.orig/arch/microblaze/kernel/early_printk.c
++++ linux-stable/arch/microblaze/kernel/early_printk.c
+@@ -21,7 +21,6 @@
+ #include <asm/setup.h>
+ #include <asm/prom.h>
+
+-static u32 early_console_initialized;
+ static u32 base_addr;
+
+ #ifdef CONFIG_SERIAL_UARTLITE_CONSOLE
+@@ -109,27 +108,11 @@ static struct console early_serial_uart1
+ };
+ #endif /* CONFIG_SERIAL_8250_CONSOLE */
+
+-static struct console *early_console;
+-
+-void early_printk(const char *fmt, ...)
+-{
+- char buf[512];
+- int n;
+- va_list ap;
+-
+- if (early_console_initialized) {
+- va_start(ap, fmt);
+- n = vscnprintf(buf, 512, fmt, ap);
+- early_console->write(early_console, buf, n);
+- va_end(ap);
+- }
+-}
+-
+ int __init setup_early_printk(char *opt)
+ {
+ int version = 0;
+
+- if (early_console_initialized)
++ if (early_console)
+ return 1;
+
+ base_addr = of_early_console(&version);
+@@ -159,7 +142,6 @@ int __init setup_early_printk(char *opt)
+ }
+
+ register_console(early_console);
+- early_console_initialized = 1;
+ return 0;
+ }
+ return 1;
+@@ -169,7 +151,7 @@ int __init setup_early_printk(char *opt)
+ * only for early console because of performance degression */
+ void __init remap_early_printk(void)
+ {
+- if (!early_console_initialized || !early_console)
++ if (!early_console)
+ return;
+ printk(KERN_INFO "early_printk_console remapping from 0x%x to ",
+ base_addr);
+@@ -195,9 +177,9 @@ void __init remap_early_printk(void)
+
+ void __init disable_early_printk(void)
+ {
+- if (!early_console_initialized || !early_console)
++ if (!early_console)
+ return;
+ printk(KERN_WARNING "disabling early console\n");
+ unregister_console(early_console);
+- early_console_initialized = 0;
++ early_console = NULL;
+ }
+Index: linux-stable/arch/mips/kernel/early_printk.c
+===================================================================
+--- linux-stable.orig/arch/mips/kernel/early_printk.c
++++ linux-stable/arch/mips/kernel/early_printk.c
+@@ -25,20 +25,18 @@ early_console_write(struct console *con,
+ }
+ }
+
+-static struct console early_console __initdata = {
++static struct console early_console_prom = {
+ .name = "early",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1
+ };
+
+-static int early_console_initialized __initdata;
+-
+ void __init setup_early_printk(void)
+ {
+- if (early_console_initialized)
++ if (early_console)
+ return;
+- early_console_initialized = 1;
++ early_console = &early_console_prom;
+
+- register_console(&early_console);
++ register_console(&early_console_prom);
+ }
+Index: linux-stable/arch/powerpc/kernel/udbg.c
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/udbg.c
++++ linux-stable/arch/powerpc/kernel/udbg.c
+@@ -179,15 +179,13 @@ static struct console udbg_console = {
+ .index = 0,
+ };
+
+-static int early_console_initialized;
+-
+ /*
+ * Called by setup_system after ppc_md->probe and ppc_md->early_init.
+ * Call it again after setting udbg_putc in ppc_md->setup_arch.
+ */
+ void __init register_early_udbg_console(void)
+ {
+- if (early_console_initialized)
++ if (early_console)
+ return;
+
+ if (!udbg_putc)
+@@ -197,7 +195,7 @@ void __init register_early_udbg_console(
+ printk(KERN_INFO "early console immortal !\n");
+ udbg_console.flags &= ~CON_BOOT;
+ }
+- early_console_initialized = 1;
++ early_console = &udbg_console;
+ register_console(&udbg_console);
+ }
+
+Index: linux-stable/arch/sh/kernel/sh_bios.c
+===================================================================
+--- linux-stable.orig/arch/sh/kernel/sh_bios.c
++++ linux-stable/arch/sh/kernel/sh_bios.c
+@@ -144,8 +144,6 @@ static struct console bios_console = {
+ .index = -1,
+ };
+
+-static struct console *early_console;
+-
+ static int __init setup_early_printk(char *buf)
+ {
+ int keep_early = 0;
+Index: linux-stable/arch/sparc/kernel/setup_32.c
+===================================================================
+--- linux-stable.orig/arch/sparc/kernel/setup_32.c
++++ linux-stable/arch/sparc/kernel/setup_32.c
+@@ -309,6 +309,7 @@ void __init setup_arch(char **cmdline_p)
+
+ boot_flags_init(*cmdline_p);
+
++ early_console = &prom_early_console;
+ register_console(&prom_early_console);
+
+ printk("ARCH: ");
+Index: linux-stable/arch/sparc/kernel/setup_64.c
+===================================================================
+--- linux-stable.orig/arch/sparc/kernel/setup_64.c
++++ linux-stable/arch/sparc/kernel/setup_64.c
+@@ -487,6 +487,12 @@ static void __init init_sparc64_elf_hwca
+ popc_patch();
+ }
+
++static inline void register_prom_console(void)
++{
++ early_console = &prom_early_console;
++ register_console(&prom_early_console);
++}
++
+ void __init setup_arch(char **cmdline_p)
+ {
+ /* Initialize PROM console and command line. */
+@@ -498,7 +504,7 @@ void __init setup_arch(char **cmdline_p)
+ #ifdef CONFIG_EARLYFB
+ if (btext_find_display())
+ #endif
+- register_console(&prom_early_console);
++ register_prom_console();
+
+ if (tlb_type == hypervisor)
+ printk("ARCH: SUN4V\n");
+Index: linux-stable/arch/tile/kernel/early_printk.c
+===================================================================
+--- linux-stable.orig/arch/tile/kernel/early_printk.c
++++ linux-stable/arch/tile/kernel/early_printk.c
+@@ -33,25 +33,8 @@ static struct console early_hv_console =
+ };
+
+ /* Direct interface for emergencies */
+-static struct console *early_console = &early_hv_console;
+-static int early_console_initialized;
+ static int early_console_complete;
+
+-static void early_vprintk(const char *fmt, va_list ap)
+-{
+- char buf[512];
+- int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+- early_console->write(early_console, buf, n);
+-}
+-
+-void early_printk(const char *fmt, ...)
+-{
+- va_list ap;
+- va_start(ap, fmt);
+- early_vprintk(fmt, ap);
+- va_end(ap);
+-}
+-
+ void early_panic(const char *fmt, ...)
+ {
+ va_list ap;
+@@ -69,14 +52,13 @@ static int __initdata keep_early;
+
+ static int __init setup_early_printk(char *str)
+ {
+- if (early_console_initialized)
++ if (early_console)
+ return 1;
+
+ if (str != NULL && strncmp(str, "keep", 4) == 0)
+ keep_early = 1;
+
+ early_console = &early_hv_console;
+- early_console_initialized = 1;
+ register_console(early_console);
+
+ return 0;
+@@ -85,12 +67,12 @@ static int __init setup_early_printk(cha
+ void __init disable_early_printk(void)
+ {
+ early_console_complete = 1;
+- if (!early_console_initialized || !early_console)
++ if (!early_console)
+ return;
+ if (!keep_early) {
+ early_printk("disabling early console\n");
+ unregister_console(early_console);
+- early_console_initialized = 0;
++ early_console = NULL;
+ } else {
+ early_printk("keeping early console\n");
+ }
+@@ -98,7 +80,7 @@ void __init disable_early_printk(void)
+
+ void warn_early_printk(void)
+ {
+- if (early_console_complete || early_console_initialized)
++ if (early_console_complete || early_console)
+ return;
+ early_printk("\
+ Machine shutting down before console output is fully initialized.\n\
+Index: linux-stable/arch/um/kernel/early_printk.c
+===================================================================
+--- linux-stable.orig/arch/um/kernel/early_printk.c
++++ linux-stable/arch/um/kernel/early_printk.c
+@@ -16,7 +16,7 @@ static void early_console_write(struct c
+ um_early_printk(s, n);
+ }
+
+-static struct console early_console = {
++static struct console early_console_dev = {
+ .name = "earlycon",
+ .write = early_console_write,
+ .flags = CON_BOOT,
+@@ -25,8 +25,10 @@ static struct console early_console = {
+
+ static int __init setup_early_printk(char *buf)
+ {
+- register_console(&early_console);
+-
++ if (!early_console) {
++ early_console = &early_console_dev;
++ register_console(&early_console_dev);
++ }
+ return 0;
+ }
+
+Index: linux-stable/arch/unicore32/kernel/early_printk.c
+===================================================================
+--- linux-stable.orig/arch/unicore32/kernel/early_printk.c
++++ linux-stable/arch/unicore32/kernel/early_printk.c
+@@ -33,21 +33,17 @@ static struct console early_ocd_console
+ .index = -1,
+ };
+
+-/* Direct interface for emergencies */
+-static struct console *early_console = &early_ocd_console;
+-
+-static int __initdata keep_early;
+-
+ static int __init setup_early_printk(char *buf)
+ {
+- if (!buf)
++ int keep_early;
++
++ if (!buf || early_console)
+ return 0;
+
+ if (strstr(buf, "keep"))
+ keep_early = 1;
+
+- if (!strncmp(buf, "ocd", 3))
+- early_console = &early_ocd_console;
++ early_console = &early_ocd_console;
+
+ if (keep_early)
+ early_console->flags &= ~CON_BOOT;
+Index: linux-stable/arch/x86/kernel/early_printk.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/early_printk.c
++++ linux-stable/arch/x86/kernel/early_printk.c
+@@ -169,25 +169,9 @@ static struct console early_serial_conso
+ .index = -1,
+ };
+
+-/* Direct interface for emergencies */
+-static struct console *early_console = &early_vga_console;
+-static int __initdata early_console_initialized;
+-
+-asmlinkage void early_printk(const char *fmt, ...)
+-{
+- char buf[512];
+- int n;
+- va_list ap;
+-
+- va_start(ap, fmt);
+- n = vscnprintf(buf, sizeof(buf), fmt, ap);
+- early_console->write(early_console, buf, n);
+- va_end(ap);
+-}
+-
+ static inline void early_console_register(struct console *con, int keep_early)
+ {
+- if (early_console->index != -1) {
++ if (con->index != -1) {
+ printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
+ con->name);
+ return;
+@@ -207,9 +191,8 @@ static int __init setup_early_printk(cha
+ if (!buf)
+ return 0;
+
+- if (early_console_initialized)
++ if (early_console)
+ return 0;
+- early_console_initialized = 1;
+
+ keep = (strstr(buf, "keep") != NULL);
+
+Index: linux-stable/include/linux/console.h
+===================================================================
+--- linux-stable.orig/include/linux/console.h
++++ linux-stable/include/linux/console.h
+@@ -133,6 +133,7 @@ struct console {
+ for (con = console_drivers; con != NULL; con = con->next)
+
+ extern int console_set_on_cmdline;
++extern struct console *early_console;
+
+ extern int add_preferred_console(char *name, int idx, char *options);
+ extern int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options);
+Index: linux-stable/include/linux/printk.h
+===================================================================
+--- linux-stable.orig/include/linux/printk.h
++++ linux-stable/include/linux/printk.h
+@@ -95,8 +95,13 @@ int no_printk(const char *fmt, ...)
+ return 0;
+ }
+
++#ifdef CONFIG_EARLY_PRINTK
+ extern asmlinkage __printf(1, 2)
+ void early_printk(const char *fmt, ...);
++#else
++static inline __printf(1, 2) __cold
++void early_printk(const char *s, ...) { }
++#endif
+
+ extern int printk_needs_cpu(int cpu);
+ extern void printk_tick(void);
+Index: linux-stable/kernel/printk.c
+===================================================================
+--- linux-stable.orig/kernel/printk.c
++++ linux-stable/kernel/printk.c
+@@ -48,13 +48,6 @@
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/printk.h>
+
+-/*
+- * Architectures can override it:
+- */
+-void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...)
+-{
+-}
+-
+ /* printk's without a loglevel use this.. */
+ #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
+
+@@ -1232,6 +1225,29 @@ SYSCALL_DEFINE3(syslog, int, type, char
+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
+ }
+
++#ifdef CONFIG_EARLY_PRINTK
++struct console *early_console;
++
++static void early_vprintk(const char *fmt, va_list ap)
++{
++ if (early_console) {
++ char buf[512];
++ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
++
++ early_console->write(early_console, buf, n);
++ }
++}
++
++asmlinkage void early_printk(const char *fmt, ...)
++{
++ va_list ap;
++
++ va_start(ap, fmt);
++ early_vprintk(fmt, ap);
++ va_end(ap);
++}
++#endif
++
+ static bool __read_mostly ignore_loglevel;
+
+ static int __init ignore_loglevel_setup(char *str)
diff --git a/epoll-use-get-cpu-light.patch b/epoll-use-get-cpu-light.patch
new file mode 100644
index 0000000..269a907
--- /dev/null
+++ b/epoll-use-get-cpu-light.patch
@@ -0,0 +1,28 @@
+Subject: epoll.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 08 Jul 2011 16:35:35 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ fs/eventpoll.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/fs/eventpoll.c
+===================================================================
+--- linux-stable.orig/fs/eventpoll.c
++++ linux-stable/fs/eventpoll.c
+@@ -495,12 +495,12 @@ static int ep_poll_wakeup_proc(void *pri
+ */
+ static void ep_poll_safewake(wait_queue_head_t *wq)
+ {
+- int this_cpu = get_cpu();
++ int this_cpu = get_cpu_light();
+
+ ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
+ ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
+
+- put_cpu();
++ put_cpu_light();
+ }
+
+ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
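A note on the helpers used above: get_cpu_light()/put_cpu_light() are not
defined by this patch; they are provided by the core RT patches in the series.
A minimal sketch of the assumed definitions -- the point being that only
migration is disabled, so the section stays on one CPU while remaining
preemptible on RT:

	/* Sketch only -- assumed shape of the helpers from the RT core patches. */
	#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
	#define put_cpu_light()		migrate_enable()

On !RT kernels migrate_disable()/migrate_enable() are expected to fall back to
preempt_disable()/preempt_enable(), preserving the old behaviour.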
diff --git a/filemap-fix-up.patch b/filemap-fix-up.patch
new file mode 100644
index 0000000..0f6897c
--- /dev/null
+++ b/filemap-fix-up.patch
@@ -0,0 +1,24 @@
+Subject: filemap-fix-up.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 17 Jun 2011 18:56:24 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Wrecked-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/n/tip-m6yuzd6ul717hlnl2gj6p3ou@git.kernel.org
+---
+ mm/filemap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/mm/filemap.c
+===================================================================
+--- linux-stable.orig/mm/filemap.c
++++ linux-stable/mm/filemap.c
+@@ -1955,7 +1955,7 @@ size_t iov_iter_copy_from_user_atomic(st
+ char *kaddr;
+ size_t copied;
+
+- BUG_ON(!in_atomic());
++ BUG_ON(!pagefault_disabled());
+ kaddr = kmap_atomic(page);
+ if (likely(i->nr_segs == 1)) {
+ int left;
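pagefault_disabled() is likewise supplied elsewhere in the series; the check
has to move away from in_atomic() because on RT kmap_atomic() and friends no
longer imply a non-preemptible section. A sketch of the assumed helper, based
on a per-task counter maintained by pagefault_disable()/pagefault_enable():

	/* Sketch -- assumed per-task flag, maintained by pagefault_disable(). */
	static inline int pagefault_disabled(void)
	{
		return current->pagefault_disabled;
	}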
diff --git a/fix-rt-int3-x86_32-3.2-rt.patch b/fix-rt-int3-x86_32-3.2-rt.patch
new file mode 100644
index 0000000..997c7bf
--- /dev/null
+++ b/fix-rt-int3-x86_32-3.2-rt.patch
@@ -0,0 +1,114 @@
+Subject: x86: Do not disable preemption in int3 on 32bit
+From: Steven Rostedt <rostedt@goodmis.org>
+
+Preemption must be disabled before enabling interrupts in do_trap
+on x86_64 because the stack in use for int3 and debug is a per CPU
+stack set by the IST. But 32bit does not have an IST; the stack
+still belongs to the current task, so there is no problem in scheduling
+out the task.
+
+Keep preemption enabled on X86_32 when enabling interrupts for
+do_trap().
+
+The name of the function is changed from preempt_conditional_sti/cli()
+to conditional_sti/cli_ist(), to annotate that this function is used
+when the stack is on the IST.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/x86/kernel/traps.c | 32 +++++++++++++++++++++++---------
+ 1 file changed, 23 insertions(+), 9 deletions(-)
+
+Index: linux-stable/arch/x86/kernel/traps.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/traps.c
++++ linux-stable/arch/x86/kernel/traps.c
+@@ -87,9 +87,21 @@ static inline void conditional_sti(struc
+ local_irq_enable();
+ }
+
+-static inline void preempt_conditional_sti(struct pt_regs *regs)
++static inline void conditional_sti_ist(struct pt_regs *regs)
+ {
++#ifdef CONFIG_X86_64
++ /*
++ * X86_64 uses a per CPU stack on the IST for certain traps
++ * like int3. The task can not be preempted when using one
++ * of these stacks, thus preemption must be disabled, otherwise
++ * the stack can be corrupted if the task is scheduled out,
++ * and another task comes in and uses this stack.
++ *
++ * On x86_32 the task keeps its own stack and it is OK if the
++ * task schedules out.
++ */
+ inc_preempt_count();
++#endif
+ if (regs->flags & X86_EFLAGS_IF)
+ local_irq_enable();
+ }
+@@ -100,11 +112,13 @@ static inline void conditional_cli(struc
+ local_irq_disable();
+ }
+
+-static inline void preempt_conditional_cli(struct pt_regs *regs)
++static inline void conditional_cli_ist(struct pt_regs *regs)
+ {
+ if (regs->flags & X86_EFLAGS_IF)
+ local_irq_disable();
++#ifdef CONFIG_X86_64
+ dec_preempt_count();
++#endif
+ }
+
+ static void __kprobes
+@@ -225,9 +239,9 @@ dotraplinkage void do_stack_segment(stru
+ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
+ X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
+ return;
+- preempt_conditional_sti(regs);
++ conditional_sti_ist(regs);
+ do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
+- preempt_conditional_cli(regs);
++ conditional_cli_ist(regs);
+ }
+
+ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
+@@ -327,9 +341,9 @@ dotraplinkage void __kprobes notrace do_
+ * as we may switch to the interrupt stack.
+ */
+ debug_stack_usage_inc();
+- preempt_conditional_sti(regs);
++ conditional_sti_ist(regs);
+ do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
+- preempt_conditional_cli(regs);
++ conditional_cli_ist(regs);
+ debug_stack_usage_dec();
+ }
+
+@@ -430,12 +444,12 @@ dotraplinkage void __kprobes do_debug(st
+ debug_stack_usage_inc();
+
+ /* It's safe to allow irq's after DR6 has been saved */
+- preempt_conditional_sti(regs);
++ conditional_sti_ist(regs);
+
+ if (regs->flags & X86_VM_MASK) {
+ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+ X86_TRAP_DB);
+- preempt_conditional_cli(regs);
++ conditional_cli_ist(regs);
+ debug_stack_usage_dec();
+ return;
+ }
+@@ -455,7 +469,7 @@ dotraplinkage void __kprobes do_debug(st
+ si_code = get_si_code(tsk->thread.debugreg6);
+ if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
+ send_sigtrap(tsk, regs, error_code, si_code);
+- preempt_conditional_cli(regs);
++ conditional_cli_ist(regs);
+ debug_stack_usage_dec();
+
+ return;
diff --git a/fs-block-rt-support.patch b/fs-block-rt-support.patch
new file mode 100644
index 0000000..a047eb4
--- /dev/null
+++ b/fs-block-rt-support.patch
@@ -0,0 +1,44 @@
+Subject: fs-block-rt-support.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 14 Jun 2011 17:05:09 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ block/blk-core.c | 2 +-
+ fs/file.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+Index: linux-stable/block/blk-core.c
+===================================================================
+--- linux-stable.orig/block/blk-core.c
++++ linux-stable/block/blk-core.c
+@@ -239,7 +239,7 @@ EXPORT_SYMBOL(blk_delay_queue);
+ **/
+ void blk_start_queue(struct request_queue *q)
+ {
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ __blk_run_queue(q);
+Index: linux-stable/fs/file.c
+===================================================================
+--- linux-stable.orig/fs/file.c
++++ linux-stable/fs/file.c
+@@ -105,14 +105,14 @@ void free_fdtable_rcu(struct rcu_head *r
+ kfree(fdt->open_fds);
+ kfree(fdt);
+ } else {
+- fddef = &get_cpu_var(fdtable_defer_list);
++ fddef = &per_cpu(fdtable_defer_list, get_cpu_light());
+ spin_lock(&fddef->lock);
+ fdt->next = fddef->next;
+ fddef->next = fdt;
+ /* vmallocs are handled from the workqueue context */
+ schedule_work(&fddef->wq);
+ spin_unlock(&fddef->lock);
+- put_cpu_var(fdtable_defer_list);
++ put_cpu_light();
+ }
+ }
+
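WARN_ON_NONRT() used in blk_start_queue() above is another series-provided
helper: the irqs-off assertion is still wanted on mainline, but on RT the
queue lock is a sleeping lock and interrupts are legitimately enabled.
Assumed definition, as a sketch:

	/* Sketch -- assumed definition from the RT bug.h variants. */
	#ifdef CONFIG_PREEMPT_RT_BASE
	# define WARN_ON_NONRT(condition)	do { } while (0)
	#else
	# define WARN_ON_NONRT(condition)	WARN_ON(condition)
	#endif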
diff --git a/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/fs-dcache-use-cpu-chill-in-trylock-loops.patch
new file mode 100644
index 0000000..2665a24
--- /dev/null
+++ b/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -0,0 +1,102 @@
+Subject: fs: dcache: Use cpu_chill() in trylock loops
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 07 Mar 2012 21:00:34 +0100
+
+Retry loops on RT might loop forever when the modifying side was
+preempted. Use cpu_chill() instead of cpu_relax() to let the system
+make progress.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ fs/autofs4/autofs_i.h | 1 +
+ fs/autofs4/expire.c | 2 +-
+ fs/dcache.c | 7 ++++---
+ fs/namespace.c | 3 ++-
+ 4 files changed, 8 insertions(+), 5 deletions(-)
+
+Index: linux-stable/fs/autofs4/autofs_i.h
+===================================================================
+--- linux-stable.orig/fs/autofs4/autofs_i.h
++++ linux-stable/fs/autofs4/autofs_i.h
+@@ -34,6 +34,7 @@
+ #include <linux/sched.h>
+ #include <linux/mount.h>
+ #include <linux/namei.h>
++#include <linux/delay.h>
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+
+Index: linux-stable/fs/autofs4/expire.c
+===================================================================
+--- linux-stable.orig/fs/autofs4/expire.c
++++ linux-stable/fs/autofs4/expire.c
+@@ -166,7 +166,7 @@ again:
+ parent = p->d_parent;
+ if (!spin_trylock(&parent->d_lock)) {
+ spin_unlock(&p->d_lock);
+- cpu_relax();
++ cpu_chill();
+ goto relock;
+ }
+ spin_unlock(&p->d_lock);
+Index: linux-stable/fs/dcache.c
+===================================================================
+--- linux-stable.orig/fs/dcache.c
++++ linux-stable/fs/dcache.c
+@@ -37,6 +37,7 @@
+ #include <linux/rculist_bl.h>
+ #include <linux/prefetch.h>
+ #include <linux/ratelimit.h>
++#include <linux/delay.h>
+ #include "internal.h"
+ #include "mount.h"
+
+@@ -488,7 +489,7 @@ static inline struct dentry *dentry_kill
+ if (inode && !spin_trylock(&inode->i_lock)) {
+ relock:
+ spin_unlock(&dentry->d_lock);
+- cpu_relax();
++ cpu_chill();
+ return dentry; /* try again with same dentry */
+ }
+ if (IS_ROOT(dentry))
+@@ -876,7 +877,7 @@ relock:
+
+ if (!spin_trylock(&dentry->d_lock)) {
+ spin_unlock(&dcache_lru_lock);
+- cpu_relax();
++ cpu_chill();
+ goto relock;
+ }
+
+@@ -2115,7 +2116,7 @@ again:
+ if (dentry->d_count == 1) {
+ if (inode && !spin_trylock(&inode->i_lock)) {
+ spin_unlock(&dentry->d_lock);
+- cpu_relax();
++ cpu_chill();
+ goto again;
+ }
+ dentry->d_flags &= ~DCACHE_CANT_MOUNT;
+Index: linux-stable/fs/namespace.c
+===================================================================
+--- linux-stable.orig/fs/namespace.c
++++ linux-stable/fs/namespace.c
+@@ -20,6 +20,7 @@
+ #include <linux/fs_struct.h> /* get_fs_root et.al. */
+ #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
+ #include <linux/uaccess.h>
++#include <linux/delay.h>
+ #include "pnode.h"
+ #include "internal.h"
+
+@@ -313,7 +314,7 @@ int __mnt_want_write(struct vfsmount *m)
+ smp_mb();
+ while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) {
+ preempt_enable();
+- cpu_relax();
++ cpu_chill();
+ preempt_disable();
+ }
+ /*
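cpu_chill() itself is introduced by a separate patch in the series. On RT it
must actually sleep instead of spinning, otherwise the trylock loop can starve
the preempted writer it is waiting for. Assumed mapping, as a sketch:

	/* Sketch -- assumed definition; the real one lives in linux/delay.h. */
	#ifdef CONFIG_PREEMPT_RT_FULL
	# define cpu_chill()	msleep(1)
	#else
	# define cpu_chill()	cpu_relax()
	#endif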
diff --git a/fs-jbd-pull-plug-when-waiting-for-space.patch b/fs-jbd-pull-plug-when-waiting-for-space.patch
new file mode 100644
index 0000000..cbd3076
--- /dev/null
+++ b/fs-jbd-pull-plug-when-waiting-for-space.patch
@@ -0,0 +1,31 @@
+From: Mike Galbraith <mgalbraith@suse.de>
+Date: Wed, 11 Jul 2012 22:05:20 +0000
+Subject: fs, jbd: pull your plug when waiting for space
+
+With an -rt kernel, and a heavy sync IO load, tasks can jam
+up on journal locks without unplugging, which can lead to
+terminal IO starvation. Unplug and schedule when waiting for space.
+
+Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Theodore Tso <tytso@mit.edu>
+Link: http://lkml.kernel.org/r/1341812414.7370.73.camel@marge.simpson.net
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ fs/jbd/checkpoint.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: linux-stable/fs/jbd/checkpoint.c
+===================================================================
+--- linux-stable.orig/fs/jbd/checkpoint.c
++++ linux-stable/fs/jbd/checkpoint.c
+@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *jou
+ if (journal->j_flags & JFS_ABORT)
+ return;
+ spin_unlock(&journal->j_state_lock);
++ if (current->plug)
++ io_schedule();
+ mutex_lock(&journal->j_checkpoint_mutex);
+
+ /*
diff --git a/fs-jbd-replace-bh_state-lock.patch b/fs-jbd-replace-bh_state-lock.patch
new file mode 100644
index 0000000..a981eef
--- /dev/null
+++ b/fs-jbd-replace-bh_state-lock.patch
@@ -0,0 +1,104 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Mar 2011 10:11:25 +0100
+Subject: fs: jbd/jbd2: Make state lock and journal head lock rt safe
+
+bit_spin_locks break under RT: the waiter spins with preemption disabled
+and without priority inheritance, so it can spin forever behind a
+preempted lock holder.
+
+Based on a previous patch from Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+--
+
+ include/linux/buffer_head.h | 10 ++++++++++
+ include/linux/jbd_common.h | 24 ++++++++++++++++++++++++
+ 2 files changed, 34 insertions(+)
+
+Index: linux-stable/include/linux/buffer_head.h
+===================================================================
+--- linux-stable.orig/include/linux/buffer_head.h
++++ linux-stable/include/linux/buffer_head.h
+@@ -74,6 +74,11 @@ struct buffer_head {
+ atomic_t b_count; /* users using this buffer_head */
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t b_uptodate_lock;
++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
++ spinlock_t b_state_lock;
++ spinlock_t b_journal_head_lock;
++#endif
+ #endif
+ };
+
+@@ -105,6 +110,11 @@ static inline void buffer_head_init_lock
+ {
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ spin_lock_init(&bh->b_uptodate_lock);
++#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
++ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
++ spin_lock_init(&bh->b_state_lock);
++ spin_lock_init(&bh->b_journal_head_lock);
++#endif
+ #endif
+ }
+
+Index: linux-stable/include/linux/jbd_common.h
+===================================================================
+--- linux-stable.orig/include/linux/jbd_common.h
++++ linux-stable/include/linux/jbd_common.h
+@@ -39,32 +39,56 @@ static inline struct journal_head *bh2jh
+
+ static inline void jbd_lock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(BH_State, &bh->b_state);
++#else
++ spin_lock(&bh->b_state_lock);
++#endif
+ }
+
+ static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ return bit_spin_trylock(BH_State, &bh->b_state);
++#else
++ return spin_trylock(&bh->b_state_lock);
++#endif
+ }
+
+ static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ return bit_spin_is_locked(BH_State, &bh->b_state);
++#else
++ return spin_is_locked(&bh->b_state_lock);
++#endif
+ }
+
+ static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_State, &bh->b_state);
++#else
++ spin_unlock(&bh->b_state_lock);
++#endif
+ }
+
+ static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(BH_JournalHead, &bh->b_state);
++#else
++ spin_lock(&bh->b_journal_head_lock);
++#endif
+ }
+
+ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(BH_JournalHead, &bh->b_state);
++#else
++ spin_unlock(&bh->b_journal_head_lock);
++#endif
+ }
+
+ #endif
diff --git a/fs-namespace-preemption-fix.patch b/fs-namespace-preemption-fix.patch
new file mode 100644
index 0000000..8b4a69e
--- /dev/null
+++ b/fs-namespace-preemption-fix.patch
@@ -0,0 +1,32 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 19 Jul 2009 08:44:27 -0500
+Subject: fs: namespace preemption fix
+
+On RT we cannot loop with preemption disabled here as
+mnt_make_readonly() might have been preempted. We can safely enable
+preemption while waiting for MNT_WRITE_HOLD to be cleared. Safe on !RT
+as well.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ fs/namespace.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+Index: linux-stable/fs/namespace.c
+===================================================================
+--- linux-stable.orig/fs/namespace.c
++++ linux-stable/fs/namespace.c
+@@ -311,8 +311,11 @@ int __mnt_want_write(struct vfsmount *m)
+ * incremented count after it has set MNT_WRITE_HOLD.
+ */
+ smp_mb();
+- while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
++ while (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) {
++ preempt_enable();
+ cpu_relax();
++ preempt_disable();
++ }
+ /*
+ * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
+ * be set to match its requirements. So we must not load that until
diff --git a/fs-ntfs-disable-interrupt-non-rt.patch b/fs-ntfs-disable-interrupt-non-rt.patch
new file mode 100644
index 0000000..f30579a
--- /dev/null
+++ b/fs-ntfs-disable-interrupt-non-rt.patch
@@ -0,0 +1,61 @@
+From: Mike Galbraith <efault@gmx.de>
+Date: Fri, 3 Jul 2009 08:44:12 -0500
+Subject: fs: ntfs: disable interrupt only on !RT
+
+On Sat, 2007-10-27 at 11:44 +0200, Ingo Molnar wrote:
+> * Nick Piggin <nickpiggin@yahoo.com.au> wrote:
+>
+> > > [10138.175796] [<c0105de3>] show_trace+0x12/0x14
+> > > [10138.180291] [<c0105dfb>] dump_stack+0x16/0x18
+> > > [10138.184769] [<c011609f>] native_smp_call_function_mask+0x138/0x13d
+> > > [10138.191117] [<c0117606>] smp_call_function+0x1e/0x24
+> > > [10138.196210] [<c012f85c>] on_each_cpu+0x25/0x50
+> > > [10138.200807] [<c0115c74>] flush_tlb_all+0x1e/0x20
+> > > [10138.205553] [<c016caaf>] kmap_high+0x1b6/0x417
+> > > [10138.210118] [<c011ec88>] kmap+0x4d/0x4f
+> > > [10138.214102] [<c026a9d8>] ntfs_end_buffer_async_read+0x228/0x2f9
+> > > [10138.220163] [<c01a0e9e>] end_bio_bh_io_sync+0x26/0x3f
+> > > [10138.225352] [<c01a2b09>] bio_endio+0x42/0x6d
+> > > [10138.229769] [<c02c2a08>] __end_that_request_first+0x115/0x4ac
+> > > [10138.235682] [<c02c2da7>] end_that_request_chunk+0x8/0xa
+> > > [10138.241052] [<c0365943>] ide_end_request+0x55/0x10a
+> > > [10138.246058] [<c036dae3>] ide_dma_intr+0x6f/0xac
+> > > [10138.250727] [<c0366d83>] ide_intr+0x93/0x1e0
+> > > [10138.255125] [<c015afb4>] handle_IRQ_event+0x5c/0xc9
+> >
+> > Looks like ntfs is kmap()ing from interrupt context. Should be using
+> > kmap_atomic instead, I think.
+>
+> it's not atomic interrupt context but irq thread context - and -rt
+> remaps kmap_atomic() to kmap() internally.
+
+Hm. Looking at the change to mm/bounce.c, perhaps I should do this
+instead?
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ fs/ntfs/aops.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/fs/ntfs/aops.c
+===================================================================
+--- linux-stable.orig/fs/ntfs/aops.c
++++ linux-stable/fs/ntfs/aops.c
+@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(s
+ recs = PAGE_CACHE_SIZE / rec_size;
+ /* Should have been verified before we got here... */
+ BUG_ON(!recs);
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ kaddr = kmap_atomic(page);
+ for (i = 0; i < recs; i++)
+ post_read_mst_fixup((NTFS_RECORD*)(kaddr +
+ i * rec_size), rec_size);
+ kunmap_atomic(kaddr);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ flush_dcache_page(page);
+ if (likely(page_uptodate && !PageError(page)))
+ SetPageUptodate(page);
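The _nort variants follow the same pattern: real irq-off on mainline, and only
a flags save on RT, where this end_io path already runs in thread context.
Assumed definitions, as a sketch:

	/* Sketch -- assumed definitions of the local_irq *_nort helpers. */
	#ifdef CONFIG_PREEMPT_RT_FULL
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	(void)(flags)
	#else
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	#endif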
diff --git a/fs-replace-bh_uptodate_lock-for-rt.patch b/fs-replace-bh_uptodate_lock-for-rt.patch
new file mode 100644
index 0000000..7e5b393
--- /dev/null
+++ b/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -0,0 +1,167 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Mar 2011 09:18:52 +0100
+Subject: buffer_head: Replace bh_uptodate_lock for -rt
+
+Wrap the bit_spin_lock calls into a separate inline and add the RT
+replacements with a real spinlock.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ fs/buffer.c | 21 +++++++--------------
+ fs/ntfs/aops.c | 10 +++-------
+ include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++
+ 3 files changed, 44 insertions(+), 21 deletions(-)
+
+Index: linux-stable/fs/buffer.c
+===================================================================
+--- linux-stable.orig/fs/buffer.c
++++ linux-stable/fs/buffer.c
+@@ -281,8 +281,7 @@ static void end_buffer_async_read(struct
+ * decide that the page is now completely done.
+ */
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ flags = bh_uptodate_lock_irqsave(first);
+ clear_buffer_async_read(bh);
+ unlock_buffer(bh);
+ tmp = bh;
+@@ -295,8 +294,7 @@ static void end_buffer_async_read(struct
+ }
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(first, flags);
+
+ /*
+ * If none of the buffers had errors and they are all
+@@ -308,9 +306,7 @@ static void end_buffer_async_read(struct
+ return;
+
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
+- return;
++ bh_uptodate_unlock_irqrestore(first, flags);
+ }
+
+ /*
+@@ -344,8 +340,7 @@ void end_buffer_async_write(struct buffe
+ }
+
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ flags = bh_uptodate_lock_irqsave(first);
+
+ clear_buffer_async_write(bh);
+ unlock_buffer(bh);
+@@ -357,15 +352,12 @@ void end_buffer_async_write(struct buffe
+ }
+ tmp = tmp->b_this_page;
+ }
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(first, flags);
+ end_page_writeback(page);
+ return;
+
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
+- return;
++ bh_uptodate_unlock_irqrestore(first, flags);
+ }
+ EXPORT_SYMBOL(end_buffer_async_write);
+
+@@ -3177,6 +3169,7 @@ struct buffer_head *alloc_buffer_head(gf
+ struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
+ if (ret) {
+ INIT_LIST_HEAD(&ret->b_assoc_buffers);
++ buffer_head_init_locks(ret);
+ preempt_disable();
+ __this_cpu_inc(bh_accounting.nr);
+ recalc_bh_state();
+Index: linux-stable/fs/ntfs/aops.c
+===================================================================
+--- linux-stable.orig/fs/ntfs/aops.c
++++ linux-stable/fs/ntfs/aops.c
+@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s
+ "0x%llx.", (unsigned long long)bh->b_blocknr);
+ }
+ first = page_buffers(page);
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
++ flags = bh_uptodate_lock_irqsave(first);
+ clear_buffer_async_read(bh);
+ unlock_buffer(bh);
+ tmp = bh;
+@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s
+ }
+ tmp = tmp->b_this_page;
+ } while (tmp != bh);
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(first, flags);
+ /*
+ * If none of the buffers had errors then we can set the page uptodate,
+ * but we first have to perform the post read mst fixups, if the
+@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s
+ unlock_page(page);
+ return;
+ still_busy:
+- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+- local_irq_restore(flags);
+- return;
++ bh_uptodate_unlock_irqrestore(first, flags);
+ }
+
+ /**
+Index: linux-stable/include/linux/buffer_head.h
+===================================================================
+--- linux-stable.orig/include/linux/buffer_head.h
++++ linux-stable/include/linux/buffer_head.h
+@@ -72,8 +72,42 @@ struct buffer_head {
+ struct address_space *b_assoc_map; /* mapping this buffer is
+ associated with */
+ atomic_t b_count; /* users using this buffer_head */
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spinlock_t b_uptodate_lock;
++#endif
+ };
+
++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
++{
++ unsigned long flags;
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++ local_irq_save(flags);
++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
++#else
++ spin_lock_irqsave(&bh->b_uptodate_lock, flags);
++#endif
++ return flags;
++}
++
++static inline void
++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
++{
++#ifndef CONFIG_PREEMPT_RT_BASE
++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
++ local_irq_restore(flags);
++#else
++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
++#endif
++}
++
++static inline void buffer_head_init_locks(struct buffer_head *bh)
++{
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spin_lock_init(&bh->b_uptodate_lock);
++#endif
++}
++
+ /*
+ * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
+ * and buffer_foo() functions.
diff --git a/ftrace-crap.patch b/ftrace-crap.patch
new file mode 100644
index 0000000..70a9227
--- /dev/null
+++ b/ftrace-crap.patch
@@ -0,0 +1,92 @@
+Subject: ftrace-crap.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 09 Sep 2011 16:55:53 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/trace/trace.c | 26 ++++++++++++++++++++++++--
+ kernel/trace/trace.h | 1 -
+ 2 files changed, 24 insertions(+), 3 deletions(-)
+
+Index: linux-stable/kernel/trace/trace.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace.c
++++ linux-stable/kernel/trace/trace.c
+@@ -402,11 +402,13 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
+ */
+ void trace_wake_up(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ const unsigned long delay = msecs_to_jiffies(2);
+
+ if (trace_flags & TRACE_ITER_BLOCK)
+ return;
+ schedule_delayed_work(&wakeup_work, delay);
++#endif
+ }
+
+ static int __init set_buf_size(char *str)
+@@ -756,6 +758,12 @@ update_max_tr_single(struct trace_array
+ }
+ #endif /* CONFIG_TRACER_MAX_TRACE */
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++static void default_wait_pipe(struct trace_iterator *iter);
++#else
++#define default_wait_pipe poll_wait_pipe
++#endif
++
+ /**
+ * register_tracer - register a tracer with the ftrace system.
+ * @type - the plugin for the tracer
+@@ -3365,6 +3373,7 @@ static int tracing_release_pipe(struct i
+ return 0;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static unsigned int
+ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
+ {
+@@ -3386,8 +3395,7 @@ tracing_poll_pipe(struct file *filp, pol
+ }
+ }
+
+-
+-void default_wait_pipe(struct trace_iterator *iter)
++static void default_wait_pipe(struct trace_iterator *iter)
+ {
+ DEFINE_WAIT(wait);
+
+@@ -3398,6 +3406,20 @@ void default_wait_pipe(struct trace_iter
+
+ finish_wait(&trace_wait, &wait);
+ }
++#else
++static unsigned int
++tracing_poll_pipe(struct file *filp, poll_table *poll_table)
++{
++ struct trace_iterator *iter = filp->private_data;
++
++ if ((trace_flags & TRACE_ITER_BLOCK) || !trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ poll_wait_pipe(iter);
++ if (!trace_empty(iter))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++#endif
+
+ /*
+ * This is a make-shift waitqueue.
+Index: linux-stable/kernel/trace/trace.h
+===================================================================
+--- linux-stable.orig/kernel/trace/trace.h
++++ linux-stable/kernel/trace/trace.h
+@@ -367,7 +367,6 @@ void trace_init_global_iter(struct trace
+
+ void tracing_iter_reset(struct trace_iterator *iter, int cpu);
+
+-void default_wait_pipe(struct trace_iterator *iter);
+ void poll_wait_pipe(struct trace_iterator *iter);
+
+ void ftrace(struct trace_array *tr,
diff --git a/ftrace-migrate-disable-tracing.patch b/ftrace-migrate-disable-tracing.patch
new file mode 100644
index 0000000..cc073a9
--- /dev/null
+++ b/ftrace-migrate-disable-tracing.patch
@@ -0,0 +1,81 @@
+Subject: ftrace-migrate-disable-tracing.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 21:56:42 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/ftrace_event.h | 3 ++-
+ kernel/trace/trace.c | 9 ++++++---
+ kernel/trace/trace_events.c | 1 +
+ kernel/trace/trace_output.c | 5 +++++
+ 4 files changed, 14 insertions(+), 4 deletions(-)
+
+Index: linux-stable/include/linux/ftrace_event.h
+===================================================================
+--- linux-stable.orig/include/linux/ftrace_event.h
++++ linux-stable/include/linux/ftrace_event.h
+@@ -49,7 +49,8 @@ struct trace_entry {
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
+- int padding;
++ unsigned short migrate_disable;
++ unsigned short padding;
+ };
+
+ #define FTRACE_MAX_EVENT \
+Index: linux-stable/kernel/trace/trace.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace.c
++++ linux-stable/kernel/trace/trace.c
+@@ -1155,6 +1155,8 @@ tracing_generic_entry_update(struct trac
+ ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+ ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
++
++ entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+
+@@ -1980,9 +1982,10 @@ static void print_lat_help_header(struct
+ seq_puts(m, "# | / _----=> need-resched \n");
+ seq_puts(m, "# || / _---=> hardirq/softirq \n");
+ seq_puts(m, "# ||| / _--=> preempt-depth \n");
+- seq_puts(m, "# |||| / delay \n");
+- seq_puts(m, "# cmd pid ||||| time | caller \n");
+- seq_puts(m, "# \\ / ||||| \\ | / \n");
++ seq_puts(m, "# |||| / _--=> migrate-disable\n");
++ seq_puts(m, "# ||||| / delay \n");
++ seq_puts(m, "# cmd pid |||||| time | caller \n");
++ seq_puts(m, "# \\ / ||||| \\ | / \n");
+ }
+
+ static void print_event_info(struct trace_array *tr, struct seq_file *m)
+Index: linux-stable/kernel/trace/trace_events.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace_events.c
++++ linux-stable/kernel/trace/trace_events.c
+@@ -116,6 +116,7 @@ static int trace_define_common_fields(vo
+ __common_field(unsigned char, flags);
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
++ __common_field(unsigned short, migrate_disable);
+ __common_field(int, padding);
+
+ return ret;
+Index: linux-stable/kernel/trace/trace_output.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace_output.c
++++ linux-stable/kernel/trace/trace_output.c
+@@ -593,6 +593,11 @@ int trace_print_lat_fmt(struct trace_seq
+ else
+ ret = trace_seq_putc(s, '.');
+
++ if (entry->migrate_disable)
++ ret = trace_seq_printf(s, "%x", entry->migrate_disable);
++ else
++ ret = trace_seq_putc(s, '.');
++
+ return ret;
+ }
+
diff --git a/futex-requeue-pi-fix.patch b/futex-requeue-pi-fix.patch
new file mode 100644
index 0000000..d3e44b0
--- /dev/null
+++ b/futex-requeue-pi-fix.patch
@@ -0,0 +1,118 @@
+Subject: futex: Fix bug on when a requeued RT task times out
+From: Steven Rostedt <rostedt@goodmis.org>
+
+Requeue with timeout causes a bug with PREEMPT_RT_FULL.
+
+The bug comes from a timed out condition.
+
+
+ TASK 1 TASK 2
+ ------ ------
+ futex_wait_requeue_pi()
+ futex_wait_queue_me()
+ <timed out>
+
+ double_lock_hb();
+
+ raw_spin_lock(pi_lock);
+ if (current->pi_blocked_on) {
+ } else {
+ current->pi_blocked_on = PI_WAKE_INPROGRESS;
+ run_spin_unlock(pi_lock);
+ spin_lock(hb->lock); <-- blocked!
+
+
+ plist_for_each_entry_safe(this) {
+ rt_mutex_start_proxy_lock();
+ task_blocks_on_rt_mutex();
+ BUG_ON(task->pi_blocked_on)!!!!
+
+The BUG_ON() actually has a check for PI_WAKE_INPROGRESS, but the
+problem is that, after TASK 1 sets PI_WAKE_INPROGRESS, it then tries to
+grab the hb->lock, which it fails to do so. As the hb->lock is a mutex,
+it will block and set the "pi_blocked_on" to the hb->lock.
+
+When TASK 2 goes to requeue it, the check for PI_WAKE_INPROGRESS fails
+because TASK 1's pi_blocked_on is no longer set to that, but instead,
+set to the hb->lock.
+
+The fix:
+
+When calling rt_mutex_start_proxy_lock() a check is made to see
+if the proxy task's pi_blocked_on is set. If so, exit out early.
+Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies
+the proxy task that it is being requeued, and will handle things
+appropriately.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+
+---
+ kernel/rtmutex.c | 32 +++++++++++++++++++++++++++++++-
+ kernel/rtmutex_common.h | 1 +
+ 2 files changed, 32 insertions(+), 1 deletion(-)
+
+Index: linux-stable/kernel/rtmutex.c
+===================================================================
+--- linux-stable.orig/kernel/rtmutex.c
++++ linux-stable/kernel/rtmutex.c
+@@ -69,7 +69,8 @@ static void fixup_rt_mutex_waiters(struc
+
+ static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+ {
+- return waiter && waiter != PI_WAKEUP_INPROGRESS;
++ return waiter && waiter != PI_WAKEUP_INPROGRESS &&
++ waiter != PI_REQUEUE_INPROGRESS;
+ }
+
+ /*
+@@ -1002,6 +1003,35 @@ int rt_mutex_start_proxy_lock(struct rt_
+ return 1;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * In PREEMPT_RT there's an added race.
++ * If the task, that we are about to requeue, times out,
++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
++ * to skip this task. But right after the task sets
++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
++ * This will replace the PI_WAKEUP_INPROGRESS with the actual
++ * lock that it blocks on. We *must not* place this task
++ * on this proxy lock in that case.
++ *
++ * To prevent this race, we first take the task's pi_lock
++ * and check if it has updated its pi_blocked_on. If it has,
++ * we assume that it woke up and we return -EAGAIN.
++ * Otherwise, we set the task's pi_blocked_on to
++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
++ * it will know that we are in the process of requeuing it.
++ */
++ raw_spin_lock_irq(&task->pi_lock);
++ if (task->pi_blocked_on) {
++ raw_spin_unlock_irq(&task->pi_lock);
++ raw_spin_unlock(&lock->wait_lock);
++ return -EAGAIN;
++ }
++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
++ raw_spin_unlock_irq(&task->pi_lock);
++#endif
++
+ ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+
+ if (ret && !rt_mutex_owner(lock)) {
+Index: linux-stable/kernel/rtmutex_common.h
+===================================================================
+--- linux-stable.orig/kernel/rtmutex_common.h
++++ linux-stable/kernel/rtmutex_common.h
+@@ -104,6 +104,7 @@ static inline struct task_struct *rt_mut
+ * PI-futex support (proxy locking functions, etc.):
+ */
+ #define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2)
+
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
diff --git a/generic-cmpxchg-use-raw-local-irq.patch b/generic-cmpxchg-use-raw-local-irq.patch
new file mode 100644
index 0000000..3750dda
--- /dev/null
+++ b/generic-cmpxchg-use-raw-local-irq.patch
@@ -0,0 +1,49 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:30 -0500
+Subject: generic: Use raw local irq variant for generic cmpxchg
+
+No point in tracing those.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/asm-generic/cmpxchg-local.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+Index: linux-stable/include/asm-generic/cmpxchg-local.h
+===================================================================
+--- linux-stable.orig/include/asm-generic/cmpxchg-local.h
++++ linux-stable/include/asm-generic/cmpxchg-local.h
+@@ -21,7 +21,7 @@ static inline unsigned long __cmpxchg_lo
+ if (size == 8 && sizeof(unsigned long) != 8)
+ wrong_size_cmpxchg(ptr);
+
+- local_irq_save(flags);
++ raw_local_irq_save(flags);
+ switch (size) {
+ case 1: prev = *(u8 *)ptr;
+ if (prev == old)
+@@ -42,7 +42,7 @@ static inline unsigned long __cmpxchg_lo
+ default:
+ wrong_size_cmpxchg(ptr);
+ }
+- local_irq_restore(flags);
++ raw_local_irq_restore(flags);
+ return prev;
+ }
+
+@@ -55,11 +55,11 @@ static inline u64 __cmpxchg64_local_gene
+ u64 prev;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ raw_local_irq_save(flags);
+ prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = new;
+- local_irq_restore(flags);
++ raw_local_irq_restore(flags);
+ return prev;
+ }
+
diff --git a/genirq-add-default-mask-cmdline-option.patch b/genirq-add-default-mask-cmdline-option.patch
new file mode 100644
index 0000000..a6104c2
--- /dev/null
+++ b/genirq-add-default-mask-cmdline-option.patch
@@ -0,0 +1,70 @@
+Subject: genirq: Add default affinity mask command line option
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 25 May 2012 16:59:47 +0200
+
+If we isolate CPUs, then we don't want random device interrupts on
+them. Even w/o the user space irq balancer enabled we can end up with
+irqs on non boot cpus.
+
+Allow to restrict the default irq affinity mask.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ Documentation/kernel-parameters.txt | 9 +++++++++
+ kernel/irq/irqdesc.c | 21 +++++++++++++++++++--
+ 2 files changed, 28 insertions(+), 2 deletions(-)
+
+Index: linux-stable/Documentation/kernel-parameters.txt
+===================================================================
+--- linux-stable.orig/Documentation/kernel-parameters.txt
++++ linux-stable/Documentation/kernel-parameters.txt
+@@ -1157,6 +1157,15 @@ bytes respectively. Such letter suffixes
+ See comment before ip2_setup() in
+ drivers/char/ip2/ip2base.c.
+
++ irqaffinity= [SMP] Set the default irq affinity mask
++ Format:
++ <cpu number>,...,<cpu number>
++ or
++ <cpu number>-<cpu number>
++ (must be a positive range in ascending order)
++ or a mixture
++ <cpu number>,...,<cpu number>-<cpu number>
++
+ irqfixup [HW]
+ When an interrupt is not handled search all handlers
+ for it. Intended to get systems with badly broken
+Index: linux-stable/kernel/irq/irqdesc.c
+===================================================================
+--- linux-stable.orig/kernel/irq/irqdesc.c
++++ linux-stable/kernel/irq/irqdesc.c
+@@ -23,10 +23,27 @@
+ static struct lock_class_key irq_desc_lock_class;
+
+ #if defined(CONFIG_SMP)
++static int __init irq_affinity_setup(char *str)
++{
++ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
++ cpulist_parse(str, irq_default_affinity);
++ /*
++ * Set at least the boot cpu. We don't want to end up with
++ * bugreports caused by random comandline masks
++ */
++ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
++ return 1;
++}
++__setup("irqaffinity=", irq_affinity_setup);
++
+ static void __init init_irq_default_affinity(void)
+ {
+- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+- cpumask_setall(irq_default_affinity);
++#ifdef CONFIG_CPUMASK_OFFSTACK
++ if (!irq_default_affinity)
++ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
++#endif
++ if (cpumask_empty(irq_default_affinity))
++ cpumask_setall(irq_default_affinity);
+ }
+ #else
+ static void __init init_irq_default_affinity(void)
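As a usage note (hypothetical example, not part of the patch): with CPUs 1-3
reserved for RT work via isolcpus=, the new option keeps device interrupts on
the housekeeping CPU by default, while individual irqs can still be retargeted
later through /proc/irq/<nr>/smp_affinity:

	isolcpus=1-3 irqaffinity=0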
diff --git a/genirq-disable-irqpoll-on-rt.patch b/genirq-disable-irqpoll-on-rt.patch
new file mode 100644
index 0000000..4531923
--- /dev/null
+++ b/genirq-disable-irqpoll-on-rt.patch
@@ -0,0 +1,41 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:57 -0500
+Subject: genirq: disable irqpoll on -rt
+
+Creates long latencies for no value
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/irq/spurious.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+Index: linux-stable/kernel/irq/spurious.c
+===================================================================
+--- linux-stable.orig/kernel/irq/spurious.c
++++ linux-stable/kernel/irq/spurious.c
+@@ -341,6 +341,11 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
+
+ static int __init irqfixup_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ printk(KERN_WARNING "irqfixup boot option not supported "
++ "w/ CONFIG_PREEMPT_RT\n");
++ return 1;
++#endif
+ irqfixup = 1;
+ printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
+ printk(KERN_WARNING "This may impact system performance.\n");
+@@ -353,6 +358,11 @@ module_param(irqfixup, int, 0644);
+
+ static int __init irqpoll_setup(char *str)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ printk(KERN_WARNING "irqpoll boot option not supported "
++ "w/ CONFIG_PREEMPT_RT\n");
++ return 1;
++#endif
+ irqfixup = 2;
+ printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
+ "enabled\n");
diff --git a/genirq-force-threading.patch b/genirq-force-threading.patch
new file mode 100644
index 0000000..0c60961
--- /dev/null
+++ b/genirq-force-threading.patch
@@ -0,0 +1,50 @@
+Subject: genirq-force-threading.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 03 Apr 2011 11:57:29 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/interrupt.h | 8 ++++++--
+ kernel/irq/manage.c | 2 ++
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+Index: linux-stable/include/linux/interrupt.h
+===================================================================
+--- linux-stable.orig/include/linux/interrupt.h
++++ linux-stable/include/linux/interrupt.h
+@@ -388,9 +388,13 @@ static inline int disable_irq_wake(unsig
+
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
+-extern bool force_irqthreads;
++# ifndef CONFIG_PREEMPT_RT_BASE
++ extern bool force_irqthreads;
++# else
++# define force_irqthreads (true)
++# endif
+ #else
+-#define force_irqthreads (0)
++#define force_irqthreads (false)
+ #endif
+
+ #ifndef __ARCH_SET_SOFTIRQ_PENDING
+Index: linux-stable/kernel/irq/manage.c
+===================================================================
+--- linux-stable.orig/kernel/irq/manage.c
++++ linux-stable/kernel/irq/manage.c
+@@ -21,6 +21,7 @@
+ #include "internals.h"
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++# ifndef CONFIG_PREEMPT_RT_BASE
+ __read_mostly bool force_irqthreads;
+
+ static int __init setup_forced_irqthreads(char *arg)
+@@ -29,6 +30,7 @@ static int __init setup_forced_irqthread
+ return 0;
+ }
+ early_param("threadirqs", setup_forced_irqthreads);
++# endif
+ #endif
+
+ /**
diff --git a/genirq-nodebug-shirq.patch b/genirq-nodebug-shirq.patch
new file mode 100644
index 0000000..b2647e1
--- /dev/null
+++ b/genirq-nodebug-shirq.patch
@@ -0,0 +1,22 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 18 Mar 2011 10:22:04 +0100
+Subject: genirq: Disable DEBUG_SHIRQ for rt
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ lib/Kconfig.debug | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/lib/Kconfig.debug
+===================================================================
+--- linux-stable.orig/lib/Kconfig.debug
++++ linux-stable/lib/Kconfig.debug
+@@ -164,7 +164,7 @@ config DEBUG_KERNEL
+
+ config DEBUG_SHIRQ
+ bool "Debug shared IRQ handlers"
+- depends on DEBUG_KERNEL && GENERIC_HARDIRQS
++ depends on DEBUG_KERNEL && GENERIC_HARDIRQS && !PREEMPT_RT_BASE
+ help
+ Enable this to generate a spurious interrupt as soon as a shared
+ interrupt handler is registered, and just before one is deregistered.
diff --git a/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch b/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch
new file mode 100644
index 0000000..0090a77
--- /dev/null
+++ b/harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch
@@ -0,0 +1,37 @@
+Subject: hardirq.h: Define softirq_count() as OUL to kill build warning
+From: Yong Zhang <yong.zhang0@gmail.com>
+Date: Thu, 13 Oct 2011 17:19:09 +0800
+
+kernel/lockdep.c: In function ‘print_bad_irq_dependency’:
+kernel/lockdep.c:1476:3: warning: format ‘%lu’ expects type ‘long unsigned int’, but argument 7 has type ‘unsigned int’
+kernel/lockdep.c: In function ‘print_usage_bug’:
+kernel/lockdep.c:2193:3: warning: format ‘%lu’ expects type ‘long unsigned int’, but argument 7 has type ‘unsigned int’
+
+kernel/lockdep.i show this:
+ printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
+ curr->comm, task_pid_nr(curr),
+ curr->hardirq_context, ((current_thread_info()->preempt_count) & (((1UL << (10))-1) << ((0 + 8) + 8))) >> ((0 + 8) + 8),
+ curr->softirq_context, (0U) >> (0 + 8),
+ curr->hardirqs_enabled,
+ curr->softirqs_enabled);
+
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Link: http://lkml.kernel.org/r/20111013091909.GA32739@zhy
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/hardirq.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/include/linux/hardirq.h
+===================================================================
+--- linux-stable.orig/include/linux/hardirq.h
++++ linux-stable/include/linux/hardirq.h
+@@ -84,7 +84,7 @@
+ # define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+ # define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+ #else
+-# define softirq_count() (0U)
++# define softirq_count() (0UL)
+ extern int in_serving_softirq(void);
+ #endif
+
diff --git a/hotplug-call-cpu_unplug_begin-a-little-early.patch b/hotplug-call-cpu_unplug_begin-a-little-early.patch
new file mode 100644
index 0000000..d9be584
--- /dev/null
+++ b/hotplug-call-cpu_unplug_begin-a-little-early.patch
@@ -0,0 +1,61 @@
+Subject: hotplug: Call cpu_unplug_begin() before DOWN_PREPARE
+From: Yong Zhang <yong.zhang0@gmail.com>
+Date: Sun, 16 Oct 2011 18:56:44 +0800
+
+cpu_unplug_begin() should be called before CPU_DOWN_PREPARE, because
+at CPU_DOWN_PREPARE cpu_active is cleared and sched_domain is
+rebuilt. Otherwise the 'sync_unplug' thread will be running on the cpu
+on which it's created and not bound on the cpu which is about to go
+down.
+
+I found this via an incorrect smp_processor_id() warning triggered in
+sync_unplug/1; the trace below shows it:
+(echo 1 > /sys/device/system/cpu/cpu1/online)
+ bash-1664 [000] 83.136620: _cpu_down: Bind sync_unplug to cpu 1
+ bash-1664 [000] 83.136623: sched_wait_task: comm=sync_unplug/1 pid=1724 prio=120
+ bash-1664 [000] 83.136624: _cpu_down: Wake sync_unplug
+ bash-1664 [000] 83.136629: sched_wakeup: comm=sync_unplug/1 pid=1724 prio=120 success=1 target_cpu=000
+
+Wants to be folded back....
+
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Link: http://lkml.kernel.org/r/1318762607-2261-3-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/cpu.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+Index: linux-stable/kernel/cpu.c
+===================================================================
+--- linux-stable.orig/kernel/cpu.c
++++ linux-stable/kernel/cpu.c
+@@ -383,22 +383,20 @@ static int __ref _cpu_down(unsigned int
+ return -EBUSY;
+ }
+
+- err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
++ cpu_hotplug_begin();
++ err = cpu_unplug_begin(cpu);
+ if (err) {
+- nr_calls--;
+- __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
+- printk("%s: attempt to take down CPU %u failed\n",
+- __func__, cpu);
++ printk("cpu_unplug_begin(%d) failed\n", cpu);
+ goto out_cancel;
+ }
+
+- cpu_hotplug_begin();
+- err = cpu_unplug_begin(cpu);
++ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+ if (err) {
+ nr_calls--;
+ __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
+- printk("cpu_unplug_begin(%d) failed\n", cpu);
+- goto out_cancel;
++ printk("%s: attempt to take down CPU %u failed\n",
++ __func__, cpu);
++ goto out_release;
+ }
+
+ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
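In short, after this patch the ordering in _cpu_down() looks roughly like the sketch below (condensed from the hunk above, error paths abbreviated): the sync_unplug thread is created and bound while cpu_active is still set, and only then are the DOWN_PREPARE notifiers run.

    cpu_hotplug_begin();
    err = cpu_unplug_begin(cpu);        /* bind sync_unplug/<cpu> while cpu_active is still set */
    if (err)
            goto out_cancel;

    err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);  /* may clear cpu_active */
    if (err)
            goto out_release;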
diff --git a/hotplug-light-get-online-cpus.patch b/hotplug-light-get-online-cpus.patch
new file mode 100644
index 0000000..9f0b4dc
--- /dev/null
+++ b/hotplug-light-get-online-cpus.patch
@@ -0,0 +1,212 @@
+Subject: hotplug: Lightweight get online cpus
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 15 Jun 2011 12:36:06 +0200
+
+get_online_cpus() is a heavyweight function which involves a global
+mutex. migrate_disable() wants a simpler construct which only prevents
+the current CPU from going down while a task is in a migrate-disabled section.
+
+Implement a per cpu lockless mechanism, which serializes only in the
+real unplug case on a global mutex. That serialization affects only
+tasks on the cpu which should be brought down.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/cpu.h | 4 +
+ kernel/cpu.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 2 files changed, 128 insertions(+), 3 deletions(-)
+
+Index: linux-stable/include/linux/cpu.h
+===================================================================
+--- linux-stable.orig/include/linux/cpu.h
++++ linux-stable/include/linux/cpu.h
+@@ -175,6 +175,8 @@ extern struct bus_type cpu_subsys;
+
+ extern void get_online_cpus(void);
+ extern void put_online_cpus(void);
++extern void pin_current_cpu(void);
++extern void unpin_current_cpu(void);
+ #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+ #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+ #define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+@@ -198,6 +200,8 @@ static inline void cpu_hotplug_driver_un
+
+ #define get_online_cpus() do { } while (0)
+ #define put_online_cpus() do { } while (0)
++static inline void pin_current_cpu(void) { }
++static inline void unpin_current_cpu(void) { }
+ #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ /* These aren't inline functions due to a GCC bug. */
+ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
+Index: linux-stable/kernel/cpu.c
+===================================================================
+--- linux-stable.orig/kernel/cpu.c
++++ linux-stable/kernel/cpu.c
+@@ -63,6 +63,102 @@ static struct {
+ .refcount = 0,
+ };
+
++struct hotplug_pcp {
++ struct task_struct *unplug;
++ int refcount;
++ struct completion synced;
++};
++
++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
++
++/**
++ * pin_current_cpu - Prevent the current cpu from being unplugged
++ *
++ * Lightweight version of get_online_cpus() to prevent cpu from being
++ * unplugged when code runs in a migration disabled region.
++ *
++ * Must be called with preemption disabled (preempt_count = 1)!
++ */
++void pin_current_cpu(void)
++{
++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
++
++retry:
++ if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
++ hp->unplug == current || (current->flags & PF_STOMPER)) {
++ hp->refcount++;
++ return;
++ }
++ preempt_enable();
++ mutex_lock(&cpu_hotplug.lock);
++ mutex_unlock(&cpu_hotplug.lock);
++ preempt_disable();
++ goto retry;
++}
++
++/**
++ * unpin_current_cpu - Allow unplug of current cpu
++ *
++ * Must be called with preemption or interrupts disabled!
++ */
++void unpin_current_cpu(void)
++{
++ struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
++
++ WARN_ON(hp->refcount <= 0);
++
++ /* This is safe. sync_unplug_thread is pinned to this cpu */
++ if (!--hp->refcount && hp->unplug && hp->unplug != current &&
++ !(current->flags & PF_STOMPER))
++ wake_up_process(hp->unplug);
++}
++
++/*
++ * FIXME: Is this really correct under all circumstances ?
++ */
++static int sync_unplug_thread(void *data)
++{
++ struct hotplug_pcp *hp = data;
++
++ preempt_disable();
++ hp->unplug = current;
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (hp->refcount) {
++ schedule_preempt_disabled();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++ set_current_state(TASK_RUNNING);
++ preempt_enable();
++ complete(&hp->synced);
++ return 0;
++}
++
++/*
++ * Start the sync_unplug_thread on the target cpu and wait for it to
++ * complete.
++ */
++static int cpu_unplug_begin(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++ struct task_struct *tsk;
++
++ init_completion(&hp->synced);
++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
++ if (IS_ERR(tsk))
++ return (PTR_ERR(tsk));
++ kthread_bind(tsk, cpu);
++ wake_up_process(tsk);
++ wait_for_completion(&hp->synced);
++ return 0;
++}
++
++static void cpu_unplug_done(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ hp->unplug = NULL;
++}
++
+ void get_online_cpus(void)
+ {
+ might_sleep();
+@@ -256,13 +352,14 @@ static int __ref take_cpu_down(void *_pa
+ /* Requires cpu_add_remove_lock to be held */
+ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ {
+- int err, nr_calls = 0;
++ int mycpu, err, nr_calls = 0;
+ void *hcpu = (void *)(long)cpu;
+ unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+ struct take_cpu_down_param tcd_param = {
+ .mod = mod,
+ .hcpu = hcpu,
+ };
++ cpumask_var_t cpumask;
+
+ if (num_online_cpus() == 1)
+ return -EBUSY;
+@@ -270,7 +367,20 @@ static int __ref _cpu_down(unsigned int
+ if (!cpu_online(cpu))
+ return -EINVAL;
+
+- cpu_hotplug_begin();
++ /* Move the downtaker off the unplug cpu */
++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
++ return -ENOMEM;
++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
++ set_cpus_allowed_ptr(current, cpumask);
++ free_cpumask_var(cpumask);
++ preempt_disable();
++ mycpu = smp_processor_id();
++ if (mycpu == cpu) {
++ printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
++ preempt_enable();
++ return -EBUSY;
++ }
++ preempt_enable();
+
+ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+ if (err) {
+@@ -278,7 +388,16 @@ static int __ref _cpu_down(unsigned int
+ __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
+ printk("%s: attempt to take down CPU %u failed\n",
+ __func__, cpu);
+- goto out_release;
++ goto out_cancel;
++ }
++
++ cpu_hotplug_begin();
++ err = cpu_unplug_begin(cpu);
++ if (err) {
++ nr_calls--;
++ __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
++ printk("cpu_unplug_begin(%d) failed\n", cpu);
++ goto out_cancel;
+ }
+
+ err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
+@@ -309,6 +428,8 @@ static int __ref _cpu_down(unsigned int
+ check_for_tasks(cpu);
+
+ out_release:
++ cpu_unplug_done(cpu);
++out_cancel:
+ cpu_hotplug_done();
+ if (!err)
+ cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
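A sketch of the intended caller pattern (illustrative only; the actual migrate_disable()/migrate_enable() wiring is done in separate patches of this queue): the pin is taken with preemption already disabled and dropped before preemption is re-enabled, so the unplug thread only has to wait for tasks genuinely pinned to the dying CPU.

    preempt_disable();
    pin_current_cpu();      /* blocks a concurrent unplug of this CPU   */
    /* ... per-CPU work that must not migrate ... */
    unpin_current_cpu();    /* wakes sync_unplug/<cpu> if it is waiting */
    preempt_enable();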
diff --git a/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
new file mode 100644
index 0000000..73e14f1
--- /dev/null
+++ b/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
@@ -0,0 +1,26 @@
+Subject: hotplug: sync_unplug: No "\n" in task name
+From: Yong Zhang <yong.zhang0@gmail.com>
+Date: Sun, 16 Oct 2011 18:56:43 +0800
+
+Otherwise the output will look a little odd.
+
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Link: http://lkml.kernel.org/r/1318762607-2261-2-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/cpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/kernel/cpu.c
+===================================================================
+--- linux-stable.orig/kernel/cpu.c
++++ linux-stable/kernel/cpu.c
+@@ -143,7 +143,7 @@ static int cpu_unplug_begin(unsigned int
+ struct task_struct *tsk;
+
+ init_completion(&hp->synced);
+- tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d\n", cpu);
++ tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+ if (IS_ERR(tsk))
+ return (PTR_ERR(tsk));
+ kthread_bind(tsk, cpu);
diff --git a/hotplug-use-migrate-disable.patch b/hotplug-use-migrate-disable.patch
new file mode 100644
index 0000000..cef7418
--- /dev/null
+++ b/hotplug-use-migrate-disable.patch
@@ -0,0 +1,38 @@
+Subject: hotplug-use-migrate-disable.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 19:35:29 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/cpu.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+Index: linux-stable/kernel/cpu.c
+===================================================================
+--- linux-stable.orig/kernel/cpu.c
++++ linux-stable/kernel/cpu.c
+@@ -375,14 +375,13 @@ static int __ref _cpu_down(unsigned int
+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
+ set_cpus_allowed_ptr(current, cpumask);
+ free_cpumask_var(cpumask);
+- preempt_disable();
++ migrate_disable();
+ mycpu = smp_processor_id();
+ if (mycpu == cpu) {
+ printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
+- preempt_enable();
++ migrate_enable();
+ return -EBUSY;
+ }
+- preempt_enable();
+
+ err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+ if (err) {
+@@ -432,6 +431,7 @@ static int __ref _cpu_down(unsigned int
+ out_release:
+ cpu_unplug_done(cpu);
+ out_cancel:
++ migrate_enable();
+ cpu_hotplug_done();
+ if (!err)
+ cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
diff --git a/hrtimer-add-missing-debug_activate-aid-was-re-announce-3-0-6-rt17.patch b/hrtimer-add-missing-debug_activate-aid-was-re-announce-3-0-6-rt17.patch
new file mode 100644
index 0000000..fa14979
--- /dev/null
+++ b/hrtimer-add-missing-debug_activate-aid-was-re-announce-3-0-6-rt17.patch
@@ -0,0 +1,70 @@
+Subject: hrtimer: Add missing debug_activate() aid [Was: Re: [ANNOUNCE] 3.0.6-rt17]
+From: Yong Zhang <yong.zhang0@gmail.com>
+Date: Thu, 13 Oct 2011 15:52:30 +0800
+
+On Fri, Oct 07, 2011 at 10:25:25AM -0700, Fernando Lopez-Lezcano wrote:
+> On 10/06/2011 06:15 PM, Thomas Gleixner wrote:
+> >Dear RT Folks,
+> >
+> >I'm pleased to announce the 3.0.6-rt17 release.
+>
+> Hi and thanks again. So far this one is not hanging which is very
+> good news. But I still see the hrtimer_fixup_activate warnings I
+> reported for rt16...
+
+Hi Fernando,
+
+I think below patch will smooth your concern?
+
+Thanks,
+Yong
+
+---
+From: Yong Zhang <yong.zhang0@gmail.com>
+Subject: [PATCH -rt] hrtimer: Add missing debug_activate() aid
+
+It will fix below warning, which is also reported by Fernando:
+
+[ 7.616090] ------------[ cut here ]------------
+[ 7.616093] WARNING: at kernel/hrtimer.c:391 hrtimer_fixup_activate+0x27/0x50()
+[ 7.616094] Hardware name: OptiPlex 755
+[ 7.616096] Modules linked in:
+[ 7.616099] Pid: 0, comm: kworker/0:0 Tainted: G W 3.0.6-rt17-00284-g9d73a61 #15
+[ 7.616100] Call Trace:
+[ 7.616103] [<c014d9a2>] warn_slowpath_common+0x72/0xa0
+[ 7.616106] [<c0175417>] ? hrtimer_fixup_activate+0x27/0x50
+[ 7.616109] [<c0175417>] ? hrtimer_fixup_activate+0x27/0x50
+[ 7.616112] [<c014d9f2>] warn_slowpath_null+0x22/0x30
+[ 7.616115] [<c0175417>] hrtimer_fixup_activate+0x27/0x50
+[ 7.616118] [<c03b3ab0>] debug_object_activate+0x100/0x130
+[ 7.616121] [<c0176b96>] ? hrtimer_start_range_ns+0x26/0x30
+[ 7.616123] [<c0175a59>] enqueue_hrtimer+0x19/0x100
+[ 7.616126] [<c0176b96>] ? hrtimer_start_range_ns+0x26/0x30
+[ 7.616129] [<c0176744>] __hrtimer_start_range_ns+0x144/0x540
+[ 7.616132] [<c072705a>] ? _raw_spin_unlock_irqrestore+0x3a/0x80
+[ 7.616136] [<c0176b96>] hrtimer_start_range_ns+0x26/0x30
+[ 7.616139] [<c01852b5>] tick_nohz_restart_sched_tick+0x185/0x1b0
+[ 7.616142] [<c0101878>] cpu_idle+0x98/0xc0
+[ 7.616146] [<c071fcd8>] start_secondary+0x1d3/0x1da
+[ 7.616148] ---[ end trace 0000000000000003 ]---
+
+Reported-by: Fernando Lopez-Lezcano <nando@ccrma.stanford.edu>
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Link: http://lkml.kernel.org/r/20111013075230.GA2740@zhy
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/hrtimer.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/kernel/hrtimer.c
+===================================================================
+--- linux-stable.orig/kernel/hrtimer.c
++++ linux-stable/kernel/hrtimer.c
+@@ -1063,6 +1063,7 @@ int __hrtimer_start_range_ns(struct hrti
+ * remove it again and report a failure. This avoids
+ * stale base->first entries.
+ */
++ debug_deactivate(timer);
+ __remove_hrtimer(timer, new_base,
+ timer->state & HRTIMER_STATE_CALLBACK, 0);
+ }
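A hedged reading of why the one-liner helps, based on the warning above: debugobjects still considers the timer active from its earlier enqueue, so removing the stale rbtree entry and enqueueing it again makes hrtimer_fixup_activate() trip over an already-active object; calling debug_deactivate() first keeps the tracked state in step. The resulting shape of that path:

    /* expired entry is taken off the rbtree before being re-armed */
    debug_deactivate(timer);        /* mark the object inactive for debugobjects */
    __remove_hrtimer(timer, new_base,
                     timer->state & HRTIMER_STATE_CALLBACK, 0);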
diff --git a/hrtimer-fix-reprogram-madness.patch b/hrtimer-fix-reprogram-madness.patch
new file mode 100644
index 0000000..ec68a04
--- /dev/null
+++ b/hrtimer-fix-reprogram-madness.patch
@@ -0,0 +1,42 @@
+Subject: hrtimer-fix-reprogram-madness.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 14 Sep 2011 14:48:43 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/hrtimer.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+Index: linux-stable/kernel/hrtimer.c
+===================================================================
+--- linux-stable.orig/kernel/hrtimer.c
++++ linux-stable/kernel/hrtimer.c
+@@ -1338,7 +1338,11 @@ static void hrtimer_rt_reprogram(int res
+ if (!enqueue_hrtimer(timer, base))
+ return;
+
+- if (hrtimer_reprogram(timer, base))
++#ifndef CONFIG_HIGH_RES_TIMERS
++ }
++#else
++ if (base->cpu_base->hres_active &&
++ hrtimer_reprogram(timer, base))
+ goto requeue;
+
+ } else if (hrtimer_active(timer)) {
+@@ -1347,6 +1351,7 @@ static void hrtimer_rt_reprogram(int res
+ * the event device.
+ */
+ if (&timer->node == base->active.next &&
++ base->cpu_base->hres_active &&
+ hrtimer_reprogram(timer, base))
+ goto requeue;
+ }
+@@ -1359,6 +1364,7 @@ requeue:
+ */
+ __remove_hrtimer(timer, base, timer->state, 0);
+ list_add_tail(&timer->cb_entry, &base->expired);
++#endif
+ }
+
+ /*
diff --git a/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
new file mode 100644
index 0000000..ffd05f8
--- /dev/null
+++ b/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -0,0 +1,430 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:31 -0500
+Subject: hrtimer: fixup hrtimer callback changes for preempt-rt
+
+In preempt-rt we cannot call callbacks which take sleeping locks from
+the timer interrupt context.
+
+Bring back the softirq split for now, until we have fixed the signal
+delivery problem for real.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+---
+ include/linux/hrtimer.h | 3
+ kernel/hrtimer.c | 196 ++++++++++++++++++++++++++++++++++++++++++-----
+ kernel/sched/core.c | 1
+ kernel/sched/rt.c | 1
+ kernel/time/tick-sched.c | 1
+ kernel/watchdog.c | 1
+ 6 files changed, 183 insertions(+), 20 deletions(-)
+
+Index: linux-stable/include/linux/hrtimer.h
+===================================================================
+--- linux-stable.orig/include/linux/hrtimer.h
++++ linux-stable/include/linux/hrtimer.h
+@@ -111,6 +111,8 @@ struct hrtimer {
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ unsigned long state;
++ struct list_head cb_entry;
++ int irqsafe;
+ #ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ ktime_t praecox;
+ #endif
+@@ -150,6 +152,7 @@ struct hrtimer_clock_base {
+ int index;
+ clockid_t clockid;
+ struct timerqueue_head active;
++ struct list_head expired;
+ ktime_t resolution;
+ ktime_t (*get_time)(void);
+ ktime_t softirq_time;
+Index: linux-stable/kernel/hrtimer.c
+===================================================================
+--- linux-stable.orig/kernel/hrtimer.c
++++ linux-stable/kernel/hrtimer.c
+@@ -589,8 +589,7 @@ static int hrtimer_reprogram(struct hrti
+ * When the callback is running, we do not reprogram the clock event
+ * device. The timer callback is either running on a different CPU or
+ * the callback is executed in the hrtimer_interrupt context. The
+- * reprogramming is handled either by the softirq, which called the
+- * callback or at the end of the hrtimer_interrupt.
++ * reprogramming is handled at the end of the hrtimer_interrupt.
+ */
+ if (hrtimer_callback_running(timer))
+ return 0;
+@@ -625,6 +624,9 @@ static int hrtimer_reprogram(struct hrti
+ return res;
+ }
+
++static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
++static int hrtimer_rt_defer(struct hrtimer *timer);
++
+ /*
+ * Initialize the high resolution related parts of cpu_base
+ */
+@@ -644,7 +646,29 @@ static inline int hrtimer_enqueue_reprog
+ struct hrtimer_clock_base *base,
+ int wakeup)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++again:
+ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
++ /*
++ * Move softirq based timers away from the rbtree in
++ * case it expired already. Otherwise we would have a
++ * stale base->first entry until the softirq runs.
++ */
++ if (!hrtimer_rt_defer(timer)) {
++ ktime_t now = ktime_get();
++
++ __run_hrtimer(timer, &now);
++ /*
++ * __run_hrtimer might have requeued timer and
++ * it could be base->first again.
++ */
++ if (&timer->node == base->active.next)
++ goto again;
++ return 1;
++ }
++#else
++ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
++#endif
+ if (wakeup) {
+ raw_spin_unlock(&base->cpu_base->lock);
+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+@@ -743,6 +767,11 @@ static inline int hrtimer_enqueue_reprog
+ }
+ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+ static inline void retrigger_next_event(void *arg) { }
++static inline int hrtimer_reprogram(struct hrtimer *timer,
++ struct hrtimer_clock_base *base)
++{
++ return 0;
++}
+
+ #endif /* CONFIG_HIGH_RES_TIMERS */
+
+@@ -874,9 +903,9 @@ void hrtimer_wait_for_timer(const struct
+ {
+ struct hrtimer_clock_base *base = timer->base;
+
+- if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base))
++ if (base && base->cpu_base && !timer->irqsafe)
+ wait_event(base->cpu_base->wait,
+- !(timer->state & HRTIMER_STATE_CALLBACK));
++ !(timer->state & HRTIMER_STATE_CALLBACK));
+ }
+
+ #else
+@@ -926,6 +955,11 @@ static void __remove_hrtimer(struct hrti
+ if (!(timer->state & HRTIMER_STATE_ENQUEUED))
+ goto out;
+
++ if (unlikely(!list_empty(&timer->cb_entry))) {
++ list_del_init(&timer->cb_entry);
++ goto out;
++ }
++
+ next_timer = timerqueue_getnext(&base->active);
+ timerqueue_del(&base->active, &timer->node);
+ if (&timer->node == next_timer) {
+@@ -1199,6 +1233,7 @@ static void __hrtimer_init(struct hrtime
+
+ base = hrtimer_clockid_to_base(clock_id);
+ timer->base = &cpu_base->clock_base[base];
++ INIT_LIST_HEAD(&timer->cb_entry);
+ timerqueue_init(&timer->node);
+
+ #ifdef CONFIG_TIMER_STATS
+@@ -1282,10 +1317,118 @@ static void __run_hrtimer(struct hrtimer
+ timer->state &= ~HRTIMER_STATE_CALLBACK;
+ }
+
+-#ifdef CONFIG_HIGH_RES_TIMERS
+-
+ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
++ struct hrtimer_clock_base *base)
++{
++ /*
++ * Note, we clear the callback flag before we requeue the
++ * timer otherwise we trigger the callback_running() check
++ * in hrtimer_reprogram().
++ */
++ timer->state &= ~HRTIMER_STATE_CALLBACK;
++
++ if (restart != HRTIMER_NORESTART) {
++ BUG_ON(hrtimer_active(timer));
++ /*
++ * Enqueue the timer, if it's the leftmost timer then
++ * we need to reprogram it.
++ */
++ if (!enqueue_hrtimer(timer, base))
++ return;
++
++ if (hrtimer_reprogram(timer, base))
++ goto requeue;
++
++ } else if (hrtimer_active(timer)) {
++ /*
++ * If the timer was rearmed on another CPU, reprogram
++ * the event device.
++ */
++ if (&timer->node == base->active.next &&
++ hrtimer_reprogram(timer, base))
++ goto requeue;
++ }
++ return;
++
++requeue:
++ /*
++ * Timer is expired. Thus move it from tree to pending list
++ * again.
++ */
++ __remove_hrtimer(timer, base, timer->state, 0);
++ list_add_tail(&timer->cb_entry, &base->expired);
++}
++
++/*
++ * The changes in mainline which removed the callback modes from
++ * hrtimer are not yet working with -rt. The non wakeup_process()
++ * based callbacks which involve sleeping locks need to be treated
++ * seperately.
++ */
++static void hrtimer_rt_run_pending(void)
++{
++ enum hrtimer_restart (*fn)(struct hrtimer *);
++ struct hrtimer_cpu_base *cpu_base;
++ struct hrtimer_clock_base *base;
++ struct hrtimer *timer;
++ int index, restart;
++
++ local_irq_disable();
++ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
++
++ raw_spin_lock(&cpu_base->lock);
++
++ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
++ base = &cpu_base->clock_base[index];
++
++ while (!list_empty(&base->expired)) {
++ timer = list_first_entry(&base->expired,
++ struct hrtimer, cb_entry);
++
++ /*
++ * Same as the above __run_hrtimer function
++ * just we run with interrupts enabled.
++ */
++ debug_hrtimer_deactivate(timer);
++ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
++ timer_stats_account_hrtimer(timer);
++ fn = timer->function;
++
++ raw_spin_unlock_irq(&cpu_base->lock);
++ restart = fn(timer);
++ raw_spin_lock_irq(&cpu_base->lock);
++
++ hrtimer_rt_reprogram(restart, timer, base);
++ }
++ }
++
++ raw_spin_unlock_irq(&cpu_base->lock);
++
++ wake_up_timer_waiters(cpu_base);
++}
++
++static int hrtimer_rt_defer(struct hrtimer *timer)
++{
++ if (timer->irqsafe)
++ return 0;
++
++ __remove_hrtimer(timer, timer->base, timer->state, 0);
++ list_add_tail(&timer->cb_entry, &timer->base->expired);
++ return 1;
++}
++
++#else
++
++static inline void hrtimer_rt_run_pending(void) { }
++static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
++
++#endif
++
++#ifdef CONFIG_HIGH_RES_TIMERS
++
+ /*
+ * High resolution timer interrupt
+ * Called with interrupts disabled
+@@ -1294,7 +1437,7 @@ void hrtimer_interrupt(struct clock_even
+ {
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ ktime_t expires_next, now, entry_time, delta;
+- int i, retries = 0;
++ int i, retries = 0, raise = 0;
+
+ BUG_ON(!cpu_base->hres_active);
+ cpu_base->nr_events++;
+@@ -1361,7 +1504,10 @@ retry:
+ break;
+ }
+
+- __run_hrtimer(timer, &basenow);
++ if (!hrtimer_rt_defer(timer))
++ __run_hrtimer(timer, &basenow);
++ else
++ raise = 1;
+ }
+ }
+
+@@ -1376,6 +1522,10 @@ retry:
+ if (expires_next.tv64 == KTIME_MAX ||
+ !tick_program_event(expires_next, 0)) {
+ cpu_base->hang_detected = 0;
++
++ if (raise)
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++
+ return;
+ }
+
+@@ -1456,24 +1606,26 @@ void hrtimer_peek_ahead_timers(void)
+ local_irq_restore(flags);
+ }
+
++#else /* CONFIG_HIGH_RES_TIMERS */
++
++static inline void __hrtimer_peek_ahead_timers(void) { }
++
++#endif /* !CONFIG_HIGH_RES_TIMERS */
++
+ static void run_hrtimer_softirq(struct softirq_action *h)
+ {
++#ifdef CONFIG_HIGH_RES_TIMERS
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+ if (cpu_base->clock_was_set) {
+ cpu_base->clock_was_set = 0;
+ clock_was_set();
+ }
++#endif
+
+- hrtimer_peek_ahead_timers();
++ hrtimer_rt_run_pending();
+ }
+
+-#else /* CONFIG_HIGH_RES_TIMERS */
+-
+-static inline void __hrtimer_peek_ahead_timers(void) { }
+-
+-#endif /* !CONFIG_HIGH_RES_TIMERS */
+-
+ /*
+ * Called from timer softirq every jiffy, expire hrtimers:
+ *
+@@ -1506,7 +1658,7 @@ void hrtimer_run_queues(void)
+ struct timerqueue_node *node;
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_clock_base *base;
+- int index, gettime = 1;
++ int index, gettime = 1, raise = 0;
+
+ if (hrtimer_hres_active())
+ return;
+@@ -1531,12 +1683,16 @@ void hrtimer_run_queues(void)
+ hrtimer_get_expires_tv64(timer))
+ break;
+
+- __run_hrtimer(timer, &base->softirq_time);
++ if (!hrtimer_rt_defer(timer))
++ __run_hrtimer(timer, &base->softirq_time);
++ else
++ raise = 1;
+ }
+ raw_spin_unlock(&cpu_base->lock);
+ }
+
+- wake_up_timer_waiters(cpu_base);
++ if (raise)
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+ }
+
+ /*
+@@ -1558,6 +1714,7 @@ static enum hrtimer_restart hrtimer_wake
+ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
+ {
+ sl->timer.function = hrtimer_wakeup;
++ sl->timer.irqsafe = 1;
+ sl->task = task;
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
+@@ -1696,6 +1853,7 @@ static void __cpuinit init_hrtimers_cpu(
+ for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
+ cpu_base->clock_base[i].cpu_base = cpu_base;
+ timerqueue_init_head(&cpu_base->clock_base[i].active);
++ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired);
+ }
+
+ hrtimer_init_hres(cpu_base);
+@@ -1814,9 +1972,7 @@ void __init hrtimers_init(void)
+ hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ register_cpu_notifier(&hrtimers_nb);
+-#ifdef CONFIG_HIGH_RES_TIMERS
+ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+-#endif
+ }
+
+ /**
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -480,6 +480,7 @@ static void init_rq_hrtick(struct rq *rq
+
+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ rq->hrtick_timer.function = hrtick;
++ rq->hrtick_timer.irqsafe = 1;
+ }
+ #else /* CONFIG_SCHED_HRTICK */
+ static inline void hrtick_clear(struct rq *rq)
+Index: linux-stable/kernel/sched/rt.c
+===================================================================
+--- linux-stable.orig/kernel/sched/rt.c
++++ linux-stable/kernel/sched/rt.c
+@@ -41,6 +41,7 @@ void init_rt_bandwidth(struct rt_bandwid
+
+ hrtimer_init(&rt_b->rt_period_timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ rt_b->rt_period_timer.irqsafe = 1;
+ rt_b->rt_period_timer.function = sched_rt_period_timer;
+ }
+
+Index: linux-stable/kernel/time/tick-sched.c
+===================================================================
+--- linux-stable.orig/kernel/time/tick-sched.c
++++ linux-stable/kernel/time/tick-sched.c
+@@ -873,6 +873,7 @@ void tick_setup_sched_timer(void)
+ * Emulate tick processing via per-CPU hrtimers:
+ */
+ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ ts->sched_timer.irqsafe = 1;
+ ts->sched_timer.function = tick_sched_timer;
+
+ /* Get the next period (per cpu) */
+Index: linux-stable/kernel/watchdog.c
+===================================================================
+--- linux-stable.orig/kernel/watchdog.c
++++ linux-stable/kernel/watchdog.c
+@@ -470,6 +470,7 @@ static void watchdog_prepare_cpu(int cpu
+ WARN_ON(per_cpu(softlockup_watchdog, cpu));
+ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer->function = watchdog_timer_fn;
++ hrtimer->irqsafe = 1;
+ }
+
+ static int watchdog_enable(int cpu)
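A sketch of how a timer opts back into hard-interrupt execution under this split (assumed usage, mirroring the sched/tick/watchdog hunks above; my_timer and my_wakeup_handler are placeholder names): handlers that only perform wakeups set ->irqsafe and keep running from hrtimer_interrupt(), everything else is deferred to HRTIMER_SOFTIRQ and executed by hrtimer_rt_run_pending() with interrupts enabled.

    hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    my_timer.function = my_wakeup_handler;  /* must not take sleeping locks          */
    my_timer.irqsafe  = 1;                  /* run directly from the timer interrupt */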
diff --git a/hrtimers-prepare-full-preemption.patch b/hrtimers-prepare-full-preemption.patch
new file mode 100644
index 0000000..467eb03
--- /dev/null
+++ b/hrtimers-prepare-full-preemption.patch
@@ -0,0 +1,203 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:34 -0500
+Subject: hrtimers: prepare full preemption
+
+Make cancellation of a running callback in softirq context safe
+against preemption.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/hrtimer.h | 10 ++++++++++
+ kernel/hrtimer.c | 33 ++++++++++++++++++++++++++++++++-
+ kernel/itimer.c | 1 +
+ kernel/posix-timers.c | 33 +++++++++++++++++++++++++++++++++
+ 4 files changed, 76 insertions(+), 1 deletion(-)
+
+Index: linux-stable/include/linux/hrtimer.h
+===================================================================
+--- linux-stable.orig/include/linux/hrtimer.h
++++ linux-stable/include/linux/hrtimer.h
+@@ -192,6 +192,9 @@ struct hrtimer_cpu_base {
+ unsigned long nr_hangs;
+ ktime_t max_hang_time;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ wait_queue_head_t wait;
++#endif
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ };
+
+@@ -385,6 +388,13 @@ static inline int hrtimer_restart(struct
+ return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+ }
+
++/* Softirq preemption could deadlock timer removal */
++#ifdef CONFIG_PREEMPT_RT_BASE
++ extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
++#else
++# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0)
++#endif
++
+ /* Query timers: */
+ extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+ extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
+Index: linux-stable/kernel/hrtimer.c
+===================================================================
+--- linux-stable.orig/kernel/hrtimer.c
++++ linux-stable/kernel/hrtimer.c
+@@ -857,6 +857,32 @@ u64 hrtimer_forward(struct hrtimer *time
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_forward);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define wake_up_timer_waiters(b) wake_up(&(b)->wait)
++
++/**
++ * hrtimer_wait_for_timer - Wait for a running timer
++ *
++ * @timer: timer to wait for
++ *
++ * The function waits in case the timers callback function is
++ * currently executed on the waitqueue of the timer base. The
++ * waitqueue is woken up after the timer callback function has
++ * finished execution.
++ */
++void hrtimer_wait_for_timer(const struct hrtimer *timer)
++{
++ struct hrtimer_clock_base *base = timer->base;
++
++ if (base && base->cpu_base && !hrtimer_hres_active(base->cpu_base))
++ wait_event(base->cpu_base->wait,
++ !(timer->state & HRTIMER_STATE_CALLBACK));
++}
++
++#else
++# define wake_up_timer_waiters(b) do { } while (0)
++#endif
++
+ /*
+ * enqueue_hrtimer - internal function to (re)start a timer
+ *
+@@ -1094,7 +1120,7 @@ int hrtimer_cancel(struct hrtimer *timer
+
+ if (ret >= 0)
+ return ret;
+- cpu_relax();
++ hrtimer_wait_for_timer(timer);
+ }
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_cancel);
+@@ -1509,6 +1535,8 @@ void hrtimer_run_queues(void)
+ }
+ raw_spin_unlock(&cpu_base->lock);
+ }
++
++ wake_up_timer_waiters(cpu_base);
+ }
+
+ /*
+@@ -1671,6 +1699,9 @@ static void __cpuinit init_hrtimers_cpu(
+ }
+
+ hrtimer_init_hres(cpu_base);
++#ifdef CONFIG_PREEMPT_RT_BASE
++ init_waitqueue_head(&cpu_base->wait);
++#endif
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
+Index: linux-stable/kernel/itimer.c
+===================================================================
+--- linux-stable.orig/kernel/itimer.c
++++ linux-stable/kernel/itimer.c
+@@ -213,6 +213,7 @@ again:
+ /* We are sharing ->siglock with it_real_fn() */
+ if (hrtimer_try_to_cancel(timer) < 0) {
+ spin_unlock_irq(&tsk->sighand->siglock);
++ hrtimer_wait_for_timer(&tsk->signal->real_timer);
+ goto again;
+ }
+ expires = timeval_to_ktime(value->it_value);
+Index: linux-stable/kernel/posix-timers.c
+===================================================================
+--- linux-stable.orig/kernel/posix-timers.c
++++ linux-stable/kernel/posix-timers.c
+@@ -766,6 +766,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
+ return overrun;
+ }
+
++/*
++ * Protected by RCU!
++ */
++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (kc->timer_set == common_timer_set)
++ hrtimer_wait_for_timer(&timr->it.real.timer);
++ else
++ /* FIXME: Whacky hack for posix-cpu-timers */
++ schedule_timeout(1);
++#endif
++}
++
+ /* Set a POSIX.1b interval timer. */
+ /* timr->it_lock is taken. */
+ static int
+@@ -843,6 +857,7 @@ retry:
+ if (!timr)
+ return -EINVAL;
+
++ rcu_read_lock();
+ kc = clockid_to_kclock(timr->it_clock);
+ if (WARN_ON_ONCE(!kc || !kc->timer_set))
+ error = -EINVAL;
+@@ -851,9 +866,12 @@ retry:
+
+ unlock_timer(timr, flag);
+ if (error == TIMER_RETRY) {
++ timer_wait_for_callback(kc, timr);
+ rtn = NULL; // We already got the old time...
++ rcu_read_unlock();
+ goto retry;
+ }
++ rcu_read_unlock();
+
+ if (old_setting && !error &&
+ copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
+@@ -891,10 +909,15 @@ retry_delete:
+ if (!timer)
+ return -EINVAL;
+
++ rcu_read_lock();
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
+ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
++ rcu_read_unlock();
+
+ spin_lock(&current->sighand->siglock);
+ list_del(&timer->list);
+@@ -920,8 +943,18 @@ static void itimer_delete(struct k_itime
+ retry_delete:
+ spin_lock_irqsave(&timer->it_lock, flags);
+
++ /* On RT we can race with a deletion */
++ if (!timer->it_signal) {
++ unlock_timer(timer, flags);
++ return;
++ }
++
+ if (timer_delete_hook(timer) == TIMER_RETRY) {
++ rcu_read_lock();
+ unlock_timer(timer, flags);
++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
++ timer);
++ rcu_read_unlock();
+ goto retry_delete;
+ }
+ list_del(&timer->list);
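After this change the cancel path stops busy-waiting; the resulting hrtimer_cancel() loop has roughly the shape below (condensed from the hunk above): instead of cpu_relax(), the canceller sleeps on the per-CPU base waitqueue until the running callback has cleared HRTIMER_STATE_CALLBACK.

    for (;;) {
            int ret = hrtimer_try_to_cancel(timer);

            if (ret >= 0)
                    return ret;
            hrtimer_wait_for_timer(timer);  /* sleep instead of spinning */
    }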
diff --git a/hwlatdetect.patch b/hwlatdetect.patch
new file mode 100644
index 0000000..c64e5db
--- /dev/null
+++ b/hwlatdetect.patch
@@ -0,0 +1,1352 @@
+Subject: hwlatdetect.patch
+From: Carsten Emde <C.Emde@osadl.org>
+Date: Tue, 19 Jul 2011 13:53:12 +0100
+
+Jon Masters developed this wonderful SMI detector. For details please
+consult Documentation/hwlat_detector.txt. It could be ported to Linux
+3.0 RT without any major change.
+
+Signed-off-by: Carsten Emde <C.Emde@osadl.org>
+
+---
+ Documentation/hwlat_detector.txt | 64 ++
+ drivers/misc/Kconfig | 29
+ drivers/misc/Makefile | 1
+ drivers/misc/hwlat_detector.c | 1212 +++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 1306 insertions(+)
+
+Index: linux-stable/Documentation/hwlat_detector.txt
+===================================================================
+--- /dev/null
++++ linux-stable/Documentation/hwlat_detector.txt
+@@ -0,0 +1,64 @@
++Introduction:
++-------------
++
++The module hwlat_detector is a special purpose kernel module that is used to
++detect large system latencies induced by the behavior of certain underlying
++hardware or firmware, independent of Linux itself. The code was developed
++originally to detect SMIs (System Management Interrupts) on x86 systems,
++however there is nothing x86 specific about this patchset. It was
++originally written for use by the "RT" patch since the Real Time
++kernel is highly latency sensitive.
++
++SMIs are usually not serviced by the Linux kernel, which typically does not
++even know that they are occurring. SMIs are instead set up by BIOS code
++and are serviced by BIOS code, usually for "critical" events such as
++management of thermal sensors and fans. Sometimes though, SMIs are used for
++other tasks and those tasks can spend an inordinate amount of time in the
++handler (sometimes measured in milliseconds). Obviously this is a problem if
++you are trying to keep event service latencies down in the microsecond range.
++
++The hardware latency detector works by hogging all of the cpus for configurable
++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
++for some period, then looking for gaps in the TSC data. Any gap indicates a
++time when the polling was interrupted and, since the machine is stopped and
++interrupts are turned off, the only thing that could cause such a gap is an SMI.
++
++Note that the SMI detector should *NEVER* be used in a production environment.
++It is intended to be run manually to determine if the hardware platform has a
++problem with long system firmware service routines.
++
++Usage:
++------
++
++Loading the module hwlat_detector with the parameter "enabled=1" (or setting
++the "enable" entry in the "hwlat_detector" debugfs directory to 1) is the only
++step required to start the hwlat_detector. It is possible to redefine the
++threshold in microseconds (us) above which latency spikes will be taken
++into account (parameter "threshold=").
++
++Example:
++
++ # modprobe hwlat_detector enabled=1 threshold=100
++
++After the module is loaded, it creates a directory named "hwlat_detector" under
++the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary
++to have debugfs mounted, which might be on /sys/debug on your system.
++
++The /debug/hwlat_detector interface contains the following files:
++
++count - number of latency spikes observed since last reset
++enable - a global enable/disable toggle (0/1), resets count
++max - maximum hardware latency actually observed (usecs)
++sample - a pipe from which to read current raw sample data
++ in the format <timestamp> <latency observed usecs>
++ (can be opened O_NONBLOCK for a single sample)
++threshold - minimum latency value to be considered (usecs)
++width - time period to sample with CPUs held (usecs)
++ must be less than the total window size (enforced)
++window - total period of sampling, width being inside (usecs)
++
++By default we will set width to 500,000 and window to 1,000,000, meaning that
++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
++observe any latencies that exceed the threshold (initially 100 usecs),
++then we write to a global sample ring buffer of 8K samples, which is
++consumed by reading from the "sample" (pipe) debugfs file interface.
+Index: linux-stable/drivers/misc/Kconfig
+===================================================================
+--- linux-stable.orig/drivers/misc/Kconfig
++++ linux-stable/drivers/misc/Kconfig
+@@ -131,6 +131,35 @@ config IBM_ASM
+ for information on the specific driver level and support statement
+ for your IBM server.
+
++config HWLAT_DETECTOR
++ tristate "Testing module to detect hardware-induced latencies"
++ depends on DEBUG_FS
++ depends on RING_BUFFER
++ default m
++ ---help---
++ A simple hardware latency detector. Use this module to detect
++ large latencies introduced by the behavior of the underlying
++ system firmware external to Linux. We do this using periodic
++ use of stop_machine to grab all available CPUs and measure
++ for unexplainable gaps in the CPU timestamp counter(s). By
++ default, the module is not enabled until the "enable" file
++ within the "hwlat_detector" debugfs directory is toggled.
++
++ This module is often used to detect SMI (System Management
++ Interrupts) on x86 systems, though it is not x86 specific. To
++ this end, we default to using a sample window of 1 second,
++ during which we will sample for 0.5 seconds. If an SMI or
++ similar event occurs during that time, it is recorded
++ into an 8K-sample global ring buffer until retrieved.
++
++ WARNING: This software should never be enabled (it can be built
++ but should not be turned on after it is loaded) in a production
++ environment where high latencies are a concern since the
++ sampling mechanism actually introduces latencies for
++ regular tasks while the CPU(s) are being held.
++
++ If unsure, say N
++
+ config PHANTOM
+ tristate "Sensable PHANToM (PCI)"
+ depends on PCI
+Index: linux-stable/drivers/misc/Makefile
+===================================================================
+--- linux-stable.orig/drivers/misc/Makefile
++++ linux-stable/drivers/misc/Makefile
+@@ -50,3 +50,4 @@ obj-y += carma/
+ obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
+ obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
+ obj-$(CONFIG_INTEL_MEI) += mei/
++obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
+Index: linux-stable/drivers/misc/hwlat_detector.c
+===================================================================
+--- /dev/null
++++ linux-stable/drivers/misc/hwlat_detector.c
+@@ -0,0 +1,1212 @@
++/*
++ * hwlat_detector.c - A simple Hardware Latency detector.
++ *
++ * Use this module to detect large system latencies induced by the behavior of
++ * certain underlying system hardware or firmware, independent of Linux itself.
++ * The code was developed originally to detect the presence of SMIs on Intel
++ * and AMD systems, although there is no dependency upon x86 herein.
++ *
++ * The classical example usage of this module is in detecting the presence of
++ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
++ * somewhat special form of hardware interrupt spawned from earlier CPU debug
++ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
++ * LPC (or other device) to generate a special interrupt under certain
++ * circumstances, for example, upon expiration of a special SMI timer device,
++ * due to certain external thermal readings, on certain I/O address accesses,
++ * and other situations. An SMI hits a special CPU pin, triggers a special
++ * SMI mode (complete with special memory map), and the OS is unaware.
++ *
++ * Although certain hardware-inducing latencies are necessary (for example,
++ * a modern system often requires an SMI handler for correct thermal control
++ * and remote management) they can wreak havoc upon any OS-level performance
++ * guarantees toward low-latency, especially when the OS is not even made
++ * aware of the presence of these interrupts. For this reason, we need a
++ * somewhat brute force mechanism to detect these interrupts. In this case,
++ * we do it by hogging all of the CPU(s) for configurable timer intervals,
++ * sampling the built-in CPU timer, looking for discontiguous readings.
++ *
++ * WARNING: This implementation necessarily introduces latencies. Therefore,
++ * you should NEVER use this module in a production environment
++ * requiring any kind of low-latency performance guarantee(s).
++ *
++ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
++ *
++ * Includes useful feedback from Clark Williams <clark@redhat.com>
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/ring_buffer.h>
++#include <linux/stop_machine.h>
++#include <linux/time.h>
++#include <linux/hrtimer.h>
++#include <linux/kthread.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/uaccess.h>
++#include <linux/version.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++
++#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
++#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
++#define U64STR_SIZE 22 /* 20 digits max */
++
++#define VERSION "1.0.0"
++#define BANNER "hwlat_detector: "
++#define DRVNAME "hwlat_detector"
++#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
++#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
++#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
++
++/* Module metadata */
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
++MODULE_DESCRIPTION("A simple hardware latency detector");
++MODULE_VERSION(VERSION);
++
++/* Module parameters */
++
++static int debug;
++static int enabled;
++static int threshold;
++
++module_param(debug, int, 0); /* enable debug */
++module_param(enabled, int, 0); /* enable detector */
++module_param(threshold, int, 0); /* latency threshold */
++
++/* Buffering and sampling */
++
++static struct ring_buffer *ring_buffer; /* sample buffer */
++static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */
++static unsigned long buf_size = BUF_SIZE_DEFAULT;
++static struct task_struct *kthread; /* sampling thread */
++
++/* DebugFS filesystem entries */
++
++static struct dentry *debug_dir; /* debugfs directory */
++static struct dentry *debug_max; /* maximum TSC delta */
++static struct dentry *debug_count; /* total detect count */
++static struct dentry *debug_sample_width; /* sample width us */
++static struct dentry *debug_sample_window; /* sample window us */
++static struct dentry *debug_sample; /* raw samples us */
++static struct dentry *debug_threshold; /* threshold us */
++static struct dentry *debug_enable; /* enable/disable */
++
++/* Individual samples and global state */
++
++struct sample; /* latency sample */
++struct data; /* Global state */
++
++/* Sampling functions */
++static int __buffer_add_sample(struct sample *sample);
++static struct sample *buffer_get_sample(struct sample *sample);
++static int get_sample(void *unused);
++
++/* Threading and state */
++static int kthread_fn(void *unused);
++static int start_kthread(void);
++static int stop_kthread(void);
++static void __reset_stats(void);
++static int init_stats(void);
++
++/* Debugfs interface */
++static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos, const u64 *entry);
++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos, u64 *entry);
++static int debug_sample_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos);
++static int debug_sample_release(struct inode *inode, struct file *filp);
++static int debug_enable_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos);
++static ssize_t debug_enable_fwrite(struct file *file,
++ const char __user *user_buffer,
++ size_t user_size, loff_t *offset);
++
++/* Initialization functions */
++static int init_debugfs(void);
++static void free_debugfs(void);
++static int detector_init(void);
++static void detector_exit(void);
++
++/* Individual latency samples are stored here when detected and packed into
++ * the ring_buffer circular buffer, where they are overwritten when
++ * more than buf_size/sizeof(sample) samples are received. */
++struct sample {
++ u64 seqnum; /* unique sequence */
++ u64 duration; /* ktime delta */
++ struct timespec timestamp; /* wall time */
++ unsigned long lost;
++};
++
++/* keep the global state somewhere. Mostly used under stop_machine. */
++static struct data {
++
++ struct mutex lock; /* protect changes */
++
++ u64 count; /* total since reset */
++ u64 max_sample; /* max hardware latency */
++ u64 threshold; /* sample threshold level */
++
++ u64 sample_window; /* total sampling window (on+off) */
++ u64 sample_width; /* active sampling portion of window */
++
++ atomic_t sample_open; /* whether the sample file is open */
++
++ wait_queue_head_t wq; /* waitqueue for new sample values */
++
++} data;
++
++/**
++ * __buffer_add_sample - add a new latency sample recording to the ring buffer
++ * @sample: The new latency sample value
++ *
++ * This receives a new latency sample and records it in a global ring buffer.
++ * No additional locking is used in this case - suited for stop_machine use.
++ */
++static int __buffer_add_sample(struct sample *sample)
++{
++ return ring_buffer_write(ring_buffer,
++ sizeof(struct sample), sample);
++}
++
++/**
++ * buffer_get_sample - remove a hardware latency sample from the ring buffer
++ * @sample: Pre-allocated storage for the sample
++ *
++ * This retrieves a hardware latency sample from the global circular buffer
++ */
++static struct sample *buffer_get_sample(struct sample *sample)
++{
++ struct ring_buffer_event *e = NULL;
++ struct sample *s = NULL;
++ unsigned int cpu = 0;
++
++ if (!sample)
++ return NULL;
++
++ mutex_lock(&ring_buffer_mutex);
++ for_each_online_cpu(cpu) {
++ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
++ if (e)
++ break;
++ }
++
++ if (e) {
++ s = ring_buffer_event_data(e);
++ memcpy(sample, s, sizeof(struct sample));
++ } else
++ sample = NULL;
++ mutex_unlock(&ring_buffer_mutex);
++
++ return sample;
++}
++
++/**
++ * get_sample - sample the CPU TSC and look for likely hardware latencies
++ * @unused: This is not used but is a part of the stop_machine API
++ *
++ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
++ * hardware-induced latency. Called under stop_machine, with data.lock held.
++ */
++static int get_sample(void *unused)
++{
++ ktime_t start, t1, t2;
++ s64 diff, total = 0;
++ u64 sample = 0;
++ int ret = 1;
++
++ start = ktime_get(); /* start timestamp */
++
++ do {
++
++ t1 = ktime_get(); /* we'll look for a discontinuity */
++ t2 = ktime_get();
++
++ total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
++ diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
++
++ /* This shouldn't happen */
++ if (diff < 0) {
++ printk(KERN_ERR BANNER "time running backwards\n");
++ goto out;
++ }
++
++ if (diff > sample)
++ sample = diff; /* only want highest value */
++
++ } while (total <= data.sample_width);
++
++ /* If we exceed the threshold value, we have found a hardware latency */
++ if (sample > data.threshold) {
++ struct sample s;
++
++ data.count++;
++ s.seqnum = data.count;
++ s.duration = sample;
++ s.timestamp = CURRENT_TIME;
++ __buffer_add_sample(&s);
++
++ /* Keep a running maximum ever recorded hardware latency */
++ if (sample > data.max_sample)
++ data.max_sample = sample;
++ }
++
++ ret = 0;
++out:
++ return ret;
++}
++
++/*
++ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
++ * @unused: A required part of the kthread API.
++ *
++ * Used to periodically sample the CPU TSC via a call to get_sample. We
++ * use stop_machine, which does (intentionally) introduce latency since we
++ * need to ensure nothing else might be running (and thus preempting us).
++ * Obviously this should never be used in production environments.
++ *
++ * stop_machine will schedule us typically only on CPU0 which is fine for
++ * almost every real-world hardware latency situation - but we might later
++ * generalize this if we find there are any actual systems with alternate
++ * SMI delivery or other non CPU0 hardware latencies.
++ */
++static int kthread_fn(void *unused)
++{
++ int err = 0;
++ u64 interval = 0;
++
++ while (!kthread_should_stop()) {
++
++ mutex_lock(&data.lock);
++
++ err = stop_machine(get_sample, unused, 0);
++ if (err) {
++ /* Houston, we have a problem */
++ mutex_unlock(&data.lock);
++ goto err_out;
++ }
++
++ wake_up(&data.wq); /* wake up reader(s) */
++
++ interval = data.sample_window - data.sample_width;
++ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
++
++ mutex_unlock(&data.lock);
++
++ if (msleep_interruptible(interval))
++ goto out;
++ }
++ goto out;
++err_out:
++ printk(KERN_ERR BANNER "could not call stop_machine, disabling\n");
++ enabled = 0;
++out:
++ return err;
++
++}
++
++/**
++ * start_kthread - Kick off the hardware latency sampling/detector kthread
++ *
++ * This starts a kernel thread that will sit and sample the CPU timestamp
++ * counter (TSC or similar) and look for potential hardware latencies.
++ */
++static int start_kthread(void)
++{
++ kthread = kthread_run(kthread_fn, NULL,
++ DRVNAME);
++ if (IS_ERR(kthread)) {
++ printk(KERN_ERR BANNER "could not start sampling thread\n");
++ enabled = 0;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++/**
++ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
++ *
++ * This kicks the running hardware latency sampling/detector kernel thread and
++ * tells it to stop sampling now. Use this on unload and at system shutdown.
++ */
++static int stop_kthread(void)
++{
++ int ret;
++
++ ret = kthread_stop(kthread);
++
++ return ret;
++}
++
++/**
++ * __reset_stats - Reset statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We call this
++ * function in order to reset those when "enable" is toggled on or off, and
++ * also at initialization. Should be called with data.lock held.
++ */
++static void __reset_stats(void)
++{
++ data.count = 0;
++ data.max_sample = 0;
++ ring_buffer_reset(ring_buffer); /* flush out old sample entries */
++}
++
++/**
++ * init_stats - Setup global state statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We also use
++ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
++ * induced system latencies. This function initializes these structures and
++ * allocates the global ring buffer also.
++ */
++static int init_stats(void)
++{
++ int ret = -ENOMEM;
++
++ mutex_init(&data.lock);
++ init_waitqueue_head(&data.wq);
++ atomic_set(&data.sample_open, 0);
++
++ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
++
++ if (WARN(!ring_buffer, KERN_ERR BANNER
++ "failed to allocate ring buffer!\n"))
++ goto out;
++
++ __reset_stats();
++ data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */
++ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
++ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
++
++ ret = 0;
++
++out:
++ return ret;
++
++}
++
++/*
++ * simple_data_read - Wrapper read function for global state debugfs entries
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ * @entry: The entry to read from
++ *
++ * This function provides a generic read implementation for the global state
++ * "data" structure debugfs filesystem entries. It would be nice to use
++ * simple_attr_read directly, but we need to make sure that the data.lock
++ * mutex is held during the actual read (even though we likely won't ever
++ * actually race here as the updater runs under a stop_machine context).
++ */
++static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos, const u64 *entry)
++{
++ char buf[U64STR_SIZE];
++ u64 val = 0;
++ int len = 0;
++
++ memset(buf, 0, sizeof(buf));
++
++ if (!entry)
++ return -EFAULT;
++
++ mutex_lock(&data.lock);
++ val = *entry;
++ mutex_unlock(&data.lock);
++
++ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
++
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
++
++}
++
++/*
++ * simple_data_write - Wrapper write function for global state debugfs entries
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to write value from
++ * @cnt: The maximum number of bytes to write
++ * @ppos: The current "file" position
++ * @entry: The entry to write to
++ *
++ * This function provides a generic write implementation for the global state
++ * "data" structure debugfs filesystem entries. It would be nice to use
++ * simple_attr_write directly, but we need to make sure that the data.lock
++ * mutex is held during the actual write (even though we likely won't ever
++ * actually race here as the updater runs under a stop_machine context).
++ */
++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos, u64 *entry)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = strict_strtoull(buf, 10, &val);
++ if (err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ *entry = val;
++ mutex_unlock(&data.lock);
++
++ return csize;
++}
++
++/**
++ * debug_count_fopen - Open function for "count" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "count" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_count_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_count_fread - Read function for "count" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "count" debugfs
++ * interface to the hardware latency detector. Can be used to read the
++ * number of latency readings exceeding the configured threshold since
++ * the detector was last reset (e.g. by writing a zero into "count").
++ */
++static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
++}
++
++/**
++ * debug_count_fwrite - Write function for "count" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "count" debugfs
++ * interface to the hardware latency detector. Can be used to write a
++ * desired value, especially to zero the total count.
++ */
++static ssize_t debug_count_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
++}
++
++/**
++ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "enable" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_enable_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_enable_fread - Read function for "enable" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "enable" debugfs
++ * interface to the hardware latency detector. Can be used to determine
++ * whether the detector is currently enabled ("0\n" or "1\n" returned).
++ */
++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char buf[4];
++
++ if ((cnt < sizeof(buf)) || (*ppos))
++ return 0;
++
++ buf[0] = enabled ? '1' : '0';
++ buf[1] = '\n';
++ buf[2] = '\0';
++ if (copy_to_user(ubuf, buf, strlen(buf)))
++ return -EFAULT;
++ return *ppos = strlen(buf);
++}
++
++/**
++ * debug_enable_fwrite - Write function for "enable" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "enable" debugfs
++ * interface to the hardware latency detector. Can be used to enable or
++ * disable the detector, which will have the side-effect of possibly
++ * also resetting the global stats and kicking off the measuring
++ * kthread (on an enable) or the converse (upon a disable).
++ */
++static ssize_t debug_enable_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[4];
++ int csize = min(cnt, sizeof(buf));
++ long val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[sizeof(buf)-1] = '\0'; /* just in case */
++ err = strict_strtoul(buf, 10, &val);
++ if (0 != err)
++ return -EINVAL;
++
++ if (val) {
++ if (enabled)
++ goto unlock;
++ enabled = 1;
++ __reset_stats();
++ if (start_kthread())
++ return -EFAULT;
++ } else {
++ if (!enabled)
++ goto unlock;
++ enabled = 0;
++ err = stop_kthread();
++ if (err) {
++ printk(KERN_ERR BANNER "cannot stop kthread\n");
++ return -EFAULT;
++ }
++ wake_up(&data.wq); /* reader(s) should return */
++ }
++unlock:
++ return csize;
++}
++
++/**
++ * debug_max_fopen - Open function for "max" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "max" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_max_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_max_fread - Read function for "max" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "max" debugfs
++ * interface to the hardware latency detector. Can be used to determine
++ * the maximum latency value observed since it was last reset.
++ */
++static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
++}
++
++/**
++ * debug_max_fwrite - Write function for "max" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "max" debugfs
++ * interface to the hardware latency detector. Can be used to reset the
++ * maximum or set it to some other desired value; if subsequent
++ * measurements exceed this value, the maximum will be updated.
++ */
++static ssize_t debug_max_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
++}
++
++
++/**
++ * debug_sample_fopen - An open function for "sample" debugfs interface
++ * @inode: The in-kernel inode representation of this debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function handles opening the "sample" file within the hardware
++ * latency detector debugfs directory interface. This file is used to read
++ * raw samples from the global ring_buffer and allows the user to see a
++ * running latency history. It can be opened in blocking or non-blocking
++ * mode, which determines whether reads wait for new samples or return
++ * immediately. Implements simple locking to prevent multiple simultaneous use.
++ */
++static int debug_sample_fopen(struct inode *inode, struct file *filp)
++{
++ if (!atomic_add_unless(&data.sample_open, 1, 1))
++ return -EBUSY;
++ else
++ return 0;
++}
++
++/**
++ * debug_sample_fread - A read function for "sample" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that will contain the samples read
++ * @cnt: The maximum bytes to read from the debugfs "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function handles reading from the "sample" file within the hardware
++ * latency detector debugfs directory interface. This file is used to read
++ * raw samples from the global ring_buffer and allows the user to see a
++ * running latency history. By default a read will block until a new
++ * value is written into the sample buffer, unless one or more values
++ * are already waiting in the buffer, or the sample file was previously
++ * opened in non-blocking mode.
++ */
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ int len = 0;
++ char buf[64];
++ struct sample *sample = NULL;
++
++ if (!enabled)
++ return 0;
++
++ sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
++ if (!sample)
++ return -ENOMEM;
++
++ while (!buffer_get_sample(sample)) {
++
++ DEFINE_WAIT(wait);
++
++ if (filp->f_flags & O_NONBLOCK) {
++ len = -EAGAIN;
++ goto out;
++ }
++
++ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
++ schedule();
++ finish_wait(&data.wq, &wait);
++
++ if (signal_pending(current)) {
++ len = -EINTR;
++ goto out;
++ }
++
++ if (!enabled) { /* enable was toggled */
++ len = 0;
++ goto out;
++ }
++ }
++
++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
++ sample->timestamp.tv_sec,
++ sample->timestamp.tv_nsec,
++ sample->duration);
++
++
++ /* handling partial reads is more trouble than it's worth */
++ if (len > cnt)
++ goto out;
++
++ if (copy_to_user(ubuf, buf, len))
++ len = -EFAULT;
++
++out:
++ kfree(sample);
++ return len;
++}
++
++/**
++ * debug_sample_release - Release function for "sample" debugfs interface
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function completes the close of the debugfs interface "sample" file.
++ * Frees the sample_open "lock" so that other users may open the interface.
++ */
++static int debug_sample_release(struct inode *inode, struct file *filp)
++{
++ atomic_dec(&data.sample_open);
++
++ return 0;
++}
++
++/**
++ * debug_threshold_fopen - Open function for "threshold" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "threshold" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_threshold_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_threshold_fread - Read function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * the current threshold level at which a latency will be recorded in the
++ * global ring buffer, typically on the order of 10us.
++ */
++static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
++}
++
++/**
++ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * the threshold level at which any subsequently detected latencies will
++ * be recorded into the global ring buffer.
++ */
++static ssize_t debug_threshold_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ int ret;
++
++ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
++
++ if (enabled)
++ wake_up_process(kthread);
++
++ return ret;
++}
++
++/**
++ * debug_width_fopen - Open function for "width" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "width" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_width_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_width_fread - Read function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and still have the system respond to a sample reader,
++ * or, worse, avoid having the system appear to have gone out to lunch.
++ */
++static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
++}
++
++/**
++ * debug_width_fwrite - Write function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * for how many us of the total window we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and still have the system respond to a sample reader,
++ * or, worse, avoid having the system appear to have gone out to lunch. It
++ * is enforced that the width is less than the total window size.
++ */
++static ssize_t debug_width_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = strict_strtoull(buf, 10, &val);
++ if (0 != err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ if (val < data.sample_window)
++ data.sample_width = val;
++ else {
++ mutex_unlock(&data.lock);
++ return -EINVAL;
++ }
++ mutex_unlock(&data.lock);
++
++ if (enabled)
++ wake_up_process(kthread);
++
++ return csize;
++}
++
++/**
++ * debug_window_fopen - Open function for "window" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs.
++ */
++static int debug_window_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_window_fread - Read function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to read the total window size.
++ */
++static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
++}
++
++/**
++ * debug_window_fwrite - Write function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "window" debufds
++ * interface to the hardware latency detetector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to write a new total window size. It
++ * is enforced that any value written must be greater than the sample width
++ * size, or an error results.
++ */
++static ssize_t debug_window_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = strict_strtoull(buf, 10, &val);
++ if (0 != err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ if (data.sample_width < val)
++ data.sample_window = val;
++ else {
++ mutex_unlock(&data.lock);
++ return -EINVAL;
++ }
++ mutex_unlock(&data.lock);
++
++ return csize;
++}
++
++/*
++ * Function pointers for the "count" debugfs file operations
++ */
++static const struct file_operations count_fops = {
++ .open = debug_count_fopen,
++ .read = debug_count_fread,
++ .write = debug_count_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "enable" debugfs file operations
++ */
++static const struct file_operations enable_fops = {
++ .open = debug_enable_fopen,
++ .read = debug_enable_fread,
++ .write = debug_enable_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "max" debugfs file operations
++ */
++static const struct file_operations max_fops = {
++ .open = debug_max_fopen,
++ .read = debug_max_fread,
++ .write = debug_max_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "sample" debugfs file operations
++ */
++static const struct file_operations sample_fops = {
++ .open = debug_sample_fopen,
++ .read = debug_sample_fread,
++ .release = debug_sample_release,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "threshold" debugfs file operations
++ */
++static const struct file_operations threshold_fops = {
++ .open = debug_threshold_fopen,
++ .read = debug_threshold_fread,
++ .write = debug_threshold_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "width" debugfs file operations
++ */
++static const struct file_operations width_fops = {
++ .open = debug_width_fopen,
++ .read = debug_width_fread,
++ .write = debug_width_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "window" debugfs file operations
++ */
++static const struct file_operations window_fops = {
++ .open = debug_window_fopen,
++ .read = debug_window_fread,
++ .write = debug_window_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/**
++ * init_debugfs - A function to initialize the debugfs interface files
++ *
++ * This function creates entries in debugfs for "hwlat_detector", including
++ * files to read values from the detector, current samples, and the
++ * maximum sample that has been captured since the hardware latency
++ * detector was started.
++ */
++static int init_debugfs(void)
++{
++ int ret = -ENOMEM;
++
++ debug_dir = debugfs_create_dir(DRVNAME, NULL);
++ if (!debug_dir)
++ goto err_debug_dir;
++
++ debug_sample = debugfs_create_file("sample", 0444,
++ debug_dir, NULL,
++ &sample_fops);
++ if (!debug_sample)
++ goto err_sample;
++
++ debug_count = debugfs_create_file("count", 0444,
++ debug_dir, NULL,
++ &count_fops);
++ if (!debug_count)
++ goto err_count;
++
++ debug_max = debugfs_create_file("max", 0444,
++ debug_dir, NULL,
++ &max_fops);
++ if (!debug_max)
++ goto err_max;
++
++ debug_sample_window = debugfs_create_file("window", 0644,
++ debug_dir, NULL,
++ &window_fops);
++ if (!debug_sample_window)
++ goto err_window;
++
++ debug_sample_width = debugfs_create_file("width", 0644,
++ debug_dir, NULL,
++ &width_fops);
++ if (!debug_sample_width)
++ goto err_width;
++
++ debug_threshold = debugfs_create_file("threshold", 0644,
++ debug_dir, NULL,
++ &threshold_fops);
++ if (!debug_threshold)
++ goto err_threshold;
++
++ debug_enable = debugfs_create_file("enable", 0644,
++ debug_dir, &enabled,
++ &enable_fops);
++ if (!debug_enable)
++ goto err_enable;
++
++ else {
++ ret = 0;
++ goto out;
++ }
++
++err_enable:
++ debugfs_remove(debug_threshold);
++err_threshold:
++ debugfs_remove(debug_sample_width);
++err_width:
++ debugfs_remove(debug_sample_window);
++err_window:
++ debugfs_remove(debug_max);
++err_max:
++ debugfs_remove(debug_count);
++err_count:
++ debugfs_remove(debug_sample);
++err_sample:
++ debugfs_remove(debug_dir);
++err_debug_dir:
++out:
++ return ret;
++}
++
++/**
++ * free_debugfs - A function to cleanup the debugfs file interface
++ */
++static void free_debugfs(void)
++{
++ /* could also use a debugfs_remove_recursive */
++ debugfs_remove(debug_enable);
++ debugfs_remove(debug_threshold);
++ debugfs_remove(debug_sample_width);
++ debugfs_remove(debug_sample_window);
++ debugfs_remove(debug_max);
++ debugfs_remove(debug_count);
++ debugfs_remove(debug_sample);
++ debugfs_remove(debug_dir);
++}
++
++/**
++ * detector_init - Standard module initialization code
++ */
++static int detector_init(void)
++{
++ int ret = -ENOMEM;
++
++ printk(KERN_INFO BANNER "version %s\n", VERSION);
++
++ ret = init_stats();
++ if (0 != ret)
++ goto out;
++
++ ret = init_debugfs();
++ if (0 != ret)
++ goto err_stats;
++
++ if (enabled)
++ ret = start_kthread();
++
++ goto out;
++
++err_stats:
++ ring_buffer_free(ring_buffer);
++out:
++ return ret;
++
++}
++
++/**
++ * detector_exit - Standard module cleanup code
++ */
++static void detector_exit(void)
++{
++ int err;
++
++ if (enabled) {
++ enabled = 0;
++ err = stop_kthread();
++ if (err)
++ printk(KERN_ERR BANNER "cannot stop kthread\n");
++ }
++
++ free_debugfs();
++ ring_buffer_free(ring_buffer); /* free up the ring buffer */
++
++}
++
++module_init(detector_init);
++module_exit(detector_exit);
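
[Illustrative aside, not part of the patch above: the debugfs files wired up by
init_debugfs() form the entire user-space interface of the detector. A minimal
user-space sketch that exercises it might look like the following; it assumes
debugfs is mounted and that the directory created for DRVNAME is
/sys/kernel/debug/hwlat_detector - the path and the printed units are
assumptions, not guaranteed by the patch.]

#include <stdio.h>

#define HWLAT_DIR "/sys/kernel/debug/hwlat_detector"	/* assumed DRVNAME path */

int main(void)
{
	FILE *f;
	char line[128];

	/* Enable the detector: resets the stats and starts the sampling kthread. */
	f = fopen(HWLAT_DIR "/enable", "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);

	/* Each line of "sample" is "seconds.nanoseconds<TAB>duration". */
	f = fopen(HWLAT_DIR "/sample", "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))	/* blocks until a sample arrives */
		fputs(line, stdout);
	fclose(f);
	return 0;
}
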
diff --git a/ide-use-nort-local-irq-variants.patch b/ide-use-nort-local-irq-variants.patch
new file mode 100644
index 0000000..1a363e6
--- /dev/null
+++ b/ide-use-nort-local-irq-variants.patch
@@ -0,0 +1,183 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:16 -0500
+Subject: ide: Do not disable interrupts for PREEMPT-RT
+
+Use the local_irq_*_nort variants.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/ide/alim15x3.c | 4 ++--
+ drivers/ide/hpt366.c | 4 ++--
+ drivers/ide/ide-io-std.c | 8 ++++----
+ drivers/ide/ide-io.c | 2 +-
+ drivers/ide/ide-iops.c | 4 ++--
+ drivers/ide/ide-probe.c | 4 ++--
+ drivers/ide/ide-taskfile.c | 6 +++---
+ 7 files changed, 16 insertions(+), 16 deletions(-)
+
+Index: linux-stable/drivers/ide/alim15x3.c
+===================================================================
+--- linux-stable.orig/drivers/ide/alim15x3.c
++++ linux-stable/drivers/ide/alim15x3.c
+@@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct p
+
+ isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ if (m5229_revision < 0xC2) {
+ /*
+@@ -325,7 +325,7 @@ out:
+ }
+ pci_dev_put(north);
+ pci_dev_put(isa_dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return 0;
+ }
+
+Index: linux-stable/drivers/ide/hpt366.c
+===================================================================
+--- linux-stable.orig/drivers/ide/hpt366.c
++++ linux-stable/drivers/ide/hpt366.c
+@@ -1241,7 +1241,7 @@ static int __devinit init_dma_hpt366(ide
+
+ dma_old = inb(base + 2);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ dma_new = dma_old;
+ pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
+@@ -1252,7 +1252,7 @@ static int __devinit init_dma_hpt366(ide
+ if (dma_new != dma_old)
+ outb(dma_new, base + 2);
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
+ hwif->name, base, base + 7);
+Index: linux-stable/drivers/ide/ide-io-std.c
+===================================================================
+--- linux-stable.orig/drivers/ide/ide-io-std.c
++++ linux-stable/drivers/ide/ide-io-std.c
+@@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive,
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+@@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive,
+ insl(data_addr, buf, words);
+
+ if ((io_32bit & 2) && !mmio)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ if (((len + 1) & 3) < 2)
+ return;
+@@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive,
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+@@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive,
+ outsl(data_addr, buf, words);
+
+ if ((io_32bit & 2) && !mmio)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ if (((len + 1) & 3) < 2)
+ return;
+Index: linux-stable/drivers/ide/ide-io.c
+===================================================================
+--- linux-stable.orig/drivers/ide/ide-io.c
++++ linux-stable/drivers/ide/ide-io.c
+@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat
+ /* disable_irq_nosync ?? */
+ disable_irq(hwif->irq);
+ /* local CPU only, as if we were handling an interrupt */
+- local_irq_disable();
++ local_irq_disable_nort();
+ if (hwif->polling) {
+ startstop = handler(drive);
+ } else if (drive_is_ready(drive)) {
+Index: linux-stable/drivers/ide/ide-iops.c
+===================================================================
+--- linux-stable.orig/drivers/ide/ide-iops.c
++++ linux-stable/drivers/ide/ide-iops.c
+@@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive,
+ if ((stat & ATA_BUSY) == 0)
+ break;
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ *rstat = stat;
+ return -EBUSY;
+ }
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+ /*
+ * Allow status to settle, then read it again.
+Index: linux-stable/drivers/ide/ide-probe.c
+===================================================================
+--- linux-stable.orig/drivers/ide/ide-probe.c
++++ linux-stable/drivers/ide/ide-probe.c
+@@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *dri
+ int bswap = 1;
+
+ /* local CPU only; some systems need this */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ /* read 512 bytes of id info */
+ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ drive->dev_flags |= IDE_DFLAG_ID_READ;
+ #ifdef DEBUG
+Index: linux-stable/drivers/ide/ide-taskfile.c
+===================================================================
+--- linux-stable.orig/drivers/ide/ide-taskfile.c
++++ linux-stable/drivers/ide/ide-taskfile.c
+@@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
+
+ page_is_high = PageHighMem(page);
+ if (page_is_high)
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ buf = kmap_atomic(page) + offset;
+
+@@ -272,7 +272,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
+ kunmap_atomic(buf);
+
+ if (page_is_high)
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ len -= nr_bytes;
+ }
+@@ -415,7 +415,7 @@ static ide_startstop_t pre_task_out_intr
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
+- local_irq_disable();
++ local_irq_disable_nort();
+
+ ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
+
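
[Illustrative aside, not part of the patch: the local_irq_*_nort() helpers used
above are introduced by a separate patch in this series. Roughly, their intent
is the shape below - real IRQ disabling on mainline, reduced to at most saving
the flags on PREEMPT_RT_FULL. This is a sketch of the idea, not the exact
definitions.]

/* sketch only - not the series' literal definitions */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_disable_nort()	do { } while (0)
# define local_irq_enable_nort()	do { } while (0)
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_disable_nort()	local_irq_disable()
# define local_irq_enable_nort()	local_irq_enable()
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif
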
diff --git a/infiniband-mellanox-ib-use-nort-irq.patch b/infiniband-mellanox-ib-use-nort-irq.patch
new file mode 100644
index 0000000..6839827
--- /dev/null
+++ b/infiniband-mellanox-ib-use-nort-irq.patch
@@ -0,0 +1,42 @@
+From: Sven-Thorsten Dietrich <sdietrich@novell.com>
+Date: Fri, 3 Jul 2009 08:30:35 -0500
+Subject: infiniband: Mellanox IB driver patch use _nort() primitives
+
+Fixes an in_atomic stack-dump when the Mellanox module is loaded into the
+RT kernel.
+
+Michael S. Tsirkin <mst@dev.mellanox.co.il> sayeth:
+"Basically, if you just make spin_lock_irqsave (and spin_lock_irq) not disable
+interrupts for non-raw spinlocks, I think all of infiniband will be fine without
+changes."
+
+Signed-off-by: Sven-Thorsten Dietrich <sven@thebigcorporation.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+===================================================================
+--- linux-stable.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ linux-stable/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -783,7 +783,7 @@ void ipoib_mcast_restart_task(struct wor
+
+ ipoib_mcast_stop_thread(dev, 0);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ netif_addr_lock(dev);
+ spin_lock(&priv->lock);
+
+@@ -865,7 +865,7 @@ void ipoib_mcast_restart_task(struct wor
+
+ spin_unlock(&priv->lock);
+ netif_addr_unlock(dev);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ /* We have to cancel outside of the spinlock */
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
diff --git a/inpt-gameport-use-local-irq-nort.patch b/inpt-gameport-use-local-irq-nort.patch
new file mode 100644
index 0000000..861a98d
--- /dev/null
+++ b/inpt-gameport-use-local-irq-nort.patch
@@ -0,0 +1,46 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:16 -0500
+Subject: input: gameport: Do not disable interrupts on PREEMPT_RT
+
+Use the _nort() primitives.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/input/gameport/gameport.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+Index: linux-stable/drivers/input/gameport/gameport.c
+===================================================================
+--- linux-stable.orig/drivers/input/gameport/gameport.c
++++ linux-stable/drivers/input/gameport/gameport.c
+@@ -87,12 +87,12 @@ static int gameport_measure_speed(struct
+ tx = 1 << 30;
+
+ for(i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ GET_TIME(t1);
+ for (t = 0; t < 50; t++) gameport_read(gameport);
+ GET_TIME(t2);
+ GET_TIME(t3);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
+ }
+@@ -111,11 +111,11 @@ static int gameport_measure_speed(struct
+ tx = 1 << 30;
+
+ for(i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ rdtscl(t1);
+ for (t = 0; t < 50; t++) gameport_read(gameport);
+ rdtscl(t2);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ if (t2 - t1 < tx) tx = t2 - t1;
+ }
diff --git a/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch b/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch
new file mode 100644
index 0000000..60291d0
--- /dev/null
+++ b/intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch
@@ -0,0 +1,69 @@
+Subject: intel_idle: Convert i7300_idle_lock to raw spinlock
+From: Mike Galbraith <efault@gmx.de>
+Date: Wed, 07 Dec 2011 12:48:42 +0100
+
+24 core Intel box's first exposure to 3.0.12-rt30-rc3 didn't go well.
+
+[ 27.104159] i7300_idle: loaded v1.55
+[ 27.104192] BUG: scheduling while atomic: swapper/2/0/0x00000002
+[ 27.104309] Pid: 0, comm: swapper/2 Tainted: G N 3.0.12-rt30-rc3-rt #1
+[ 27.104317] Call Trace:
+[ 27.104338] [<ffffffff810046a5>] dump_trace+0x85/0x2e0
+[ 27.104372] [<ffffffff8144eb00>] thread_return+0x12b/0x30b
+[ 27.104381] [<ffffffff8144f1b9>] schedule+0x29/0xb0
+[ 27.104389] [<ffffffff814506e5>] rt_spin_lock_slowlock+0xc5/0x240
+[ 27.104401] [<ffffffffa01f818f>] i7300_idle_notifier+0x3f/0x360 [i7300_idle]
+[ 27.104415] [<ffffffff814546c7>] notifier_call_chain+0x37/0x70
+[ 27.104426] [<ffffffff81454748>] __atomic_notifier_call_chain+0x48/0x70
+[ 27.104439] [<ffffffff81001a39>] cpu_idle+0x89/0xb0
+[ 27.104449] bad: scheduling from the idle thread!
+
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/1323258522.5057.73.camel@marge.simson.net
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/idle/i7300_idle.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+Index: linux-stable/drivers/idle/i7300_idle.c
+===================================================================
+--- linux-stable.orig/drivers/idle/i7300_idle.c
++++ linux-stable/drivers/idle/i7300_idle.c
+@@ -75,7 +75,7 @@ static unsigned long past_skip;
+
+ static struct pci_dev *fbd_dev;
+
+-static spinlock_t i7300_idle_lock;
++static raw_spinlock_t i7300_idle_lock;
+ static int i7300_idle_active;
+
+ static u8 i7300_idle_thrtctl_saved;
+@@ -457,7 +457,7 @@ static int i7300_idle_notifier(struct no
+ idle_begin_time = ktime_get();
+ }
+
+- spin_lock_irqsave(&i7300_idle_lock, flags);
++ raw_spin_lock_irqsave(&i7300_idle_lock, flags);
+ if (val == IDLE_START) {
+
+ cpumask_set_cpu(smp_processor_id(), idle_cpumask);
+@@ -506,7 +506,7 @@ static int i7300_idle_notifier(struct no
+ }
+ }
+ end:
+- spin_unlock_irqrestore(&i7300_idle_lock, flags);
++ raw_spin_unlock_irqrestore(&i7300_idle_lock, flags);
+ return 0;
+ }
+
+@@ -548,7 +548,7 @@ struct debugfs_file_info {
+
+ static int __init i7300_idle_init(void)
+ {
+- spin_lock_init(&i7300_idle_lock);
++ raw_spin_lock_init(&i7300_idle_lock);
+ total_us = 0;
+
+ if (i7300_idle_platform_probe(&fbd_dev, &ioat_dev, forceload))
diff --git a/ipc-make-rt-aware.patch b/ipc-make-rt-aware.patch
new file mode 100644
index 0000000..a960f1c
--- /dev/null
+++ b/ipc-make-rt-aware.patch
@@ -0,0 +1,89 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:12 -0500
+Subject: ipc: Make the ipc code -rt aware
+
+RT serializes the code with the (rt)spinlock but keeps preemption
+enabled. Some parts of the code need to be atomic nevertheless.
+
+Protect them with preempt_disable/enable_rt pairs.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ ipc/mqueue.c | 5 +++++
+ ipc/msg.c | 16 ++++++++++++++++
+ 2 files changed, 21 insertions(+)
+
+Index: linux-stable/ipc/mqueue.c
+===================================================================
+--- linux-stable.orig/ipc/mqueue.c
++++ linux-stable/ipc/mqueue.c
+@@ -912,12 +912,17 @@ static inline void pipelined_send(struct
+ struct msg_msg *message,
+ struct ext_wait_queue *receiver)
+ {
++ /*
++ * Keep them in one critical section for PREEMPT_RT:
++ */
++ preempt_disable_rt();
+ receiver->msg = message;
+ list_del(&receiver->list);
+ receiver->state = STATE_PENDING;
+ wake_up_process(receiver->task);
+ smp_wmb();
+ receiver->state = STATE_READY;
++ preempt_enable_rt();
+ }
+
+ /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
+Index: linux-stable/ipc/msg.c
+===================================================================
+--- linux-stable.orig/ipc/msg.c
++++ linux-stable/ipc/msg.c
+@@ -259,12 +259,20 @@ static void expunge_all(struct msg_queue
+ while (tmp != &msq->q_receivers) {
+ struct msg_receiver *msr;
+
++ /*
++ * Make sure that the wakeup doesn't preempt
++ * this CPU prematurely. (on PREEMPT_RT)
++ */
++ preempt_disable_rt();
++
+ msr = list_entry(tmp, struct msg_receiver, r_list);
+ tmp = tmp->next;
+ msr->r_msg = NULL;
+ wake_up_process(msr->r_tsk);
+ smp_mb();
+ msr->r_msg = ERR_PTR(res);
++
++ preempt_enable_rt();
+ }
+ }
+
+@@ -611,6 +619,12 @@ static inline int pipelined_send(struct
+ !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
+ msr->r_msgtype, msr->r_mode)) {
+
++ /*
++ * Make sure that the wakeup doesn't preempt
++ * this CPU prematurely. (on PREEMPT_RT)
++ */
++ preempt_disable_rt();
++
+ list_del(&msr->r_list);
+ if (msr->r_maxsize < msg->m_ts) {
+ msr->r_msg = NULL;
+@@ -624,9 +638,11 @@ static inline int pipelined_send(struct
+ wake_up_process(msr->r_tsk);
+ smp_mb();
+ msr->r_msg = msg;
++ preempt_enable_rt();
+
+ return 1;
+ }
++ preempt_enable_rt();
+ }
+ }
+ return 0;
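
[Illustrative aside, not part of the patch: preempt_disable_rt() and
preempt_enable_rt() come from a separate patch in this series. Their intended
semantics, sketched below, are that the extra critical sections above compile
to real preemption control only on RT and to nothing on mainline; treat this
as an approximation, not the exact definitions.]

/* sketch only - not the series' literal definitions */
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_disable_rt()	preempt_disable()
# define preempt_enable_rt()	preempt_enable()
#else
# define preempt_disable_rt()	barrier()
# define preempt_enable_rt()	barrier()
#endif
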
diff --git a/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch b/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch
new file mode 100644
index 0000000..cbc2b1a
--- /dev/null
+++ b/ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch
@@ -0,0 +1,66 @@
+Subject: ipc/mqueue: Add a critical section to avoid a deadlock
+From: KOBAYASHI Yoshitake <yoshitake.kobayashi@toshiba.co.jp>
+Date: Sat, 23 Jul 2011 11:57:36 +0900
+
+(Repost for v3.0-rt1 and changed the destination addresses)
+I have tested the following patch on v3.0-rt1 with PREEMPT_RT_FULL.
+In POSIX message queue, if a sender process uses SCHED_FIFO and
+has a higher priority than a receiver process, the sender will
+be stuck at ipc/mqueue.c:452
+
+ 452 while (ewp->state == STATE_PENDING)
+ 453 cpu_relax();
+
+Description of the problem
+ (receiver process)
+ 1. receiver changes sender's state to STATE_PENDING (mqueue.c:846)
+ 2. wake up sender process and "switch to sender" (mqueue.c:847)
+ Note: This context switch only happens in PREEMPT_RT_FULL kernel.
+ (sender process)
+ 3. sender check the own state in above loop (mqueue.c:452-453)
+ *. receiver will never wake up and cannot change sender's state to
+ STATE_READY because sender has higher priority
+
+
+Signed-off-by: Yoshitake Kobayashi <yoshitake.kobayashi@toshiba.co.jp>
+Cc: viro@zeniv.linux.org.uk
+Cc: dchinner@redhat.com
+Cc: npiggin@kernel.dk
+Cc: hch@lst.de
+Cc: arnd@arndb.de
+Link: http://lkml.kernel.org/r/4E2A38A0.1090601@toshiba.co.jp
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ ipc/mqueue.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+Index: linux-stable/ipc/mqueue.c
+===================================================================
+--- linux-stable.orig/ipc/mqueue.c
++++ linux-stable/ipc/mqueue.c
+@@ -936,13 +936,18 @@ static inline void pipelined_receive(str
+ wake_up_interruptible(&info->wait_q);
+ return;
+ }
+- if (msg_insert(sender->msg, info))
+- return;
+- list_del(&sender->list);
+- sender->state = STATE_PENDING;
+- wake_up_process(sender->task);
+- smp_wmb();
+- sender->state = STATE_READY;
++ /*
++ * Keep them in one critical section for PREEMPT_RT:
++ */
++ preempt_disable_rt();
++ if (!msg_insert(sender->msg, info)) {
++ list_del(&sender->list);
++ sender->state = STATE_PENDING;
++ wake_up_process(sender->task);
++ smp_wmb();
++ sender->state = STATE_READY;
++ }
++ preempt_enable_rt();
+ }
+
+ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
diff --git a/ipc-sem-rework-semaphore-wakeups.patch b/ipc-sem-rework-semaphore-wakeups.patch
new file mode 100644
index 0000000..0c6abe6
--- /dev/null
+++ b/ipc-sem-rework-semaphore-wakeups.patch
@@ -0,0 +1,75 @@
+Subject: ipc/sem: Rework semaphore wakeups
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 14 Sep 2011 11:57:04 +0200
+
+Subject: ipc/sem: Rework semaphore wakeups
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Tue Sep 13 15:09:40 CEST 2011
+
+Current sysv sems have a weird ass wakeup scheme that involves keeping
+preemption disabled over a potential O(n^2) loop and busy waiting on
+that on other CPUs.
+
+Kill this and simply wake the task directly from under the sem_lock.
+
+This was discovered by a migrate_disable() debug feature that
+disallows:
+
+ spin_lock();
+ preempt_disable();
+ spin_unlock()
+ preempt_enable();
+
+Cc: Manfred Spraul <manfred@colorfullife.com>
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Reported-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Manfred Spraul <manfred@colorfullife.com>
+Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ ipc/sem.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+Index: linux-stable/ipc/sem.c
+===================================================================
+--- linux-stable.orig/ipc/sem.c
++++ linux-stable/ipc/sem.c
+@@ -461,6 +461,13 @@ undo:
+ static void wake_up_sem_queue_prepare(struct list_head *pt,
+ struct sem_queue *q, int error)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct task_struct *p = q->sleeper;
++ get_task_struct(p);
++ q->status = error;
++ wake_up_process(p);
++ put_task_struct(p);
++#else
+ if (list_empty(pt)) {
+ /*
+ * Hold preempt off so that we don't get preempted and have the
+@@ -472,6 +479,7 @@ static void wake_up_sem_queue_prepare(st
+ q->pid = error;
+
+ list_add_tail(&q->simple_list, pt);
++#endif
+ }
+
+ /**
+@@ -485,6 +493,7 @@ static void wake_up_sem_queue_prepare(st
+ */
+ static void wake_up_sem_queue_do(struct list_head *pt)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ struct sem_queue *q, *t;
+ int did_something;
+
+@@ -497,6 +506,7 @@ static void wake_up_sem_queue_do(struct
+ }
+ if (did_something)
+ preempt_enable();
++#endif
+ }
+
+ static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
diff --git a/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
new file mode 100644
index 0000000..ba7fb0a
--- /dev/null
+++ b/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -0,0 +1,154 @@
+Subject: genirq: Allow disabling of softirq processing in irq thread context
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 31 Jan 2012 13:01:27 +0100
+
+The processing of softirqs in irq thread context is a performance gain
+for the non-rt workloads of a system, but it's counterproductive for
+interrupts which are explicitely related to the realtime
+workload. Allow such interrupts to prevent softirq processing in their
+thread context.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ include/linux/interrupt.h | 2 ++
+ include/linux/irq.h | 5 ++++-
+ kernel/irq/manage.c | 13 ++++++++++++-
+ kernel/irq/settings.h | 12 ++++++++++++
+ kernel/softirq.c | 7 +++++++
+ 5 files changed, 37 insertions(+), 2 deletions(-)
+
+Index: linux-stable/include/linux/interrupt.h
+===================================================================
+--- linux-stable.orig/include/linux/interrupt.h
++++ linux-stable/include/linux/interrupt.h
+@@ -58,6 +58,7 @@
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
+ * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
+ * resume time.
++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
+ */
+ #define IRQF_DISABLED 0x00000020
+ #define IRQF_SHARED 0x00000080
+@@ -71,6 +72,7 @@
+ #define IRQF_FORCE_RESUME 0x00008000
+ #define IRQF_NO_THREAD 0x00010000
+ #define IRQF_EARLY_RESUME 0x00020000
++#define IRQF_NO_SOFTIRQ_CALL 0x00040000
+
+ #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
+
+Index: linux-stable/include/linux/irq.h
+===================================================================
+--- linux-stable.orig/include/linux/irq.h
++++ linux-stable/include/linux/irq.h
+@@ -73,6 +73,7 @@ typedef void (*irq_preflow_handler_t)(st
+ * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context
+ * IRQ_NESTED_TRHEAD - Interrupt nests into another thread
+ * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable
++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
+ */
+ enum {
+ IRQ_TYPE_NONE = 0x00000000,
+@@ -97,12 +98,14 @@ enum {
+ IRQ_NESTED_THREAD = (1 << 15),
+ IRQ_NOTHREAD = (1 << 16),
+ IRQ_PER_CPU_DEVID = (1 << 17),
++ IRQ_NO_SOFTIRQ_CALL = (1 << 18),
+ };
+
+ #define IRQF_MODIFY_MASK \
+ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
+ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
+- IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID)
++ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
++ IRQ_NO_SOFTIRQ_CALL)
+
+ #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
+
+Index: linux-stable/kernel/irq/manage.c
+===================================================================
+--- linux-stable.orig/kernel/irq/manage.c
++++ linux-stable/kernel/irq/manage.c
+@@ -757,7 +757,15 @@ irq_forced_thread_fn(struct irq_desc *de
+ local_bh_disable();
+ ret = action->thread_fn(action->irq, action->dev_id);
+ irq_finalize_oneshot(desc, action);
+- local_bh_enable();
++ /*
++ * Interrupts which have real time requirements can be set up
++ * to avoid softirq processing in the thread handler. This is
++ * safe as these interrupts do not raise soft interrupts.
++ */
++ if (irq_settings_no_softirq_call(desc))
++ _local_bh_enable();
++ else
++ local_bh_enable();
+ return ret;
+ }
+
+@@ -1090,6 +1098,9 @@ __setup_irq(unsigned int irq, struct irq
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ }
+
++ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
++ irq_settings_set_no_softirq_call(desc);
++
+ /* Set default affinity mask once everything is setup */
+ setup_affinity(irq, desc, mask);
+
+Index: linux-stable/kernel/irq/settings.h
+===================================================================
+--- linux-stable.orig/kernel/irq/settings.h
++++ linux-stable/kernel/irq/settings.h
+@@ -14,6 +14,7 @@ enum {
+ _IRQ_NO_BALANCING = IRQ_NO_BALANCING,
+ _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
+ _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
++ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
+ _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
+ };
+
+@@ -26,6 +27,7 @@ enum {
+ #define IRQ_NOAUTOEN GOT_YOU_MORON
+ #define IRQ_NESTED_THREAD GOT_YOU_MORON
+ #define IRQ_PER_CPU_DEVID GOT_YOU_MORON
++#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
+ #undef IRQF_MODIFY_MASK
+ #define IRQF_MODIFY_MASK GOT_YOU_MORON
+
+@@ -36,6 +38,16 @@ irq_settings_clr_and_set(struct irq_desc
+ desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
+ }
+
++static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
++{
++ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
++}
++
++static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
++{
++ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
++}
++
+ static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+ {
+ return desc->status_use_accessors & _IRQ_PER_CPU;
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -434,6 +434,13 @@ void local_bh_enable_ip(unsigned long ip
+ }
+ EXPORT_SYMBOL(local_bh_enable_ip);
+
++void _local_bh_enable(void)
++{
++ current->softirq_nestcnt--;
++ migrate_enable();
++}
++EXPORT_SYMBOL(_local_bh_enable);
++
+ /* For tracing */
+ int notrace __in_softirq(void)
+ {
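
[Illustrative aside, not part of the patch: a driver with hard real-time
requirements would opt in to the new flag when requesting its threaded
handler. The example below is hypothetical - the device, names and handler
body are invented - but the flag usage follows the interface added above.]

#include <linux/interrupt.h>

/* hypothetical RT-critical device driver */
static irqreturn_t my_rt_dev_thread_fn(int irq, void *dev_id)
{
	/* handle the device; must not rely on raising softirqs here */
	return IRQ_HANDLED;
}

static int my_rt_dev_setup_irq(unsigned int irq, void *dev)
{
	/* NULL hard handler + thread_fn requires IRQF_ONESHOT */
	return request_threaded_irq(irq, NULL, my_rt_dev_thread_fn,
				    IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
				    "my-rt-dev", dev);
}
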
diff --git a/jump-label-rt.patch b/jump-label-rt.patch
new file mode 100644
index 0000000..a1c6438
--- /dev/null
+++ b/jump-label-rt.patch
@@ -0,0 +1,23 @@
+Subject: jump-label-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 13 Jul 2011 11:03:16 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/jump_label.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: linux-stable/include/linux/jump_label.h
+===================================================================
+--- linux-stable.orig/include/linux/jump_label.h
++++ linux-stable/include/linux/jump_label.h
+@@ -50,7 +50,8 @@
+ #include <linux/compiler.h>
+ #include <linux/workqueue.h>
+
+-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
++#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \
++ !defined(CONFIG_PREEMPT_BASE)
+
+ struct static_key {
+ atomic_t enabled;
diff --git a/kconfig-disable-a-few-options-rt.patch b/kconfig-disable-a-few-options-rt.patch
new file mode 100644
index 0000000..db590c2
--- /dev/null
+++ b/kconfig-disable-a-few-options-rt.patch
@@ -0,0 +1,50 @@
+Subject: kconfig-disable-a-few-options-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 24 Jul 2011 12:11:43 +0200
+
+Disable stuff which is known to have issues on RT
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/Kconfig | 1 +
+ drivers/net/Kconfig | 1 +
+ mm/Kconfig | 2 +-
+ 3 files changed, 3 insertions(+), 1 deletion(-)
+
+Index: linux-stable/arch/Kconfig
+===================================================================
+--- linux-stable.orig/arch/Kconfig
++++ linux-stable/arch/Kconfig
+@@ -6,6 +6,7 @@ config OPROFILE
+ tristate "OProfile system profiling"
+ depends on PROFILING
+ depends on HAVE_OPROFILE
++ depends on !PREEMPT_RT_FULL
+ select RING_BUFFER
+ select RING_BUFFER_ALLOW_SWAP
+ help
+Index: linux-stable/drivers/net/Kconfig
+===================================================================
+--- linux-stable.orig/drivers/net/Kconfig
++++ linux-stable/drivers/net/Kconfig
+@@ -153,6 +153,7 @@ config MACVTAP
+
+ config NETCONSOLE
+ tristate "Network console logging support"
++ depends on !PREEMPT_RT_FULL
+ ---help---
+ If you want to log kernel messages over the network, enable this.
+ See <file:Documentation/networking/netconsole.txt> for details.
+Index: linux-stable/mm/Kconfig
+===================================================================
+--- linux-stable.orig/mm/Kconfig
++++ linux-stable/mm/Kconfig
+@@ -318,7 +318,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+
+ config TRANSPARENT_HUGEPAGE
+ bool "Transparent Hugepage Support"
+- depends on X86 && MMU
++ depends on X86 && MMU && !PREEMPT_RT_FULL
+ select COMPACTION
+ help
+ Transparent Hugepages allows the kernel to use huge pages and
diff --git a/kconfig-preempt-rt-full.patch b/kconfig-preempt-rt-full.patch
new file mode 100644
index 0000000..bbeb098
--- /dev/null
+++ b/kconfig-preempt-rt-full.patch
@@ -0,0 +1,61 @@
+Subject: kconfig-preempt-rt-full.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 29 Jun 2011 14:58:57 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ init/Makefile | 2 +-
+ kernel/Kconfig.preempt | 7 +++++++
+ scripts/mkcompile_h | 4 +++-
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+
+Index: linux-stable/init/Makefile
+===================================================================
+--- linux-stable.orig/init/Makefile
++++ linux-stable/init/Makefile
+@@ -33,4 +33,4 @@ silent_chk_compile.h = :
+ include/generated/compile.h: FORCE
+ @$($(quiet)chk_compile.h)
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
+- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
+Index: linux-stable/kernel/Kconfig.preempt
+===================================================================
+--- linux-stable.orig/kernel/Kconfig.preempt
++++ linux-stable/kernel/Kconfig.preempt
+@@ -67,6 +67,13 @@ config PREEMPT_RTB
+ enables changes which are preliminary for the full preemptiple
+ RT kernel.
+
++config PREEMPT_RT_FULL
++ bool "Fully Preemptible Kernel (RT)"
++ depends on IRQ_FORCED_THREADING
++ select PREEMPT_RT_BASE
++ help
++ All and everything
++
+ endchoice
+
+ config PREEMPT_COUNT
+Index: linux-stable/scripts/mkcompile_h
+===================================================================
+--- linux-stable.orig/scripts/mkcompile_h
++++ linux-stable/scripts/mkcompile_h
+@@ -4,7 +4,8 @@ TARGET=$1
+ ARCH=$2
+ SMP=$3
+ PREEMPT=$4
+-CC=$5
++RT=$5
++CC=$6
+
+ vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
+
+@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
+ CONFIG_FLAGS=""
+ if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
+ if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
+ UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
+
+ # Truncate to maximum length
diff --git a/kgb-serial-hackaround.patch b/kgb-serial-hackaround.patch
new file mode 100644
index 0000000..732cd5b
--- /dev/null
+++ b/kgb-serial-hackaround.patch
@@ -0,0 +1,106 @@
+From: Jason Wessel <jason.wessel@windriver.com>
+Date: Thu, 28 Jul 2011 12:42:23 -0500
+Subject: kgdb/serial: Short term workaround
+
+On 07/27/2011 04:37 PM, Thomas Gleixner wrote:
+> - KGDB (not yet disabled) is reportedly unusable on -rt right now due
+> to missing hacks in the console locking which I dropped on purpose.
+>
+
+To work around this in the short term you can use this patch, in
+addition to the clocksource watchdog patch that Thomas brewed up.
+
+Comments are welcome of course. Ultimately the right solution is to
+change separation between the console and the HW to have a polled mode
++ work queue so as not to introduce any kind of latency.
+
+Thanks,
+Jason.
+
+---
+ drivers/tty/serial/8250/8250.c | 3 ++-
+ include/linux/kdb.h | 2 ++
+ kernel/debug/kdb/kdb_io.c | 6 ++----
+ 3 files changed, 6 insertions(+), 5 deletions(-)
+
+Index: linux-stable/drivers/tty/serial/8250/8250.c
+===================================================================
+--- linux-stable.orig/drivers/tty/serial/8250/8250.c
++++ linux-stable/drivers/tty/serial/8250/8250.c
+@@ -38,6 +38,7 @@
+ #include <linux/nmi.h>
+ #include <linux/mutex.h>
+ #include <linux/slab.h>
++#include <linux/kdb.h>
+ #ifdef CONFIG_SPARC
+ #include <linux/sunserialcore.h>
+ #endif
+@@ -2782,7 +2783,7 @@ serial8250_console_write(struct console
+
+ touch_nmi_watchdog();
+
+- if (port->sysrq || oops_in_progress)
++ if (port->sysrq || oops_in_progress || in_kdb_printk())
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
+Index: linux-stable/include/linux/kdb.h
+===================================================================
+--- linux-stable.orig/include/linux/kdb.h
++++ linux-stable/include/linux/kdb.h
+@@ -148,12 +148,14 @@ extern int kdb_register(char *, kdb_func
+ extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
+ short, kdb_repeat_t);
+ extern int kdb_unregister(char *);
++#define in_kdb_printk() (kdb_trap_printk)
+ #else /* ! CONFIG_KGDB_KDB */
+ #define kdb_printf(...)
+ #define kdb_init(x)
+ #define kdb_register(...)
+ #define kdb_register_repeat(...)
+ #define kdb_uregister(x)
++#define in_kdb_printk() (0)
+ #endif /* CONFIG_KGDB_KDB */
+ enum {
+ KDB_NOT_INITIALIZED,
+Index: linux-stable/kernel/debug/kdb/kdb_io.c
+===================================================================
+--- linux-stable.orig/kernel/debug/kdb/kdb_io.c
++++ linux-stable/kernel/debug/kdb/kdb_io.c
+@@ -553,7 +553,6 @@ int vkdb_printf(const char *fmt, va_list
+ int diag;
+ int linecount;
+ int logging, saved_loglevel = 0;
+- int saved_trap_printk;
+ int got_printf_lock = 0;
+ int retlen = 0;
+ int fnd, len;
+@@ -564,8 +563,6 @@ int vkdb_printf(const char *fmt, va_list
+ unsigned long uninitialized_var(flags);
+
+ preempt_disable();
+- saved_trap_printk = kdb_trap_printk;
+- kdb_trap_printk = 0;
+
+ /* Serialize kdb_printf if multiple cpus try to write at once.
+ * But if any cpu goes recursive in kdb, just print the output,
+@@ -810,7 +807,6 @@ kdb_print_out:
+ } else {
+ __release(kdb_printf_lock);
+ }
+- kdb_trap_printk = saved_trap_printk;
+ preempt_enable();
+ return retlen;
+ }
+@@ -820,9 +816,11 @@ int kdb_printf(const char *fmt, ...)
+ va_list ap;
+ int r;
+
++ kdb_trap_printk++;
+ va_start(ap, fmt);
+ r = vkdb_printf(fmt, ap);
+ va_end(ap);
++ kdb_trap_printk--;
+
+ return r;
+ }
diff --git a/latency-hist.patch b/latency-hist.patch
new file mode 100644
index 0000000..0bea807
--- /dev/null
+++ b/latency-hist.patch
@@ -0,0 +1,1825 @@
+Subject: latency-hist.patch
+From: Carsten Emde <C.Emde@osadl.org>
+Date: Tue, 19 Jul 2011 14:03:41 +0100
+
+This patch provides a recording mechanism to store data of potential
+sources of system latencies. The recordings separately determine the
+latency caused by a delayed timer expiration, by a delayed wakeup of the
+related user space program and by the sum of both. The histograms can be
+enabled and reset individually. The data are accessible via the debug
+filesystem. For details please consult Documentation/trace/histograms.txt.
+
+Signed-off-by: Carsten Emde <C.Emde@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ Documentation/trace/histograms.txt | 186 +++++
+ include/linux/hrtimer.h | 3
+ include/linux/sched.h | 6
+ include/trace/events/hist.h | 69 ++
+ include/trace/events/latency_hist.h | 30
+ kernel/hrtimer.c | 23
+ kernel/trace/Kconfig | 104 +++
+ kernel/trace/Makefile | 4
+ kernel/trace/latency_hist.c | 1176 ++++++++++++++++++++++++++++++++++++
+ kernel/trace/trace_irqsoff.c | 11
+ 10 files changed, 1612 insertions(+)
+
+Index: linux-stable/Documentation/trace/histograms.txt
+===================================================================
+--- /dev/null
++++ linux-stable/Documentation/trace/histograms.txt
+@@ -0,0 +1,186 @@
++ Using the Linux Kernel Latency Histograms
++
++
++This document gives a short explanation how to enable, configure and use
++latency histograms. Latency histograms are primarily relevant in the
++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
++and are used in the quality management of the Linux real-time
++capabilities.
++
++
++* Purpose of latency histograms
++
++A latency histogram continuously accumulates the frequencies of latency
++data. There are two types of histograms
++- potential sources of latencies
++- effective latencies
++
++
++* Potential sources of latencies
++
++Potential sources of latencies are code segments where interrupts,
++preemption or both are disabled (aka critical sections). To create
++histograms of potential sources of latency, the kernel stores the time
++stamp at the start of a critical section, determines the time elapsed
++when the end of the section is reached, and increments the frequency
++counter of that latency value - irrespective of whether any concurrently
++running process is affected by latency or not.
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_INTERRUPT_OFF_HIST
++ CONFIG_PREEMPT_OFF_HIST
++
++
++* Effective latencies
++
++Effective latencies are those actually occurring during wakeup of a process. To
++determine effective latencies, the kernel stores the time stamp when a
++process is scheduled to be woken up, and determines the duration of the
++wakeup time shortly before control is passed over to this process. Note
++that the apparent latency in user space may be somewhat longer, since the
++process may be interrupted after control is passed over to it but before
++the execution in user space takes place. Simply measuring the interval
++between enqueuing and wakeup may also not be appropriate in cases when a
++process is scheduled as a result of a timer expiration. The timer may have
++missed its deadline, e.g. due to disabled interrupts, but this latency
++would not be registered. Therefore, the offsets of missed timers are
++recorded in a separate histogram. If both wakeup latency and missed timer
++offsets are configured and enabled, a third histogram may be enabled that
++records the overall latency as a sum of the timer latency, if any, and the
++wakeup latency. This histogram is called "timerandwakeup".
++- Configuration items (in the Kernel hacking/Tracers submenu)
++ CONFIG_WAKEUP_LATENCY_HIST
++ CONFIG_MISSED_TIMER_OFFSETS_HIST
++
++
++* Usage
++
++The interface to the administration of the latency histograms is located
++in the debugfs file system. To mount it, either enter
++
++mount -t sysfs nodev /sys
++mount -t debugfs nodev /sys/kernel/debug
++
++from shell command line level, or add
++
++nodev /sys sysfs defaults 0 0
++nodev /sys/kernel/debug debugfs defaults 0 0
++
++to the file /etc/fstab. All latency histogram related files are then
++available in the directory /sys/kernel/debug/tracing/latency_hist. A
++particular histogram type is enabled by writing non-zero to the related
++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
++Select "preemptirqsoff" for the histograms of potential sources of
++latencies and "wakeup" for histograms of effective latencies etc. The
++histogram data - one per CPU - are available in the files
++
++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
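++
++For example, assuming debugfs is mounted as described above, the wakeup
++histograms can be enabled and inspected as follows:
++
++cd /sys/kernel/debug/tracing/latency_hist
++echo 1 >enable/wakeup
++# let the system run under load for a while, then inspect CPU0's data
++grep -v " 0$" wakeup/CPU0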
++
++The histograms are reset by writing non-zero to the file "reset" in a
++particular latency directory. To reset all latency data, use
++
++#!/bin/sh
++
++TRACINGDIR=/sys/kernel/debug/tracing
++HISTDIR=$TRACINGDIR/latency_hist
++
++if test -d $HISTDIR
++then
++ cd $HISTDIR
++ for i in `find . | grep /reset$`
++ do
++ echo 1 >$i
++ done
++fi
++
++
++* Data format
++
++Latency data are stored with a resolution of one microsecond. The
++maximum latency is 10,240 microseconds. The data are only valid if the
++overflow register is empty. Every output line contains the latency in
++microseconds in the first column and the number of samples in the second
++column. To display only lines with a positive latency count, use, for
++example,
++
++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
++
++#Minimum latency: 0 microseconds.
++#Average latency: 0 microseconds.
++#Maximum latency: 25 microseconds.
++#Total samples: 3104770694
++#There are 0 samples greater or equal than 10240 microseconds
++#usecs samples
++ 0 2984486876
++ 1 49843506
++ 2 58219047
++ 3 5348126
++ 4 2187960
++ 5 3388262
++ 6 959289
++ 7 208294
++ 8 40420
++ 9 4485
++ 10 14918
++ 11 18340
++ 12 25052
++ 13 19455
++ 14 5602
++ 15 969
++ 16 47
++ 17 18
++ 18 14
++ 19 1
++ 20 3
++ 21 2
++ 22 5
++ 23 2
++ 25 1
++
++
++* Wakeup latency of a selected process
++
++To only collect wakeup latency data of a particular process, write the
++PID of the requested process to
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/pid
++
++PIDs are not considered if this variable is set to 0.
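++
++For example, to restrict the recording to a single test program (here a
++cyclictest instance is assumed to be running):
++
++echo `pidof cyclictest` >/sys/kernel/debug/tracing/latency_hist/wakeup/pid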
++
++
++* Details of the process with the highest wakeup latency so far
++
++Selected data of the process that suffered from the highest wakeup
++latency that occurred in a particular CPU are available in the file
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
++
++In addition, other relevant system data at the time when the
++latency occurred are given.
++
++The format of the data is (all in one line):
++<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
++<- <PID> <Priority> <Command> <Timestamp>
++
++The value of <Timeroffset> is only relevant in the combined timer
++and wakeup latency recording. In the wakeup recording, it is
++always 0, in the missed_timer_offsets recording, it is the same
++as <Latency>.
++
++When retrospectively searching for the origin of a latency and
++tracing was not enabled, it may be helpful to know the name and
++some basic data of the task that (finally) switched to the late
++real-time task. In addition to the victim's data, the data of the
++possible culprit are therefore displayed after the "<-" symbol.
++
++Finally, the timestamp of the time when the latency occurred
++in <seconds>.<microseconds> after the most recent system boot
++is provided.
++
++These data are also reset when the wakeup histogram is reset.
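++
++As an illustration (the numbers below are made up), a max_latency-CPUx
++line might look like
++
++  1268 98 17 (12) cyclictest <- 0 -21 swapper 1295.032455
++
++meaning that PID 1268 (cyclictest, priority 98) suffered a wakeup latency
++of 17 microseconds, 12 of which were contributed by a late timer, that the
++task running before the switch was PID 0 (swapper), and that the latency
++occurred 1295.032455 s after the most recent boot.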
+Index: linux-stable/include/linux/hrtimer.h
+===================================================================
+--- linux-stable.orig/include/linux/hrtimer.h
++++ linux-stable/include/linux/hrtimer.h
+@@ -111,6 +111,9 @@ struct hrtimer {
+ enum hrtimer_restart (*function)(struct hrtimer *);
+ struct hrtimer_clock_base *base;
+ unsigned long state;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ ktime_t praecox;
++#endif
+ #ifdef CONFIG_TIMER_STATS
+ int start_pid;
+ void *start_site;
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1583,6 +1583,12 @@ struct task_struct {
+ unsigned long trace;
+ /* bitmask and counter of trace recursion */
+ unsigned long trace_recursion;
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ u64 preempt_timestamp_hist;
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ long timer_offset;
++#endif
++#endif
+ #endif /* CONFIG_TRACING */
+ #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
+ struct memcg_batch_info {
+Index: linux-stable/include/trace/events/hist.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/trace/events/hist.h
+@@ -0,0 +1,69 @@
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM hist
++
++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_HIST_H
++
++#include "latency_hist.h"
++#include <linux/tracepoint.h>
++
++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
++#define trace_preemptirqsoff_hist(a,b)
++#else
++TRACE_EVENT(preemptirqsoff_hist,
++
++ TP_PROTO(int reason, int starthist),
++
++ TP_ARGS(reason, starthist),
++
++ TP_STRUCT__entry(
++ __field(int, reason )
++ __field(int, starthist )
++ ),
++
++ TP_fast_assign(
++ __entry->reason = reason;
++ __entry->starthist = starthist;
++ ),
++
++ TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
++ __entry->starthist ? "start" : "stop")
++);
++#endif
++
++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
++#define trace_hrtimer_interrupt(a,b,c,d)
++#else
++TRACE_EVENT(hrtimer_interrupt,
++
++ TP_PROTO(int cpu, long long offset, struct task_struct *curr, struct task_struct *task),
++
++ TP_ARGS(cpu, offset, curr, task),
++
++ TP_STRUCT__entry(
++ __field(int, cpu )
++ __field(long long, offset )
++ __array(char, ccomm, TASK_COMM_LEN)
++ __field(int, cprio )
++ __array(char, tcomm, TASK_COMM_LEN)
++ __field(int, tprio )
++ ),
++
++ TP_fast_assign(
++ __entry->cpu = cpu;
++ __entry->offset = offset;
++ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
++ __entry->cprio = curr->prio;
++ memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>", task != NULL ? TASK_COMM_LEN : 7);
++ __entry->tprio = task != NULL ? task->prio : -1;
++ ),
++
++ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
++ __entry->cpu, __entry->offset, __entry->ccomm, __entry->cprio, __entry->tcomm, __entry->tprio)
++);
++#endif
++
++#endif /* _TRACE_HIST_H */
++
++/* This part must be outside protection */
++#include <trace/define_trace.h>
+Index: linux-stable/include/trace/events/latency_hist.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/trace/events/latency_hist.h
+@@ -0,0 +1,30 @@
++#ifndef _LATENCY_HIST_H
++#define _LATENCY_HIST_H
++
++enum hist_action {
++ IRQS_ON,
++ PREEMPT_ON,
++ TRACE_STOP,
++ IRQS_OFF,
++ PREEMPT_OFF,
++ TRACE_START,
++};
++
++static char *actions[] = {
++ "IRQS_ON",
++ "PREEMPT_ON",
++ "TRACE_STOP",
++ "IRQS_OFF",
++ "PREEMPT_OFF",
++ "TRACE_START",
++};
++
++static inline char *getaction(int action)
++{
++	if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
++ return(actions[action]);
++ return("unknown");
++}
++
++#endif /* _LATENCY_HIST_H */
++
+Index: linux-stable/kernel/hrtimer.c
+===================================================================
+--- linux-stable.orig/kernel/hrtimer.c
++++ linux-stable/kernel/hrtimer.c
+@@ -49,6 +49,7 @@
+ #include <asm/uaccess.h>
+
+ #include <trace/events/timer.h>
++#include <trace/events/hist.h>
+
+ /*
+ * The timer bases:
+@@ -983,6 +984,17 @@ int __hrtimer_start_range_ns(struct hrti
+ #endif
+ }
+
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ {
++ ktime_t now = new_base->get_time();
++
++ if (ktime_to_ns(tim) < ktime_to_ns(now))
++ timer->praecox = now;
++ else
++ timer->praecox = ktime_set(0, 0);
++ }
++#endif
++
+ hrtimer_set_expires_range_ns(timer, tim, delta_ns);
+
+ timer_stats_hrtimer_set_start_info(timer);
+@@ -1246,6 +1258,8 @@ static void __run_hrtimer(struct hrtimer
+
+ #ifdef CONFIG_HIGH_RES_TIMERS
+
++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
++
+ /*
+ * High resolution timer interrupt
+ * Called with interrupts disabled
+@@ -1289,6 +1303,15 @@ retry:
+
+ timer = container_of(node, struct hrtimer, node);
+
++ trace_hrtimer_interrupt(raw_smp_processor_id(),
++ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
++ timer->praecox : hrtimer_get_expires(timer),
++ basenow)),
++ current,
++ timer->function == hrtimer_wakeup ?
++ container_of(timer, struct hrtimer_sleeper,
++ timer)->task : NULL);
++
+ /*
+ * The immediate goal for using the softexpires is
+ * minimizing wakeups, not running timers at the
+Index: linux-stable/kernel/trace/Kconfig
+===================================================================
+--- linux-stable.orig/kernel/trace/Kconfig
++++ linux-stable/kernel/trace/Kconfig
+@@ -191,6 +191,24 @@ config IRQSOFF_TRACER
+ enabled. This option and the preempt-off timing option can be
+ used together or separately.)
+
++config INTERRUPT_OFF_HIST
++ bool "Interrupts-off Latency Histogram"
++ depends on IRQSOFF_TRACER
++ help
++ This option generates continuously updated histograms (one per cpu)
++ of the duration of time periods with interrupts disabled. The
++ histograms are disabled by default. To enable them, write a non-zero
++ number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
++
++ If PREEMPT_OFF_HIST is also selected, additional histograms (one
++ per cpu) are generated that accumulate the duration of time periods
++ when both interrupts and preemption are disabled. The histogram data
++ will be located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/irqsoff
++
+ config PREEMPT_TRACER
+ bool "Preemption-off Latency Tracer"
+ default n
+@@ -213,6 +231,24 @@ config PREEMPT_TRACER
+ enabled. This option and the irqs-off timing option can be
+ used together or separately.)
+
++config PREEMPT_OFF_HIST
++ bool "Preemption-off Latency Histogram"
++ depends on PREEMPT_TRACER
++ help
++ This option generates continuously updated histograms (one per cpu)
++ of the duration of time periods with preemption disabled. The
++ histograms are disabled by default. To enable them, write a non-zero
++ number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
++
++ If INTERRUPT_OFF_HIST is also selected, additional histograms (one
++ per cpu) are generated that accumulate the duration of time periods
++ when both interrupts and preemption are disabled. The histogram data
++ will be located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/preemptoff
++
+ config SCHED_TRACER
+ bool "Scheduling Latency Tracer"
+ select GENERIC_TRACER
+@@ -222,6 +258,74 @@ config SCHED_TRACER
+ This tracer tracks the latency of the highest priority task
+ to be scheduled in, starting from the point it has woken up.
+
++config WAKEUP_LATENCY_HIST
++ bool "Scheduling Latency Histogram"
++ depends on SCHED_TRACER
++ help
++ This option generates continuously updated histograms (one per cpu)
++ of the scheduling latency of the highest priority task.
++ The histograms are disabled by default. To enable them, write a
++ non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/wakeup
++
++ Two different algorithms are used, one to determine the latency of
++ processes that exclusively use the highest priority of the system and
++ another one to determine the latency of processes that share the
++ highest system priority with other processes. The former is used to
++ improve hardware and system software, the latter to optimize the
++ priority design of a given system. The histogram data will be
++ located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/wakeup
++
++ and
++
++ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
++
++ If both Scheduling Latency Histogram and Missed Timer Offsets
++ Histogram are selected, additional histogram data will be collected
++ that contain, in addition to the wakeup latency, the timer latency, in
++ case the wakeup was triggered by an expired timer. These histograms
++ are available in the
++
++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
++
++ directory. They reflect the apparent interrupt and scheduling latency
++ and are best suited to determine the worst-case latency of a given
++ system. To enable these histograms, write a non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
++
++config MISSED_TIMER_OFFSETS_HIST
++ depends on HIGH_RES_TIMERS
++ select GENERIC_TRACER
++ bool "Missed Timer Offsets Histogram"
++ help
++ Generate a histogram of missed timer offsets in microseconds. The
++ histograms are disabled by default. To enable them, write a non-zero
++ number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
++
++ The histogram data will be located in the debug file system at
++
++ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
++
++ If both Scheduling Latency Histogram and Missed Timer Offsets
++ Histogram are selected, additional histogram data will be collected
++ that contain, in addition to the wakeup latency, the timer latency, in
++ case the wakeup was triggered by an expired timer. These histograms
++ are available in the
++
++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup
++
++ directory. They reflect the apparent interrupt and scheduling latency
++ and are best suited to determine the worst-case latency of a given
++ system. To enable these histograms, write a non-zero number to
++
++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
++
+ config ENABLE_DEFAULT_TRACERS
+ bool "Trace process context switches and events"
+ depends on !GENERIC_TRACER
+Index: linux-stable/kernel/trace/Makefile
+===================================================================
+--- linux-stable.orig/kernel/trace/Makefile
++++ linux-stable/kernel/trace/Makefile
+@@ -36,6 +36,10 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace_f
+ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
+ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o
++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o
++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o
+ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
+ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
+ obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
+Index: linux-stable/kernel/trace/latency_hist.c
+===================================================================
+--- /dev/null
++++ linux-stable/kernel/trace/latency_hist.c
+@@ -0,0 +1,1176 @@
++/*
++ * kernel/trace/latency_hist.c
++ *
++ * Add support for histograms of preemption-off latency and
++ * interrupt-off latency and wakeup latency; it depends on
++ * Real-Time Preemption Support.
++ *
++ * Copyright (C) 2005 MontaVista Software, Inc.
++ * Yi Yang <yyang@ch.mvista.com>
++ *
++ * Converted to work with the new latency tracer.
++ * Copyright (C) 2008 Red Hat, Inc.
++ * Steven Rostedt <srostedt@redhat.com>
++ *
++ */
++#include <linux/module.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/percpu.h>
++#include <linux/kallsyms.h>
++#include <linux/uaccess.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <asm/atomic.h>
++#include <asm/div64.h>
++
++#include "trace.h"
++#include <trace/events/sched.h>
++
++#define NSECS_PER_USECS 1000L
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/hist.h>
++
++enum {
++ IRQSOFF_LATENCY = 0,
++ PREEMPTOFF_LATENCY,
++ PREEMPTIRQSOFF_LATENCY,
++ WAKEUP_LATENCY,
++ WAKEUP_LATENCY_SHAREDPRIO,
++ MISSED_TIMER_OFFSETS,
++ TIMERANDWAKEUP_LATENCY,
++ MAX_LATENCY_TYPE,
++};
++
++#define MAX_ENTRY_NUM 10240
++
++struct hist_data {
++ atomic_t hist_mode; /* 0 log, 1 don't log */
++ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
++ long min_lat;
++ long max_lat;
++ unsigned long long below_hist_bound_samples;
++ unsigned long long above_hist_bound_samples;
++ long long accumulate_lat;
++ unsigned long long total_samples;
++ unsigned long long hist_array[MAX_ENTRY_NUM];
++};
++
++struct enable_data {
++ int latency_type;
++ int enabled;
++};
++
++static char *latency_hist_dir_root = "latency_hist";
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++static DEFINE_PER_CPU(struct hist_data, irqsoff_hist);
++static char *irqsoff_hist_dir = "irqsoff";
++static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start);
++static DEFINE_PER_CPU(int, hist_irqsoff_counting);
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++static DEFINE_PER_CPU(struct hist_data, preemptoff_hist);
++static char *preemptoff_hist_dir = "preemptoff";
++static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start);
++static DEFINE_PER_CPU(int, hist_preemptoff_counting);
++#endif
++
++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
++static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist);
++static char *preemptirqsoff_hist_dir = "preemptirqsoff";
++static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start);
++static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting);
++#endif
++
++#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST)
++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start);
++static struct enable_data preemptirqsoff_enabled_data = {
++ .latency_type = PREEMPTIRQSOFF_LATENCY,
++ .enabled = 0,
++};
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++struct maxlatproc_data {
++ char comm[FIELD_SIZEOF(struct task_struct, comm)];
++ char current_comm[FIELD_SIZEOF(struct task_struct, comm)];
++ int pid;
++ int current_pid;
++ int prio;
++ int current_prio;
++ long latency;
++ long timeroffset;
++ cycle_t timestamp;
++};
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist);
++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio);
++static char *wakeup_latency_hist_dir = "wakeup";
++static char *wakeup_latency_hist_dir_sharedprio = "sharedprio";
++static notrace void probe_wakeup_latency_hist_start(void *v,
++ struct task_struct *p, int success);
++static notrace void probe_wakeup_latency_hist_stop(void *v,
++ struct task_struct *prev, struct task_struct *next);
++static notrace void probe_sched_migrate_task(void *,
++ struct task_struct *task, int cpu);
++static struct enable_data wakeup_latency_enabled_data = {
++ .latency_type = WAKEUP_LATENCY,
++ .enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc);
++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio);
++static DEFINE_PER_CPU(struct task_struct *, wakeup_task);
++static DEFINE_PER_CPU(int, wakeup_sharedprio);
++static unsigned long wakeup_pid;
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets);
++static char *missed_timer_offsets_dir = "missed_timer_offsets";
++static notrace void probe_hrtimer_interrupt(void *v, int cpu,
++ long long offset, struct task_struct *curr, struct task_struct *task);
++static struct enable_data missed_timer_offsets_enabled_data = {
++ .latency_type = MISSED_TIMER_OFFSETS,
++ .enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc);
++static unsigned long missed_timer_offsets_pid;
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist);
++static char *timerandwakeup_latency_hist_dir = "timerandwakeup";
++static struct enable_data timerandwakeup_enabled_data = {
++ .latency_type = TIMERANDWAKEUP_LATENCY,
++ .enabled = 0,
++};
++static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
++#endif
++
++void notrace latency_hist(int latency_type, int cpu, long latency,
++ long timeroffset, cycle_t stop,
++ struct task_struct *p)
++{
++ struct hist_data *my_hist;
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ struct maxlatproc_data *mp = NULL;
++#endif
++
++ if (cpu < 0 || cpu >= NR_CPUS || latency_type < 0 ||
++ latency_type >= MAX_LATENCY_TYPE)
++ return;
++
++ switch (latency_type) {
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ case IRQSOFF_LATENCY:
++ my_hist = &per_cpu(irqsoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ case PREEMPTOFF_LATENCY:
++ my_hist = &per_cpu(preemptoff_hist, cpu);
++ break;
++#endif
++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ my_hist = &per_cpu(preemptirqsoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ my_hist = &per_cpu(wakeup_latency_hist, cpu);
++ mp = &per_cpu(wakeup_maxlatproc, cpu);
++ break;
++ case WAKEUP_LATENCY_SHAREDPRIO:
++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ my_hist = &per_cpu(missed_timer_offsets, cpu);
++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
++ break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ case TIMERANDWAKEUP_LATENCY:
++ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu);
++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
++ break;
++#endif
++
++ default:
++ return;
++ }
++
++ latency += my_hist->offset;
++
++ if (atomic_read(&my_hist->hist_mode) == 0)
++ return;
++
++ if (latency < 0 || latency >= MAX_ENTRY_NUM) {
++ if (latency < 0)
++ my_hist->below_hist_bound_samples++;
++ else
++ my_hist->above_hist_bound_samples++;
++ } else
++ my_hist->hist_array[latency]++;
++
++ if (unlikely(latency > my_hist->max_lat ||
++ my_hist->min_lat == LONG_MAX)) {
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ if (latency_type == WAKEUP_LATENCY ||
++ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
++ latency_type == MISSED_TIMER_OFFSETS ||
++ latency_type == TIMERANDWAKEUP_LATENCY) {
++ strncpy(mp->comm, p->comm, sizeof(mp->comm));
++ strncpy(mp->current_comm, current->comm,
++ sizeof(mp->current_comm));
++ mp->pid = task_pid_nr(p);
++ mp->current_pid = task_pid_nr(current);
++ mp->prio = p->prio;
++ mp->current_prio = current->prio;
++ mp->latency = latency;
++ mp->timeroffset = timeroffset;
++ mp->timestamp = stop;
++ }
++#endif
++ my_hist->max_lat = latency;
++ }
++ if (unlikely(latency < my_hist->min_lat))
++ my_hist->min_lat = latency;
++ my_hist->total_samples++;
++ my_hist->accumulate_lat += latency;
++}
++
++static void *l_start(struct seq_file *m, loff_t *pos)
++{
++ loff_t *index_ptr = NULL;
++ loff_t index = *pos;
++ struct hist_data *my_hist = m->private;
++
++ if (index == 0) {
++ char minstr[32], avgstr[32], maxstr[32];
++
++ atomic_dec(&my_hist->hist_mode);
++
++ if (likely(my_hist->total_samples)) {
++ long avg = (long) div64_s64(my_hist->accumulate_lat,
++ my_hist->total_samples);
++ snprintf(minstr, sizeof(minstr), "%ld",
++ my_hist->min_lat - my_hist->offset);
++ snprintf(avgstr, sizeof(avgstr), "%ld",
++ avg - my_hist->offset);
++ snprintf(maxstr, sizeof(maxstr), "%ld",
++ my_hist->max_lat - my_hist->offset);
++ } else {
++ strcpy(minstr, "<undef>");
++ strcpy(avgstr, minstr);
++ strcpy(maxstr, minstr);
++ }
++
++ seq_printf(m, "#Minimum latency: %s microseconds\n"
++ "#Average latency: %s microseconds\n"
++ "#Maximum latency: %s microseconds\n"
++ "#Total samples: %llu\n"
++ "#There are %llu samples lower than %ld"
++ " microseconds.\n"
++ "#There are %llu samples greater or equal"
++ " than %ld microseconds.\n"
++ "#usecs\t%16s\n",
++ minstr, avgstr, maxstr,
++ my_hist->total_samples,
++ my_hist->below_hist_bound_samples,
++ -my_hist->offset,
++ my_hist->above_hist_bound_samples,
++ MAX_ENTRY_NUM - my_hist->offset,
++ "samples");
++ }
++ if (index < MAX_ENTRY_NUM) {
++ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL);
++ if (index_ptr)
++ *index_ptr = index;
++ }
++
++ return index_ptr;
++}
++
++static void *l_next(struct seq_file *m, void *p, loff_t *pos)
++{
++ loff_t *index_ptr = p;
++ struct hist_data *my_hist = m->private;
++
++ if (++*pos >= MAX_ENTRY_NUM) {
++ atomic_inc(&my_hist->hist_mode);
++ return NULL;
++ }
++ *index_ptr = *pos;
++ return index_ptr;
++}
++
++static void l_stop(struct seq_file *m, void *p)
++{
++ kfree(p);
++}
++
++static int l_show(struct seq_file *m, void *p)
++{
++ int index = *(loff_t *) p;
++ struct hist_data *my_hist = m->private;
++
++ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset,
++ my_hist->hist_array[index]);
++ return 0;
++}
++
++static struct seq_operations latency_hist_seq_op = {
++ .start = l_start,
++ .next = l_next,
++ .stop = l_stop,
++ .show = l_show
++};
++
++static int latency_hist_open(struct inode *inode, struct file *file)
++{
++ int ret;
++
++ ret = seq_open(file, &latency_hist_seq_op);
++ if (!ret) {
++ struct seq_file *seq = file->private_data;
++ seq->private = inode->i_private;
++ }
++ return ret;
++}
++
++static struct file_operations latency_hist_fops = {
++ .open = latency_hist_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = seq_release,
++};
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static void clear_maxlatprocdata(struct maxlatproc_data *mp)
++{
++ mp->comm[0] = mp->current_comm[0] = '\0';
++ mp->prio = mp->current_prio = mp->pid = mp->current_pid =
++ mp->latency = mp->timeroffset = -1;
++ mp->timestamp = 0;
++}
++#endif
++
++static void hist_reset(struct hist_data *hist)
++{
++ atomic_dec(&hist->hist_mode);
++
++ memset(hist->hist_array, 0, sizeof(hist->hist_array));
++ hist->below_hist_bound_samples = 0ULL;
++ hist->above_hist_bound_samples = 0ULL;
++ hist->min_lat = LONG_MAX;
++ hist->max_lat = LONG_MIN;
++ hist->total_samples = 0ULL;
++ hist->accumulate_lat = 0LL;
++
++ atomic_inc(&hist->hist_mode);
++}
++
++static ssize_t
++latency_hist_reset(struct file *file, const char __user *a,
++ size_t size, loff_t *off)
++{
++ int cpu;
++ struct hist_data *hist = NULL;
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ struct maxlatproc_data *mp = NULL;
++#endif
++ off_t latency_type = (off_t) file->private_data;
++
++ for_each_online_cpu(cpu) {
++
++ switch (latency_type) {
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ case PREEMPTOFF_LATENCY:
++ hist = &per_cpu(preemptoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ case IRQSOFF_LATENCY:
++ hist = &per_cpu(irqsoff_hist, cpu);
++ break;
++#endif
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ hist = &per_cpu(preemptirqsoff_hist, cpu);
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ hist = &per_cpu(wakeup_latency_hist, cpu);
++ mp = &per_cpu(wakeup_maxlatproc, cpu);
++ break;
++ case WAKEUP_LATENCY_SHAREDPRIO:
++ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu);
++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu);
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ hist = &per_cpu(missed_timer_offsets, cpu);
++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu);
++ break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ case TIMERANDWAKEUP_LATENCY:
++ hist = &per_cpu(timerandwakeup_latency_hist, cpu);
++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu);
++ break;
++#endif
++ }
++
++ hist_reset(hist);
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ if (latency_type == WAKEUP_LATENCY ||
++ latency_type == WAKEUP_LATENCY_SHAREDPRIO ||
++ latency_type == MISSED_TIMER_OFFSETS ||
++ latency_type == TIMERANDWAKEUP_LATENCY)
++ clear_maxlatprocdata(mp);
++#endif
++ }
++
++ return size;
++}
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static ssize_t
++show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ int r;
++ unsigned long *this_pid = file->private_data;
++
++ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid);
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++}
++
++static ssize_t do_pid(struct file *file, const char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ unsigned long pid;
++ unsigned long *this_pid = file->private_data;
++
++ if (cnt >= sizeof(buf))
++ return -EINVAL;
++
++ if (copy_from_user(&buf, ubuf, cnt))
++ return -EFAULT;
++
++ buf[cnt] = '\0';
++
++ if (strict_strtoul(buf, 10, &pid))
++ return(-EINVAL);
++
++ *this_pid = pid;
++
++ return cnt;
++}
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static ssize_t
++show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ int r;
++ struct maxlatproc_data *mp = file->private_data;
++ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8);
++ unsigned long long t;
++ unsigned long usecs, secs;
++ char *buf;
++
++ if (mp->pid == -1 || mp->current_pid == -1) {
++ buf = "(none)\n";
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf,
++ strlen(buf));
++ }
++
++ buf = kmalloc(strmaxlen, GFP_KERNEL);
++ if (buf == NULL)
++ return -ENOMEM;
++
++ t = ns2usecs(mp->timestamp);
++ usecs = do_div(t, USEC_PER_SEC);
++ secs = (unsigned long) t;
++ r = snprintf(buf, strmaxlen,
++ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid,
++ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm,
++ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm,
++ secs, usecs);
++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++ kfree(buf);
++ return r;
++}
++#endif
++
++static ssize_t
++show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ struct enable_data *ed = file->private_data;
++ int r;
++
++ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled);
++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
++}
++
++static ssize_t
++do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos)
++{
++ char buf[64];
++ long enable;
++ struct enable_data *ed = file->private_data;
++
++ if (cnt >= sizeof(buf))
++ return -EINVAL;
++
++ if (copy_from_user(&buf, ubuf, cnt))
++ return -EFAULT;
++
++ buf[cnt] = 0;
++
++ if (strict_strtol(buf, 10, &enable))
++ return(-EINVAL);
++
++ if ((enable && ed->enabled) || (!enable && !ed->enabled))
++ return cnt;
++
++ if (enable) {
++ int ret;
++
++ switch (ed->latency_type) {
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ ret = register_trace_preemptirqsoff_hist(
++ probe_preemptirqsoff_hist, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_preemptirqsoff_hist "
++ "to trace_preemptirqsoff_hist\n");
++ return ret;
++ }
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ ret = register_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_wakeup_latency_hist_start "
++ "to trace_sched_wakeup\n");
++ return ret;
++ }
++ ret = register_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_wakeup_latency_hist_start "
++ "to trace_sched_wakeup_new\n");
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ return ret;
++ }
++ ret = register_trace_sched_switch(
++ probe_wakeup_latency_hist_stop, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_wakeup_latency_hist_stop "
++ "to trace_sched_switch\n");
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ return ret;
++ }
++ ret = register_trace_sched_migrate_task(
++ probe_sched_migrate_task, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_sched_migrate_task "
++ "to trace_sched_migrate_task\n");
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_switch(
++ probe_wakeup_latency_hist_stop, NULL);
++ return ret;
++ }
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ ret = register_trace_hrtimer_interrupt(
++ probe_hrtimer_interrupt, NULL);
++ if (ret) {
++ pr_info("wakeup trace: Couldn't assign "
++ "probe_hrtimer_interrupt "
++ "to trace_hrtimer_interrupt\n");
++ return ret;
++ }
++ break;
++#endif
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ case TIMERANDWAKEUP_LATENCY:
++ if (!wakeup_latency_enabled_data.enabled ||
++ !missed_timer_offsets_enabled_data.enabled)
++ return -EINVAL;
++ break;
++#endif
++ default:
++ break;
++ }
++ } else {
++ switch (ed->latency_type) {
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++ case PREEMPTIRQSOFF_LATENCY:
++ {
++ int cpu;
++
++ unregister_trace_preemptirqsoff_hist(
++ probe_preemptirqsoff_hist, NULL);
++ for_each_online_cpu(cpu) {
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ per_cpu(hist_irqsoff_counting,
++ cpu) = 0;
++#endif
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ per_cpu(hist_preemptoff_counting,
++ cpu) = 0;
++#endif
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ per_cpu(hist_preemptirqsoff_counting,
++ cpu) = 0;
++#endif
++ }
++ }
++ break;
++#endif
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ case WAKEUP_LATENCY:
++ {
++ int cpu;
++
++ unregister_trace_sched_wakeup(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_wakeup_new(
++ probe_wakeup_latency_hist_start, NULL);
++ unregister_trace_sched_switch(
++ probe_wakeup_latency_hist_stop, NULL);
++ unregister_trace_sched_migrate_task(
++ probe_sched_migrate_task, NULL);
++
++ for_each_online_cpu(cpu) {
++ per_cpu(wakeup_task, cpu) = NULL;
++ per_cpu(wakeup_sharedprio, cpu) = 0;
++ }
++ }
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ timerandwakeup_enabled_data.enabled = 0;
++#endif
++ break;
++#endif
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ case MISSED_TIMER_OFFSETS:
++ unregister_trace_hrtimer_interrupt(
++ probe_hrtimer_interrupt, NULL);
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ timerandwakeup_enabled_data.enabled = 0;
++#endif
++ break;
++#endif
++ default:
++ break;
++ }
++ }
++ ed->enabled = enable;
++ return cnt;
++}
++
++static const struct file_operations latency_hist_reset_fops = {
++ .open = tracing_open_generic,
++ .write = latency_hist_reset,
++};
++
++static const struct file_operations enable_fops = {
++ .open = tracing_open_generic,
++ .read = show_enable,
++ .write = do_enable,
++};
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++static const struct file_operations pid_fops = {
++ .open = tracing_open_generic,
++ .read = show_pid,
++ .write = do_pid,
++};
++
++static const struct file_operations maxlatproc_fops = {
++ .open = tracing_open_generic,
++ .read = show_maxlatproc,
++};
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++static notrace void probe_preemptirqsoff_hist(void *v, int reason,
++ int starthist)
++{
++ int cpu = raw_smp_processor_id();
++ int time_set = 0;
++
++ if (starthist) {
++ cycle_t uninitialized_var(start);
++
++ if (!preempt_count() && !irqs_disabled())
++ return;
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ if ((reason == IRQS_OFF || reason == TRACE_START) &&
++ !per_cpu(hist_irqsoff_counting, cpu)) {
++ per_cpu(hist_irqsoff_counting, cpu) = 1;
++ start = ftrace_now(cpu);
++ time_set++;
++ per_cpu(hist_irqsoff_start, cpu) = start;
++ }
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ if ((reason == PREEMPT_OFF || reason == TRACE_START) &&
++ !per_cpu(hist_preemptoff_counting, cpu)) {
++ per_cpu(hist_preemptoff_counting, cpu) = 1;
++ if (!(time_set++))
++ start = ftrace_now(cpu);
++ per_cpu(hist_preemptoff_start, cpu) = start;
++ }
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ if (per_cpu(hist_irqsoff_counting, cpu) &&
++ per_cpu(hist_preemptoff_counting, cpu) &&
++ !per_cpu(hist_preemptirqsoff_counting, cpu)) {
++ per_cpu(hist_preemptirqsoff_counting, cpu) = 1;
++ if (!time_set)
++ start = ftrace_now(cpu);
++ per_cpu(hist_preemptirqsoff_start, cpu) = start;
++ }
++#endif
++ } else {
++ cycle_t uninitialized_var(stop);
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ if ((reason == IRQS_ON || reason == TRACE_STOP) &&
++ per_cpu(hist_irqsoff_counting, cpu)) {
++ cycle_t start = per_cpu(hist_irqsoff_start, cpu);
++
++ stop = ftrace_now(cpu);
++ time_set++;
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
++ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
++ stop, NULL);
++ }
++ per_cpu(hist_irqsoff_counting, cpu) = 0;
++ }
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
++ per_cpu(hist_preemptoff_counting, cpu)) {
++ cycle_t start = per_cpu(hist_preemptoff_start, cpu);
++
++ if (!(time_set++))
++ stop = ftrace_now(cpu);
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
++ latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
++ 0, stop, NULL);
++ }
++ per_cpu(hist_preemptoff_counting, cpu) = 0;
++ }
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ if ((!per_cpu(hist_irqsoff_counting, cpu) ||
++ !per_cpu(hist_preemptoff_counting, cpu)) &&
++ per_cpu(hist_preemptirqsoff_counting, cpu)) {
++ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
++
++ if (!time_set)
++ stop = ftrace_now(cpu);
++ if (start) {
++ long latency = ((long) (stop - start)) /
++ NSECS_PER_USECS;
++
++ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
++ latency, 0, stop, NULL);
++ }
++ per_cpu(hist_preemptirqsoff_counting, cpu) = 0;
++ }
++#endif
++ }
++}
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++static DEFINE_RAW_SPINLOCK(wakeup_lock);
++static notrace void probe_sched_migrate_task(void *v, struct task_struct *task,
++ int cpu)
++{
++ int old_cpu = task_cpu(task);
++
++ if (cpu != old_cpu) {
++ unsigned long flags;
++ struct task_struct *cpu_wakeup_task;
++
++ raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu);
++ if (task == cpu_wakeup_task) {
++ put_task_struct(cpu_wakeup_task);
++ per_cpu(wakeup_task, old_cpu) = NULL;
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task;
++ get_task_struct(cpu_wakeup_task);
++ }
++
++ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++ }
++}
++
++static notrace void probe_wakeup_latency_hist_start(void *v,
++ struct task_struct *p, int success)
++{
++ unsigned long flags;
++ struct task_struct *curr = current;
++ int cpu = task_cpu(p);
++ struct task_struct *cpu_wakeup_task;
++
++ raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
++
++ if (wakeup_pid) {
++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
++ p->prio == curr->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++ if (likely(wakeup_pid != task_pid_nr(p)))
++ goto out;
++ } else {
++ if (likely(!rt_task(p)) ||
++ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) ||
++ p->prio > curr->prio)
++ goto out;
++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) ||
++ p->prio == curr->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++ }
++
++ if (cpu_wakeup_task)
++ put_task_struct(cpu_wakeup_task);
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p;
++ get_task_struct(cpu_wakeup_task);
++ cpu_wakeup_task->preempt_timestamp_hist =
++ ftrace_now(raw_smp_processor_id());
++out:
++ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++}
++
++static notrace void probe_wakeup_latency_hist_stop(void *v,
++ struct task_struct *prev, struct task_struct *next)
++{
++ unsigned long flags;
++ int cpu = task_cpu(next);
++ long latency;
++ cycle_t stop;
++ struct task_struct *cpu_wakeup_task;
++
++ raw_spin_lock_irqsave(&wakeup_lock, flags);
++
++ cpu_wakeup_task = per_cpu(wakeup_task, cpu);
++
++ if (cpu_wakeup_task == NULL)
++ goto out;
++
++ /* Already running? */
++ if (unlikely(current == cpu_wakeup_task))
++ goto out_reset;
++
++ if (next != cpu_wakeup_task) {
++ if (next->prio < cpu_wakeup_task->prio)
++ goto out_reset;
++
++ if (next->prio == cpu_wakeup_task->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++
++ goto out;
++ }
++
++ if (current->prio == cpu_wakeup_task->prio)
++ per_cpu(wakeup_sharedprio, cpu) = 1;
++
++ /*
++ * The task we are waiting for is about to be switched to.
++ * Calculate latency and store it in histogram.
++ */
++ stop = ftrace_now(raw_smp_processor_id());
++
++ latency = ((long) (stop - next->preempt_timestamp_hist)) /
++ NSECS_PER_USECS;
++
++ if (per_cpu(wakeup_sharedprio, cpu)) {
++ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
++ next);
++ per_cpu(wakeup_sharedprio, cpu) = 0;
++ } else {
++ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next);
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ if (timerandwakeup_enabled_data.enabled) {
++ latency_hist(TIMERANDWAKEUP_LATENCY, cpu,
++ next->timer_offset + latency, next->timer_offset,
++ stop, next);
++ }
++#endif
++ }
++
++out_reset:
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ next->timer_offset = 0;
++#endif
++ put_task_struct(cpu_wakeup_task);
++ per_cpu(wakeup_task, cpu) = NULL;
++out:
++ raw_spin_unlock_irqrestore(&wakeup_lock, flags);
++}
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++static notrace void probe_hrtimer_interrupt(void *v, int cpu,
++ long long latency_ns, struct task_struct *curr, struct task_struct *task)
++{
++ if (latency_ns <= 0 && task != NULL && rt_task(task) &&
++ (task->prio < curr->prio ||
++ (task->prio == curr->prio &&
++ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
++ long latency;
++ cycle_t now;
++
++ if (missed_timer_offsets_pid) {
++ if (likely(missed_timer_offsets_pid !=
++ task_pid_nr(task)))
++ return;
++ }
++
++ now = ftrace_now(cpu);
++ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
++ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
++ task);
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ task->timer_offset = latency;
++#endif
++ }
++}
++#endif
++
++static __init int latency_hist_init(void)
++{
++ struct dentry *latency_hist_root = NULL;
++ struct dentry *dentry;
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ struct dentry *dentry_sharedprio;
++#endif
++ struct dentry *entry;
++ struct dentry *enable_root;
++ int i = 0;
++ struct hist_data *my_hist;
++ char name[64];
++ char *cpufmt = "CPU%d";
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ char *cpufmt_maxlatproc = "max_latency-CPU%d";
++ struct maxlatproc_data *mp = NULL;
++#endif
++
++ dentry = tracing_init_dentry();
++ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry);
++ enable_root = debugfs_create_dir("enable", latency_hist_root);
++
++#ifdef CONFIG_INTERRUPT_OFF_HIST
++ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(irqsoff_hist, i), &latency_hist_fops);
++ my_hist = &per_cpu(irqsoff_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#ifdef CONFIG_PREEMPT_OFF_HIST
++ dentry = debugfs_create_dir(preemptoff_hist_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(preemptoff_hist, i), &latency_hist_fops);
++ my_hist = &per_cpu(preemptoff_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST)
++ dentry = debugfs_create_dir(preemptirqsoff_hist_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
++ my_hist = &per_cpu(preemptirqsoff_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
++#endif
++
++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST)
++ entry = debugfs_create_file("preemptirqsoff", 0644,
++ enable_root, (void *)&preemptirqsoff_enabled_data,
++ &enable_fops);
++#endif
++
++#ifdef CONFIG_WAKEUP_LATENCY_HIST
++ dentry = debugfs_create_dir(wakeup_latency_hist_dir,
++ latency_hist_root);
++ dentry_sharedprio = debugfs_create_dir(
++ wakeup_latency_hist_dir_sharedprio, dentry);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(wakeup_latency_hist, i),
++ &latency_hist_fops);
++ my_hist = &per_cpu(wakeup_latency_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ entry = debugfs_create_file(name, 0444, dentry_sharedprio,
++ &per_cpu(wakeup_latency_hist_sharedprio, i),
++ &latency_hist_fops);
++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ sprintf(name, cpufmt_maxlatproc, i);
++
++ mp = &per_cpu(wakeup_maxlatproc, i);
++ entry = debugfs_create_file(name, 0444, dentry, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++
++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i);
++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++ }
++ entry = debugfs_create_file("pid", 0644, dentry,
++ (void *)&wakeup_pid, &pid_fops);
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops);
++ entry = debugfs_create_file("reset", 0644, dentry_sharedprio,
++ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops);
++ entry = debugfs_create_file("wakeup", 0644,
++ enable_root, (void *)&wakeup_latency_enabled_data,
++ &enable_fops);
++#endif
++
++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
++ dentry = debugfs_create_dir(missed_timer_offsets_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(missed_timer_offsets, i), &latency_hist_fops);
++ my_hist = &per_cpu(missed_timer_offsets, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ sprintf(name, cpufmt_maxlatproc, i);
++ mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
++ entry = debugfs_create_file(name, 0444, dentry, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++ }
++ entry = debugfs_create_file("pid", 0644, dentry,
++ (void *)&missed_timer_offsets_pid, &pid_fops);
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops);
++ entry = debugfs_create_file("missed_timer_offsets", 0644,
++ enable_root, (void *)&missed_timer_offsets_enabled_data,
++ &enable_fops);
++#endif
++
++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \
++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
++ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir,
++ latency_hist_root);
++ for_each_possible_cpu(i) {
++ sprintf(name, cpufmt, i);
++ entry = debugfs_create_file(name, 0444, dentry,
++ &per_cpu(timerandwakeup_latency_hist, i),
++ &latency_hist_fops);
++ my_hist = &per_cpu(timerandwakeup_latency_hist, i);
++ atomic_set(&my_hist->hist_mode, 1);
++ my_hist->min_lat = LONG_MAX;
++
++ sprintf(name, cpufmt_maxlatproc, i);
++ mp = &per_cpu(timerandwakeup_maxlatproc, i);
++ entry = debugfs_create_file(name, 0444, dentry, mp,
++ &maxlatproc_fops);
++ clear_maxlatprocdata(mp);
++ }
++ entry = debugfs_create_file("reset", 0644, dentry,
++ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops);
++ entry = debugfs_create_file("timerandwakeup", 0644,
++ enable_root, (void *)&timerandwakeup_enabled_data,
++ &enable_fops);
++#endif
++ return 0;
++}
++
++__initcall(latency_hist_init);
+Index: linux-stable/kernel/trace/trace_irqsoff.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace_irqsoff.c
++++ linux-stable/kernel/trace/trace_irqsoff.c
+@@ -17,6 +17,7 @@
+ #include <linux/fs.h>
+
+ #include "trace.h"
++#include <trace/events/hist.h>
+
+ static struct trace_array *irqsoff_trace __read_mostly;
+ static int tracer_enabled __read_mostly;
+@@ -437,11 +438,13 @@ void start_critical_timings(void)
+ {
+ if (preempt_trace() || irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist(TRACE_START, 1);
+ }
+ EXPORT_SYMBOL_GPL(start_critical_timings);
+
+ void stop_critical_timings(void)
+ {
++ trace_preemptirqsoff_hist(TRACE_STOP, 0);
+ if (preempt_trace() || irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -451,6 +454,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+ #ifdef CONFIG_PROVE_LOCKING
+ void time_hardirqs_on(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(a0, a1);
+ }
+@@ -459,6 +463,7 @@ void time_hardirqs_off(unsigned long a0,
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(a0, a1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+
+ #else /* !CONFIG_PROVE_LOCKING */
+@@ -484,6 +489,7 @@ inline void print_irqtrace_events(struct
+ */
+ void trace_hardirqs_on(void)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
+ }
+@@ -493,11 +499,13 @@ void trace_hardirqs_off(void)
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off);
+
+ void trace_hardirqs_on_caller(unsigned long caller_addr)
+ {
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
+ if (!preempt_trace() && irq_trace())
+ stop_critical_timing(CALLER_ADDR0, caller_addr);
+ }
+@@ -507,6 +515,7 @@ void trace_hardirqs_off_caller(unsigned
+ {
+ if (!preempt_trace() && irq_trace())
+ start_critical_timing(CALLER_ADDR0, caller_addr);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+@@ -516,12 +525,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
+ #ifdef CONFIG_PREEMPT_TRACER
+ void trace_preempt_on(unsigned long a0, unsigned long a1)
+ {
++ trace_preemptirqsoff_hist(PREEMPT_ON, 0);
+ if (preempt_trace() && !irq_trace())
+ stop_critical_timing(a0, a1);
+ }
+
+ void trace_preempt_off(unsigned long a0, unsigned long a1)
+ {
++	trace_preemptirqsoff_hist(PREEMPT_OFF, 1);
+ if (preempt_trace() && !irq_trace())
+ start_critical_timing(a0, a1);
+ }
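+
+As a quick reference (this fragment is not part of the patch itself), the
+histogram code above is built when the corresponding options from
+kernel/trace/Kconfig are enabled, e.g. in .config:
+
+	CONFIG_HIGH_RES_TIMERS=y
+	CONFIG_IRQSOFF_TRACER=y
+	CONFIG_INTERRUPT_OFF_HIST=y
+	CONFIG_PREEMPT_TRACER=y
+	CONFIG_PREEMPT_OFF_HIST=y
+	CONFIG_SCHED_TRACER=y
+	CONFIG_WAKEUP_LATENCY_HIST=y
+	CONFIG_MISSED_TIMER_OFFSETS_HIST=y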
diff --git a/lglocks-rt.patch b/lglocks-rt.patch
new file mode 100644
index 0000000..9f19696
--- /dev/null
+++ b/lglocks-rt.patch
@@ -0,0 +1,168 @@
+Subject: lglocks-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 15 Jun 2011 11:02:21 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/lglock.h | 13 ++++++++++-
+ kernel/lglock.c | 54 ++++++++++++++++++++++++++++++++-----------------
+ 2 files changed, 48 insertions(+), 19 deletions(-)
+
+Index: linux-stable/include/linux/lglock.h
+===================================================================
+--- linux-stable.orig/include/linux/lglock.h
++++ linux-stable/include/linux/lglock.h
+@@ -49,18 +49,29 @@
+ #endif
+
+ struct lglock {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ arch_spinlock_t __percpu *lock;
++#else
++ struct rt_mutex __percpu *lock;
++#endif
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lock_class_key lock_key;
+ struct lockdep_map lock_dep_map;
+ #endif
+ };
+
+-#define DEFINE_LGLOCK(name) \
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define DEFINE_LGLOCK(name) \
+ DEFINE_LGLOCK_LOCKDEP(name); \
+ DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+ struct lglock name = { .lock = &name ## _lock }
++#else
++# define DEFINE_LGLOCK(name) \
++ DEFINE_LGLOCK_LOCKDEP(name); \
++ DEFINE_PER_CPU(struct rt_mutex, name ## _lock); \
++ struct lglock name = { .lock = &name ## _lock }
++#endif
+
+ void lg_lock_init(struct lglock *lg, char *name);
+ void lg_local_lock(struct lglock *lg);
+Index: linux-stable/kernel/lglock.c
+===================================================================
+--- linux-stable.orig/kernel/lglock.c
++++ linux-stable/kernel/lglock.c
+@@ -4,6 +4,15 @@
+ #include <linux/cpu.h>
+ #include <linux/string.h>
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define lg_lock_ptr arch_spinlock_t
++# define lg_do_lock(l) arch_spin_lock(l)
++# define lg_do_unlock(l) arch_spin_unlock(l)
++#else
++# define lg_lock_ptr struct rt_mutex
++# define lg_do_lock(l) __rt_spin_lock(l)
++# define lg_do_unlock(l) __rt_spin_unlock(l)
++#endif
+ /*
+ * Note there is no uninit, so lglocks cannot be defined in
+ * modules (but it's fine to use them from there)
+@@ -12,51 +21,60 @@
+
+ void lg_lock_init(struct lglock *lg, char *name)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int i;
++
++ for_each_possible_cpu(i) {
++ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
++
++ rt_mutex_init(lock);
++ }
++#endif
+ LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
+ }
+ EXPORT_SYMBOL(lg_lock_init);
+
+ void lg_local_lock(struct lglock *lg)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+- preempt_disable();
++ migrate_disable();
+ rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock = this_cpu_ptr(lg->lock);
+- arch_spin_lock(lock);
++ lg_do_lock(lock);
+ }
+ EXPORT_SYMBOL(lg_local_lock);
+
+ void lg_local_unlock(struct lglock *lg)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+ rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock = this_cpu_ptr(lg->lock);
+- arch_spin_unlock(lock);
+- preempt_enable();
++ lg_do_unlock(lock);
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(lg_local_unlock);
+
+ void lg_local_lock_cpu(struct lglock *lg, int cpu)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+- preempt_disable();
++ preempt_disable_nort();
+ rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock = per_cpu_ptr(lg->lock, cpu);
+- arch_spin_lock(lock);
++ lg_do_lock(lock);
+ }
+ EXPORT_SYMBOL(lg_local_lock_cpu);
+
+ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
+ {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+
+ rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock = per_cpu_ptr(lg->lock, cpu);
+- arch_spin_unlock(lock);
+- preempt_enable();
++ lg_do_unlock(lock);
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(lg_local_unlock_cpu);
+
+@@ -64,12 +82,12 @@ void lg_global_lock(struct lglock *lg)
+ {
+ int i;
+
+- preempt_disable();
++ preempt_disable_nort();
+ rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ for_each_possible_cpu(i) {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+- arch_spin_lock(lock);
++ lg_do_lock(lock);
+ }
+ }
+ EXPORT_SYMBOL(lg_global_lock);
+@@ -80,10 +98,10 @@ void lg_global_unlock(struct lglock *lg)
+
+ rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ for_each_possible_cpu(i) {
+- arch_spinlock_t *lock;
++ lg_lock_ptr *lock;
+ lock = per_cpu_ptr(lg->lock, i);
+- arch_spin_unlock(lock);
++ lg_do_unlock(lock);
+ }
+- preempt_enable();
++ preempt_enable_nort();
+ }
+ EXPORT_SYMBOL(lg_global_unlock);
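
For orientation, a minimal caller-side sketch of the lglock API whose RT behaviour the patch above changes; the lglock name and the data it protects are invented for the example. On mainline lg_local_lock() disables preemption and takes this CPU's arch spinlock; with PREEMPT_RT_FULL it only disables migration and takes this CPU's rt_mutex, so the section stays preemptible.

#include <linux/lglock.h>

/*
 * Hypothetical lglock; lg_lock_init(&example_lglock, "example_lglock")
 * is assumed to run once during setup.
 */
DEFINE_LGLOCK(example_lglock);

static void example_local_update(void)
{
	/* Common case: serialize only against this CPU's users. */
	lg_local_lock(&example_lglock);
	/* ... update this CPU's share of the protected data ... */
	lg_local_unlock(&example_lglock);
}

static void example_global_scan(void)
{
	/* Rare case: take every CPU's lock for a consistent view. */
	lg_global_lock(&example_lglock);
	/* ... walk all per-CPU data ... */
	lg_global_unlock(&example_lglock);
}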
diff --git a/list-add-list-last-entry.patch b/list-add-list-last-entry.patch
new file mode 100644
index 0000000..1739281
--- /dev/null
+++ b/list-add-list-last-entry.patch
@@ -0,0 +1,31 @@
+Subject: list-add-list-last-entry.patch
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 21 Jun 2011 11:22:36 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/list.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+Index: linux-stable/include/linux/list.h
+===================================================================
+--- linux-stable.orig/include/linux/list.h
++++ linux-stable/include/linux/list.h
+@@ -362,6 +362,17 @@ static inline void list_splice_tail_init
+ list_entry((ptr)->next, type, member)
+
+ /**
++ * list_last_entry - get the last element from a list
++ * @ptr: the list head to take the element from.
++ * @type: the type of the struct this is embedded in.
++ * @member: the name of the list_struct within the struct.
++ *
++ * Note, that list is expected to be not empty.
++ */
++#define list_last_entry(ptr, type, member) \
++ list_entry((ptr)->prev, type, member)
++
++/**
+ * list_for_each - iterate over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
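
The new helper is simply the mirror of list_first_entry(): it maps the node before the list head back to its containing structure. A small, hypothetical kernel-style usage sketch:

#include <linux/list.h>

struct request_entry {
	int id;
	struct list_head lru;
};

static LIST_HEAD(request_list);

/* As the comment in list.h says, the list must not be empty here. */
static struct request_entry *newest_request(void)
{
	return list_last_entry(&request_list, struct request_entry, lru);
}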
diff --git a/local-irq-rt-depending-variants.patch b/local-irq-rt-depending-variants.patch
new file mode 100644
index 0000000..406559f
--- /dev/null
+++ b/local-irq-rt-depending-variants.patch
@@ -0,0 +1,56 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 21 Jul 2009 22:34:14 +0200
+Subject: rt: local_irq_* variants depending on RT/!RT
+
+Add local_irq_*_(no)rt variants which are mainly used to break
+interrupt-disabled sections on PREEMPT_RT or to explicitly disable
+interrupts on PREEMPT_RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/interrupt.h | 2 +-
+ include/linux/irqflags.h | 19 +++++++++++++++++++
+ 2 files changed, 20 insertions(+), 1 deletion(-)
+
+Index: linux-stable/include/linux/interrupt.h
+===================================================================
+--- linux-stable.orig/include/linux/interrupt.h
++++ linux-stable/include/linux/interrupt.h
+@@ -211,7 +211,7 @@ extern void devm_free_irq(struct device
+ #ifdef CONFIG_LOCKDEP
+ # define local_irq_enable_in_hardirq() do { } while (0)
+ #else
+-# define local_irq_enable_in_hardirq() local_irq_enable()
++# define local_irq_enable_in_hardirq() local_irq_enable_nort()
+ #endif
+
+ extern void disable_irq_nosync(unsigned int irq);
+Index: linux-stable/include/linux/irqflags.h
+===================================================================
+--- linux-stable.orig/include/linux/irqflags.h
++++ linux-stable/include/linux/irqflags.h
+@@ -147,4 +147,23 @@
+
+ #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
+
++/*
++ * local_irq* variants depending on RT/!RT
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define local_irq_disable_nort() do { } while (0)
++# define local_irq_enable_nort() do { } while (0)
++# define local_irq_save_nort(flags) do { local_save_flags(flags); } while (0)
++# define local_irq_restore_nort(flags) do { (void)(flags); } while (0)
++# define local_irq_disable_rt() local_irq_disable()
++# define local_irq_enable_rt() local_irq_enable()
++#else
++# define local_irq_disable_nort() local_irq_disable()
++# define local_irq_enable_nort() local_irq_enable()
++# define local_irq_save_nort(flags) local_irq_save(flags)
++# define local_irq_restore_nort(flags) local_irq_restore(flags)
++# define local_irq_disable_rt() do { } while (0)
++# define local_irq_enable_rt() do { } while (0)
++#endif
++
+ #endif
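
A hedged usage sketch of the new helpers; the function below is invented for illustration. On mainline the section genuinely runs with interrupts off, while on PREEMPT_RT_FULL local_irq_save_nort() only saves the flags and the section stays preemptible, the assumption being that on RT the data is protected by a sleeping lock instead.

#include <linux/irqflags.h>

static void example_update_stats(void)
{
	unsigned long flags;

	local_irq_save_nort(flags);	/* interrupts off only on !RT */
	/* ... short critical section, must not sleep on !RT ... */
	local_irq_restore_nort(flags);
}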
diff --git a/local-var.patch b/local-var.patch
new file mode 100644
index 0000000..f4496db
--- /dev/null
+++ b/local-var.patch
@@ -0,0 +1,25 @@
+Subject: local-var.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 24 Jun 2011 18:40:37 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/percpu.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+Index: linux-stable/include/linux/percpu.h
+===================================================================
+--- linux-stable.orig/include/linux/percpu.h
++++ linux-stable/include/linux/percpu.h
+@@ -48,6 +48,11 @@
+ preempt_enable(); \
+ } while (0)
+
++#define get_local_var(var) get_cpu_var(var)
++#define put_local_var(var) put_cpu_var(var)
++#define get_local_ptr(var) get_cpu_ptr(var)
++#define put_local_ptr(var) put_cpu_ptr(var)
++
+ /* minimum unit size, also is the maximum supported allocation size */
+ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
+
diff --git a/local-vars-migrate-disable.patch b/local-vars-migrate-disable.patch
new file mode 100644
index 0000000..99f6827
--- /dev/null
+++ b/local-vars-migrate-disable.patch
@@ -0,0 +1,48 @@
+Subject: local-vars-migrate-disable.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 28 Jun 2011 20:42:16 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/percpu.h | 28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+Index: linux-stable/include/linux/percpu.h
+===================================================================
+--- linux-stable.orig/include/linux/percpu.h
++++ linux-stable/include/linux/percpu.h
+@@ -48,10 +48,30 @@
+ preempt_enable(); \
+ } while (0)
+
+-#define get_local_var(var) get_cpu_var(var)
+-#define put_local_var(var) put_cpu_var(var)
+-#define get_local_ptr(var) get_cpu_ptr(var)
+-#define put_local_ptr(var) put_cpu_ptr(var)
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define get_local_var(var) get_cpu_var(var)
++# define put_local_var(var) put_cpu_var(var)
++# define get_local_ptr(var) get_cpu_ptr(var)
++# define put_local_ptr(var) put_cpu_ptr(var)
++#else
++# define get_local_var(var) (*({ \
++ migrate_disable(); \
++ &__get_cpu_var(var); }))
++
++# define put_local_var(var) do { \
++ (void)&(var); \
++ migrate_enable(); \
++} while (0)
++
++# define get_local_ptr(var) ({ \
++ migrate_disable(); \
++ this_cpu_ptr(var); })
++
++# define put_local_ptr(var) do { \
++ (void)(var); \
++ migrate_enable(); \
++} while (0)
++#endif
+
+ /* minimum unit size, also is the maximum supported allocation size */
+ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
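
Taken together with local-var.patch, this gives callers one spelling for "pin me to this CPU's instance": on !RT get_local_var() is get_cpu_var() (preemption off), on RT it only disables migration, so the caller may still be preempted or sleep. An illustrative sketch with an invented per-CPU counter, assumed to be touched from process context only:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_events);

static void example_count_event(void)
{
	unsigned long *cnt = &get_local_var(example_events);

	/* No migration until put_local_var(); on RT we may still be preempted. */
	(*cnt)++;
	put_local_var(example_events);
}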
diff --git a/localversion.patch b/localversion.patch
new file mode 100644
index 0000000..db9259c
--- /dev/null
+++ b/localversion.patch
@@ -0,0 +1,17 @@
+Subject: localversion.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 08 Jul 2011 20:25:16 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/n/tip-8vdw4bfcsds27cvox6rpb334@git.kernel.org
+---
+ localversion-rt | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/localversion-rt
+===================================================================
+--- /dev/null
++++ linux-stable/localversion-rt
+@@ -0,0 +1 @@
++-rt1
diff --git a/lockdep-no-softirq-accounting-on-rt.patch b/lockdep-no-softirq-accounting-on-rt.patch
new file mode 100644
index 0000000..7ae3423
--- /dev/null
+++ b/lockdep-no-softirq-accounting-on-rt.patch
@@ -0,0 +1,60 @@
+Subject: lockdep-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 18:51:23 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/irqflags.h | 10 +++++++---
+ kernel/lockdep.c | 2 ++
+ 2 files changed, 9 insertions(+), 3 deletions(-)
+
+Index: linux-stable/include/linux/irqflags.h
+===================================================================
+--- linux-stable.orig/include/linux/irqflags.h
++++ linux-stable/include/linux/irqflags.h
+@@ -25,8 +25,6 @@
+ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
+ # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
+ # define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
+-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+ # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
+ #else
+ # define trace_hardirqs_on() do { } while (0)
+@@ -39,9 +37,15 @@
+ # define trace_softirqs_enabled(p) 0
+ # define trace_hardirq_enter() do { } while (0)
+ # define trace_hardirq_exit() do { } while (0)
++# define INIT_TRACE_IRQFLAGS
++#endif
++
++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
++#else
+ # define lockdep_softirq_enter() do { } while (0)
+ # define lockdep_softirq_exit() do { } while (0)
+-# define INIT_TRACE_IRQFLAGS
+ #endif
+
+ #if defined(CONFIG_IRQSOFF_TRACER) || \
+Index: linux-stable/kernel/lockdep.c
+===================================================================
+--- linux-stable.orig/kernel/lockdep.c
++++ linux-stable/kernel/lockdep.c
+@@ -3495,6 +3495,7 @@ static void check_flags(unsigned long fl
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * We dont accurately track softirq state in e.g.
+ * hardirq contexts (such as on 4KSTACKS), so only
+@@ -3509,6 +3510,7 @@ static void check_flags(unsigned long fl
+ DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
+ }
+ }
++#endif
+
+ if (!debug_locks)
+ print_irqtrace_events(current);
diff --git a/lockdep-selftest-convert-spinlock-to-raw-spinlock.patch b/lockdep-selftest-convert-spinlock-to-raw-spinlock.patch
new file mode 100644
index 0000000..734a9da
--- /dev/null
+++ b/lockdep-selftest-convert-spinlock-to-raw-spinlock.patch
@@ -0,0 +1,92 @@
+Subject: lockdep: Selftest: convert spinlock to raw spinlock
+From: Yong Zhang <yong.zhang0@gmail.com>
+Date: Mon, 16 Apr 2012 15:01:55 +0800
+
+From: Yong Zhang <yong.zhang@windriver.com>
+
+spinlock_t is sleepable on -rt and cannot be used in
+interrupt context.
+
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Cc: Yong Zhang <yong.zhang@windriver.com>
+Link: http://lkml.kernel.org/r/1334559716-18447-2-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ lib/locking-selftest.c | 34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+Index: linux-stable/lib/locking-selftest.c
+===================================================================
+--- linux-stable.orig/lib/locking-selftest.c
++++ linux-stable/lib/locking-selftest.c
+@@ -47,10 +47,10 @@ __setup("debug_locks_verbose=", setup_de
+ * Normal standalone locks, for the circular and irq-context
+ * dependency tests:
+ */
+-static DEFINE_SPINLOCK(lock_A);
+-static DEFINE_SPINLOCK(lock_B);
+-static DEFINE_SPINLOCK(lock_C);
+-static DEFINE_SPINLOCK(lock_D);
++static DEFINE_RAW_SPINLOCK(lock_A);
++static DEFINE_RAW_SPINLOCK(lock_B);
++static DEFINE_RAW_SPINLOCK(lock_C);
++static DEFINE_RAW_SPINLOCK(lock_D);
+
+ static DEFINE_RWLOCK(rwlock_A);
+ static DEFINE_RWLOCK(rwlock_B);
+@@ -73,12 +73,12 @@ static DECLARE_RWSEM(rwsem_D);
+ * but X* and Y* are different classes. We do this so that
+ * we do not trigger a real lockup:
+ */
+-static DEFINE_SPINLOCK(lock_X1);
+-static DEFINE_SPINLOCK(lock_X2);
+-static DEFINE_SPINLOCK(lock_Y1);
+-static DEFINE_SPINLOCK(lock_Y2);
+-static DEFINE_SPINLOCK(lock_Z1);
+-static DEFINE_SPINLOCK(lock_Z2);
++static DEFINE_RAW_SPINLOCK(lock_X1);
++static DEFINE_RAW_SPINLOCK(lock_X2);
++static DEFINE_RAW_SPINLOCK(lock_Y1);
++static DEFINE_RAW_SPINLOCK(lock_Y2);
++static DEFINE_RAW_SPINLOCK(lock_Z1);
++static DEFINE_RAW_SPINLOCK(lock_Z2);
+
+ static DEFINE_RWLOCK(rwlock_X1);
+ static DEFINE_RWLOCK(rwlock_X2);
+@@ -107,10 +107,10 @@ static DECLARE_RWSEM(rwsem_Z2);
+ */
+ #define INIT_CLASS_FUNC(class) \
+ static noinline void \
+-init_class_##class(spinlock_t *lock, rwlock_t *rwlock, struct mutex *mutex, \
+- struct rw_semaphore *rwsem) \
++init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
++ struct mutex *mutex, struct rw_semaphore *rwsem)\
+ { \
+- spin_lock_init(lock); \
++ raw_spin_lock_init(lock); \
+ rwlock_init(rwlock); \
+ mutex_init(mutex); \
+ init_rwsem(rwsem); \
+@@ -168,10 +168,10 @@ static void init_shared_classes(void)
+ * Shortcuts for lock/unlock API variants, to keep
+ * the testcases compact:
+ */
+-#define L(x) spin_lock(&lock_##x)
+-#define U(x) spin_unlock(&lock_##x)
++#define L(x) raw_spin_lock(&lock_##x)
++#define U(x) raw_spin_unlock(&lock_##x)
+ #define LU(x) L(x); U(x)
+-#define SI(x) spin_lock_init(&lock_##x)
++#define SI(x) raw_spin_lock_init(&lock_##x)
+
+ #define WL(x) write_lock(&rwlock_##x)
+ #define WU(x) write_unlock(&rwlock_##x)
+@@ -911,7 +911,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
+
+ #define I2(x) \
+ do { \
+- spin_lock_init(&lock_##x); \
++ raw_spin_lock_init(&lock_##x); \
+ rwlock_init(&rwlock_##x); \
+ mutex_init(&mutex_##x); \
+ init_rwsem(&rwsem_##x); \
diff --git a/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
new file mode 100644
index 0000000..fece3d4
--- /dev/null
+++ b/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
@@ -0,0 +1,58 @@
+Subject: lockdep: Selftest: Only do hardirq context test for raw spinlock
+From: Yong Zhang <yong.zhang0@gmail.com>
+Date: Mon, 16 Apr 2012 15:01:56 +0800
+
+From: Yong Zhang <yong.zhang@windriver.com>
+
+On -rt there is no softirq context any more and rwlocks are sleepable,
+so disable the softirq context tests and the rwlock+irq tests.
+
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Cc: Yong Zhang <yong.zhang@windriver.com>
+Link: http://lkml.kernel.org/r/1334559716-18447-3-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ lib/locking-selftest.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+Index: linux-stable/lib/locking-selftest.c
+===================================================================
+--- linux-stable.orig/lib/locking-selftest.c
++++ linux-stable/lib/locking-selftest.c
+@@ -1175,6 +1175,7 @@ void locking_selftest(void)
+
+ printk(" --------------------------------------------------------------------------\n");
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * irq-context testcases:
+ */
+@@ -1187,6 +1188,28 @@ void locking_selftest(void)
+
+ DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
+ // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
++#else
++ /* On -rt, we only do hardirq context test for raw spinlock */
++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
++
++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
++
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
++
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
++#endif
+
+ if (unexpected_testcase_failures) {
+ printk("-----------------------------------------------------------------\n");
diff --git a/locking-various-init-fixes.patch b/locking-various-init-fixes.patch
new file mode 100644
index 0000000..f18974d
--- /dev/null
+++ b/locking-various-init-fixes.patch
@@ -0,0 +1,96 @@
+Subject: locking-various-init-fixes.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 21:25:03 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/char/random.c | 6 +++---
+ drivers/usb/chipidea/debug.c | 2 +-
+ fs/file.c | 2 +-
+ include/linux/idr.h | 2 +-
+ kernel/cred.c | 2 +-
+ 5 files changed, 7 insertions(+), 7 deletions(-)
+
+Index: linux-stable/drivers/char/random.c
+===================================================================
+--- linux-stable.orig/drivers/char/random.c
++++ linux-stable/drivers/char/random.c
+@@ -448,7 +448,7 @@ static struct entropy_store input_pool =
+ .poolinfo = &poolinfo_table[0],
+ .name = "input",
+ .limit = 1,
+- .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
+ .pool = input_pool_data
+ };
+
+@@ -457,7 +457,7 @@ static struct entropy_store blocking_poo
+ .name = "blocking",
+ .limit = 1,
+ .pull = &input_pool,
+- .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
+ .pool = blocking_pool_data
+ };
+
+@@ -465,7 +465,7 @@ static struct entropy_store nonblocking_
+ .poolinfo = &poolinfo_table[1],
+ .name = "nonblocking",
+ .pull = &input_pool,
+- .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
+ .pool = nonblocking_pool_data
+ };
+
+Index: linux-stable/drivers/usb/chipidea/debug.c
+===================================================================
+--- linux-stable.orig/drivers/usb/chipidea/debug.c
++++ linux-stable/drivers/usb/chipidea/debug.c
+@@ -225,7 +225,7 @@ static struct {
+ } dbg_data = {
+ .idx = 0,
+ .tty = 0,
+- .lck = __RW_LOCK_UNLOCKED(lck)
++ .lck = __RW_LOCK_UNLOCKED(dbg_data.lck)
+ };
+
+ /**
+Index: linux-stable/fs/file.c
+===================================================================
+--- linux-stable.orig/fs/file.c
++++ linux-stable/fs/file.c
+@@ -421,7 +421,7 @@ struct files_struct init_files = {
+ .close_on_exec = init_files.close_on_exec_init,
+ .open_fds = init_files.open_fds_init,
+ },
+- .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
++ .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
+ };
+
+ /*
+Index: linux-stable/include/linux/idr.h
+===================================================================
+--- linux-stable.orig/include/linux/idr.h
++++ linux-stable/include/linux/idr.h
+@@ -136,7 +136,7 @@ struct ida {
+ struct ida_bitmap *free_bitmap;
+ };
+
+-#define IDA_INIT(name) { .idr = IDR_INIT(name), .free_bitmap = NULL, }
++#define IDA_INIT(name) { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
+ #define DEFINE_IDA(name) struct ida name = IDA_INIT(name)
+
+ int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
+Index: linux-stable/kernel/cred.c
+===================================================================
+--- linux-stable.orig/kernel/cred.c
++++ linux-stable/kernel/cred.c
+@@ -36,7 +36,7 @@ static struct kmem_cache *cred_jar;
+ static struct thread_group_cred init_tgcred = {
+ .usage = ATOMIC_INIT(2),
+ .tgid = 0,
+- .lock = __SPIN_LOCK_UNLOCKED(init_cred.tgcred.lock),
++ .lock = __SPIN_LOCK_UNLOCKED(init_tgcred.lock),
+ };
+ #endif
+
diff --git a/md-raid5-percpu-handling-rt-aware.patch b/md-raid5-percpu-handling-rt-aware.patch
new file mode 100644
index 0000000..e354e70
--- /dev/null
+++ b/md-raid5-percpu-handling-rt-aware.patch
@@ -0,0 +1,65 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 6 Apr 2010 16:51:31 +0200
+Subject: md: raid5: Make raid5_percpu handling RT aware
+
+__raid_run_ops() disables preemption with get_cpu() around the access
+to the raid5_percpu variables. That causes "scheduling while atomic"
+splats on RT.
+
+Serialize the access to the percpu data with a lock and keep the code
+preemptible.
+
+Reported-by: Udo van den Heuvel <udovdh@xs4all.nl>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
+
+---
+ drivers/md/raid5.c | 7 +++++--
+ drivers/md/raid5.h | 1 +
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+Index: linux-stable/drivers/md/raid5.c
+===================================================================
+--- linux-stable.orig/drivers/md/raid5.c
++++ linux-stable/drivers/md/raid5.c
+@@ -1373,8 +1373,9 @@ static void __raid_run_ops(struct stripe
+ struct raid5_percpu *percpu;
+ unsigned long cpu;
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ percpu = per_cpu_ptr(conf->percpu, cpu);
++ spin_lock(&percpu->lock);
+ if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
+ ops_run_biofill(sh);
+ overlap_clear++;
+@@ -1426,7 +1427,8 @@ static void __raid_run_ops(struct stripe
+ if (test_and_clear_bit(R5_Overlap, &dev->flags))
+ wake_up(&sh->raid_conf->wait_for_overlap);
+ }
+- put_cpu();
++ spin_unlock(&percpu->lock);
++ put_cpu_light();
+ }
+
+ #ifdef CONFIG_MULTICORE_RAID456
+@@ -4925,6 +4927,7 @@ static int raid5_alloc_percpu(struct r5c
+ break;
+ }
+ per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
+ }
+ #ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+Index: linux-stable/drivers/md/raid5.h
+===================================================================
+--- linux-stable.orig/drivers/md/raid5.h
++++ linux-stable/drivers/md/raid5.h
+@@ -427,6 +427,7 @@ struct r5conf {
+ int recovery_disabled;
+ /* per cpu variables */
+ struct raid5_percpu {
++ spinlock_t lock; /* Protection for -RT */
+ struct page *spare_page; /* Used when checking P/Q in raid6 */
+ void *scribble; /* space for constructing buffer
+ * lists and performing address
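
The pattern used above generalizes beyond raid5: instead of relying on get_cpu()'s preempt-off section to protect per-CPU scratch data, pin the CPU with get_cpu_light() (from the RT patch set) and serialize with a per-CPU spinlock, which on RT is a sleeping lock. A condensed, illustrative sketch with invented names; allocation of the per-CPU area and spin_lock_init() of each lock are assumed to happen at setup:

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct example_percpu {
	spinlock_t lock;
	void *scratch;
};

static struct example_percpu __percpu *example_pcpu;

static void example_use_scratch(void)
{
	struct example_percpu *p;
	int cpu;

	cpu = get_cpu_light();		/* !RT: get_cpu(); RT: migrate_disable() */
	p = per_cpu_ptr(example_pcpu, cpu);
	spin_lock(&p->lock);		/* keeps preempting tasks on this CPU out */
	/* ... use p->scratch ... */
	spin_unlock(&p->lock);
	put_cpu_light();
}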
diff --git a/migrate-disable-rt-variant.patch b/migrate-disable-rt-variant.patch
new file mode 100644
index 0000000..0c73a6f
--- /dev/null
+++ b/migrate-disable-rt-variant.patch
@@ -0,0 +1,29 @@
+Subject: migrate-disable-rt-variant.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 19:48:20 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/preempt.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+Index: linux-stable/include/linux/preempt.h
+===================================================================
+--- linux-stable.orig/include/linux/preempt.h
++++ linux-stable/include/linux/preempt.h
+@@ -121,11 +121,15 @@ extern void migrate_enable(void);
+ # define preempt_enable_rt() preempt_enable()
+ # define preempt_disable_nort() do { } while (0)
+ # define preempt_enable_nort() do { } while (0)
++# define migrate_disable_rt() migrate_disable()
++# define migrate_enable_rt() migrate_enable()
+ #else
+ # define preempt_disable_rt() do { } while (0)
+ # define preempt_enable_rt() do { } while (0)
+ # define preempt_disable_nort() preempt_disable()
+ # define preempt_enable_nort() preempt_enable()
++# define migrate_disable_rt() do { } while (0)
++# define migrate_enable_rt() do { } while (0)
+ #endif
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/mips-disable-highmem-on-rt.patch b/mips-disable-highmem-on-rt.patch
new file mode 100644
index 0000000..177a7a0
--- /dev/null
+++ b/mips-disable-highmem-on-rt.patch
@@ -0,0 +1,22 @@
+Subject: mips-disable-highmem-on-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Jul 2011 17:10:12 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/mips/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/arch/mips/Kconfig
+===================================================================
+--- linux-stable.orig/arch/mips/Kconfig
++++ linux-stable/arch/mips/Kconfig
+@@ -2119,7 +2119,7 @@ config CPU_R4400_WORKAROUNDS
+ #
+ config HIGHMEM
+ bool "High Memory Support"
+- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !PREEMPT_RT_FULL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
diff --git a/mips-enable-interrupts-in-signal.patch b/mips-enable-interrupts-in-signal.patch
new file mode 100644
index 0000000..21d0b98
--- /dev/null
+++ b/mips-enable-interrupts-in-signal.patch
@@ -0,0 +1,21 @@
+Subject: mips-enable-interrupts-in-signal.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Jul 2011 21:32:10 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/mips/kernel/signal.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/arch/mips/kernel/signal.c
+===================================================================
+--- linux-stable.orig/arch/mips/kernel/signal.c
++++ linux-stable/arch/mips/kernel/signal.c
+@@ -606,6 +606,7 @@ asmlinkage void do_notify_resume(struct
+ __u32 thread_info_flags)
+ {
+ local_irq_enable();
++ preempt_check_resched();
+
+ /* deal with pending signal delivery */
+ if (thread_info_flags & _TIF_SIGPENDING)
diff --git a/mm-allow-slab-rt.patch b/mm-allow-slab-rt.patch
new file mode 100644
index 0000000..fd8366d
--- /dev/null
+++ b/mm-allow-slab-rt.patch
@@ -0,0 +1,31 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:44:03 -0500
+Subject: mm: Allow only slab on RT
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ init/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: linux-stable/init/Kconfig
+===================================================================
+--- linux-stable.orig/init/Kconfig
++++ linux-stable/init/Kconfig
+@@ -1441,6 +1441,7 @@ config SLAB
+
+ config SLUB
+ bool "SLUB (Unqueued Allocator)"
++ depends on !PREEMPT_RT_FULL
+ help
+ SLUB is a slab allocator that minimizes cache line usage
+ instead of managing queues of cached objects (SLAB approach).
+@@ -1452,6 +1453,7 @@ config SLUB
+ config SLOB
+ depends on EXPERT
+ bool "SLOB (Simple Allocator)"
++ depends on !PREEMPT_RT_FULL
+ help
+ SLOB replaces the stock allocator with a drastically simpler
+ allocator. SLOB is generally more space efficient but
diff --git a/mm-cgroup-page-bit-spinlock.patch b/mm-cgroup-page-bit-spinlock.patch
new file mode 100644
index 0000000..e814d84
--- /dev/null
+++ b/mm-cgroup-page-bit-spinlock.patch
@@ -0,0 +1,95 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 19 Aug 2009 09:56:42 +0200
+Subject: mm: Replace cgroup_page bit spinlock
+
+Bit spinlocks do not work on RT. Replace them.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/page_cgroup.h | 15 +++++++++++++++
+ mm/page_cgroup.c | 11 +++++++++++
+ 2 files changed, 26 insertions(+)
+
+Index: linux-stable/include/linux/page_cgroup.h
+===================================================================
+--- linux-stable.orig/include/linux/page_cgroup.h
++++ linux-stable/include/linux/page_cgroup.h
+@@ -24,6 +24,9 @@ enum {
+ */
+ struct page_cgroup {
+ unsigned long flags;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spinlock_t pcg_lock;
++#endif
+ struct mem_cgroup *mem_cgroup;
+ };
+
+@@ -74,12 +77,20 @@ static inline void lock_page_cgroup(stru
+ * Don't take this lock in IRQ context.
+ * This lock is for pc->mem_cgroup, USED, MIGRATION
+ */
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_lock(PCG_LOCK, &pc->flags);
++#else
++ spin_lock(&pc->pcg_lock);
++#endif
+ }
+
+ static inline void unlock_page_cgroup(struct page_cgroup *pc)
+ {
++#ifndef CONFIG_PREEMPT_RT_BASE
+ bit_spin_unlock(PCG_LOCK, &pc->flags);
++#else
++ spin_unlock(&pc->pcg_lock);
++#endif
+ }
+
+ #else /* CONFIG_MEMCG */
+@@ -102,6 +113,10 @@ static inline void __init page_cgroup_in
+ {
+ }
+
++static inline void page_cgroup_lock_init(struct page_cgroup *pc)
++{
++}
++
+ #endif /* CONFIG_MEMCG */
+
+ #include <linux/swap.h>
+Index: linux-stable/mm/page_cgroup.c
+===================================================================
+--- linux-stable.orig/mm/page_cgroup.c
++++ linux-stable/mm/page_cgroup.c
+@@ -13,6 +13,14 @@
+
+ static unsigned long total_usage;
+
++static void page_cgroup_lock_init(struct page_cgroup *pc, int nr_pages)
++{
++#ifdef CONFIG_PREEMPT_RT_BASE
++ for (; nr_pages; nr_pages--, pc++)
++ spin_lock_init(&pc->pcg_lock);
++#endif
++}
++
+ #if !defined(CONFIG_SPARSEMEM)
+
+
+@@ -60,6 +68,7 @@ static int __init alloc_node_page_cgroup
+ return -ENOMEM;
+ NODE_DATA(nid)->node_page_cgroup = base;
+ total_usage += table_size;
++ page_cgroup_lock_init(base, nr_pages);
+ return 0;
+ }
+
+@@ -150,6 +159,8 @@ static int __meminit init_section_page_c
+ return -ENOMEM;
+ }
+
++ page_cgroup_lock_init(base, PAGES_PER_SECTION);
++
+ /*
+ * The passed "pfn" may not be aligned to SECTION. For the calculation
+ * we need to apply a mask.
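
For context on why the bit spinlock has to go: bit_spin_lock() busy-waits on a flag bit with preemption disabled, which on RT can neither sleep nor take part in priority inheritance. The patch's conditional replacement, reduced to a generic sketch around a hypothetical descriptor:

#include <linux/bit_spinlock.h>
#include <linux/spinlock.h>

struct example_desc {
	unsigned long flags;	/* bit 0 doubles as the lock on !RT */
#ifdef CONFIG_PREEMPT_RT_BASE
	spinlock_t lock;	/* real (sleeping) lock on RT, needs spin_lock_init() */
#endif
};

static inline void example_desc_lock(struct example_desc *d)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_lock(0, &d->flags);
#else
	spin_lock(&d->lock);
#endif
}

static inline void example_desc_unlock(struct example_desc *d)
{
#ifndef CONFIG_PREEMPT_RT_BASE
	bit_spin_unlock(0, &d->flags);
#else
	spin_unlock(&d->lock);
#endif
}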
diff --git a/mm-convert-swap-to-percpu-locked.patch b/mm-convert-swap-to-percpu-locked.patch
new file mode 100644
index 0000000..303054c
--- /dev/null
+++ b/mm-convert-swap-to-percpu-locked.patch
@@ -0,0 +1,124 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:51 -0500
+Subject: mm: convert swap to percpu locked
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ mm/swap.c | 33 +++++++++++++++++++++------------
+ 1 file changed, 21 insertions(+), 12 deletions(-)
+
+Index: linux-stable/mm/swap.c
+===================================================================
+--- linux-stable.orig/mm/swap.c
++++ linux-stable/mm/swap.c
+@@ -30,6 +30,7 @@
+ #include <linux/backing-dev.h>
+ #include <linux/memcontrol.h>
+ #include <linux/gfp.h>
++#include <linux/locallock.h>
+
+ #include "internal.h"
+
+@@ -40,6 +41,9 @@ static DEFINE_PER_CPU(struct pagevec[NR_
+ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
+ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+
++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
++static DEFINE_LOCAL_IRQ_LOCK(swap_lock);
++
+ /*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs. But it gets used by networking.
+@@ -354,11 +358,11 @@ void rotate_reclaimable_page(struct page
+ unsigned long flags;
+
+ page_cache_get(page);
+- local_irq_save(flags);
++ local_lock_irqsave(rotate_lock, flags);
+ pvec = &__get_cpu_var(lru_rotate_pvecs);
+ if (!pagevec_add(pvec, page))
+ pagevec_move_tail(pvec);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(rotate_lock, flags);
+ }
+ }
+
+@@ -403,12 +407,13 @@ static void activate_page_drain(int cpu)
+ void activate_page(struct page *page)
+ {
+ if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
+- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
++ struct pagevec *pvec = &get_locked_var(swap_lock,
++ activate_page_pvecs);
+
+ page_cache_get(page);
+ if (!pagevec_add(pvec, page))
+ pagevec_lru_move_fn(pvec, __activate_page, NULL);
+- put_cpu_var(activate_page_pvecs);
++ put_locked_var(swap_lock, activate_page_pvecs);
+ }
+ }
+
+@@ -448,12 +453,12 @@ EXPORT_SYMBOL(mark_page_accessed);
+
+ void __lru_cache_add(struct page *page, enum lru_list lru)
+ {
+- struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];
++ struct pagevec *pvec = &get_locked_var(swap_lock, lru_add_pvecs)[lru];
+
+ page_cache_get(page);
+ if (!pagevec_add(pvec, page))
+ __pagevec_lru_add(pvec, lru);
+- put_cpu_var(lru_add_pvecs);
++ put_locked_var(swap_lock, lru_add_pvecs);
+ }
+ EXPORT_SYMBOL(__lru_cache_add);
+
+@@ -588,9 +593,9 @@ void lru_add_drain_cpu(int cpu)
+ unsigned long flags;
+
+ /* No harm done if a racing interrupt already did this */
+- local_irq_save(flags);
++ local_lock_irqsave(rotate_lock, flags);
+ pagevec_move_tail(pvec);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(rotate_lock, flags);
+ }
+
+ pvec = &per_cpu(lru_deactivate_pvecs, cpu);
+@@ -618,18 +623,19 @@ void deactivate_page(struct page *page)
+ return;
+
+ if (likely(get_page_unless_zero(page))) {
+- struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
++ struct pagevec *pvec = &get_locked_var(swap_lock,
++ lru_deactivate_pvecs);
+
+ if (!pagevec_add(pvec, page))
+ pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
+- put_cpu_var(lru_deactivate_pvecs);
++ put_locked_var(swap_lock, lru_deactivate_pvecs);
+ }
+ }
+
+ void lru_add_drain(void)
+ {
+- lru_add_drain_cpu(get_cpu());
+- put_cpu();
++ lru_add_drain_cpu(local_lock_cpu(swap_lock));
++ local_unlock_cpu(swap_lock);
+ }
+
+ static void lru_add_drain_per_cpu(struct work_struct *dummy)
+@@ -847,6 +853,9 @@ void __init swap_setup(void)
+ {
+ unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
+
++ local_irq_lock_init(rotate_lock);
++ local_irq_lock_init(swap_lock);
++
+ #ifdef CONFIG_SWAP
+ bdi_init(swapper_space.backing_dev_info);
+ #endif
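
The conversion above is the canonical local-lock idiom of the RT tree: a named DEFINE_LOCAL_IRQ_LOCK() replaces "local_irq_save() protects this CPU's data", so !RT still just disables interrupts while RT takes a per-CPU sleeping lock. A minimal sketch independent of mm/swap.c; the lock, the per-CPU list and their initialization are invented for the example:

#include <linux/list.h>
#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct list_head, example_queue);	/* INIT_LIST_HEAD()ed at boot */
static DEFINE_LOCAL_IRQ_LOCK(example_lock);			/* local_irq_lock_init()ed at boot */

static void example_queue_add(struct list_head *entry)
{
	unsigned long flags;

	/* !RT: local_irq_save(); RT: per-CPU sleeping lock, preemptible */
	local_lock_irqsave(example_lock, flags);
	list_add_tail(entry, this_cpu_ptr(&example_queue));
	local_unlock_irqrestore(example_lock, flags);
}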
diff --git a/mm-make-vmstat-rt-aware.patch b/mm-make-vmstat-rt-aware.patch
new file mode 100644
index 0000000..be91dcb
--- /dev/null
+++ b/mm-make-vmstat-rt-aware.patch
@@ -0,0 +1,88 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:13 -0500
+Subject: [PATCH] mm: make vmstat -rt aware
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/vmstat.h | 4 ++++
+ mm/vmstat.c | 6 ++++++
+ 2 files changed, 10 insertions(+)
+
+Index: linux-stable/include/linux/vmstat.h
+===================================================================
+--- linux-stable.orig/include/linux/vmstat.h
++++ linux-stable/include/linux/vmstat.h
+@@ -29,7 +29,9 @@ DECLARE_PER_CPU(struct vm_event_state, v
+
+ static inline void __count_vm_event(enum vm_event_item item)
+ {
++ preempt_disable_rt();
+ __this_cpu_inc(vm_event_states.event[item]);
++ preempt_enable_rt();
+ }
+
+ static inline void count_vm_event(enum vm_event_item item)
+@@ -39,7 +41,9 @@ static inline void count_vm_event(enum v
+
+ static inline void __count_vm_events(enum vm_event_item item, long delta)
+ {
++ preempt_disable_rt();
+ __this_cpu_add(vm_event_states.event[item], delta);
++ preempt_enable_rt();
+ }
+
+ static inline void count_vm_events(enum vm_event_item item, long delta)
+Index: linux-stable/mm/vmstat.c
+===================================================================
+--- linux-stable.orig/mm/vmstat.c
++++ linux-stable/mm/vmstat.c
+@@ -216,6 +216,7 @@ void __mod_zone_page_state(struct zone *
+ long x;
+ long t;
+
++ preempt_disable_rt();
+ x = delta + __this_cpu_read(*p);
+
+ t = __this_cpu_read(pcp->stat_threshold);
+@@ -225,6 +226,7 @@ void __mod_zone_page_state(struct zone *
+ x = 0;
+ }
+ __this_cpu_write(*p, x);
++ preempt_enable_rt();
+ }
+ EXPORT_SYMBOL(__mod_zone_page_state);
+
+@@ -257,6 +259,7 @@ void __inc_zone_state(struct zone *zone,
+ s8 __percpu *p = pcp->vm_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_inc_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v > t)) {
+@@ -265,6 +268,7 @@ void __inc_zone_state(struct zone *zone,
+ zone_page_state_add(v + overstep, zone, item);
+ __this_cpu_write(*p, -overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+@@ -279,6 +283,7 @@ void __dec_zone_state(struct zone *zone,
+ s8 __percpu *p = pcp->vm_stat_diff + item;
+ s8 v, t;
+
++ preempt_disable_rt();
+ v = __this_cpu_dec_return(*p);
+ t = __this_cpu_read(pcp->stat_threshold);
+ if (unlikely(v < - t)) {
+@@ -287,6 +292,7 @@ void __dec_zone_state(struct zone *zone,
+ zone_page_state_add(v - overstep, zone, item);
+ __this_cpu_write(*p, overstep);
+ }
++ preempt_enable_rt();
+ }
+
+ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
diff --git a/mm-page-alloc-fix.patch b/mm-page-alloc-fix.patch
new file mode 100644
index 0000000..a6ef431
--- /dev/null
+++ b/mm-page-alloc-fix.patch
@@ -0,0 +1,24 @@
+Subject: mm-page-alloc-fix.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 21 Jul 2011 16:47:49 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/page_alloc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/mm/page_alloc.c
+===================================================================
+--- linux-stable.orig/mm/page_alloc.c
++++ linux-stable/mm/page_alloc.c
+@@ -2180,8 +2180,8 @@ __alloc_pages_direct_compact(gfp_t gfp_m
+ if (*did_some_progress != COMPACT_SKIPPED) {
+
+ /* Page migration frees to the PCP lists but we want merging */
+- drain_pages(get_cpu());
+- put_cpu();
++ drain_pages(get_cpu_light());
++ put_cpu_light();
+
+ page = get_page_from_freelist(gfp_mask, nodemask,
+ order, zonelist, high_zoneidx,
diff --git a/mm-page-alloc-use-list-last-entry.patch b/mm-page-alloc-use-list-last-entry.patch
new file mode 100644
index 0000000..77d7268
--- /dev/null
+++ b/mm-page-alloc-use-list-last-entry.patch
@@ -0,0 +1,22 @@
+Subject: mm-page-alloc-use-list-last-entry.patch
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 21 Jun 2011 11:24:35 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/mm/page_alloc.c
+===================================================================
+--- linux-stable.orig/mm/page_alloc.c
++++ linux-stable/mm/page_alloc.c
+@@ -668,7 +668,7 @@ static void free_pcppages_bulk(struct zo
+ batch_free = to_free;
+
+ do {
+- page = list_entry(list->prev, struct page, lru);
++ page = list_last_entry(list, struct page, lru);
+ /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
diff --git a/mm-page-alloc-use-local-lock-on-target-cpu.patch b/mm-page-alloc-use-local-lock-on-target-cpu.patch
new file mode 100644
index 0000000..c226693
--- /dev/null
+++ b/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -0,0 +1,59 @@
+Subject: mm: page_alloc: Use local_lock_on() instead of plain spinlock
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 27 Sep 2012 11:11:46 +0200
+
+The plain spinlock, while sufficient, does not update the local_lock
+internals. Use a proper local_lock function instead to ease debugging.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ include/linux/locallock.h | 11 +++++++++++
+ mm/page_alloc.c | 4 ++--
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+
+Index: linux-stable/include/linux/locallock.h
+===================================================================
+--- linux-stable.orig/include/linux/locallock.h
++++ linux-stable/include/linux/locallock.h
+@@ -137,6 +137,12 @@ static inline int __local_lock_irqsave(s
+ _flags = __get_cpu_var(lvar).flags; \
+ } while (0)
+
++#define local_lock_irqsave_on(lvar, _flags, cpu) \
++ do { \
++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \
++ _flags = per_cpu(lvar, cpu).flags; \
++ } while (0)
++
+ static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+ unsigned long flags)
+ {
+@@ -156,6 +162,11 @@ static inline int __local_unlock_irqrest
+ put_local_var(lvar); \
+ } while (0)
+
++#define local_unlock_irqrestore_on(lvar, flags, cpu) \
++ do { \
++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \
++ } while (0)
++
+ #define local_spin_trylock_irq(lvar, lock) \
+ ({ \
+ int __locked; \
+Index: linux-stable/mm/page_alloc.c
+===================================================================
+--- linux-stable.orig/mm/page_alloc.c
++++ linux-stable/mm/page_alloc.c
+@@ -221,9 +221,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ # define cpu_lock_irqsave(cpu, flags) \
+- spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
++ local_lock_irqsave_on(pa_lock, flags, cpu)
+ # define cpu_unlock_irqrestore(cpu, flags) \
+- spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
++ local_unlock_irqrestore_on(pa_lock, flags, cpu)
+ #else
+ # define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
+ # define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
diff --git a/mm-page_alloc-reduce-lock-sections-further.patch b/mm-page_alloc-reduce-lock-sections-further.patch
new file mode 100644
index 0000000..6d9f679
--- /dev/null
+++ b/mm-page_alloc-reduce-lock-sections-further.patch
@@ -0,0 +1,197 @@
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Fri, 3 Jul 2009 08:44:37 -0500
+Subject: mm: page_alloc reduce lock sections further
+
+Split out the pages which are to be freed into a separate list and
+call free_pcppages_bulk() outside of the per-CPU page allocator locks.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/page_alloc.c | 81 +++++++++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 60 insertions(+), 21 deletions(-)
+
+Index: linux-stable/mm/page_alloc.c
+===================================================================
+--- linux-stable.orig/mm/page_alloc.c
++++ linux-stable/mm/page_alloc.c
+@@ -637,7 +637,7 @@ static inline int free_pages_check(struc
+ }
+
+ /*
+- * Frees a number of pages from the PCP lists
++ * Frees a number of pages which have been collected from the pcp lists.
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ *
+@@ -648,16 +648,42 @@ static inline int free_pages_check(struc
+ * pinned" detection logic.
+ */
+ static void free_pcppages_bulk(struct zone *zone, int count,
+- struct per_cpu_pages *pcp)
++ struct list_head *list)
+ {
+- int migratetype = 0;
+- int batch_free = 0;
+ int to_free = count;
++ unsigned long flags;
+
+- spin_lock(&zone->lock);
++ spin_lock_irqsave(&zone->lock, flags);
+ zone->all_unreclaimable = 0;
+ zone->pages_scanned = 0;
+
++ while (!list_empty(list)) {
++ struct page *page = list_first_entry(list, struct page, lru);
++
++ /* must delete as __free_one_page list manipulates */
++ list_del(&page->lru);
++ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
++ __free_one_page(page, zone, 0, page_private(page));
++ trace_mm_page_pcpu_drain(page, 0, page_private(page));
++ to_free--;
++ }
++ WARN_ON(to_free != 0);
++ __mod_zone_page_state(zone, NR_FREE_PAGES, count);
++ spin_unlock_irqrestore(&zone->lock, flags);
++}
++
++/*
++ * Moves a number of pages from the PCP lists to free list which
++ * is freed outside of the locked region.
++ *
++ * Assumes all pages on list are in same zone, and of same order.
++ * count is the number of pages to free.
++ */
++static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src,
++ struct list_head *dst)
++{
++ int migratetype = 0, batch_free = 0;
++
+ while (to_free) {
+ struct page *page;
+ struct list_head *list;
+@@ -673,7 +699,7 @@ static void free_pcppages_bulk(struct zo
+ batch_free++;
+ if (++migratetype == MIGRATE_PCPTYPES)
+ migratetype = 0;
+- list = &pcp->lists[migratetype];
++ list = &src->lists[migratetype];
+ } while (list_empty(list));
+
+ /* This is the only non-empty list. Free them all. */
+@@ -682,27 +708,24 @@ static void free_pcppages_bulk(struct zo
+
+ do {
+ page = list_last_entry(list, struct page, lru);
+- /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+- __free_one_page(page, zone, 0, page_private(page));
+- trace_mm_page_pcpu_drain(page, 0, page_private(page));
++ list_add(&page->lru, dst);
+ } while (--to_free && --batch_free && !list_empty(list));
+ }
+- __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+- spin_unlock(&zone->lock);
+ }
+
+ static void free_one_page(struct zone *zone, struct page *page, int order,
+ int migratetype)
+ {
+- spin_lock(&zone->lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&zone->lock, flags);
+ zone->all_unreclaimable = 0;
+ zone->pages_scanned = 0;
+
+ __free_one_page(page, zone, order, migratetype);
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+- spin_unlock(&zone->lock);
++ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+
+ static bool free_pages_prepare(struct page *page, unsigned int order)
+@@ -1168,6 +1191,7 @@ static int rmqueue_bulk(struct zone *zon
+ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
+ {
+ unsigned long flags;
++ LIST_HEAD(dst);
+ int to_drain;
+
+ local_lock_irqsave(pa_lock, flags);
+@@ -1176,10 +1200,11 @@ void drain_zone_pages(struct zone *zone,
+ else
+ to_drain = pcp->count;
+ if (to_drain > 0) {
+- free_pcppages_bulk(zone, to_drain, pcp);
++ isolate_pcp_pages(to_drain, pcp, &dst);
+ pcp->count -= to_drain;
+ }
+ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, to_drain, &dst);
+ }
+ #endif
+
+@@ -1198,16 +1223,21 @@ static void drain_pages(unsigned int cpu
+ for_each_populated_zone(zone) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
++ int count;
+
+ cpu_lock_irqsave(cpu, flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+
+ pcp = &pset->pcp;
+- if (pcp->count) {
+- free_pcppages_bulk(zone, pcp->count, pcp);
++ count = pcp->count;
++ if (count) {
++ isolate_pcp_pages(count, pcp, &dst);
+ pcp->count = 0;
+ }
+ cpu_unlock_irqrestore(cpu, flags);
++ if (count)
++ free_pcppages_bulk(zone, count, &dst);
+ }
+ }
+
+@@ -1348,8 +1378,15 @@ void free_hot_cold_page(struct page *pag
+ list_add(&page->lru, &pcp->lists[migratetype]);
+ pcp->count++;
+ if (pcp->count >= pcp->high) {
+- free_pcppages_bulk(zone, pcp->batch, pcp);
++ LIST_HEAD(dst);
++ int count;
++
++ isolate_pcp_pages(pcp->batch, pcp, &dst);
+ pcp->count -= pcp->batch;
++ count = pcp->batch;
++ local_unlock_irqrestore(pa_lock, flags);
++ free_pcppages_bulk(zone, count, &dst);
++ return;
+ }
+
+ out:
+@@ -5888,14 +5925,16 @@ static int __meminit __zone_pcp_update(v
+ for_each_possible_cpu(cpu) {
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
++ LIST_HEAD(dst);
+
+ pset = per_cpu_ptr(zone->pageset, cpu);
+ pcp = &pset->pcp;
+
+ cpu_lock_irqsave(cpu, flags);
+- if (pcp->count > 0)
+- free_pcppages_bulk(zone, pcp->count, pcp);
+- setup_pageset(pset, batch);
++ if (pcp->count > 0) {
++ isolate_pcp_pages(pcp->count, pcp, &dst);
++ free_pcppages_bulk(zone, pcp->count, &dst);
++ }
+ cpu_unlock_irqrestore(cpu, flags);
+ }
+ return 0;
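
The transformation above is a general shrink-the-locked-region idiom: collect the victims on a private list while the lock is held, then do the expensive freeing after dropping it. Reduced to an illustrative, generic sketch with invented names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_obj {
	struct list_head node;
};

static LIST_HEAD(example_cache);
static DEFINE_SPINLOCK(example_cache_lock);

static void example_cache_shrink(void)
{
	struct example_obj *obj, *tmp;
	LIST_HEAD(victims);
	unsigned long flags;

	spin_lock_irqsave(&example_cache_lock, flags);
	list_splice_init(&example_cache, &victims);	/* detach under the lock */
	spin_unlock_irqrestore(&example_cache_lock, flags);

	/* The expensive part runs with the lock dropped. */
	list_for_each_entry_safe(obj, tmp, &victims, node) {
		list_del(&obj->node);
		kfree(obj);
	}
}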
diff --git a/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/mm-page_alloc-rt-friendly-per-cpu-pages.patch
new file mode 100644
index 0000000..d915106
--- /dev/null
+++ b/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -0,0 +1,211 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:37 -0500
+Subject: mm: page_alloc: rt-friendly per-cpu pages
+
+rt-friendly per-cpu pages: convert the irqs-off per-cpu locking
+method into a preemptible, explicit-per-cpu-locks method.
+
+Contains fixes from:
+ Peter Zijlstra <a.p.zijlstra@chello.nl>
+ Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ mm/page_alloc.c | 57 ++++++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 39 insertions(+), 18 deletions(-)
+
+Index: linux-stable/mm/page_alloc.c
+===================================================================
+--- linux-stable.orig/mm/page_alloc.c
++++ linux-stable/mm/page_alloc.c
+@@ -58,6 +58,7 @@
+ #include <linux/prefetch.h>
+ #include <linux/migrate.h>
+ #include <linux/page-debug-flags.h>
++#include <linux/locallock.h>
+
+ #include <asm/tlbflush.h>
+ #include <asm/div64.h>
+@@ -216,6 +217,18 @@ EXPORT_SYMBOL(nr_node_ids);
+ EXPORT_SYMBOL(nr_online_nodes);
+ #endif
+
++static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define cpu_lock_irqsave(cpu, flags) \
++ spin_lock_irqsave(&per_cpu(pa_lock, cpu).lock, flags)
++# define cpu_unlock_irqrestore(cpu, flags) \
++ spin_unlock_irqrestore(&per_cpu(pa_lock, cpu).lock, flags)
++#else
++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags)
++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags)
++#endif
++
+ int page_group_by_mobility_disabled __read_mostly;
+
+ /*
+@@ -726,13 +739,13 @@ static void __free_pages_ok(struct page
+ if (!free_pages_prepare(page, order))
+ return;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (unlikely(wasMlocked))
+ free_page_mlock(page);
+ __count_vm_events(PGFREE, 1 << order);
+ free_one_page(page_zone(page), page, order,
+ get_pageblock_migratetype(page));
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
+@@ -1157,7 +1170,7 @@ void drain_zone_pages(struct zone *zone,
+ unsigned long flags;
+ int to_drain;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (pcp->count >= pcp->batch)
+ to_drain = pcp->batch;
+ else
+@@ -1166,7 +1179,7 @@ void drain_zone_pages(struct zone *zone,
+ free_pcppages_bulk(zone, to_drain, pcp);
+ pcp->count -= to_drain;
+ }
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+ #endif
+
+@@ -1186,7 +1199,7 @@ static void drain_pages(unsigned int cpu
+ struct per_cpu_pageset *pset;
+ struct per_cpu_pages *pcp;
+
+- local_irq_save(flags);
++ cpu_lock_irqsave(cpu, flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
+
+ pcp = &pset->pcp;
+@@ -1194,7 +1207,7 @@ static void drain_pages(unsigned int cpu
+ free_pcppages_bulk(zone, pcp->count, pcp);
+ pcp->count = 0;
+ }
+- local_irq_restore(flags);
++ cpu_unlock_irqrestore(cpu, flags);
+ }
+ }
+
+@@ -1247,7 +1260,12 @@ void drain_all_pages(void)
+ else
+ cpumask_clear_cpu(cpu, &cpus_with_pcps);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
++#else
++ for_each_cpu(cpu, &cpus_with_pcps)
++ drain_pages(cpu);
++#endif
+ }
+
+ #ifdef CONFIG_HIBERNATION
+@@ -1303,7 +1321,7 @@ void free_hot_cold_page(struct page *pag
+
+ migratetype = get_pageblock_migratetype(page);
+ set_page_private(page, migratetype);
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (unlikely(wasMlocked))
+ free_page_mlock(page);
+ __count_vm_event(PGFREE);
+@@ -1335,7 +1353,7 @@ void free_hot_cold_page(struct page *pag
+ }
+
+ out:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ /*
+@@ -1447,7 +1465,7 @@ again:
+ struct per_cpu_pages *pcp;
+ struct list_head *list;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
+ if (list_empty(list)) {
+@@ -1479,17 +1497,19 @@ again:
+ */
+ WARN_ON_ONCE(order > 1);
+ }
+- spin_lock_irqsave(&zone->lock, flags);
++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+ page = __rmqueue(zone, order, migratetype);
+- spin_unlock(&zone->lock);
+- if (!page)
++ if (!page) {
++ spin_unlock(&zone->lock);
+ goto failed;
++ }
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
++ spin_unlock(&zone->lock);
+ }
+
+ __count_zone_vm_events(PGALLOC, zone, 1 << order);
+ zone_statistics(preferred_zone, zone, gfp_flags);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+
+ VM_BUG_ON(bad_range(zone, page));
+ if (prep_new_page(page, order, gfp_flags))
+@@ -1497,7 +1517,7 @@ again:
+ return page;
+
+ failed:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ return NULL;
+ }
+
+@@ -4992,6 +5012,7 @@ static int page_alloc_cpu_notify(struct
+ void __init page_alloc_init(void)
+ {
+ hotcpu_notifier(page_alloc_cpu_notify, 0);
++ local_irq_lock_init(pa_lock);
+ }
+
+ /*
+@@ -5871,11 +5892,11 @@ static int __meminit __zone_pcp_update(v
+ pset = per_cpu_ptr(zone->pageset, cpu);
+ pcp = &pset->pcp;
+
+- local_irq_save(flags);
++ cpu_lock_irqsave(cpu, flags);
+ if (pcp->count > 0)
+ free_pcppages_bulk(zone, pcp->count, pcp);
+ setup_pageset(pset, batch);
+- local_irq_restore(flags);
++ cpu_unlock_irqrestore(cpu, flags);
+ }
+ return 0;
+ }
+@@ -5892,12 +5913,12 @@ void zone_pcp_reset(struct zone *zone)
+ unsigned long flags;
+
+ /* avoid races with drain_pages() */
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ if (zone->pageset != &boot_pageset) {
+ free_percpu(zone->pageset);
+ zone->pageset = &boot_pageset;
+ }
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ }
+
+ /*
diff --git a/mm-prepare-pf-disable-discoupling.patch b/mm-prepare-pf-disable-discoupling.patch
new file mode 100644
index 0000000..7df7107
--- /dev/null
+++ b/mm-prepare-pf-disable-discoupling.patch
@@ -0,0 +1,126 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:37 -0500
+Subject: mm: Prepare decoupling the page fault disabling logic
+
+Add a pagefault_disabled variable to task_struct to allow decoupling
+the pagefault-disabled logic from the preempt count.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/sched.h | 1 +
+ include/linux/uaccess.h | 33 +++------------------------------
+ kernel/fork.c | 1 +
+ mm/memory.c | 29 +++++++++++++++++++++++++++++
+ 4 files changed, 34 insertions(+), 30 deletions(-)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1448,6 +1448,7 @@ struct task_struct {
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+ #endif
++ int pagefault_disabled;
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+Index: linux-stable/include/linux/uaccess.h
+===================================================================
+--- linux-stable.orig/include/linux/uaccess.h
++++ linux-stable/include/linux/uaccess.h
+@@ -6,37 +6,10 @@
+
+ /*
+ * These routines enable/disable the pagefault handler in that
+- * it will not take any locks and go straight to the fixup table.
+- *
+- * They have great resemblance to the preempt_disable/enable calls
+- * and in fact they are identical; this is because currently there is
+- * no other way to make the pagefault handlers do this. So we do
+- * disable preemption but we don't necessarily care about that.
++ * it will not take any MM locks and go straight to the fixup table.
+ */
+-static inline void pagefault_disable(void)
+-{
+- inc_preempt_count();
+- /*
+- * make sure to have issued the store before a pagefault
+- * can hit.
+- */
+- barrier();
+-}
+-
+-static inline void pagefault_enable(void)
+-{
+- /*
+- * make sure to issue those last loads/stores before enabling
+- * the pagefault handler again.
+- */
+- barrier();
+- dec_preempt_count();
+- /*
+- * make sure we do..
+- */
+- barrier();
+- preempt_check_resched();
+-}
++extern void pagefault_disable(void);
++extern void pagefault_enable(void);
+
+ #ifndef ARCH_HAS_NOCACHE_UACCESS
+
+Index: linux-stable/kernel/fork.c
+===================================================================
+--- linux-stable.orig/kernel/fork.c
++++ linux-stable/kernel/fork.c
+@@ -1298,6 +1298,7 @@ static struct task_struct *copy_process(
+ p->hardirq_context = 0;
+ p->softirq_context = 0;
+ #endif
++ p->pagefault_disabled = 0;
+ #ifdef CONFIG_LOCKDEP
+ p->lockdep_depth = 0; /* no locks held yet */
+ p->curr_chain_key = 0;
+Index: linux-stable/mm/memory.c
+===================================================================
+--- linux-stable.orig/mm/memory.c
++++ linux-stable/mm/memory.c
+@@ -3484,6 +3484,35 @@ unlock:
+ return 0;
+ }
+
++void pagefault_disable(void)
++{
++ inc_preempt_count();
++ current->pagefault_disabled++;
++ /*
++ * make sure to have issued the store before a pagefault
++ * can hit.
++ */
++ barrier();
++}
++EXPORT_SYMBOL_GPL(pagefault_disable);
++
++void pagefault_enable(void)
++{
++ /*
++ * make sure to issue those last loads/stores before enabling
++ * the pagefault handler again.
++ */
++ barrier();
++ current->pagefault_disabled--;
++ dec_preempt_count();
++ /*
++ * make sure we do..
++ */
++ barrier();
++ preempt_check_resched();
++}
++EXPORT_SYMBOL_GPL(pagefault_enable);
++
+ /*
+ * By the time we get here, we already hold the mm semaphore
+ */
diff --git a/mm-protect-activate-switch-mm.patch b/mm-protect-activate-switch-mm.patch
new file mode 100644
index 0000000..8dd324a
--- /dev/null
+++ b/mm-protect-activate-switch-mm.patch
@@ -0,0 +1,47 @@
+Subject: mm-protect-activate-switch-mm.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 04 Jul 2011 09:48:40 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ fs/exec.c | 2 ++
+ mm/mmu_context.c | 2 ++
+ 2 files changed, 4 insertions(+)
+
+Index: linux-stable/fs/exec.c
+===================================================================
+--- linux-stable.orig/fs/exec.c
++++ linux-stable/fs/exec.c
+@@ -836,10 +836,12 @@ static int exec_mmap(struct mm_struct *m
+ }
+ }
+ task_lock(tsk);
++ local_irq_disable_rt();
+ active_mm = tsk->active_mm;
+ tsk->mm = mm;
+ tsk->active_mm = mm;
+ activate_mm(active_mm, mm);
++ local_irq_enable_rt();
+ task_unlock(tsk);
+ arch_pick_mmap_layout(mm);
+ if (old_mm) {
+Index: linux-stable/mm/mmu_context.c
+===================================================================
+--- linux-stable.orig/mm/mmu_context.c
++++ linux-stable/mm/mmu_context.c
+@@ -26,6 +26,7 @@ void use_mm(struct mm_struct *mm)
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
++ local_irq_disable_rt();
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ atomic_inc(&mm->mm_count);
+@@ -33,6 +34,7 @@ void use_mm(struct mm_struct *mm)
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
++ local_irq_enable_rt();
+ task_unlock(tsk);
+
+ if (active_mm != mm)
diff --git a/mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch b/mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch
new file mode 100644
index 0000000..cf78795
--- /dev/null
+++ b/mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch
@@ -0,0 +1,80 @@
+Subject: mm: Protect activate_mm() by preempt_[disable|enable]_rt()
+From: Yong Zhang <yong.zhang0@gmail.com>
+Date: Tue, 15 May 2012 13:53:56 +0800
+
+From: Yong Zhang <yong.zhang@windriver.com>
+
+Otherwise there will be warning on ARM like below:
+
+WARNING: at build/linux/kernel/smp.c:459 smp_call_function_many+0x98/0x264()
+Modules linked in:
+[<c0013bb4>] (unwind_backtrace+0x0/0xe4) from [<c001be94>] (warn_slowpath_common+0x4c/0x64)
+[<c001be94>] (warn_slowpath_common+0x4c/0x64) from [<c001bec4>] (warn_slowpath_null+0x18/0x1c)
+[<c001bec4>] (warn_slowpath_null+0x18/0x1c) from [<c0053ff8>](smp_call_function_many+0x98/0x264)
+[<c0053ff8>] (smp_call_function_many+0x98/0x264) from [<c0054364>] (smp_call_function+0x44/0x6c)
+[<c0054364>] (smp_call_function+0x44/0x6c) from [<c0017d50>] (__new_context+0xbc/0x124)
+[<c0017d50>] (__new_context+0xbc/0x124) from [<c009e49c>] (flush_old_exec+0x460/0x5e4)
+[<c009e49c>] (flush_old_exec+0x460/0x5e4) from [<c00d61ac>] (load_elf_binary+0x2e0/0x11ac)
+[<c00d61ac>] (load_elf_binary+0x2e0/0x11ac) from [<c009d060>] (search_binary_handler+0x94/0x2a4)
+[<c009d060>] (search_binary_handler+0x94/0x2a4) from [<c009e8fc>] (do_execve+0x254/0x364)
+[<c009e8fc>] (do_execve+0x254/0x364) from [<c0010e84>] (sys_execve+0x34/0x54)
+[<c0010e84>] (sys_execve+0x34/0x54) from [<c000da00>] (ret_fast_syscall+0x0/0x30)
+---[ end trace 0000000000000002 ]---
+
+The reason is that ARM needs irqs enabled when doing activate_mm().
+According to mm-protect-activate-switch-mm.patch,
+preempt_[disable|enable]_rt() is actually sufficient.
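+
+A rough sketch of the assumed helper semantics (these helpers are
+provided elsewhere in the RT series, not by this patch):
+
+	#ifdef CONFIG_PREEMPT_RT_BASE
+	# define preempt_disable_rt()	preempt_disable()
+	# define preempt_enable_rt()	preempt_enable()
+	#else
+	# define preempt_disable_rt()	barrier()
+	# define preempt_enable_rt()	barrier()
+	#endif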
+
+Inspired-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/1337061236-1766-1-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+---
+ fs/exec.c | 4 ++--
+ mm/mmu_context.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+Index: linux-stable/fs/exec.c
+===================================================================
+--- linux-stable.orig/fs/exec.c
++++ linux-stable/fs/exec.c
+@@ -836,12 +836,12 @@ static int exec_mmap(struct mm_struct *m
+ }
+ }
+ task_lock(tsk);
+- local_irq_disable_rt();
++ preempt_disable_rt();
+ active_mm = tsk->active_mm;
+ tsk->mm = mm;
+ tsk->active_mm = mm;
+ activate_mm(active_mm, mm);
+- local_irq_enable_rt();
++ preempt_enable_rt();
+ task_unlock(tsk);
+ arch_pick_mmap_layout(mm);
+ if (old_mm) {
+Index: linux-stable/mm/mmu_context.c
+===================================================================
+--- linux-stable.orig/mm/mmu_context.c
++++ linux-stable/mm/mmu_context.c
+@@ -26,7 +26,7 @@ void use_mm(struct mm_struct *mm)
+ struct task_struct *tsk = current;
+
+ task_lock(tsk);
+- local_irq_disable_rt();
++ preempt_disable_rt();
+ active_mm = tsk->active_mm;
+ if (active_mm != mm) {
+ atomic_inc(&mm->mm_count);
+@@ -34,7 +34,7 @@ void use_mm(struct mm_struct *mm)
+ }
+ tsk->mm = mm;
+ switch_mm(active_mm, mm, tsk);
+- local_irq_enable_rt();
++ preempt_enable_rt();
+ task_unlock(tsk);
+
+ if (active_mm != mm)
diff --git a/mm-remove-preempt-count-from-pf.patch b/mm-remove-preempt-count-from-pf.patch
new file mode 100644
index 0000000..5ba03dc
--- /dev/null
+++ b/mm-remove-preempt-count-from-pf.patch
@@ -0,0 +1,36 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 25 Jul 2009 22:06:27 +0200
+Subject: mm: Remove preempt count from pagefault disable/enable
+
+Now that all users are cleaned up, we can remove the preemption count.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/memory.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+Index: linux-stable/mm/memory.c
+===================================================================
+--- linux-stable.orig/mm/memory.c
++++ linux-stable/mm/memory.c
+@@ -3487,7 +3487,6 @@ unlock:
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ void pagefault_disable(void)
+ {
+- inc_preempt_count();
+ current->pagefault_disabled++;
+ /*
+ * make sure to have issued the store before a pagefault
+@@ -3505,12 +3504,6 @@ void pagefault_enable(void)
+ */
+ barrier();
+ current->pagefault_disabled--;
+- dec_preempt_count();
+- /*
+- * make sure we do..
+- */
+- barrier();
+- preempt_check_resched();
+ }
+ EXPORT_SYMBOL_GPL(pagefault_enable);
+ #endif
diff --git a/mm-rt-kmap-atomic-scheduling.patch b/mm-rt-kmap-atomic-scheduling.patch
new file mode 100644
index 0000000..84a6f07
--- /dev/null
+++ b/mm-rt-kmap-atomic-scheduling.patch
@@ -0,0 +1,119 @@
+Subject: mm, rt: kmap_atomic scheduling
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Thu, 28 Jul 2011 10:43:51 +0200
+
+In fact, with migrate_disable() existing, one could play games with
+kmap_atomic. You could save/restore the kmap_atomic slots on context
+switch (if there are any in use, of course); this should be especially
+easy now that we have a kmap_atomic stack.
+
+Something like the below... it wants replacing all the preempt_disable()
+stuff with pagefault_disable() && migrate_disable() of course, but then
+you can flip kmaps around like below.
+
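+For illustration only (an assumption about the intended use, not part
+of this patch): with the per-task slot save/restore in __switch_to(),
+a task on RT may be preempted, or take a sleeping spinlock, while it
+holds a kmap_atomic() mapping:
+
+	void *vaddr = kmap_atomic(page);
+
+	spin_lock(&lock);	/* sleeping lock on RT; preemption is fine */
+	memcpy(vaddr + offset, buf, len);
+	spin_unlock(&lock);
+
+	kunmap_atomic(vaddr);
+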
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+[dvhart@linux.intel.com: build fix]
+Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
+---
+ arch/x86/kernel/process_32.c | 36 ++++++++++++++++++++++++++++++++++++
+ include/linux/sched.h | 5 +++++
+ mm/memory.c | 2 ++
+ 3 files changed, 43 insertions(+)
+
+Index: linux-stable/arch/x86/kernel/process_32.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/process_32.c
++++ linux-stable/arch/x86/kernel/process_32.c
+@@ -36,6 +36,7 @@
+ #include <linux/uaccess.h>
+ #include <linux/io.h>
+ #include <linux/kdebug.h>
++#include <linux/highmem.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/ldt.h>
+@@ -276,6 +277,41 @@ __switch_to(struct task_struct *prev_p,
+ task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
+ __switch_to_xtra(prev_p, next_p, tss);
+
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++ /*
++ * Save @prev's kmap_atomic stack
++ */
++ prev_p->kmap_idx = __this_cpu_read(__kmap_atomic_idx);
++ if (unlikely(prev_p->kmap_idx)) {
++ int i;
++
++ for (i = 0; i < prev_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ pte_t *ptep = kmap_pte - idx;
++ prev_p->kmap_pte[i] = *ptep;
++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
++ }
++
++ __this_cpu_write(__kmap_atomic_idx, 0);
++ }
++
++ /*
++ * Restore @next_p's kmap_atomic stack
++ */
++ if (unlikely(next_p->kmap_idx)) {
++ int i;
++
++ __this_cpu_write(__kmap_atomic_idx, next_p->kmap_idx);
++
++ for (i = 0; i < next_p->kmap_idx; i++) {
++ int idx = i + KM_TYPE_NR * smp_processor_id();
++
++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
++ }
++ }
++#endif
++
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -63,6 +63,7 @@ struct sched_param {
+ #include <linux/nodemask.h>
+ #include <linux/mm_types.h>
+
++#include <asm/kmap_types.h>
+ #include <asm/page.h>
+ #include <asm/ptrace.h>
+ #include <asm/cputime.h>
+@@ -1619,6 +1620,10 @@ struct task_struct {
+ struct rcu_head put_rcu;
+ int softirq_nestcnt;
+ #endif
++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
++ int kmap_idx;
++ pte_t kmap_pte[KM_TYPE_NR];
++#endif
+ };
+
+ #ifdef CONFIG_PREEMPT_RT_FULL
+Index: linux-stable/mm/memory.c
+===================================================================
+--- linux-stable.orig/mm/memory.c
++++ linux-stable/mm/memory.c
+@@ -3487,6 +3487,7 @@ unlock:
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ void pagefault_disable(void)
+ {
++ migrate_disable();
+ current->pagefault_disabled++;
+ /*
+ * make sure to have issued the store before a pagefault
+@@ -3504,6 +3505,7 @@ void pagefault_enable(void)
+ */
+ barrier();
+ current->pagefault_disabled--;
++ migrate_enable();
+ }
+ EXPORT_SYMBOL_GPL(pagefault_enable);
+ #endif
diff --git a/mm-scatterlist-dont-disable-irqs-on-RT.patch b/mm-scatterlist-dont-disable-irqs-on-RT.patch
new file mode 100644
index 0000000..cc45e7a
--- /dev/null
+++ b/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -0,0 +1,40 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:34 -0500
+Subject: mm: scatterlist don't disable irqs on RT
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ lib/scatterlist.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+Index: linux-stable/lib/scatterlist.c
+===================================================================
+--- linux-stable.orig/lib/scatterlist.c
++++ linux-stable/lib/scatterlist.c
+@@ -479,7 +479,7 @@ void sg_miter_stop(struct sg_mapping_ite
+ flush_kernel_dcache_page(miter->page);
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+- WARN_ON(!irqs_disabled());
++ WARN_ON_NONRT(!irqs_disabled());
+ kunmap_atomic(miter->addr);
+ } else
+ kunmap(miter->page);
+@@ -519,7 +519,7 @@ static size_t sg_copy_buffer(struct scat
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ while (sg_miter_next(&miter) && offset < buflen) {
+ unsigned int len;
+@@ -536,7 +536,7 @@ static size_t sg_copy_buffer(struct scat
+
+ sg_miter_stop(&miter);
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return offset;
+ }
+
diff --git a/mm-shrink-the-page-frame-to-rt-size.patch b/mm-shrink-the-page-frame-to-rt-size.patch
new file mode 100644
index 0000000..2f62793
--- /dev/null
+++ b/mm-shrink-the-page-frame-to-rt-size.patch
@@ -0,0 +1,147 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 3 Jul 2009 08:44:54 -0500
+Subject: mm: shrink the page frame to !-rt size
+
+The below is a boot-tested hack to shrink the page frame size back to
+normal.
+
+Should be a net win since there should be far fewer PTE-pages than
+page-frames.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/mm.h | 46 +++++++++++++++++++++++++++++++++++++++-------
+ include/linux/mm_types.h | 6 +++++-
+ mm/memory.c | 32 ++++++++++++++++++++++++++++++++
+ 3 files changed, 76 insertions(+), 8 deletions(-)
+
+Index: linux-stable/include/linux/mm.h
+===================================================================
+--- linux-stable.orig/include/linux/mm.h
++++ linux-stable/include/linux/mm.h
+@@ -1225,27 +1225,59 @@ static inline pmd_t *pmd_alloc(struct mm
+ * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
+ * When freeing, reset page->mapping so free_pages_check won't complain.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #define __pte_lockptr(page) &((page)->ptl)
+-#define pte_lock_init(_page) do { \
+- spin_lock_init(__pte_lockptr(_page)); \
+-} while (0)
++
++static inline struct page *pte_lock_init(struct page *page)
++{
++ spin_lock_init(__pte_lockptr(page));
++ return page;
++}
++
+ #define pte_lock_deinit(page) ((page)->mapping = NULL)
++
++#else /* !PREEMPT_RT_FULL */
++
++/*
++ * On PREEMPT_RT_FULL the spinlock_t's are too large to embed in the
++ * page frame, hence it only has a pointer and we need to dynamically
++ * allocate the lock when we allocate PTE-pages.
++ *
++ * This is an overall win, since only a small fraction of the pages
++ * will be PTE pages under normal circumstances.
++ */
++
++#define __pte_lockptr(page) ((page)->ptl)
++
++extern struct page *pte_lock_init(struct page *page);
++extern void pte_lock_deinit(struct page *page);
++
++#endif /* PREEMPT_RT_FULL */
++
+ #define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
+ #else /* !USE_SPLIT_PTLOCKS */
+ /*
+ * We use mm->page_table_lock to guard all pagetable pages of the mm.
+ */
+-#define pte_lock_init(page) do {} while (0)
++static inline struct page *pte_lock_init(struct page *page) { return page; }
+ #define pte_lock_deinit(page) do {} while (0)
+ #define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
+ #endif /* USE_SPLIT_PTLOCKS */
+
+-static inline void pgtable_page_ctor(struct page *page)
++static inline struct page *__pgtable_page_ctor(struct page *page)
+ {
+- pte_lock_init(page);
+- inc_zone_page_state(page, NR_PAGETABLE);
++ page = pte_lock_init(page);
++ if (page)
++ inc_zone_page_state(page, NR_PAGETABLE);
++ return page;
+ }
+
++#define pgtable_page_ctor(page) \
++do { \
++ page = __pgtable_page_ctor(page); \
++} while (0)
++
+ static inline void pgtable_page_dtor(struct page *page)
+ {
+ pte_lock_deinit(page);
+Index: linux-stable/include/linux/mm_types.h
+===================================================================
+--- linux-stable.orig/include/linux/mm_types.h
++++ linux-stable/include/linux/mm_types.h
+@@ -145,7 +145,11 @@ struct page {
+ * system if PG_buddy is set.
+ */
+ #if USE_SPLIT_PTLOCKS
+- spinlock_t ptl;
++# ifndef CONFIG_PREEMPT_RT_FULL
++ spinlock_t ptl;
++# else
++ spinlock_t *ptl;
++# endif
+ #endif
+ struct kmem_cache *slab; /* SLUB: Pointer to slab */
+ struct page *first_page; /* Compound tail pages */
+Index: linux-stable/mm/memory.c
+===================================================================
+--- linux-stable.orig/mm/memory.c
++++ linux-stable/mm/memory.c
+@@ -4062,3 +4062,35 @@ void copy_user_huge_page(struct page *ds
+ }
+ }
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
++
++#if defined(CONFIG_PREEMPT_RT_FULL) && (USE_SPLIT_PTLOCKS > 0)
++/*
++ * Heinous hack, relies on the caller doing something like:
++ *
++ * pte = alloc_pages(PGALLOC_GFP, 0);
++ * if (pte)
++ * pgtable_page_ctor(pte);
++ * return pte;
++ *
++ * This ensures we release the page and return NULL when the
++ * lock allocation fails.
++ */
++struct page *pte_lock_init(struct page *page)
++{
++ page->ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
++ if (page->ptl) {
++ spin_lock_init(__pte_lockptr(page));
++ } else {
++ __free_page(page);
++ page = NULL;
++ }
++ return page;
++}
++
++void pte_lock_deinit(struct page *page)
++{
++ kfree(page->ptl);
++ page->mapping = NULL;
++}
++
++#endif
diff --git a/mm-slab-fix-potential-deadlock.patch b/mm-slab-fix-potential-deadlock.patch
new file mode 100644
index 0000000..80e9268
--- /dev/null
+++ b/mm-slab-fix-potential-deadlock.patch
@@ -0,0 +1,122 @@
+Subject: mm: slab: Fix potential deadlock
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 26 Sep 2012 16:20:00 +0200
+
+ =============================================
+[ INFO: possible recursive locking detected ]
+ 3.6.0-rt1+ #49 Not tainted
+ ---------------------------------------------
+ swapper/0/1 is trying to acquire lock:
+ lock_slab_on+0x72/0x77
+
+ but task is already holding lock:
+ __local_lock_irq+0x24/0x77
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(&per_cpu(slab_lock, __cpu).lock);
+ lock(&per_cpu(slab_lock, __cpu).lock);
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+ 2 locks held by swapper/0/1:
+ kmem_cache_create+0x33/0x89
+ __local_lock_irq+0x24/0x77
+
+ stack backtrace:
+ Pid: 1, comm: swapper/0 Not tainted 3.6.0-rt1+ #49
+ Call Trace:
+ __lock_acquire+0x9a4/0xdc4
+ ? __local_lock_irq+0x24/0x77
+ ? lock_slab_on+0x72/0x77
+ lock_acquire+0xc4/0x108
+ ? lock_slab_on+0x72/0x77
+ ? unlock_slab_on+0x5b/0x5b
+ rt_spin_lock+0x36/0x3d
+ ? lock_slab_on+0x72/0x77
+ ? migrate_disable+0x85/0x93
+ lock_slab_on+0x72/0x77
+ do_ccupdate_local+0x19/0x44
+ slab_on_each_cpu+0x36/0x5a
+ do_tune_cpucache+0xc1/0x305
+ enable_cpucache+0x8c/0xb5
+ setup_cpu_cache+0x28/0x182
+ __kmem_cache_create+0x34b/0x380
+ ? shmem_mount+0x1a/0x1a
+ kmem_cache_create+0x4a/0x89
+ ? shmem_mount+0x1a/0x1a
+ shmem_init+0x3e/0xd4
+ kernel_init+0x11c/0x214
+ kernel_thread_helper+0x4/0x10
+ ? retint_restore_args+0x13/0x13
+ ? start_kernel+0x3bc/0x3bc
+ ? gs_change+0x13/0x13
+
+It's not a missing annotation. It's simply wrong code and needs to be
+fixed. Instead of nesting the local and the remote CPU lock, simply
+acquire only the remote CPU lock, which is sufficient protection for
+this procedure.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ include/linux/locallock.h | 8 ++++++++
+ mm/slab.c | 10 ++--------
+ 2 files changed, 10 insertions(+), 8 deletions(-)
+
+Index: linux-stable/include/linux/locallock.h
+===================================================================
+--- linux-stable.orig/include/linux/locallock.h
++++ linux-stable/include/linux/locallock.h
+@@ -96,6 +96,9 @@ static inline void __local_lock_irq(stru
+ #define local_lock_irq(lvar) \
+ do { __local_lock_irq(&get_local_var(lvar)); } while (0)
+
++#define local_lock_irq_on(lvar, cpu) \
++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
++
+ static inline void __local_unlock_irq(struct local_irq_lock *lv)
+ {
+ LL_WARN(!lv->nestcnt);
+@@ -111,6 +114,11 @@ static inline void __local_unlock_irq(st
+ put_local_var(lvar); \
+ } while (0)
+
++#define local_unlock_irq_on(lvar, cpu) \
++ do { \
++ __local_unlock_irq(&per_cpu(lvar, cpu)); \
++ } while (0)
++
+ static inline int __local_lock_irqsave(struct local_irq_lock *lv)
+ {
+ if (lv->owner != current) {
+Index: linux-stable/mm/slab.c
+===================================================================
+--- linux-stable.orig/mm/slab.c
++++ linux-stable/mm/slab.c
+@@ -728,18 +728,12 @@ slab_on_each_cpu(void (*func)(void *arg,
+
+ static void lock_slab_on(unsigned int cpu)
+ {
+- if (cpu == smp_processor_id())
+- local_lock_irq(slab_lock);
+- else
+- local_spin_lock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock);
++ local_lock_irq_on(slab_lock, cpu);
+ }
+
+ static void unlock_slab_on(unsigned int cpu)
+ {
+- if (cpu == smp_processor_id())
+- local_unlock_irq(slab_lock);
+- else
+- local_spin_unlock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock);
++ local_unlock_irq_on(slab_lock, cpu);
+ }
+ #endif
+
diff --git a/mm-slab-more-lock-breaks.patch b/mm-slab-more-lock-breaks.patch
new file mode 100644
index 0000000..77efcc0
--- /dev/null
+++ b/mm-slab-more-lock-breaks.patch
@@ -0,0 +1,269 @@
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Fri, 3 Jul 2009 08:44:43 -0500
+Subject: mm: More lock breaks in slab.c
+
+Handle __free_pages outside of the locked regions. This reduces the
+lock contention on the percpu slab locks in -rt significantly.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ mm/slab.c | 108 +++++++++++++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 86 insertions(+), 22 deletions(-)
+
+Index: linux-stable/mm/slab.c
+===================================================================
+--- linux-stable.orig/mm/slab.c
++++ linux-stable/mm/slab.c
+@@ -704,6 +704,7 @@ static void slab_set_debugobj_lock_class
+ #endif
+
+ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
++static DEFINE_PER_CPU(struct list_head, slab_free_list);
+ static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
+
+ #ifndef CONFIG_PREEMPT_RT_BASE
+@@ -719,14 +720,57 @@ slab_on_each_cpu(void (*func)(void *arg,
+ {
+ unsigned int i;
+
+- for_each_online_cpu(i) {
+- spin_lock_irq(&per_cpu(slab_lock, i).lock);
++ get_cpu_light();
++ for_each_online_cpu(i)
+ func(arg, i);
+- spin_unlock_irq(&per_cpu(slab_lock, i).lock);
+- }
++ put_cpu_light();
++}
++
++static void lock_slab_on(unsigned int cpu)
++{
++ if (cpu == smp_processor_id())
++ local_lock_irq(slab_lock);
++ else
++ local_spin_lock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock);
++}
++
++static void unlock_slab_on(unsigned int cpu)
++{
++ if (cpu == smp_processor_id())
++ local_unlock_irq(slab_lock);
++ else
++ local_spin_unlock_irq(slab_lock, &per_cpu(slab_lock, cpu).lock);
+ }
+ #endif
+
++static void free_delayed(struct list_head *h)
++{
++ while(!list_empty(h)) {
++ struct page *page = list_first_entry(h, struct page, lru);
++
++ list_del(&page->lru);
++ __free_pages(page, page->index);
++ }
++}
++
++static void unlock_l3_and_free_delayed(spinlock_t *list_lock)
++{
++ LIST_HEAD(tmp);
++
++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
++ local_spin_unlock_irq(slab_lock, list_lock);
++ free_delayed(&tmp);
++}
++
++static void unlock_slab_and_free_delayed(unsigned long flags)
++{
++ LIST_HEAD(tmp);
++
++ list_splice_init(&__get_cpu_var(slab_free_list), &tmp);
++ local_unlock_irqrestore(slab_lock, flags);
++ free_delayed(&tmp);
++}
++
+ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
+ {
+ return cachep->array[smp_processor_id()];
+@@ -1340,7 +1384,7 @@ static void __cpuinit cpuup_canceled(lon
+ free_block(cachep, nc->entry, nc->avail, node);
+
+ if (!cpumask_empty(mask)) {
+- local_spin_unlock_irq(slab_lock, &l3->list_lock);
++ unlock_l3_and_free_delayed(&l3->list_lock);
+ goto free_array_cache;
+ }
+
+@@ -1354,7 +1398,7 @@ static void __cpuinit cpuup_canceled(lon
+ alien = l3->alien;
+ l3->alien = NULL;
+
+- local_spin_unlock_irq(slab_lock, &l3->list_lock);
++ unlock_l3_and_free_delayed(&l3->list_lock);
+
+ kfree(shared);
+ if (alien) {
+@@ -1635,6 +1679,8 @@ void __init kmem_cache_init(void)
+ use_alien_caches = 0;
+
+ local_irq_lock_init(slab_lock);
++ for_each_possible_cpu(i)
++ INIT_LIST_HEAD(&per_cpu(slab_free_list, i));
+
+ for (i = 0; i < NUM_INIT_LISTS; i++) {
+ kmem_list3_init(&initkmem_list3[i]);
+@@ -1973,12 +2019,14 @@ static void *kmem_getpages(struct kmem_c
+ /*
+ * Interface to system's page release.
+ */
+-static void kmem_freepages(struct kmem_cache *cachep, void *addr)
++static void kmem_freepages(struct kmem_cache *cachep, void *addr, bool delayed)
+ {
+ unsigned long i = (1 << cachep->gfporder);
+- struct page *page = virt_to_page(addr);
++ struct page *page, *basepage = virt_to_page(addr);
+ const unsigned long nr_freed = i;
+
++ page = basepage;
++
+ kmemcheck_free_shadow(page, cachep->gfporder);
+
+ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+@@ -1995,7 +2043,13 @@ static void kmem_freepages(struct kmem_c
+ }
+ if (current->reclaim_state)
+ current->reclaim_state->reclaimed_slab += nr_freed;
+- free_pages((unsigned long)addr, cachep->gfporder);
++
++ if (!delayed) {
++ free_pages((unsigned long)addr, cachep->gfporder);
++ } else {
++ basepage->index = cachep->gfporder;
++ list_add(&basepage->lru, &__get_cpu_var(slab_free_list));
++ }
+ }
+
+ static void kmem_rcu_free(struct rcu_head *head)
+@@ -2003,7 +2057,7 @@ static void kmem_rcu_free(struct rcu_hea
+ struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
+ struct kmem_cache *cachep = slab_rcu->cachep;
+
+- kmem_freepages(cachep, slab_rcu->addr);
++ kmem_freepages(cachep, slab_rcu->addr, false);
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->slabp_cache, slab_rcu);
+ }
+@@ -2222,7 +2276,8 @@ static void slab_destroy_debugcheck(stru
+ * Before calling the slab must have been unlinked from the cache. The
+ * cache-lock is not held/needed.
+ */
+-static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
++static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp,
++ bool delayed)
+ {
+ void *addr = slabp->s_mem - slabp->colouroff;
+
+@@ -2235,7 +2290,7 @@ static void slab_destroy(struct kmem_cac
+ slab_rcu->addr = addr;
+ call_rcu(&slab_rcu->head, kmem_rcu_free);
+ } else {
+- kmem_freepages(cachep, addr);
++ kmem_freepages(cachep, addr, delayed);
+ if (OFF_SLAB(cachep))
+ kmem_cache_free(cachep->slabp_cache, slabp);
+ }
+@@ -2700,9 +2755,15 @@ static void do_drain(void *arg)
+ __do_drain(arg, smp_processor_id());
+ }
+ #else
+-static void do_drain(void *arg, int this_cpu)
++static void do_drain(void *arg, int cpu)
+ {
+- __do_drain(arg, this_cpu);
++ LIST_HEAD(tmp);
++
++ lock_slab_on(cpu);
++ __do_drain(arg, cpu);
++ list_splice_init(&per_cpu(slab_free_list, cpu), &tmp);
++ unlock_slab_on(cpu);
++ free_delayed(&tmp);
+ }
+ #endif
+
+@@ -2760,7 +2821,7 @@ static int drain_freelist(struct kmem_ca
+ */
+ l3->free_objects -= cache->num;
+ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+- slab_destroy(cache, slabp);
++ slab_destroy(cache, slabp, false);
+ nr_freed++;
+ }
+ out:
+@@ -3095,7 +3156,7 @@ static int cache_grow(struct kmem_cache
+ spin_unlock(&l3->list_lock);
+ return 1;
+ opps1:
+- kmem_freepages(cachep, objp);
++ kmem_freepages(cachep, objp, false);
+ failed:
+ if (local_flags & __GFP_WAIT)
+ local_lock_irq(slab_lock);
+@@ -3772,7 +3833,7 @@ static void free_block(struct kmem_cache
+ * a different cache, refer to comments before
+ * alloc_slabmgmt.
+ */
+- slab_destroy(cachep, slabp);
++ slab_destroy(cachep, slabp, true);
+ } else {
+ list_add(&slabp->list, &l3->slabs_free);
+ }
+@@ -4039,7 +4100,7 @@ void kmem_cache_free(struct kmem_cache *
+ debug_check_no_obj_freed(objp, cachep->object_size);
+ local_lock_irqsave(slab_lock, flags);
+ __cache_free(cachep, objp, __builtin_return_address(0));
+- local_unlock_irqrestore(slab_lock, flags);
++ unlock_slab_and_free_delayed(flags);
+
+ trace_kmem_cache_free(_RET_IP_, objp);
+ }
+@@ -4070,7 +4131,7 @@ void kfree(const void *objp)
+ debug_check_no_obj_freed(objp, c->object_size);
+ local_lock_irqsave(slab_lock, flags);
+ __cache_free(c, (void *)objp, __builtin_return_address(0));
+- local_unlock_irqrestore(slab_lock, flags);
++ unlock_slab_and_free_delayed(flags);
+ }
+ EXPORT_SYMBOL(kfree);
+
+@@ -4126,7 +4187,8 @@ static int alloc_kmemlist(struct kmem_ca
+ }
+ l3->free_limit = (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+- local_spin_unlock_irq(slab_lock, &l3->list_lock);
++ unlock_l3_and_free_delayed(&l3->list_lock);
++
+ kfree(shared);
+ free_alien_cache(new_alien);
+ continue;
+@@ -4192,7 +4254,9 @@ static void do_ccupdate_local(void *info
+ #else
+ static void do_ccupdate_local(void *info, int cpu)
+ {
++ lock_slab_on(cpu);
+ __do_ccupdate_local(info, cpu);
++ unlock_slab_on(cpu);
+ }
+ #endif
+
+@@ -4234,8 +4298,8 @@ static int do_tune_cpucache(struct kmem_
+ local_spin_lock_irq(slab_lock,
+ &cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
+- local_spin_unlock_irq(slab_lock,
+- &cachep->nodelists[cpu_to_mem(i)]->list_lock);
++
++ unlock_l3_and_free_delayed(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ kfree(ccold);
+ }
+ kfree(new);
diff --git a/mm-slab-move-debug-out.patch b/mm-slab-move-debug-out.patch
new file mode 100644
index 0000000..6fc8444
--- /dev/null
+++ b/mm-slab-move-debug-out.patch
@@ -0,0 +1,39 @@
+Subject: mm-slab-move-debug-out.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 20 Jun 2011 10:42:04 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/slab.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/mm/slab.c
+===================================================================
+--- linux-stable.orig/mm/slab.c
++++ linux-stable/mm/slab.c
+@@ -3991,10 +3991,10 @@ void kmem_cache_free(struct kmem_cache *
+ {
+ unsigned long flags;
+
+- local_irq_save(flags);
+ debug_check_no_locks_freed(objp, cachep->object_size);
+ if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+ debug_check_no_obj_freed(objp, cachep->object_size);
++ local_irq_save(flags);
+ __cache_free(cachep, objp, __builtin_return_address(0));
+ local_irq_restore(flags);
+
+@@ -4020,12 +4020,12 @@ void kfree(const void *objp)
+
+ if (unlikely(ZERO_OR_NULL_PTR(objp)))
+ return;
+- local_irq_save(flags);
+ kfree_debugcheck(objp);
+ c = virt_to_cache(objp);
+ debug_check_no_locks_freed(objp, c->object_size);
+
+ debug_check_no_obj_freed(objp, c->object_size);
++ local_irq_save(flags);
+ __cache_free(c, (void *)objp, __builtin_return_address(0));
+ local_irq_restore(flags);
+ }
diff --git a/mm-slab-wrap-functions.patch b/mm-slab-wrap-functions.patch
new file mode 100644
index 0000000..1758e62
--- /dev/null
+++ b/mm-slab-wrap-functions.patch
@@ -0,0 +1,451 @@
+Subject: mm-slab-wrap-functions.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 18 Jun 2011 19:44:43 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/slab.c | 154 ++++++++++++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 105 insertions(+), 49 deletions(-)
+
+Index: linux-stable/mm/slab.c
+===================================================================
+--- linux-stable.orig/mm/slab.c
++++ linux-stable/mm/slab.c
+@@ -117,6 +117,7 @@
+ #include <linux/kmemcheck.h>
+ #include <linux/memory.h>
+ #include <linux/prefetch.h>
++#include <linux/locallock.h>
+
+ #include <net/sock.h>
+
+@@ -703,12 +704,40 @@ static void slab_set_debugobj_lock_class
+ #endif
+
+ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
++static DEFINE_LOCAL_IRQ_LOCK(slab_lock);
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++# define slab_on_each_cpu(func, cp) on_each_cpu(func, cp, 1)
++#else
++/*
++ * execute func() for all CPUs. On PREEMPT_RT we dont actually have
++ * to run on the remote CPUs - we only have to take their CPU-locks.
++ * (This is a rare operation, so cacheline bouncing is not an issue.)
++ */
++static void
++slab_on_each_cpu(void (*func)(void *arg, int this_cpu), void *arg)
++{
++ unsigned int i;
++
++ for_each_online_cpu(i) {
++ spin_lock_irq(&per_cpu(slab_lock, i).lock);
++ func(arg, i);
++ spin_unlock_irq(&per_cpu(slab_lock, i).lock);
++ }
++}
++#endif
+
+ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
+ {
+ return cachep->array[smp_processor_id()];
+ }
+
++static inline struct array_cache *cpu_cache_get_on_cpu(struct kmem_cache *cachep,
++ int cpu)
++{
++ return cachep->array[cpu];
++}
++
+ static inline struct kmem_cache *__find_general_cachep(size_t size,
+ gfp_t gfpflags)
+ {
+@@ -1175,9 +1204,10 @@ static void reap_alien(struct kmem_cache
+ if (l3->alien) {
+ struct array_cache *ac = l3->alien[node];
+
+- if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
++ if (ac && ac->avail &&
++ local_spin_trylock_irq(slab_lock, &ac->lock)) {
+ __drain_alien_cache(cachep, ac, node);
+- spin_unlock_irq(&ac->lock);
++ local_spin_unlock_irq(slab_lock, &ac->lock);
+ }
+ }
+ }
+@@ -1192,9 +1222,9 @@ static void drain_alien_cache(struct kme
+ for_each_online_node(i) {
+ ac = alien[i];
+ if (ac) {
+- spin_lock_irqsave(&ac->lock, flags);
++ local_spin_lock_irqsave(slab_lock, &ac->lock, flags);
+ __drain_alien_cache(cachep, ac, i);
+- spin_unlock_irqrestore(&ac->lock, flags);
++ local_spin_unlock_irqrestore(slab_lock, &ac->lock, flags);
+ }
+ }
+ }
+@@ -1273,11 +1303,11 @@ static int init_cache_nodelists_node(int
+ cachep->nodelists[node] = l3;
+ }
+
+- spin_lock_irq(&cachep->nodelists[node]->list_lock);
++ local_spin_lock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
+ cachep->nodelists[node]->free_limit =
+ (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+- spin_unlock_irq(&cachep->nodelists[node]->list_lock);
++ local_spin_unlock_irq(slab_lock, &cachep->nodelists[node]->list_lock);
+ }
+ return 0;
+ }
+@@ -1302,7 +1332,7 @@ static void __cpuinit cpuup_canceled(lon
+ if (!l3)
+ goto free_array_cache;
+
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+
+ /* Free limit for this kmem_list3 */
+ l3->free_limit -= cachep->batchcount;
+@@ -1310,7 +1340,7 @@ static void __cpuinit cpuup_canceled(lon
+ free_block(cachep, nc->entry, nc->avail, node);
+
+ if (!cpumask_empty(mask)) {
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ goto free_array_cache;
+ }
+
+@@ -1324,7 +1354,7 @@ static void __cpuinit cpuup_canceled(lon
+ alien = l3->alien;
+ l3->alien = NULL;
+
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+
+ kfree(shared);
+ if (alien) {
+@@ -1398,7 +1428,7 @@ static int __cpuinit cpuup_prepare(long
+ l3 = cachep->nodelists[node];
+ BUG_ON(!l3);
+
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+ if (!l3->shared) {
+ /*
+ * We are serialised from CPU_DEAD or
+@@ -1413,7 +1443,7 @@ static int __cpuinit cpuup_prepare(long
+ alien = NULL;
+ }
+ #endif
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ kfree(shared);
+ free_alien_cache(alien);
+ if (cachep->flags & SLAB_DEBUG_OBJECTS)
+@@ -1604,6 +1634,8 @@ void __init kmem_cache_init(void)
+ if (num_possible_nodes() == 1)
+ use_alien_caches = 0;
+
++ local_irq_lock_init(slab_lock);
++
+ for (i = 0; i < NUM_INIT_LISTS; i++) {
+ kmem_list3_init(&initkmem_list3[i]);
+ if (i < MAX_NUMNODES)
+@@ -2614,7 +2646,7 @@ __kmem_cache_create (const char *name, s
+ #if DEBUG
+ static void check_irq_off(void)
+ {
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ }
+
+ static void check_irq_on(void)
+@@ -2649,26 +2681,37 @@ static void drain_array(struct kmem_cach
+ struct array_cache *ac,
+ int force, int node);
+
+-static void do_drain(void *arg)
++static void __do_drain(void *arg, unsigned int cpu)
+ {
+ struct kmem_cache *cachep = arg;
+ struct array_cache *ac;
+- int node = numa_mem_id();
++ int node = cpu_to_mem(cpu);
+
+- check_irq_off();
+- ac = cpu_cache_get(cachep);
++ ac = cpu_cache_get_on_cpu(cachep, cpu);
+ spin_lock(&cachep->nodelists[node]->list_lock);
+ free_block(cachep, ac->entry, ac->avail, node);
+ spin_unlock(&cachep->nodelists[node]->list_lock);
+ ac->avail = 0;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_BASE
++static void do_drain(void *arg)
++{
++ __do_drain(arg, smp_processor_id());
++}
++#else
++static void do_drain(void *arg, int this_cpu)
++{
++ __do_drain(arg, this_cpu);
++}
++#endif
++
+ static void drain_cpu_caches(struct kmem_cache *cachep)
+ {
+ struct kmem_list3 *l3;
+ int node;
+
+- on_each_cpu(do_drain, cachep, 1);
++ slab_on_each_cpu(do_drain, cachep);
+ check_irq_on();
+ for_each_online_node(node) {
+ l3 = cachep->nodelists[node];
+@@ -2699,10 +2742,10 @@ static int drain_freelist(struct kmem_ca
+ nr_freed = 0;
+ while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
+
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+ p = l3->slabs_free.prev;
+ if (p == &l3->slabs_free) {
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ goto out;
+ }
+
+@@ -2716,7 +2759,7 @@ static int drain_freelist(struct kmem_ca
+ * to the cache.
+ */
+ l3->free_objects -= cache->num;
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ slab_destroy(cache, slabp);
+ nr_freed++;
+ }
+@@ -3011,7 +3054,7 @@ static int cache_grow(struct kmem_cache
+ offset *= cachep->colour_off;
+
+ if (local_flags & __GFP_WAIT)
+- local_irq_enable();
++ local_unlock_irq(slab_lock);
+
+ /*
+ * The test for missing atomic flag is performed here, rather than
+@@ -3041,7 +3084,7 @@ static int cache_grow(struct kmem_cache
+ cache_init_objs(cachep, slabp);
+
+ if (local_flags & __GFP_WAIT)
+- local_irq_disable();
++ local_lock_irq(slab_lock);
+ check_irq_off();
+ spin_lock(&l3->list_lock);
+
+@@ -3055,7 +3098,7 @@ opps1:
+ kmem_freepages(cachep, objp);
+ failed:
+ if (local_flags & __GFP_WAIT)
+- local_irq_disable();
++ local_lock_irq(slab_lock);
+ return 0;
+ }
+
+@@ -3469,11 +3512,11 @@ retry:
+ * set and go into memory reserves if necessary.
+ */
+ if (local_flags & __GFP_WAIT)
+- local_irq_enable();
++ local_unlock_irq(slab_lock);
+ kmem_flagcheck(cache, flags);
+ obj = kmem_getpages(cache, local_flags, numa_mem_id());
+ if (local_flags & __GFP_WAIT)
+- local_irq_disable();
++ local_lock_irq(slab_lock);
+ if (obj) {
+ /*
+ * Insert into the appropriate per node queues
+@@ -3591,7 +3634,7 @@ __cache_alloc_node(struct kmem_cache *ca
+ return NULL;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+- local_irq_save(save_flags);
++ local_lock_irqsave(slab_lock, save_flags);
+
+ if (nodeid == NUMA_NO_NODE)
+ nodeid = slab_node;
+@@ -3616,7 +3659,7 @@ __cache_alloc_node(struct kmem_cache *ca
+ /* ___cache_alloc_node can fall back to other nodes */
+ ptr = ____cache_alloc_node(cachep, flags, nodeid);
+ out:
+- local_irq_restore(save_flags);
++ local_unlock_irqrestore(slab_lock, save_flags);
+ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+ kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
+ flags);
+@@ -3676,9 +3719,9 @@ __cache_alloc(struct kmem_cache *cachep,
+ return NULL;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+- local_irq_save(save_flags);
++ local_lock_irqsave(slab_lock, save_flags);
+ objp = __do_cache_alloc(cachep, flags);
+- local_irq_restore(save_flags);
++ local_unlock_irqrestore(slab_lock, save_flags);
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
+ flags);
+@@ -3994,9 +4037,9 @@ void kmem_cache_free(struct kmem_cache *
+ debug_check_no_locks_freed(objp, cachep->object_size);
+ if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+ debug_check_no_obj_freed(objp, cachep->object_size);
+- local_irq_save(flags);
++ local_lock_irqsave(slab_lock, flags);
+ __cache_free(cachep, objp, __builtin_return_address(0));
+- local_irq_restore(flags);
++ local_unlock_irqrestore(slab_lock, flags);
+
+ trace_kmem_cache_free(_RET_IP_, objp);
+ }
+@@ -4025,9 +4068,9 @@ void kfree(const void *objp)
+ debug_check_no_locks_freed(objp, c->object_size);
+
+ debug_check_no_obj_freed(objp, c->object_size);
+- local_irq_save(flags);
++ local_lock_irqsave(slab_lock, flags);
+ __cache_free(c, (void *)objp, __builtin_return_address(0));
+- local_irq_restore(flags);
++ local_unlock_irqrestore(slab_lock, flags);
+ }
+ EXPORT_SYMBOL(kfree);
+
+@@ -4070,7 +4113,7 @@ static int alloc_kmemlist(struct kmem_ca
+ if (l3) {
+ struct array_cache *shared = l3->shared;
+
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+
+ if (shared)
+ free_block(cachep, shared->entry,
+@@ -4083,7 +4126,7 @@ static int alloc_kmemlist(struct kmem_ca
+ }
+ l3->free_limit = (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ kfree(shared);
+ free_alien_cache(new_alien);
+ continue;
+@@ -4130,17 +4173,28 @@ struct ccupdate_struct {
+ struct array_cache *new[0];
+ };
+
+-static void do_ccupdate_local(void *info)
++static void __do_ccupdate_local(void *info, int cpu)
+ {
+ struct ccupdate_struct *new = info;
+ struct array_cache *old;
+
+- check_irq_off();
+- old = cpu_cache_get(new->cachep);
++ old = cpu_cache_get_on_cpu(new->cachep, cpu);
++
++ new->cachep->array[cpu] = new->new[cpu];
++ new->new[cpu] = old;
++}
+
+- new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
+- new->new[smp_processor_id()] = old;
++#ifndef CONFIG_PREEMPT_RT_BASE
++static void do_ccupdate_local(void *info)
++{
++ __do_ccupdate_local(info, smp_processor_id());
+ }
++#else
++static void do_ccupdate_local(void *info, int cpu)
++{
++ __do_ccupdate_local(info, cpu);
++}
++#endif
+
+ /* Always called with the slab_mutex held */
+ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
+@@ -4166,7 +4220,7 @@ static int do_tune_cpucache(struct kmem_
+ }
+ new->cachep = cachep;
+
+- on_each_cpu(do_ccupdate_local, (void *)new, 1);
++ slab_on_each_cpu(do_ccupdate_local, (void *)new);
+
+ check_irq_on();
+ cachep->batchcount = batchcount;
+@@ -4177,9 +4231,11 @@ static int do_tune_cpucache(struct kmem_
+ struct array_cache *ccold = new->new[i];
+ if (!ccold)
+ continue;
+- spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
++ local_spin_lock_irq(slab_lock,
++ &cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
+- spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
++ local_spin_unlock_irq(slab_lock,
++ &cachep->nodelists[cpu_to_mem(i)]->list_lock);
+ kfree(ccold);
+ }
+ kfree(new);
+@@ -4255,7 +4311,7 @@ static void drain_array(struct kmem_cach
+ if (ac->touched && !force) {
+ ac->touched = 0;
+ } else {
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+ if (ac->avail) {
+ tofree = force ? ac->avail : (ac->limit + 4) / 5;
+ if (tofree > ac->avail)
+@@ -4265,7 +4321,7 @@ static void drain_array(struct kmem_cach
+ memmove(ac->entry, &(ac->entry[tofree]),
+ sizeof(void *) * ac->avail);
+ }
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ }
+ }
+
+@@ -4404,7 +4460,7 @@ static int s_show(struct seq_file *m, vo
+ continue;
+
+ check_irq_on();
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+
+ list_for_each_entry(slabp, &l3->slabs_full, list) {
+ if (slabp->inuse != cachep->num && !error)
+@@ -4429,7 +4485,7 @@ static int s_show(struct seq_file *m, vo
+ if (l3->shared)
+ shared_avail += l3->shared->avail;
+
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ }
+ num_slabs += active_slabs;
+ num_objs = num_slabs * cachep->num;
+@@ -4658,13 +4714,13 @@ static int leaks_show(struct seq_file *m
+ continue;
+
+ check_irq_on();
+- spin_lock_irq(&l3->list_lock);
++ local_spin_lock_irq(slab_lock, &l3->list_lock);
+
+ list_for_each_entry(slabp, &l3->slabs_full, list)
+ handle_slab(n, cachep, slabp);
+ list_for_each_entry(slabp, &l3->slabs_partial, list)
+ handle_slab(n, cachep, slabp);
+- spin_unlock_irq(&l3->list_lock);
++ local_spin_unlock_irq(slab_lock, &l3->list_lock);
+ }
+ name = cachep->name;
+ if (n[0] == n[1]) {
diff --git a/mm-vmalloc-use-get-cpu-light.patch b/mm-vmalloc-use-get-cpu-light.patch
new file mode 100644
index 0000000..59d9bf4
--- /dev/null
+++ b/mm-vmalloc-use-get-cpu-light.patch
@@ -0,0 +1,66 @@
+Subject: mm-vmalloc.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Jul 2011 11:39:36 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ mm/vmalloc.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+Index: linux-stable/mm/vmalloc.c
+===================================================================
+--- linux-stable.orig/mm/vmalloc.c
++++ linux-stable/mm/vmalloc.c
+@@ -782,7 +782,7 @@ static struct vmap_block *new_vmap_block
+ struct vmap_block *vb;
+ struct vmap_area *va;
+ unsigned long vb_idx;
+- int node, err;
++ int node, err, cpu;
+
+ node = numa_node_id();
+
+@@ -821,12 +821,13 @@ static struct vmap_block *new_vmap_block
+ BUG_ON(err);
+ radix_tree_preload_end();
+
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = &__get_cpu_var(vmap_block_queue);
+ vb->vbq = vbq;
+ spin_lock(&vbq->lock);
+ list_add_rcu(&vb->free_list, &vbq->free);
+ spin_unlock(&vbq->lock);
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+
+ return vb;
+ }
+@@ -900,7 +901,7 @@ static void *vb_alloc(unsigned long size
+ struct vmap_block *vb;
+ unsigned long addr = 0;
+ unsigned int order;
+- int purge = 0;
++ int purge = 0, cpu;
+
+ BUG_ON(size & ~PAGE_MASK);
+ BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+@@ -916,7 +917,8 @@ static void *vb_alloc(unsigned long size
+
+ again:
+ rcu_read_lock();
+- vbq = &get_cpu_var(vmap_block_queue);
++ cpu = get_cpu_light();
++ vbq = &__get_cpu_var(vmap_block_queue);
+ list_for_each_entry_rcu(vb, &vbq->free, free_list) {
+ int i;
+
+@@ -953,7 +955,7 @@ next:
+ if (purge)
+ purge_fragmented_blocks_thiscpu();
+
+- put_cpu_var(vmap_block_queue);
++ put_cpu_light();
+ rcu_read_unlock();
+
+ if (!addr) {
diff --git a/mutex-no-spin-on-rt.patch b/mutex-no-spin-on-rt.patch
new file mode 100644
index 0000000..4294d9c
--- /dev/null
+++ b/mutex-no-spin-on-rt.patch
@@ -0,0 +1,19 @@
+Subject: mutex-no-spin-on-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 21:51:45 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/Kconfig.locks | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/kernel/Kconfig.locks
+===================================================================
+--- linux-stable.orig/kernel/Kconfig.locks
++++ linux-stable/kernel/Kconfig.locks
+@@ -199,4 +199,4 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
+ def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
+
+ config MUTEX_SPIN_ON_OWNER
+- def_bool SMP && !DEBUG_MUTEXES
++ def_bool SMP && !DEBUG_MUTEXES && !PREEMPT_RT_FULL
diff --git a/net-another-local-irq-disable-alloc-atomic-headache.patch b/net-another-local-irq-disable-alloc-atomic-headache.patch
new file mode 100644
index 0000000..f6f2226
--- /dev/null
+++ b/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -0,0 +1,49 @@
+Subject: net: Another local_irq_disable/kmalloc headache
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 26 Sep 2012 16:21:08 +0200
+
+Replace it with a local lock, though that's pretty inefficient :(
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ net/core/skbuff.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+Index: linux-stable/net/core/skbuff.c
+===================================================================
+--- linux-stable.orig/net/core/skbuff.c
++++ linux-stable/net/core/skbuff.c
+@@ -60,6 +60,7 @@
+ #include <linux/scatterlist.h>
+ #include <linux/errqueue.h>
+ #include <linux/prefetch.h>
++#include <linux/locallock.h>
+
+ #include <net/protocol.h>
+ #include <net/dst.h>
+@@ -345,6 +346,7 @@ struct netdev_alloc_cache {
+ unsigned int pagecnt_bias;
+ };
+ static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
++static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+
+ #define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
+
+@@ -354,7 +356,7 @@ static void *__netdev_alloc_frag(unsigne
+ void *data = NULL;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(netdev_alloc_lock, flags);
+ nc = &__get_cpu_var(netdev_alloc_cache);
+ if (unlikely(!nc->page)) {
+ refill:
+@@ -379,7 +381,7 @@ recycle:
+ nc->offset += fragsz;
+ nc->pagecnt_bias--;
+ end:
+- local_irq_restore(flags);
++ local_unlock_irqrestore(netdev_alloc_lock, flags);
+ return data;
+ }
+
diff --git a/net-flip-lock-dep-thingy.patch b/net-flip-lock-dep-thingy.patch
new file mode 100644
index 0000000..aa2628a
--- /dev/null
+++ b/net-flip-lock-dep-thingy.patch
@@ -0,0 +1,113 @@
+Subject: net-flip-lock-dep-thingy.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 28 Jun 2011 10:59:58 +0200
+
+=======================================================
+[ INFO: possible circular locking dependency detected ]
+3.0.0-rc3+ #26
+-------------------------------------------------------
+ip/1104 is trying to acquire lock:
+ (local_softirq_lock){+.+...}, at: [<ffffffff81056d12>] __local_lock+0x25/0x68
+
+but task is already holding lock:
+ (sk_lock-AF_INET){+.+...}, at: [<ffffffff81433308>] lock_sock+0x10/0x12
+
+which lock already depends on the new lock.
+
+
+the existing dependency chain (in reverse order) is:
+
+-> #1 (sk_lock-AF_INET){+.+...}:
+ [<ffffffff810836e5>] lock_acquire+0x103/0x12e
+ [<ffffffff813e2781>] lock_sock_nested+0x82/0x92
+ [<ffffffff81433308>] lock_sock+0x10/0x12
+ [<ffffffff81433afa>] tcp_close+0x1b/0x355
+ [<ffffffff81453c99>] inet_release+0xc3/0xcd
+ [<ffffffff813dff3f>] sock_release+0x1f/0x74
+ [<ffffffff813dffbb>] sock_close+0x27/0x2b
+ [<ffffffff81129c63>] fput+0x11d/0x1e3
+ [<ffffffff81126577>] filp_close+0x70/0x7b
+ [<ffffffff8112667a>] sys_close+0xf8/0x13d
+ [<ffffffff814ae882>] system_call_fastpath+0x16/0x1b
+
+-> #0 (local_softirq_lock){+.+...}:
+ [<ffffffff81082ecc>] __lock_acquire+0xacc/0xdc8
+ [<ffffffff810836e5>] lock_acquire+0x103/0x12e
+ [<ffffffff814a7e40>] _raw_spin_lock+0x3b/0x4a
+ [<ffffffff81056d12>] __local_lock+0x25/0x68
+ [<ffffffff81056d8b>] local_bh_disable+0x36/0x3b
+ [<ffffffff814a7fc4>] _raw_write_lock_bh+0x16/0x4f
+ [<ffffffff81433c38>] tcp_close+0x159/0x355
+ [<ffffffff81453c99>] inet_release+0xc3/0xcd
+ [<ffffffff813dff3f>] sock_release+0x1f/0x74
+ [<ffffffff813dffbb>] sock_close+0x27/0x2b
+ [<ffffffff81129c63>] fput+0x11d/0x1e3
+ [<ffffffff81126577>] filp_close+0x70/0x7b
+ [<ffffffff8112667a>] sys_close+0xf8/0x13d
+ [<ffffffff814ae882>] system_call_fastpath+0x16/0x1b
+
+other info that might help us debug this:
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(sk_lock-AF_INET);
+ lock(local_softirq_lock);
+ lock(sk_lock-AF_INET);
+ lock(local_softirq_lock);
+
+ *** DEADLOCK ***
+
+1 lock held by ip/1104:
+ #0: (sk_lock-AF_INET){+.+...}, at: [<ffffffff81433308>] lock_sock+0x10/0x12
+
+stack backtrace:
+Pid: 1104, comm: ip Not tainted 3.0.0-rc3+ #26
+Call Trace:
+ [<ffffffff81081649>] print_circular_bug+0x1f8/0x209
+ [<ffffffff81082ecc>] __lock_acquire+0xacc/0xdc8
+ [<ffffffff81056d12>] ? __local_lock+0x25/0x68
+ [<ffffffff810836e5>] lock_acquire+0x103/0x12e
+ [<ffffffff81056d12>] ? __local_lock+0x25/0x68
+ [<ffffffff81046c75>] ? get_parent_ip+0x11/0x41
+ [<ffffffff814a7e40>] _raw_spin_lock+0x3b/0x4a
+ [<ffffffff81056d12>] ? __local_lock+0x25/0x68
+ [<ffffffff81046c8c>] ? get_parent_ip+0x28/0x41
+ [<ffffffff81056d12>] __local_lock+0x25/0x68
+ [<ffffffff81056d8b>] local_bh_disable+0x36/0x3b
+ [<ffffffff81433308>] ? lock_sock+0x10/0x12
+ [<ffffffff814a7fc4>] _raw_write_lock_bh+0x16/0x4f
+ [<ffffffff81433c38>] tcp_close+0x159/0x355
+ [<ffffffff81453c99>] inet_release+0xc3/0xcd
+ [<ffffffff813dff3f>] sock_release+0x1f/0x74
+ [<ffffffff813dffbb>] sock_close+0x27/0x2b
+ [<ffffffff81129c63>] fput+0x11d/0x1e3
+ [<ffffffff81126577>] filp_close+0x70/0x7b
+ [<ffffffff8112667a>] sys_close+0xf8/0x13d
+ [<ffffffff814ae882>] system_call_fastpath+0x16/0x1b
+
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ net/core/sock.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+Index: linux-stable/net/core/sock.c
+===================================================================
+--- linux-stable.orig/net/core/sock.c
++++ linux-stable/net/core/sock.c
+@@ -2204,12 +2204,11 @@ void lock_sock_nested(struct sock *sk, i
+ if (sk->sk_lock.owned)
+ __lock_sock(sk);
+ sk->sk_lock.owned = 1;
+- spin_unlock(&sk->sk_lock.slock);
++ spin_unlock_bh(&sk->sk_lock.slock);
+ /*
+ * The sk_lock has mutex_lock() semantics here:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+- local_bh_enable();
+ }
+ EXPORT_SYMBOL(lock_sock_nested);
+
diff --git a/net-netif_rx_ni-migrate-disable.patch b/net-netif_rx_ni-migrate-disable.patch
new file mode 100644
index 0000000..c148913
--- /dev/null
+++ b/net-netif_rx_ni-migrate-disable.patch
@@ -0,0 +1,27 @@
+Subject: net-netif_rx_ni-migrate-disable.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 16:29:27 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ net/core/dev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/net/core/dev.c
+===================================================================
+--- linux-stable.orig/net/core/dev.c
++++ linux-stable/net/core/dev.c
+@@ -2990,11 +2990,11 @@ int netif_rx_ni(struct sk_buff *skb)
+ {
+ int err;
+
+- preempt_disable();
++ migrate_disable();
+ err = netif_rx(skb);
+ if (local_softirq_pending())
+ thread_do_softirq();
+- preempt_enable();
++ migrate_enable();
+
+ return err;
+ }
diff --git a/net-tx-action-avoid-livelock-on-rt.patch b/net-tx-action-avoid-livelock-on-rt.patch
new file mode 100644
index 0000000..38eadcd
--- /dev/null
+++ b/net-tx-action-avoid-livelock-on-rt.patch
@@ -0,0 +1,94 @@
+Subject: net: Avoid livelock in net_tx_action() on RT
+From: Steven Rostedt <srostedt@redhat.com>
+Date: Thu, 06 Oct 2011 10:48:39 -0400
+
+qdisc_lock is taken w/o disabling interrupts or bottom halves. So code
+holding a qdisc_lock() can be interrupted and softirqs can run on
+return from interrupt in !RT.
+
+The spin_trylock() in net_tx_action() makes sure that the softirq
+does not deadlock. When the lock can't be acquired, q is requeued and
+the NET_TX softirq is raised. That causes the softirq to run over and
+over.
+
+That works in mainline as do_softirq() has a retry loop limit and
+leaves the softirq processing in the interrupt return path and
+schedules ksoftirqd. The task which holds qdisc_lock cannot be
+preempted, so the lock is released and either ksoftirqd or the next
+softirq in the return from interrupt path can proceed. Though it's a
+bit strange to actually run MAX_SOFTIRQ_RESTART (10) loops before it
+decides to bail out even if it's clear in the first iteration :)
+
+On RT all softirq processing is done in a FIFO thread and we don't
+have a loop limit, so ksoftirqd preempts the lock holder forever and
+unqueues and requeues until the reset button is hit.
+
+Due to the forced threading of ksoftirqd on RT we actually cannot
+deadlock on qdisc_lock because it's a "sleeping lock". So it's safe to
+replace the spin_trylock() with a spin_lock(). When contended,
+ksoftirqd is scheduled out and the lock holder can proceed.
+
+[ tglx: Massaged changelog and code comments ]
+
+Solved-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Tested-by: Carsten Emde <cbe@osadl.org>
+Cc: Clark Williams <williams@redhat.com>
+Cc: John Kacur <jkacur@redhat.com>
+Cc: Luis Claudio R. Goncalves <lclaudio@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ net/core/dev.c | 32 +++++++++++++++++++++++++++++++-
+ 1 file changed, 31 insertions(+), 1 deletion(-)
+
+Index: linux-stable/net/core/dev.c
+===================================================================
+--- linux-stable.orig/net/core/dev.c
++++ linux-stable/net/core/dev.c
+@@ -3000,6 +3000,36 @@ int netif_rx_ni(struct sk_buff *skb)
+ }
+ EXPORT_SYMBOL(netif_rx_ni);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * RT runs ksoftirqd as a real time thread and the root_lock is a
++ * "sleeping spinlock". If the trylock fails then we can go into an
++ * infinite loop when ksoftirqd preempted the task which actually
++ * holds the lock, because we requeue q and raise NET_TX softirq
++ * causing ksoftirqd to loop forever.
++ *
++ * It's safe to use spin_lock on RT here as softirqs run in thread
++ * context and cannot deadlock against the thread which is holding
++ * root_lock.
++ *
++ * On !RT the trylock might fail, but there we bail out from the
++ * softirq loop after 10 attempts which we can't do on RT. And the
++ * task holding root_lock cannot be preempted, so the only downside of
++ * that trylock is that we need 10 loops to decide that we should have
++ * given up in the first one :)
++ */
++static inline int take_root_lock(spinlock_t *lock)
++{
++ spin_lock(lock);
++ return 1;
++}
++#else
++static inline int take_root_lock(spinlock_t *lock)
++{
++ return spin_trylock(lock);
++}
++#endif
++
+ static void net_tx_action(struct softirq_action *h)
+ {
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+@@ -3038,7 +3068,7 @@ static void net_tx_action(struct softirq
+ head = head->next_sched;
+
+ root_lock = qdisc_lock(q);
+- if (spin_trylock(root_lock)) {
++ if (take_root_lock(root_lock)) {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
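
To make the failure mode concrete, here is a minimal standalone sketch, not taken from the patch itself, of the pre-patch trylock/requeue shape of net_tx_action()'s qdisc loop; example_tx_softirq() is an invented name and the actual requeueing of q is elided:

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>

/* Sketch of the pre-patch shape of net_tx_action()'s per-qdisc work. */
static void example_tx_softirq(struct Qdisc *q)
{
	spinlock_t *root_lock = qdisc_lock(q);

	if (spin_trylock(root_lock)) {
		qdisc_run(q);		/* lock acquired: make progress */
		spin_unlock(root_lock);
	} else {
		/*
		 * Lock is held, on RT possibly by a task this very softirq
		 * thread preempted.  Requeue q (elided here) and raise
		 * NET_TX again; without a restart limit this repeats
		 * forever, which is the livelock described above.
		 */
		raise_softirq(NET_TX_SOFTIRQ);
	}
}
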
diff --git a/net-use-cpu-chill.patch b/net-use-cpu-chill.patch
new file mode 100644
index 0000000..14565a9
--- /dev/null
+++ b/net-use-cpu-chill.patch
@@ -0,0 +1,66 @@
+Subject: net: Use cpu_chill() instead of cpu_relax()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 07 Mar 2012 21:10:04 +0100
+
+Retry loops on RT might loop forever when the modifying side was
+preempted. Use cpu_chill() instead of cpu_relax() to let the system
+make progress.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ net/packet/af_packet.c | 5 +++--
+ net/rds/ib_rdma.c | 3 ++-
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+Index: linux-stable/net/packet/af_packet.c
+===================================================================
+--- linux-stable.orig/net/packet/af_packet.c
++++ linux-stable/net/packet/af_packet.c
+@@ -88,6 +88,7 @@
+ #include <linux/virtio_net.h>
+ #include <linux/errqueue.h>
+ #include <linux/net_tstamp.h>
++#include <linux/delay.h>
+
+ #ifdef CONFIG_INET
+ #include <net/inet_common.h>
+@@ -666,7 +667,7 @@ static void prb_retire_rx_blk_timer_expi
+ if (BLOCK_NUM_PKTS(pbd)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+
+@@ -920,7 +921,7 @@ static void prb_retire_current_block(str
+ if (!(status & TP_STATUS_BLK_TMO)) {
+ while (atomic_read(&pkc->blk_fill_in_prog)) {
+ /* Waiting for skb_copy_bits to finish... */
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+ prb_close_block(pkc, pbd, po, status);
+Index: linux-stable/net/rds/ib_rdma.c
+===================================================================
+--- linux-stable.orig/net/rds/ib_rdma.c
++++ linux-stable/net/rds/ib_rdma.c
+@@ -34,6 +34,7 @@
+ #include <linux/slab.h>
+ #include <linux/rculist.h>
+ #include <linux/llist.h>
++#include <linux/delay.h>
+
+ #include "rds.h"
+ #include "ib.h"
+@@ -286,7 +287,7 @@ static inline void wait_clean_list_grace
+ for_each_online_cpu(cpu) {
+ flag = &per_cpu(clean_list_grace, cpu);
+ while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
+- cpu_relax();
++ cpu_chill();
+ }
+ }
+
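
As an illustration of the converted pattern, here is a minimal sketch, not from the patch itself, of such a wait loop; wait_for_copy_done() and the busy flag are invented names, and it assumes the cpu_chill() helper (a short sleep on RT) introduced elsewhere in this patch queue:

#include <linux/atomic.h>
#include <linux/delay.h>

/* Spin until the updater clears the busy flag. */
static void wait_for_copy_done(atomic_t *busy)
{
	while (atomic_read(busy)) {
		/*
		 * cpu_relax() keeps burning this CPU; if the updater was
		 * preempted by this (higher priority) task on RT, it never
		 * runs again.  cpu_chill() sleeps briefly and lets the
		 * updater finish.
		 */
		cpu_chill();
	}
}
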
diff --git a/net-use-cpu-light-in-ip-send-unicast-reply.patch b/net-use-cpu-light-in-ip-send-unicast-reply.patch
new file mode 100644
index 0000000..4380282
--- /dev/null
+++ b/net-use-cpu-light-in-ip-send-unicast-reply.patch
@@ -0,0 +1,32 @@
+Subject: net: Use get_cpu_light() in ip_send_unicast_reply()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 01 Oct 2012 17:12:35 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ net/ipv4/ip_output.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+Index: linux-stable/net/ipv4/ip_output.c
+===================================================================
+--- linux-stable.orig/net/ipv4/ip_output.c
++++ linux-stable/net/ipv4/ip_output.c
+@@ -1516,7 +1516,8 @@ void ip_send_unicast_reply(struct net *n
+ if (IS_ERR(rt))
+ return;
+
+- inet = &get_cpu_var(unicast_sock);
++ get_cpu_light();
++ inet = &__get_cpu_var(unicast_sock);
+
+ inet->tos = arg->tos;
+ sk = &inet->sk;
+@@ -1540,7 +1541,7 @@ void ip_send_unicast_reply(struct net *n
+ ip_push_pending_frames(sk, &fl4);
+ }
+
+- put_cpu_var(unicast_sock);
++ put_cpu_light();
+
+ ip_rt_put(rt);
+ }
diff --git a/net-wireless-warn-nort.patch b/net-wireless-warn-nort.patch
new file mode 100644
index 0000000..3cdda54
--- /dev/null
+++ b/net-wireless-warn-nort.patch
@@ -0,0 +1,22 @@
+Subject: net-wireless-warn-nort.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 21 Jul 2011 21:05:33 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ net/mac80211/rx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/net/mac80211/rx.c
+===================================================================
+--- linux-stable.orig/net/mac80211/rx.c
++++ linux-stable/net/mac80211/rx.c
+@@ -2960,7 +2960,7 @@ void ieee80211_rx(struct ieee80211_hw *h
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+
+- WARN_ON_ONCE(softirq_count() == 0);
++ WARN_ON_ONCE_NONRT(softirq_count() == 0);
+
+ if (WARN_ON(status->band < 0 ||
+ status->band >= IEEE80211_NUM_BANDS))
diff --git a/ntp-make-ntp-lock-raw-sigh.patch b/ntp-make-ntp-lock-raw-sigh.patch
new file mode 100644
index 0000000..72d9875
--- /dev/null
+++ b/ntp-make-ntp-lock-raw-sigh.patch
@@ -0,0 +1,127 @@
+Subject: ntp: Make ntp_lock raw.
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 10 Apr 2012 11:14:55 +0200
+
+This needs to be revisited. Not sure whether we can avoid making this
+lock raw, but I'd really like to.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/time/ntp.c | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+Index: linux-stable/kernel/time/ntp.c
+===================================================================
+--- linux-stable.orig/kernel/time/ntp.c
++++ linux-stable/kernel/time/ntp.c
+@@ -22,7 +22,7 @@
+ * NTP timekeeping variables:
+ */
+
+-DEFINE_SPINLOCK(ntp_lock);
++DEFINE_RAW_SPINLOCK(ntp_lock);
+
+
+ /* USER_HZ period (usecs): */
+@@ -347,7 +347,7 @@ void ntp_clear(void)
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&ntp_lock, flags);
++ raw_spin_lock_irqsave(&ntp_lock, flags);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+@@ -361,7 +361,7 @@ void ntp_clear(void)
+
+ /* Clear PPS state variables */
+ pps_clear();
+- spin_unlock_irqrestore(&ntp_lock, flags);
++ raw_spin_unlock_irqrestore(&ntp_lock, flags);
+
+ }
+
+@@ -371,9 +371,9 @@ u64 ntp_tick_length(void)
+ unsigned long flags;
+ s64 ret;
+
+- spin_lock_irqsave(&ntp_lock, flags);
++ raw_spin_lock_irqsave(&ntp_lock, flags);
+ ret = tick_length;
+- spin_unlock_irqrestore(&ntp_lock, flags);
++ raw_spin_unlock_irqrestore(&ntp_lock, flags);
+ return ret;
+ }
+
+@@ -394,7 +394,7 @@ int second_overflow(unsigned long secs)
+ int leap = 0;
+ unsigned long flags;
+
+- spin_lock_irqsave(&ntp_lock, flags);
++ raw_spin_lock_irqsave(&ntp_lock, flags);
+
+ /*
+ * Leap second processing. If in leap-insert state at the end of the
+@@ -478,7 +478,7 @@ int second_overflow(unsigned long secs)
+ time_adjust = 0;
+
+ out:
+- spin_unlock_irqrestore(&ntp_lock, flags);
++ raw_spin_unlock_irqrestore(&ntp_lock, flags);
+
+ return leap;
+ }
+@@ -660,7 +660,7 @@ int do_adjtimex(struct timex *txc)
+
+ getnstimeofday(&ts);
+
+- spin_lock_irq(&ntp_lock);
++ raw_spin_lock_irq(&ntp_lock);
+
+ if (txc->modes & ADJ_ADJTIME) {
+ long save_adjust = time_adjust;
+@@ -702,7 +702,7 @@ int do_adjtimex(struct timex *txc)
+ /* fill PPS status fields */
+ pps_fill_timex(txc);
+
+- spin_unlock_irq(&ntp_lock);
++ raw_spin_unlock_irq(&ntp_lock);
+
+ txc->time.tv_sec = ts.tv_sec;
+ txc->time.tv_usec = ts.tv_nsec;
+@@ -900,7 +900,7 @@ void hardpps(const struct timespec *phas
+
+ pts_norm = pps_normalize_ts(*phase_ts);
+
+- spin_lock_irqsave(&ntp_lock, flags);
++ raw_spin_lock_irqsave(&ntp_lock, flags);
+
+ /* clear the error bits, they will be set again if needed */
+ time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
+@@ -913,7 +913,7 @@ void hardpps(const struct timespec *phas
+ * just start the frequency interval */
+ if (unlikely(pps_fbase.tv_sec == 0)) {
+ pps_fbase = *raw_ts;
+- spin_unlock_irqrestore(&ntp_lock, flags);
++ raw_spin_unlock_irqrestore(&ntp_lock, flags);
+ return;
+ }
+
+@@ -928,7 +928,7 @@ void hardpps(const struct timespec *phas
+ time_status |= STA_PPSJITTER;
+ /* restart the frequency calibration interval */
+ pps_fbase = *raw_ts;
+- spin_unlock_irqrestore(&ntp_lock, flags);
++ raw_spin_unlock_irqrestore(&ntp_lock, flags);
+ pr_err("hardpps: PPSJITTER: bad pulse\n");
+ return;
+ }
+@@ -945,7 +945,7 @@ void hardpps(const struct timespec *phas
+
+ hardpps_update_phase(pts_norm.nsec);
+
+- spin_unlock_irqrestore(&ntp_lock, flags);
++ raw_spin_unlock_irqrestore(&ntp_lock, flags);
+ }
+ EXPORT_SYMBOL(hardpps);
+
diff --git a/of-convert-devtree-lock.patch b/of-convert-devtree-lock.patch
new file mode 100644
index 0000000..4d901b5
--- /dev/null
+++ b/of-convert-devtree-lock.patch
@@ -0,0 +1,392 @@
+Subject: of-convert-devtree-lock.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 21 Mar 2011 14:35:34 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/sparc/kernel/prom_common.c | 4 -
+ drivers/of/base.c | 92 ++++++++++++++++++++++------------------
+ include/linux/of.h | 2
+ 3 files changed, 55 insertions(+), 43 deletions(-)
+
+Index: linux-stable/arch/sparc/kernel/prom_common.c
+===================================================================
+--- linux-stable.orig/arch/sparc/kernel/prom_common.c
++++ linux-stable/arch/sparc/kernel/prom_common.c
+@@ -64,7 +64,7 @@ int of_set_property(struct device_node *
+ err = -ENODEV;
+
+ mutex_lock(&of_set_property_mutex);
+- write_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ prevp = &dp->properties;
+ while (*prevp) {
+ struct property *prop = *prevp;
+@@ -91,7 +91,7 @@ int of_set_property(struct device_node *
+ }
+ prevp = &(*prevp)->next;
+ }
+- write_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ mutex_unlock(&of_set_property_mutex);
+
+ /* XXX Upate procfs if necessary... */
+Index: linux-stable/drivers/of/base.c
+===================================================================
+--- linux-stable.orig/drivers/of/base.c
++++ linux-stable/drivers/of/base.c
+@@ -54,7 +54,7 @@ static DEFINE_MUTEX(of_aliases_mutex);
+ /* use when traversing tree through the allnext, child, sibling,
+ * or parent members of struct device_node.
+ */
+-DEFINE_RWLOCK(devtree_lock);
++DEFINE_RAW_SPINLOCK(devtree_lock);
+
+ int of_n_addr_cells(struct device_node *np)
+ {
+@@ -187,10 +187,11 @@ struct property *of_find_property(const
+ int *lenp)
+ {
+ struct property *pp;
++ unsigned long flags;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ pp = __of_find_property(np, name, lenp);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+ return pp;
+ }
+@@ -208,13 +209,13 @@ struct device_node *of_find_all_nodes(st
+ {
+ struct device_node *np;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ np = prev ? prev->allnext : allnodes;
+ for (; np != NULL; np = np->allnext)
+ if (of_node_get(np))
+ break;
+ of_node_put(prev);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_all_nodes);
+@@ -273,11 +274,12 @@ static int __of_device_is_compatible(con
+ int of_device_is_compatible(const struct device_node *device,
+ const char *compat)
+ {
++ unsigned long flags;
+ int res;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ res = __of_device_is_compatible(device, compat);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return res;
+ }
+ EXPORT_SYMBOL(of_device_is_compatible);
+@@ -339,13 +341,14 @@ EXPORT_SYMBOL(of_device_is_available);
+ struct device_node *of_get_parent(const struct device_node *node)
+ {
+ struct device_node *np;
++ unsigned long flags;
+
+ if (!node)
+ return NULL;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = of_node_get(node->parent);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_get_parent);
+@@ -364,14 +367,15 @@ EXPORT_SYMBOL(of_get_parent);
+ struct device_node *of_get_next_parent(struct device_node *node)
+ {
+ struct device_node *parent;
++ unsigned long flags;
+
+ if (!node)
+ return NULL;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ parent = of_node_get(node->parent);
+ of_node_put(node);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return parent;
+ }
+
+@@ -387,14 +391,15 @@ struct device_node *of_get_next_child(co
+ struct device_node *prev)
+ {
+ struct device_node *next;
++ unsigned long flags;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = prev ? prev->sibling : node->child;
+ for (; next; next = next->sibling)
+ if (of_node_get(next))
+ break;
+ of_node_put(prev);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return next;
+ }
+ EXPORT_SYMBOL(of_get_next_child);
+@@ -436,14 +441,15 @@ EXPORT_SYMBOL(of_get_next_available_chil
+ struct device_node *of_find_node_by_path(const char *path)
+ {
+ struct device_node *np = allnodes;
++ unsigned long flags;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ for (; np; np = np->allnext) {
+ if (np->full_name && (of_node_cmp(np->full_name, path) == 0)
+ && of_node_get(np))
+ break;
+ }
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_path);
+@@ -463,15 +469,16 @@ struct device_node *of_find_node_by_name
+ const char *name)
+ {
+ struct device_node *np;
++ unsigned long flags;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext)
+ if (np->name && (of_node_cmp(np->name, name) == 0)
+ && of_node_get(np))
+ break;
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_name);
+@@ -492,15 +499,16 @@ struct device_node *of_find_node_by_type
+ const char *type)
+ {
+ struct device_node *np;
++ unsigned long flags;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext)
+ if (np->type && (of_node_cmp(np->type, type) == 0)
+ && of_node_get(np))
+ break;
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_type);
+@@ -523,8 +531,9 @@ struct device_node *of_find_compatible_n
+ const char *type, const char *compatible)
+ {
+ struct device_node *np;
++ unsigned long flags;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+ if (type
+@@ -535,7 +544,7 @@ struct device_node *of_find_compatible_n
+ break;
+ }
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_compatible_node);
+@@ -557,8 +566,9 @@ struct device_node *of_find_node_with_pr
+ {
+ struct device_node *np;
+ struct property *pp;
++ unsigned long flags;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+ for (pp = np->properties; pp; pp = pp->next) {
+@@ -570,7 +580,7 @@ struct device_node *of_find_node_with_pr
+ }
+ out:
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_with_property);
+@@ -611,10 +621,11 @@ const struct of_device_id *of_match_node
+ const struct device_node *node)
+ {
+ const struct of_device_id *match;
++ unsigned long flags;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ match = __of_match_node(matches, node);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return match;
+ }
+ EXPORT_SYMBOL(of_match_node);
+@@ -635,15 +646,16 @@ struct device_node *of_find_matching_nod
+ const struct of_device_id *matches)
+ {
+ struct device_node *np;
++ unsigned long flags;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+ if (__of_match_node(matches, np) && of_node_get(np))
+ break;
+ }
+ of_node_put(from);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_matching_node);
+@@ -686,12 +698,12 @@ struct device_node *of_find_node_by_phan
+ {
+ struct device_node *np;
+
+- read_lock(&devtree_lock);
++ raw_spin_lock(&devtree_lock);
+ for (np = allnodes; np; np = np->allnext)
+ if (np->phandle == handle)
+ break;
+ of_node_get(np);
+- read_unlock(&devtree_lock);
++ raw_spin_unlock(&devtree_lock);
+ return np;
+ }
+ EXPORT_SYMBOL(of_find_node_by_phandle);
+@@ -1063,18 +1075,18 @@ int prom_add_property(struct device_node
+ unsigned long flags;
+
+ prop->next = NULL;
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = &np->properties;
+ while (*next) {
+ if (strcmp(prop->name, (*next)->name) == 0) {
+ /* duplicate ! don't insert it */
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return -1;
+ }
+ next = &(*next)->next;
+ }
+ *next = prop;
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+ #ifdef CONFIG_PROC_DEVICETREE
+ /* try to add to proc as well if it was initialized */
+@@ -1099,7 +1111,7 @@ int prom_remove_property(struct device_n
+ unsigned long flags;
+ int found = 0;
+
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = &np->properties;
+ while (*next) {
+ if (*next == prop) {
+@@ -1112,7 +1124,7 @@ int prom_remove_property(struct device_n
+ }
+ next = &(*next)->next;
+ }
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+ if (!found)
+ return -ENODEV;
+@@ -1149,7 +1161,7 @@ int prom_update_property(struct device_n
+ if (!oldprop)
+ return prom_add_property(np, newprop);
+
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ next = &np->properties;
+ while (*next) {
+ if (*next == oldprop) {
+@@ -1163,7 +1175,7 @@ int prom_update_property(struct device_n
+ }
+ next = &(*next)->next;
+ }
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+ if (!found)
+ return -ENODEV;
+@@ -1193,12 +1205,12 @@ void of_attach_node(struct device_node *
+ {
+ unsigned long flags;
+
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+ np->sibling = np->parent->child;
+ np->allnext = allnodes;
+ np->parent->child = np;
+ allnodes = np;
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ }
+
+ /**
+@@ -1212,7 +1224,7 @@ void of_detach_node(struct device_node *
+ struct device_node *parent;
+ unsigned long flags;
+
+- write_lock_irqsave(&devtree_lock, flags);
++ raw_spin_lock_irqsave(&devtree_lock, flags);
+
+ parent = np->parent;
+ if (!parent)
+@@ -1243,7 +1255,7 @@ void of_detach_node(struct device_node *
+ of_node_set_flag(np, OF_DETACHED);
+
+ out_unlock:
+- write_unlock_irqrestore(&devtree_lock, flags);
++ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ }
+ #endif /* defined(CONFIG_OF_DYNAMIC) */
+
+Index: linux-stable/include/linux/of.h
+===================================================================
+--- linux-stable.orig/include/linux/of.h
++++ linux-stable/include/linux/of.h
+@@ -91,7 +91,7 @@ static inline void of_node_put(struct de
+ extern struct device_node *allnodes;
+ extern struct device_node *of_chosen;
+ extern struct device_node *of_aliases;
+-extern rwlock_t devtree_lock;
++extern raw_spinlock_t devtree_lock;
+
+ static inline bool of_have_populated_dt(void)
+ {
diff --git a/of-fixup-recursive-locking.patch b/of-fixup-recursive-locking.patch
new file mode 100644
index 0000000..29cd9af
--- /dev/null
+++ b/of-fixup-recursive-locking.patch
@@ -0,0 +1,195 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 13 Aug 2009 09:04:10 +0200
+Subject: OF: Fixup recursive locking code paths
+
+There is no real reason to use a rwlock for devtree_lock. It could
+even be a mutex, but unfortunately it's locked from cpu hotplug
+paths which can't schedule :(
+
+So it needs to become a raw lock on rt as well. devtree_lock would be
+the only user of a raw_rw_lock, so we are better off cleaning up the
+recursive locking paths, which allows us to convert devtree_lock to a
+read_lock.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/of/base.c | 93 +++++++++++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 71 insertions(+), 22 deletions(-)
+
+Index: linux-stable/drivers/of/base.c
+===================================================================
+--- linux-stable.orig/drivers/of/base.c
++++ linux-stable/drivers/of/base.c
+@@ -163,16 +163,14 @@ void of_node_put(struct device_node *nod
+ EXPORT_SYMBOL(of_node_put);
+ #endif /* CONFIG_OF_DYNAMIC */
+
+-struct property *of_find_property(const struct device_node *np,
+- const char *name,
+- int *lenp)
++static struct property *__of_find_property(const struct device_node *np,
++ const char *name, int *lenp)
+ {
+ struct property *pp;
+
+ if (!np)
+ return NULL;
+
+- read_lock(&devtree_lock);
+ for (pp = np->properties; pp; pp = pp->next) {
+ if (of_prop_cmp(pp->name, name) == 0) {
+ if (lenp)
+@@ -180,6 +178,18 @@ struct property *of_find_property(const
+ break;
+ }
+ }
++
++ return pp;
++}
++
++struct property *of_find_property(const struct device_node *np,
++ const char *name,
++ int *lenp)
++{
++ struct property *pp;
++
++ read_lock(&devtree_lock);
++ pp = __of_find_property(np, name, lenp);
+ read_unlock(&devtree_lock);
+
+ return pp;
+@@ -213,8 +223,20 @@ EXPORT_SYMBOL(of_find_all_nodes);
+ * Find a property with a given name for a given node
+ * and return the value.
+ */
++static const void *__of_get_property(const struct device_node *np,
++ const char *name, int *lenp)
++{
++ struct property *pp = __of_find_property(np, name, lenp);
++
++ return pp ? pp->value : NULL;
++}
++
++/*
++ * Find a property with a given name for a given node
++ * and return the value.
++ */
+ const void *of_get_property(const struct device_node *np, const char *name,
+- int *lenp)
++ int *lenp)
+ {
+ struct property *pp = of_find_property(np, name, lenp);
+
+@@ -225,13 +247,13 @@ EXPORT_SYMBOL(of_get_property);
+ /** Checks if the given "compat" string matches one of the strings in
+ * the device's "compatible" property
+ */
+-int of_device_is_compatible(const struct device_node *device,
+- const char *compat)
++static int __of_device_is_compatible(const struct device_node *device,
++ const char *compat)
+ {
+ const char* cp;
+- int cplen, l;
++ int uninitialized_var(cplen), l;
+
+- cp = of_get_property(device, "compatible", &cplen);
++ cp = __of_get_property(device, "compatible", &cplen);
+ if (cp == NULL)
+ return 0;
+ while (cplen > 0) {
+@@ -244,6 +266,20 @@ int of_device_is_compatible(const struct
+
+ return 0;
+ }
++
++/** Checks if the given "compat" string matches one of the strings in
++ * the device's "compatible" property
++ */
++int of_device_is_compatible(const struct device_node *device,
++ const char *compat)
++{
++ int res;
++
++ read_lock(&devtree_lock);
++ res = __of_device_is_compatible(device, compat);
++ read_unlock(&devtree_lock);
++ return res;
++}
+ EXPORT_SYMBOL(of_device_is_compatible);
+
+ /**
+@@ -494,7 +530,8 @@ struct device_node *of_find_compatible_n
+ if (type
+ && !(np->type && (of_node_cmp(np->type, type) == 0)))
+ continue;
+- if (of_device_is_compatible(np, compatible) && of_node_get(np))
++ if (__of_device_is_compatible(np, compatible) &&
++ of_node_get(np))
+ break;
+ }
+ of_node_put(from);
+@@ -538,15 +575,9 @@ out:
+ }
+ EXPORT_SYMBOL(of_find_node_with_property);
+
+-/**
+- * of_match_node - Tell if an device_node has a matching of_match structure
+- * @matches: array of of device match structures to search in
+- * @node: the of device structure to match against
+- *
+- * Low level utility function used by device matching.
+- */
+-const struct of_device_id *of_match_node(const struct of_device_id *matches,
+- const struct device_node *node)
++static
++const struct of_device_id *__of_match_node(const struct of_device_id *matches,
++ const struct device_node *node)
+ {
+ if (!matches)
+ return NULL;
+@@ -560,14 +591,32 @@ const struct of_device_id *of_match_node
+ match &= node->type
+ && !strcmp(matches->type, node->type);
+ if (matches->compatible[0])
+- match &= of_device_is_compatible(node,
+- matches->compatible);
++ match &= __of_device_is_compatible(node,
++ matches->compatible);
+ if (match)
+ return matches;
+ matches++;
+ }
+ return NULL;
+ }
++
++/**
++ * of_match_node - Tell if an device_node has a matching of_match structure
++ * @matches: array of of device match structures to search in
++ * @node: the of device structure to match against
++ *
++ * Low level utility function used by device matching.
++ */
++const struct of_device_id *of_match_node(const struct of_device_id *matches,
++ const struct device_node *node)
++{
++ const struct of_device_id *match;
++
++ read_lock(&devtree_lock);
++ match = __of_match_node(matches, node);
++ read_unlock(&devtree_lock);
++ return match;
++}
+ EXPORT_SYMBOL(of_match_node);
+
+ /**
+@@ -590,7 +639,7 @@ struct device_node *of_find_matching_nod
+ read_lock(&devtree_lock);
+ np = from ? from->allnext : allnodes;
+ for (; np; np = np->allnext) {
+- if (of_match_node(matches, np) && of_node_get(np))
++ if (__of_match_node(matches, np) && of_node_get(np))
+ break;
+ }
+ of_node_put(from);
diff --git a/oleg-signal-rt-fix.patch b/oleg-signal-rt-fix.patch
new file mode 100644
index 0000000..bcd0658
--- /dev/null
+++ b/oleg-signal-rt-fix.patch
@@ -0,0 +1,150 @@
+Subject: signal/x86: Delay calling signals in atomic
+From: Oleg Nesterov <oleg@redhat.com>
+
+On x86_64 we must disable preemption before we enable interrupts
+for stack faults, int3 and debugging, because the current task is using
+a per CPU debug stack defined by the IST. If we schedule out, another task
+can come in and use the same stack and cause the stack to be corrupted
+and crash the kernel on return.
+
+When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and
+one of these is the spin lock used in signal handling.
+
+Some of the debug code (int3) causes do_trap() to send a signal.
+This function takes a spin lock that has been converted to a mutex
+and may therefore sleep. If this happens, the above issue with
+the corrupted stack is possible.
+
+Instead of sending the signal right away, for PREEMPT_RT and x86_64,
+the signal information is stored in the task's task_struct and
+TIF_NOTIFY_RESUME is set. Then on exit of the trap, the signal resume
+code will send the signal when preemption is enabled.
+
+[ rostedt: Switched from #ifdef CONFIG_PREEMPT_RT_FULL to
+ ARCH_RT_DELAYS_SIGNAL_SEND and added comments to the code. ]
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+
+ arch/x86/include/asm/signal.h | 13 +++++++++++++
+ arch/x86/kernel/signal.c | 8 ++++++++
+ include/linux/sched.h | 4 ++++
+ kernel/signal.c | 37 +++++++++++++++++++++++++++++++++++--
+ 4 files changed, 60 insertions(+), 2 deletions(-)
+
+Index: linux-stable/arch/x86/include/asm/signal.h
+===================================================================
+--- linux-stable.orig/arch/x86/include/asm/signal.h
++++ linux-stable/arch/x86/include/asm/signal.h
+@@ -31,6 +31,19 @@ typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+ } sigset_t;
+
++/*
++ * Because some traps use the IST stack, we must keep
++ * preemption disabled while calling do_trap(), but do_trap()
++ * may call force_sig_info() which will grab the signal spin_locks
++ * for the task, which in PREEMPT_RT_FULL are mutexes.
++ * By defining ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will
++ * set TIF_NOTIFY_RESUME and set up the signal to be sent on exit
++ * of the trap.
++ */
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
++#define ARCH_RT_DELAYS_SIGNAL_SEND
++#endif
++
+ #else
+ /* Here we must cater to libcs that poke about in kernel headers. */
+
+Index: linux-stable/arch/x86/kernel/signal.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/signal.c
++++ linux-stable/arch/x86/kernel/signal.c
+@@ -785,6 +785,14 @@ do_notify_resume(struct pt_regs *regs, v
+ mce_notify_process();
+ #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
+
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (unlikely(current->forced_info.si_signo)) {
++ struct task_struct *t = current;
++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
++ t->forced_info.si_signo = 0;
++ }
++#endif
++
+ if (thread_info_flags & _TIF_UPROBE) {
+ clear_thread_flag(TIF_UPROBE);
+ uprobe_notify_resume(regs);
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1408,6 +1408,10 @@ struct task_struct {
+ sigset_t blocked, real_blocked;
+ sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
+ struct sigpending pending;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /* TODO: move me into ->restart_block ? */
++ struct siginfo forced_info;
++#endif
+
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+Index: linux-stable/kernel/signal.c
+===================================================================
+--- linux-stable.orig/kernel/signal.c
++++ linux-stable/kernel/signal.c
+@@ -1305,8 +1305,8 @@ int do_send_sig_info(int sig, struct sig
+ * We don't want to have recursive SIGSEGV's etc, for example,
+ * that is why we also clear SIGNAL_UNKILLABLE.
+ */
+-int
+-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
++static int
++do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+ unsigned long int flags;
+ int ret, blocked, ignored;
+@@ -1331,6 +1331,39 @@ force_sig_info(int sig, struct siginfo *
+ return ret;
+ }
+
++int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
++{
++/*
++ * On some archs, PREEMPT_RT has to delay sending a signal from a trap
++ * since it can not enable preemption, and the signal code's spin_locks
++ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will
++ * send the signal on exit of the trap.
++ */
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (in_atomic()) {
++ if (WARN_ON_ONCE(t != current))
++ return 0;
++ if (WARN_ON_ONCE(t->forced_info.si_signo))
++ return 0;
++
++ if (is_si_special(info)) {
++ WARN_ON_ONCE(info != SEND_SIG_PRIV);
++ t->forced_info.si_signo = sig;
++ t->forced_info.si_errno = 0;
++ t->forced_info.si_code = SI_KERNEL;
++ t->forced_info.si_pid = 0;
++ t->forced_info.si_uid = 0;
++ } else {
++ t->forced_info = *info;
++ }
++
++ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
++ return 0;
++ }
++#endif
++ return do_force_sig_info(sig, info, t);
++}
++
+ /*
+ * Nuke all other threads in the group.
+ */
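
The core of the change is the "deliver now or record and defer" decision. Here is a minimal sketch of that decision, not the patch's actual code; struct deferred_sig and example_send_or_defer() are invented names standing in for task_struct::forced_info and the force_sig_info() changes:

#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/signal.h>

/* Invented container standing in for task_struct::forced_info. */
struct deferred_sig {
	int		signo;
	struct siginfo	info;
};

static int example_send_or_defer(struct deferred_sig *d, int sig,
				 struct siginfo *info)
{
	if (in_atomic()) {
		/*
		 * Sending would take sleeping locks on RT, so only record
		 * the request and ask for a resume notification; the
		 * exit-to-user path (do_notify_resume()) delivers it once
		 * preemption is enabled again.
		 */
		d->signo = sig;
		d->info = *info;
		set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
		return 0;
	}

	/* Preemptible context: deliver immediately. */
	return force_sig_info(sig, info, current);
}
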
diff --git a/panic-disable-random-on-rt.patch b/panic-disable-random-on-rt.patch
new file mode 100644
index 0000000..f720813
--- /dev/null
+++ b/panic-disable-random-on-rt.patch
@@ -0,0 +1,22 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/panic.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: linux-stable/kernel/panic.c
+===================================================================
+--- linux-stable.orig/kernel/panic.c
++++ linux-stable/kernel/panic.c
+@@ -371,9 +371,11 @@ static u64 oops_id;
+
+ static int init_oops_id(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!oops_id)
+ get_random_bytes(&oops_id, sizeof(oops_id));
+ else
++#endif
+ oops_id++;
+
+ return 0;
diff --git a/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
new file mode 100644
index 0000000..cbbb61c
--- /dev/null
+++ b/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -0,0 +1,166 @@
+Subject: rcu: Make ksoftirqd do RCU quiescent states
+From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Date: Wed, 5 Oct 2011 11:45:18 -0700
+
+Implementing RCU-bh in terms of RCU-preempt makes the system vulnerable
+to network-based denial-of-service attacks. This patch therefore
+makes __do_softirq() invoke rcu_bh_qs(), but only when __do_softirq()
+is running in ksoftirqd context. A wrapper layer is interposed so that
+other calls to __do_softirq() avoid invoking rcu_bh_qs(). The underlying
+function __do_softirq_common() does the actual work.
+
+The reason that rcu_bh_qs() is bad in these non-ksoftirqd contexts is
+that there might be a local_bh_enable() inside an RCU-preempt read-side
+critical section. This local_bh_enable() can invoke __do_softirq()
+directly, so if __do_softirq() were to invoke rcu_bh_qs() (which just
+calls rcu_preempt_qs() in the PREEMPT_RT_FULL case), there would be
+an illegal RCU-preempt quiescent state in the middle of an RCU-preempt
+read-side critical section. Therefore, quiescent states can only happen
+in cases where __do_softirq() is invoked directly from ksoftirqd.
+
+Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/rcupdate.h | 6 ------
+ kernel/rcutree.c | 7 ++++++-
+ kernel/rcutree.h | 1 +
+ kernel/rcutree_plugin.h | 2 +-
+ kernel/softirq.c | 20 +++++++++++++-------
+ 5 files changed, 21 insertions(+), 15 deletions(-)
+
+Index: linux-stable/include/linux/rcupdate.h
+===================================================================
+--- linux-stable.orig/include/linux/rcupdate.h
++++ linux-stable/include/linux/rcupdate.h
+@@ -195,13 +195,7 @@ static inline int rcu_preempt_depth(void
+
+ /* Internal to kernel */
+ extern void rcu_sched_qs(int cpu);
+-
+-#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void rcu_bh_qs(int cpu);
+-#else
+-static inline void rcu_bh_qs(int cpu) { }
+-#endif
+-
+ extern void rcu_check_callbacks(int cpu, int user);
+ struct notifier_block;
+ extern void rcu_idle_enter(void);
+Index: linux-stable/kernel/rcutree.c
+===================================================================
+--- linux-stable.orig/kernel/rcutree.c
++++ linux-stable/kernel/rcutree.c
+@@ -182,7 +182,12 @@ void rcu_sched_qs(int cpu)
+ rdp->passed_quiesce = 1;
+ }
+
+-#ifndef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT_FULL
++void rcu_bh_qs(int cpu)
++{
++ rcu_preempt_qs(cpu);
++}
++#else
+ void rcu_bh_qs(int cpu)
+ {
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+Index: linux-stable/kernel/rcutree.h
+===================================================================
+--- linux-stable.orig/kernel/rcutree.h
++++ linux-stable/kernel/rcutree.h
+@@ -463,6 +463,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
+ /* Forward declarations for rcutree_plugin.h */
+ static void rcu_bootup_announce(void);
+ long rcu_batches_completed(void);
++static void rcu_preempt_qs(int cpu);
+ static void rcu_preempt_note_context_switch(int cpu);
+ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
+ #ifdef CONFIG_HOTPLUG_CPU
+Index: linux-stable/kernel/rcutree_plugin.h
+===================================================================
+--- linux-stable.orig/kernel/rcutree_plugin.h
++++ linux-stable/kernel/rcutree_plugin.h
+@@ -1727,7 +1727,7 @@ static void __cpuinit rcu_prepare_kthrea
+
+ #endif /* #else #ifdef CONFIG_RCU_BOOST */
+
+-#if !defined(CONFIG_RCU_FAST_NO_HZ)
++#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
+
+ /*
+ * Check to see if any future RCU-related work will need to be done
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -139,7 +139,7 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
+ }
+
+-static void handle_pending_softirqs(u32 pending, int cpu)
++static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
+ {
+ struct softirq_action *h = softirq_vec;
+ unsigned int prev_count = preempt_count();
+@@ -162,7 +162,8 @@ static void handle_pending_softirqs(u32
+ prev_count, (unsigned int) preempt_count());
+ preempt_count() = prev_count;
+ }
+- rcu_bh_qs(cpu);
++ if (need_rcu_bh_qs)
++ rcu_bh_qs(cpu);
+ }
+ local_irq_disable();
+ }
+@@ -322,7 +323,7 @@ restart:
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
+
+- handle_pending_softirqs(pending, cpu);
++ handle_pending_softirqs(pending, cpu, 1);
+
+ pending = local_softirq_pending();
+ if (pending && --max_restart)
+@@ -393,7 +394,12 @@ static inline void ksoftirqd_clr_sched_p
+ static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
+ static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
+
+-static void __do_softirq(void);
++static void __do_softirq_common(int need_rcu_bh_qs);
++
++void __do_softirq(void)
++{
++ __do_softirq_common(0);
++}
+
+ void __init softirq_early_init(void)
+ {
+@@ -464,7 +470,7 @@ EXPORT_SYMBOL(in_serving_softirq);
+ * Called with bh and local interrupts disabled. For full RT cpu must
+ * be pinned.
+ */
+-static void __do_softirq(void)
++static void __do_softirq_common(int need_rcu_bh_qs)
+ {
+ u32 pending = local_softirq_pending();
+ int cpu = smp_processor_id();
+@@ -478,7 +484,7 @@ static void __do_softirq(void)
+
+ lockdep_softirq_enter();
+
+- handle_pending_softirqs(pending, cpu);
++ handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
+
+ pending = local_softirq_pending();
+ if (pending)
+@@ -517,7 +523,7 @@ static int __thread_do_softirq(int cpu)
+ * schedule!
+ */
+ if (local_softirq_pending())
+- __do_softirq();
++ __do_softirq_common(cpu >= 0);
+ local_unlock(local_softirq_lock);
+ unpin_current_cpu();
+ preempt_disable();
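
The constraint the changelog describes, that no quiescent state may be reported from a softirq run triggered inside an RCU-preempt read-side critical section, can be shown with a short sketch; example_reader() is invented and not part of the patch:

#include <linux/rcupdate.h>
#include <linux/interrupt.h>

static void example_reader(void)
{
	rcu_read_lock();	/* RCU-preempt read-side section begins */
	local_bh_disable();

	/* ... access data protected by both RCU and BH exclusion ... */

	local_bh_enable();	/*
				 * May invoke __do_softirq() right here.  If
				 * that run reported rcu_bh_qs(), which on RT
				 * maps to rcu_preempt_qs(), it would split
				 * this read-side critical section illegally.
				 */
	rcu_read_unlock();	/* read-side section ends */
}
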
diff --git a/pci-access-use-__wake_up_all_locked.patch b/pci-access-use-__wake_up_all_locked.patch
new file mode 100644
index 0000000..844ca4a
--- /dev/null
+++ b/pci-access-use-__wake_up_all_locked.patch
@@ -0,0 +1,27 @@
+Subject: pci: Use __wake_up_all_locked in pci_unblock_user_cfg_access()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 01 Dec 2011 00:07:16 +0100
+
+The waitqueue is protected by the pci_lock, so we can just avoid
+taking the waitqueue lock itself. That prevents the
+might_sleep()/scheduling-while-atomic problem on RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ drivers/pci/access.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/drivers/pci/access.c
+===================================================================
+--- linux-stable.orig/drivers/pci/access.c
++++ linux-stable/drivers/pci/access.c
+@@ -465,7 +465,7 @@ void pci_cfg_access_unlock(struct pci_de
+ WARN_ON(!dev->block_cfg_access);
+
+ dev->block_cfg_access = 0;
+- wake_up_all(&pci_cfg_wait);
++ wake_up_all_locked(&pci_cfg_wait);
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+ }
+ EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/perf-make-swevent-hrtimer-irqsafe.patch b/perf-make-swevent-hrtimer-irqsafe.patch
new file mode 100644
index 0000000..4c9d583
--- /dev/null
+++ b/perf-make-swevent-hrtimer-irqsafe.patch
@@ -0,0 +1,70 @@
+From: Yong Zhang <yong.zhang@windriver.com>
+Date: Wed, 11 Jul 2012 22:05:21 +0000
+Subject: perf: Make swevent hrtimer run in irq instead of softirq
+
+Otherwise we get a deadlock like below:
+
+[ 1044.042749] BUG: scheduling while atomic: ksoftirqd/21/141/0x00010003
+[ 1044.042752] INFO: lockdep is turned off.
+[ 1044.042754] Modules linked in:
+[ 1044.042757] Pid: 141, comm: ksoftirqd/21 Tainted: G W 3.4.0-rc2-rt3-23676-ga723175-dirty #29
+[ 1044.042759] Call Trace:
+[ 1044.042761] <IRQ> [<ffffffff8107d8e5>] __schedule_bug+0x65/0x80
+[ 1044.042770] [<ffffffff8168978c>] __schedule+0x83c/0xa70
+[ 1044.042775] [<ffffffff8106bdd2>] ? prepare_to_wait+0x32/0xb0
+[ 1044.042779] [<ffffffff81689a5e>] schedule+0x2e/0xa0
+[ 1044.042782] [<ffffffff81071ebd>] hrtimer_wait_for_timer+0x6d/0xb0
+[ 1044.042786] [<ffffffff8106bb30>] ? wake_up_bit+0x40/0x40
+[ 1044.042790] [<ffffffff81071f20>] hrtimer_cancel+0x20/0x40
+[ 1044.042794] [<ffffffff8111da0c>] perf_swevent_cancel_hrtimer+0x3c/0x50
+[ 1044.042798] [<ffffffff8111da31>] task_clock_event_stop+0x11/0x40
+[ 1044.042802] [<ffffffff8111da6e>] task_clock_event_del+0xe/0x10
+[ 1044.042805] [<ffffffff8111c568>] event_sched_out+0x118/0x1d0
+[ 1044.042809] [<ffffffff8111c649>] group_sched_out+0x29/0x90
+[ 1044.042813] [<ffffffff8111ed7e>] __perf_event_disable+0x18e/0x200
+[ 1044.042817] [<ffffffff8111c343>] remote_function+0x63/0x70
+[ 1044.042821] [<ffffffff810b0aae>] generic_smp_call_function_single_interrupt+0xce/0x120
+[ 1044.042826] [<ffffffff81022bc7>] smp_call_function_single_interrupt+0x27/0x40
+[ 1044.042831] [<ffffffff8168d50c>] call_function_single_interrupt+0x6c/0x80
+[ 1044.042833] <EOI> [<ffffffff811275b0>] ? perf_event_overflow+0x20/0x20
+[ 1044.042840] [<ffffffff8168b970>] ? _raw_spin_unlock_irq+0x30/0x70
+[ 1044.042844] [<ffffffff8168b976>] ? _raw_spin_unlock_irq+0x36/0x70
+[ 1044.042848] [<ffffffff810702e2>] run_hrtimer_softirq+0xc2/0x200
+[ 1044.042853] [<ffffffff811275b0>] ? perf_event_overflow+0x20/0x20
+[ 1044.042857] [<ffffffff81045265>] __do_softirq_common+0xf5/0x3a0
+[ 1044.042862] [<ffffffff81045c3d>] __thread_do_softirq+0x15d/0x200
+[ 1044.042865] [<ffffffff81045dda>] run_ksoftirqd+0xfa/0x210
+[ 1044.042869] [<ffffffff81045ce0>] ? __thread_do_softirq+0x200/0x200
+[ 1044.042873] [<ffffffff81045ce0>] ? __thread_do_softirq+0x200/0x200
+[ 1044.042877] [<ffffffff8106b596>] kthread+0xb6/0xc0
+[ 1044.042881] [<ffffffff8168b97b>] ? _raw_spin_unlock_irq+0x3b/0x70
+[ 1044.042886] [<ffffffff8168d994>] kernel_thread_helper+0x4/0x10
+[ 1044.042889] [<ffffffff8107d98c>] ? finish_task_switch+0x8c/0x110
+[ 1044.042894] [<ffffffff8168b97b>] ? _raw_spin_unlock_irq+0x3b/0x70
+[ 1044.042897] [<ffffffff8168bd5d>] ? retint_restore_args+0xe/0xe
+[ 1044.042900] [<ffffffff8106b4e0>] ? kthreadd+0x1e0/0x1e0
+[ 1044.042902] [<ffffffff8168d990>] ? gs_change+0xb/0xb
+
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/1341476476-5666-1-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+
+---
+ kernel/events/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/kernel/events/core.c
+===================================================================
+--- linux-stable.orig/kernel/events/core.c
++++ linux-stable/kernel/events/core.c
+@@ -5441,6 +5441,7 @@ static void perf_swevent_init_hrtimer(st
+
+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hwc->hrtimer.function = perf_swevent_hrtimer;
++ hwc->hrtimer.irqsafe = 1;
+
+ /*
+ * Since hrtimers have a fixed rate, we can do a static freq->period
diff --git a/perf-move-irq-work-to-softirq-in-rt.patch b/perf-move-irq-work-to-softirq-in-rt.patch
new file mode 100644
index 0000000..20713df
--- /dev/null
+++ b/perf-move-irq-work-to-softirq-in-rt.patch
@@ -0,0 +1,67 @@
+Subject: x86-no-perf-irq-work-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 13 Jul 2011 14:05:05 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kernel/irq_work.c | 2 ++
+ kernel/irq_work.c | 2 ++
+ kernel/timer.c | 6 +++++-
+ 3 files changed, 9 insertions(+), 1 deletion(-)
+
+Index: linux-stable/arch/x86/kernel/irq_work.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/irq_work.c
++++ linux-stable/arch/x86/kernel/irq_work.c
+@@ -18,6 +18,7 @@ void smp_irq_work_interrupt(struct pt_re
+ irq_exit();
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void arch_irq_work_raise(void)
+ {
+ #ifdef CONFIG_X86_LOCAL_APIC
+@@ -28,3 +29,4 @@ void arch_irq_work_raise(void)
+ apic_wait_icr_idle();
+ #endif
+ }
++#endif
+Index: linux-stable/kernel/irq_work.c
+===================================================================
+--- linux-stable.orig/kernel/irq_work.c
++++ linux-stable/kernel/irq_work.c
+@@ -107,8 +107,10 @@ void irq_work_run(void)
+ if (llist_empty(this_list))
+ return;
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ BUG_ON(!in_irq());
+ BUG_ON(!irqs_disabled());
++#endif
+
+ llnode = llist_del_all(this_list);
+ while (llnode != NULL) {
+Index: linux-stable/kernel/timer.c
+===================================================================
+--- linux-stable.orig/kernel/timer.c
++++ linux-stable/kernel/timer.c
+@@ -1426,7 +1426,7 @@ void update_process_times(int user_tick)
+ scheduler_tick();
+ run_local_timers();
+ rcu_check_callbacks(cpu, user_tick);
+-#ifdef CONFIG_IRQ_WORK
++#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
+ if (in_irq())
+ irq_work_run();
+ #endif
+@@ -1440,6 +1440,10 @@ static void run_timer_softirq(struct sof
+ {
+ struct tvec_base *base = __this_cpu_read(tvec_bases);
+
++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
++ irq_work_run();
++#endif
++
+ printk_tick();
+ hrtimer_run_pending();
+
diff --git a/peter_zijlstra-frob-hrtimer.patch b/peter_zijlstra-frob-hrtimer.patch
new file mode 100644
index 0000000..0299ee8
--- /dev/null
+++ b/peter_zijlstra-frob-hrtimer.patch
@@ -0,0 +1,106 @@
+Subject: hrtimer: Don't call the timer handler from hrtimer_start
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Fri Aug 12 17:39:54 CEST 2011
+
+ [<ffffffff812de4a9>] __delay+0xf/0x11
+ [<ffffffff812e36e9>] do_raw_spin_lock+0xd2/0x13c
+ [<ffffffff815028ee>] _raw_spin_lock+0x60/0x73 rt_b->rt_runtime_lock
+ [<ffffffff81068f68>] ? sched_rt_period_timer+0xad/0x281
+ [<ffffffff81068f68>] sched_rt_period_timer+0xad/0x281
+ [<ffffffff8109e5e1>] __run_hrtimer+0x1e4/0x347
+ [<ffffffff81068ebb>] ? enqueue_rt_entity+0x36/0x36
+ [<ffffffff8109f2b1>] __hrtimer_start_range_ns+0x2b5/0x40a base->cpu_base->lock (lock_hrtimer_base)
+ [<ffffffff81068b6f>] __enqueue_rt_entity+0x26f/0x2aa rt_b->rt_runtime_lock (start_rt_bandwidth)
+ [<ffffffff81068ead>] enqueue_rt_entity+0x28/0x36
+ [<ffffffff81069355>] enqueue_task_rt+0x3d/0xb0
+ [<ffffffff810679d6>] enqueue_task+0x5d/0x64
+ [<ffffffff810714fc>] task_setprio+0x210/0x29c rq->lock
+ [<ffffffff810b56cb>] __rt_mutex_adjust_prio+0x25/0x2a p->pi_lock
+ [<ffffffff810b5d2c>] task_blocks_on_rt_mutex+0x196/0x20f
+
+Instead make __hrtimer_start_range_ns() return -ETIME when the timer
+is in the past. Since nobody actually uses the hrtimer_start*() return
+value it's pretty safe to wreck it.
+
+Also, it will only ever return -ETIME for timer->irqsafe || !wakeup
+timers.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+---
+ kernel/hrtimer.c | 48 +++++++++++++++++++++++-------------------------
+ 1 file changed, 23 insertions(+), 25 deletions(-)
+
+Index: linux-stable/kernel/hrtimer.c
+===================================================================
+--- linux-stable.orig/kernel/hrtimer.c
++++ linux-stable/kernel/hrtimer.c
+@@ -646,37 +646,24 @@ static inline int hrtimer_enqueue_reprog
+ struct hrtimer_clock_base *base,
+ int wakeup)
+ {
+-#ifdef CONFIG_PREEMPT_RT_BASE
+-again:
+ if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
++ if (!wakeup)
++ return -ETIME;
++
++#ifdef CONFIG_PREEMPT_RT_BASE
+ /*
+ * Move softirq based timers away from the rbtree in
+ * case it expired already. Otherwise we would have a
+ * stale base->first entry until the softirq runs.
+ */
+- if (!hrtimer_rt_defer(timer)) {
+- ktime_t now = ktime_get();
+-
+- __run_hrtimer(timer, &now);
+- /*
+- * __run_hrtimer might have requeued timer and
+- * it could be base->first again.
+- */
+- if (&timer->node == base->active.next)
+- goto again;
+- return 1;
+- }
+-#else
+- if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
++ if (!hrtimer_rt_defer(timer))
++ return -ETIME;
+ #endif
+- if (wakeup) {
+- raw_spin_unlock(&base->cpu_base->lock);
+- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+- raw_spin_lock(&base->cpu_base->lock);
+- } else
+- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ raw_spin_unlock(&base->cpu_base->lock);
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ raw_spin_lock(&base->cpu_base->lock);
+
+- return 1;
++ return 0;
+ }
+
+ return 0;
+@@ -1067,8 +1054,19 @@ int __hrtimer_start_range_ns(struct hrti
+ *
+ * XXX send_remote_softirq() ?
+ */
+- if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+- hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++ if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)) {
++ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
++ if (ret) {
++ /*
++ * In case we failed to reprogram the timer (mostly
+ * because our current timer is already elapsed),
++ * remove it again and report a failure. This avoids
++ * stale base->first entries.
++ */
++ __remove_hrtimer(timer, new_base,
++ timer->state & HRTIMER_STATE_CALLBACK, 0);
++ }
++ }
+
+ unlock_hrtimer_base(timer, &flags);
+
diff --git a/peter_zijlstra-frob-migrate_disable-2.patch b/peter_zijlstra-frob-migrate_disable-2.patch
new file mode 100644
index 0000000..d226598
--- /dev/null
+++ b/peter_zijlstra-frob-migrate_disable-2.patch
@@ -0,0 +1,186 @@
+Subject: sched: Generic migrate_disable
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Thu Aug 11 15:14:58 CEST 2011
+
+Make migrate_disable() be a preempt_disable() for !rt kernels. This
+allows generic code to use it but still enforces that these code
+sections stay relatively small.
+
+A preemptible migrate_disable() accessible for general use would allow
+people to grow arbitrary per-cpu crap instead of cleaning these things
+up.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/n/tip-275i87sl8e1jcamtchmehonm@git.kernel.org
+---
+ include/linux/preempt.h | 21 +++++++++------------
+ include/linux/sched.h | 13 +++++++++++++
+ include/linux/smp.h | 9 ++-------
+ kernel/sched/core.c | 6 ++++--
+ kernel/trace/trace.c | 2 +-
+ lib/smp_processor_id.c | 2 +-
+ 6 files changed, 30 insertions(+), 23 deletions(-)
+
+Index: linux-stable/include/linux/preempt.h
+===================================================================
+--- linux-stable.orig/include/linux/preempt.h
++++ linux-stable/include/linux/preempt.h
+@@ -108,28 +108,25 @@ do { \
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+-#ifdef CONFIG_SMP
+-extern void migrate_disable(void);
+-extern void migrate_enable(void);
+-#else
+-# define migrate_disable() do { } while (0)
+-# define migrate_enable() do { } while (0)
+-#endif
+-
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ # define preempt_disable_rt() preempt_disable()
+ # define preempt_enable_rt() preempt_enable()
+ # define preempt_disable_nort() do { } while (0)
+ # define preempt_enable_nort() do { } while (0)
+-# define migrate_disable_rt() migrate_disable()
+-# define migrate_enable_rt() migrate_enable()
++# ifdef CONFIG_SMP
++ extern void migrate_disable(void);
++ extern void migrate_enable(void);
++# else /* CONFIG_SMP */
++# define migrate_disable() do { } while (0)
++# define migrate_enable() do { } while (0)
++# endif /* CONFIG_SMP */
+ #else
+ # define preempt_disable_rt() do { } while (0)
+ # define preempt_enable_rt() do { } while (0)
+ # define preempt_disable_nort() preempt_disable()
+ # define preempt_enable_nort() preempt_enable()
+-# define migrate_disable_rt() do { } while (0)
+-# define migrate_enable_rt() do { } while (0)
++# define migrate_disable() preempt_disable()
++# define migrate_enable() preempt_enable()
+ #endif
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1275,7 +1275,9 @@ struct task_struct {
+ #endif
+
+ unsigned int policy;
++#ifdef CONFIG_PREEMPT_RT_FULL
+ int migrate_disable;
++#endif
+ int nr_cpus_allowed;
+ cpumask_t cpus_allowed;
+
+@@ -2771,11 +2773,22 @@ static inline void set_task_cpu(struct t
+
+ #endif /* CONFIG_SMP */
+
++static inline int __migrate_disabled(struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_RT_FULL
++ return p->migrate_disable;
++#else
++ return 0;
++#endif
++}
++
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+ static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
+ if (p->migrate_disable)
+ return cpumask_of(task_cpu(p));
++#endif
+
+ return &p->cpus_allowed;
+ }
+Index: linux-stable/include/linux/smp.h
+===================================================================
+--- linux-stable.orig/include/linux/smp.h
++++ linux-stable/include/linux/smp.h
+@@ -218,13 +218,8 @@ static inline void kick_all_cpus_sync(vo
+ #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu() preempt_enable()
+
+-#ifndef CONFIG_PREEMPT_RT_FULL
+-# define get_cpu_light() get_cpu()
+-# define put_cpu_light() put_cpu()
+-#else
+-# define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
+-# define put_cpu_light() migrate_enable()
+-#endif
++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
++#define put_cpu_light() migrate_enable()
+
+ /*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -5203,7 +5203,7 @@ void __cpuinit init_idle(struct task_str
+ #ifdef CONFIG_SMP
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- if (!p->migrate_disable) {
++ if (!__migrate_disabled(p)) {
+ if (p->sched_class && p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, new_mask);
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+@@ -5259,7 +5259,7 @@ int set_cpus_allowed_ptr(struct task_str
+ do_set_cpus_allowed(p, new_mask);
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ goto out;
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+@@ -5278,6 +5278,7 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
+ void migrate_disable(void)
+ {
+ struct task_struct *p = current;
+@@ -5370,6 +5371,7 @@ void migrate_enable(void)
+ preempt_enable();
+ }
+ EXPORT_SYMBOL(migrate_enable);
++#endif /* CONFIG_PREEMPT_RT_FULL */
+
+ /*
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
+Index: linux-stable/kernel/trace/trace.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace.c
++++ linux-stable/kernel/trace/trace.c
+@@ -1156,7 +1156,7 @@ tracing_generic_entry_update(struct trac
+ ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
+
+- entry->migrate_disable = (tsk) ? tsk->migrate_disable & 0xFF : 0;
++ entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
+ }
+ EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
+
+Index: linux-stable/lib/smp_processor_id.c
+===================================================================
+--- linux-stable.orig/lib/smp_processor_id.c
++++ linux-stable/lib/smp_processor_id.c
+@@ -41,7 +41,7 @@ notrace unsigned int debug_smp_processor
+
+ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
+ "code: %s/%d\n", preempt_count() - 1,
+- current->migrate_disable, current->comm, current->pid);
++ __migrate_disabled(current), current->comm, current->pid);
+ print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ dump_stack();
+
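
What the generic version gives callers can be shown with a short usage sketch, not part of the patch; example_show_cpu() is an invented function. On !RT the section below is simply a preempt-disabled region; on RT it stays preemptible but cannot migrate, so smp_processor_id() remains valid throughout (exclusion against other tasks on the same CPU still needs a lock there):

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/smp.h>

static void example_show_cpu(void)
{
	int cpu;

	migrate_disable();		/* pinned to the current CPU    */
	cpu = smp_processor_id();	/* stable for the whole section */
	pr_info("example: running on CPU %d\n", cpu);
	migrate_enable();
}
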
diff --git a/peter_zijlstra-frob-migrate_disable.patch b/peter_zijlstra-frob-migrate_disable.patch
new file mode 100644
index 0000000..1cce90c
--- /dev/null
+++ b/peter_zijlstra-frob-migrate_disable.patch
@@ -0,0 +1,69 @@
+Subject: sched: Optimize migrate_disable
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Thu Aug 11 15:03:35 CEST 2011
+
+Change from task_rq_lock() to raw_spin_lock(&rq->lock) to avoid a few
+atomic ops. See comment on why it should be safe.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/n/tip-cbz6hkl5r5mvwtx5s3tor2y6@git.kernel.org
+---
+ kernel/sched/core.c | 24 ++++++++++++++++++++----
+ 1 file changed, 20 insertions(+), 4 deletions(-)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -5298,7 +5298,19 @@ void migrate_disable(void)
+ preempt_enable();
+ return;
+ }
+- rq = task_rq_lock(p, &flags);
++
++ /*
++ * Since this is always current we can get away with only locking
++ * rq->lock, the ->cpus_allowed value can normally only be changed
++ * while holding both p->pi_lock and rq->lock, but seeing that this
++ * is current, we cannot actually be waking up, so all code that
++ * relies on serialization against p->pi_lock is out of scope.
++ *
++ * Taking rq->lock serializes us against things like
++ * set_cpus_allowed_ptr() that can still happen concurrently.
++ */
++ rq = this_rq();
++ raw_spin_lock_irqsave(&rq->lock, flags);
+ p->migrate_disable = 1;
+ mask = tsk_cpus_allowed(p);
+
+@@ -5309,7 +5321,7 @@ void migrate_disable(void)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->nr_cpus_allowed = cpumask_weight(mask);
+ }
+- task_rq_unlock(rq, p, &flags);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ preempt_enable();
+ }
+ EXPORT_SYMBOL(migrate_disable);
+@@ -5337,7 +5349,11 @@ void migrate_enable(void)
+ return;
+ }
+
+- rq = task_rq_lock(p, &flags);
++ /*
++ * See comment in migrate_disable().
++ */
++ rq = this_rq();
++ raw_spin_lock_irqsave(&rq->lock, flags);
+ p->migrate_disable = 0;
+ mask = tsk_cpus_allowed(p);
+
+@@ -5349,7 +5365,7 @@ void migrate_enable(void)
+ p->nr_cpus_allowed = cpumask_weight(mask);
+ }
+
+- task_rq_unlock(rq, p, &flags);
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ unpin_current_cpu();
+ preempt_enable();
+ }
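
For readers wondering what the replaced task_rq_lock() costs, this is roughly its mainline shape in this era (abridged and quoted from memory, so treat it as a sketch rather than the exact source):

static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
{
        struct rq *rq;

        for (;;) {
                /* extra irqsave lock plus re-check loop the patch avoids */
                raw_spin_lock_irqsave(&p->pi_lock, *flags);
                rq = task_rq(p);
                raw_spin_lock(&rq->lock);
                if (likely(rq == task_rq(p)))
                        return rq;
                raw_spin_unlock(&rq->lock);
                raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
        }
}

Because migrate_disable()/migrate_enable() only ever operate on current, the task cannot change runqueues underneath them, so this_rq() plus rq->lock gives the same serialization against set_cpus_allowed_ptr() without touching p->pi_lock.
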
diff --git a/peter_zijlstra-frob-pagefault_disable.patch b/peter_zijlstra-frob-pagefault_disable.patch
new file mode 100644
index 0000000..539c0c8
--- /dev/null
+++ b/peter_zijlstra-frob-pagefault_disable.patch
@@ -0,0 +1,388 @@
+Subject: mm: pagefault_disabled()
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Thu Aug 11 15:31:31 CEST 2011
+
+Wrap the test for pagefault_disabled() into a helper; this allows us
+to remove the need for current->pagefault_disabled on !-rt kernels.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/n/tip-3yy517m8zsi9fpsf14xfaqkw@git.kernel.org
+---
+ arch/alpha/mm/fault.c | 2 +-
+ arch/arm/mm/fault.c | 2 +-
+ arch/avr32/mm/fault.c | 3 +--
+ arch/cris/mm/fault.c | 2 +-
+ arch/frv/mm/fault.c | 2 +-
+ arch/ia64/mm/fault.c | 2 +-
+ arch/m32r/mm/fault.c | 2 +-
+ arch/m68k/mm/fault.c | 2 +-
+ arch/microblaze/mm/fault.c | 2 +-
+ arch/mips/mm/fault.c | 2 +-
+ arch/mn10300/mm/fault.c | 2 +-
+ arch/parisc/mm/fault.c | 2 +-
+ arch/powerpc/mm/fault.c | 2 +-
+ arch/s390/mm/fault.c | 6 +++---
+ arch/score/mm/fault.c | 2 +-
+ arch/sh/mm/fault.c | 2 +-
+ arch/sparc/mm/fault_32.c | 2 +-
+ arch/sparc/mm/fault_64.c | 2 +-
+ arch/tile/mm/fault.c | 2 +-
+ arch/um/kernel/trap.c | 2 +-
+ arch/x86/mm/fault.c | 2 +-
+ arch/xtensa/mm/fault.c | 2 +-
+ include/linux/sched.h | 14 ++++++++++++++
+ kernel/fork.c | 2 ++
+ 24 files changed, 40 insertions(+), 25 deletions(-)
+
+Index: linux-stable/arch/alpha/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/alpha/mm/fault.c
++++ linux-stable/arch/alpha/mm/fault.c
+@@ -108,7 +108,7 @@ do_page_fault(unsigned long address, uns
+
+ /* If we're in an interrupt context, or have no user context,
+ we must not take the fault. */
+- if (!mm || in_atomic() || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ #ifdef CONFIG_ALPHA_LARGE_VMALLOC
+Index: linux-stable/arch/arm/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/arm/mm/fault.c
++++ linux-stable/arch/arm/mm/fault.c
+@@ -279,7 +279,7 @@ do_page_fault(unsigned long addr, unsign
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ /*
+Index: linux-stable/arch/avr32/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/avr32/mm/fault.c
++++ linux-stable/arch/avr32/mm/fault.c
+@@ -81,8 +81,7 @@ asmlinkage void do_page_fault(unsigned l
+ * If we're in an interrupt or have no user context, we must
+ * not take the fault...
+ */
+- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM) ||
+- current->pagefault_disabled)
++ if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
+ goto no_context;
+
+ local_irq_enable();
+Index: linux-stable/arch/cris/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/cris/mm/fault.c
++++ linux-stable/arch/cris/mm/fault.c
+@@ -114,7 +114,7 @@ do_page_fault(unsigned long address, str
+ * user context, we must not take the fault.
+ */
+
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ retry:
+Index: linux-stable/arch/frv/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/frv/mm/fault.c
++++ linux-stable/arch/frv/mm/fault.c
+@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/ia64/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/ia64/mm/fault.c
++++ linux-stable/arch/ia64/mm/fault.c
+@@ -98,7 +98,7 @@ ia64_do_page_fault (unsigned long addres
+ /*
+ * If we're in an interrupt or have no user context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ #ifdef CONFIG_VIRTUAL_MEM_MAP
+Index: linux-stable/arch/m32r/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/m32r/mm/fault.c
++++ linux-stable/arch/m32r/mm/fault.c
+@@ -114,7 +114,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user context or are running in an
+ * atomic region then we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
+
+ /* When running in the kernel we expect faults to occur only to
+Index: linux-stable/arch/m68k/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/m68k/mm/fault.c
++++ linux-stable/arch/m68k/mm/fault.c
+@@ -85,7 +85,7 @@ int do_page_fault(struct pt_regs *regs,
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ retry:
+Index: linux-stable/arch/microblaze/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/microblaze/mm/fault.c
++++ linux-stable/arch/microblaze/mm/fault.c
+@@ -108,7 +108,7 @@ void do_page_fault(struct pt_regs *regs,
+ if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
+ is_write = 0;
+
+- if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ if (kernel_mode(regs))
+ goto bad_area_nosemaphore;
+
+Index: linux-stable/arch/mips/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/mips/mm/fault.c
++++ linux-stable/arch/mips/mm/fault.c
+@@ -89,7 +89,7 @@ asmlinkage void __kprobes do_page_fault(
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
+
+ retry:
+Index: linux-stable/arch/mn10300/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/mn10300/mm/fault.c
++++ linux-stable/arch/mn10300/mm/fault.c
+@@ -167,7 +167,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/parisc/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/parisc/mm/fault.c
++++ linux-stable/arch/parisc/mm/fault.c
+@@ -176,7 +176,7 @@ void do_page_fault(struct pt_regs *regs,
+ unsigned long acc_type;
+ int fault;
+
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/powerpc/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/powerpc/mm/fault.c
++++ linux-stable/arch/powerpc/mm/fault.c
+@@ -261,7 +261,7 @@ int __kprobes do_page_fault(struct pt_re
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
+- if (in_atomic() || mm == NULL || current->pagefault_disabled) {
++ if (!mm || pagefault_disabled()) {
+ if (!user_mode(regs))
+ return SIGSEGV;
+ /* in_atomic() in user mode is really bad,
+Index: linux-stable/arch/s390/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/s390/mm/fault.c
++++ linux-stable/arch/s390/mm/fault.c
+@@ -286,8 +286,8 @@ static inline int do_exception(struct pt
+ * user context.
+ */
+ fault = VM_FAULT_BADCONTEXT;
+- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm ||
+- tsk->pagefault_disabled))
++ if (unlikely(!user_space_fault(trans_exc_code) ||
++ !mm || pagefault_disabled()))
+ goto out;
+
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+@@ -425,7 +425,7 @@ void __kprobes do_asce_exception(struct
+
+ trans_exc_code = regs->int_parm_long;
+ if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm ||
+- current->pagefault_disabled))
++ pagefault_disabled()))
+ goto no_context;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/score/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/score/mm/fault.c
++++ linux-stable/arch/score/mm/fault.c
+@@ -72,7 +72,7 @@ asmlinkage void do_page_fault(struct pt_
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto bad_area_nosemaphore;
+
+ down_read(&mm->mmap_sem);
+Index: linux-stable/arch/sh/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/sh/mm/fault.c
++++ linux-stable/arch/sh/mm/fault.c
+@@ -445,7 +445,7 @@ asmlinkage void __kprobes do_page_fault(
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+Index: linux-stable/arch/sparc/mm/fault_32.c
+===================================================================
+--- linux-stable.orig/arch/sparc/mm/fault_32.c
++++ linux-stable/arch/sparc/mm/fault_32.c
+@@ -200,7 +200,7 @@ asmlinkage void do_sparc_fault(struct pt
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled)
++ if (!mm || pagefault_disabled())
+ goto no_context;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+Index: linux-stable/arch/sparc/mm/fault_64.c
+===================================================================
+--- linux-stable.orig/arch/sparc/mm/fault_64.c
++++ linux-stable/arch/sparc/mm/fault_64.c
+@@ -323,7 +323,7 @@ asmlinkage void __kprobes do_sparc64_fau
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_enabled)
++ if (!mm || pagefault_disabled())
+ goto intr_or_no_mm;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+Index: linux-stable/arch/tile/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/tile/mm/fault.c
++++ linux-stable/arch/tile/mm/fault.c
+@@ -359,7 +359,7 @@ static int handle_page_fault(struct pt_r
+ * If we're in an interrupt, have no user context or are running in an
+ * atomic region then we must not take the fault.
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled) {
++ if (!mm || pagefault_disabled()) {
+ vma = NULL; /* happy compiler */
+ goto bad_area_nosemaphore;
+ }
+Index: linux-stable/arch/um/kernel/trap.c
+===================================================================
+--- linux-stable.orig/arch/um/kernel/trap.c
++++ linux-stable/arch/um/kernel/trap.c
+@@ -39,7 +39,7 @@ int handle_page_fault(unsigned long addr
+ * If the fault was during atomic operation, don't take the fault, just
+ * fail.
+ */
+- if (in_atomic() || current->pagefault_disabled)
++ if (pagefault_disabled())
+ goto out_nosemaphore;
+
+ retry:
+Index: linux-stable/arch/x86/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/x86/mm/fault.c
++++ linux-stable/arch/x86/mm/fault.c
+@@ -1094,7 +1094,7 @@ do_page_fault(struct pt_regs *regs, unsi
+ * If we're in an interrupt, have no user context or are running
+ * in an atomic region then we must not take the fault:
+ */
+- if (unlikely(in_atomic() || !mm || current->pagefault_disabled)) {
++ if (unlikely(!mm || pagefault_disabled())) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
+ }
+Index: linux-stable/arch/xtensa/mm/fault.c
+===================================================================
+--- linux-stable.orig/arch/xtensa/mm/fault.c
++++ linux-stable/arch/xtensa/mm/fault.c
+@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
+ /* If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+- if (in_atomic() || !mm || current->pagefault_disabled) {
++ if (!mm || pagefault_disabled()) {
+ bad_page_fault(regs, address, SIGSEGV);
+ return;
+ }
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -91,6 +91,7 @@ struct sched_param {
+ #include <linux/cred.h>
+ #include <linux/llist.h>
+ #include <linux/uidgid.h>
++#include <linux/hardirq.h>
+
+ #include <asm/processor.h>
+
+@@ -1448,7 +1449,9 @@ struct task_struct {
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
+ int pagefault_disabled;
++#endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned int irq_events;
+ unsigned long hardirq_enable_ip;
+@@ -1600,6 +1603,17 @@ struct task_struct {
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
++#else
++static inline bool cur_pf_disabled(void) { return false; }
++#endif
++
++static inline bool pagefault_disabled(void)
++{
++ return in_atomic() || cur_pf_disabled();
++}
++
+ /*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+Index: linux-stable/kernel/fork.c
+===================================================================
+--- linux-stable.orig/kernel/fork.c
++++ linux-stable/kernel/fork.c
+@@ -1298,7 +1298,9 @@ static struct task_struct *copy_process(
+ p->hardirq_context = 0;
+ p->softirq_context = 0;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
+ p->pagefault_disabled = 0;
++#endif
+ #ifdef CONFIG_LOCKDEP
+ p->lockdep_depth = 0; /* no locks held yet */
+ p->curr_chain_key = 0;
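
For context, a sketch of the caller pattern that the new pagefault_disabled() test is there to catch (illustrative only; demo_peek_user() is not a real kernel function):

#include <linux/types.h>
#include <linux/uaccess.h>

static int demo_peek_user(u32 __user *uaddr, u32 *val)
{
        unsigned long ret;

        /*
         * With page faults disabled the fault handler must not take
         * mmap_sem or sleep; the arch handlers converted above detect
         * this via pagefault_disabled() and jump straight to the
         * exception fixup path instead.
         */
        pagefault_disable();
        ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
        pagefault_enable();

        return ret ? -EFAULT : 0;
}
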
diff --git a/peter_zijlstra-frob-rcu.patch b/peter_zijlstra-frob-rcu.patch
new file mode 100644
index 0000000..35b36bc
--- /dev/null
+++ b/peter_zijlstra-frob-rcu.patch
@@ -0,0 +1,168 @@
+Subject: rcu: Frob softirq test
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Sat Aug 13 00:23:17 CEST 2011
+
+With RT_FULL we get the below wreckage:
+
+[ 126.060484] =======================================================
+[ 126.060486] [ INFO: possible circular locking dependency detected ]
+[ 126.060489] 3.0.1-rt10+ #30
+[ 126.060490] -------------------------------------------------------
+[ 126.060492] irq/24-eth0/1235 is trying to acquire lock:
+[ 126.060495] (&(lock)->wait_lock#2){+.+...}, at: [<ffffffff81501c81>] rt_mutex_slowunlock+0x16/0x55
+[ 126.060503]
+[ 126.060504] but task is already holding lock:
+[ 126.060506] (&p->pi_lock){-...-.}, at: [<ffffffff81074fdc>] try_to_wake_up+0x35/0x429
+[ 126.060511]
+[ 126.060511] which lock already depends on the new lock.
+[ 126.060513]
+[ 126.060514]
+[ 126.060514] the existing dependency chain (in reverse order) is:
+[ 126.060516]
+[ 126.060516] -> #1 (&p->pi_lock){-...-.}:
+[ 126.060519] [<ffffffff810afe9e>] lock_acquire+0x145/0x18a
+[ 126.060524] [<ffffffff8150291e>] _raw_spin_lock_irqsave+0x4b/0x85
+[ 126.060527] [<ffffffff810b5aa4>] task_blocks_on_rt_mutex+0x36/0x20f
+[ 126.060531] [<ffffffff815019bb>] rt_mutex_slowlock+0xd1/0x15a
+[ 126.060534] [<ffffffff81501ae3>] rt_mutex_lock+0x2d/0x2f
+[ 126.060537] [<ffffffff810d9020>] rcu_boost+0xad/0xde
+[ 126.060541] [<ffffffff810d90ce>] rcu_boost_kthread+0x7d/0x9b
+[ 126.060544] [<ffffffff8109a760>] kthread+0x99/0xa1
+[ 126.060547] [<ffffffff81509b14>] kernel_thread_helper+0x4/0x10
+[ 126.060551]
+[ 126.060552] -> #0 (&(lock)->wait_lock#2){+.+...}:
+[ 126.060555] [<ffffffff810af1b8>] __lock_acquire+0x1157/0x1816
+[ 126.060558] [<ffffffff810afe9e>] lock_acquire+0x145/0x18a
+[ 126.060561] [<ffffffff8150279e>] _raw_spin_lock+0x40/0x73
+[ 126.060564] [<ffffffff81501c81>] rt_mutex_slowunlock+0x16/0x55
+[ 126.060566] [<ffffffff81501ce7>] rt_mutex_unlock+0x27/0x29
+[ 126.060569] [<ffffffff810d9f86>] rcu_read_unlock_special+0x17e/0x1c4
+[ 126.060573] [<ffffffff810da014>] __rcu_read_unlock+0x48/0x89
+[ 126.060576] [<ffffffff8106847a>] select_task_rq_rt+0xc7/0xd5
+[ 126.060580] [<ffffffff8107511c>] try_to_wake_up+0x175/0x429
+[ 126.060583] [<ffffffff81075425>] wake_up_process+0x15/0x17
+[ 126.060585] [<ffffffff81080a51>] wakeup_softirqd+0x24/0x26
+[ 126.060590] [<ffffffff81081df9>] irq_exit+0x49/0x55
+[ 126.060593] [<ffffffff8150a3bd>] smp_apic_timer_interrupt+0x8a/0x98
+[ 126.060597] [<ffffffff81509793>] apic_timer_interrupt+0x13/0x20
+[ 126.060600] [<ffffffff810d5952>] irq_forced_thread_fn+0x1b/0x44
+[ 126.060603] [<ffffffff810d582c>] irq_thread+0xde/0x1af
+[ 126.060606] [<ffffffff8109a760>] kthread+0x99/0xa1
+[ 126.060608] [<ffffffff81509b14>] kernel_thread_helper+0x4/0x10
+[ 126.060611]
+[ 126.060612] other info that might help us debug this:
+[ 126.060614]
+[ 126.060615] Possible unsafe locking scenario:
+[ 126.060616]
+[ 126.060617] CPU0 CPU1
+[ 126.060619] ---- ----
+[ 126.060620] lock(&p->pi_lock);
+[ 126.060623] lock(&(lock)->wait_lock);
+[ 126.060625] lock(&p->pi_lock);
+[ 126.060627] lock(&(lock)->wait_lock);
+[ 126.060629]
+[ 126.060629] *** DEADLOCK ***
+[ 126.060630]
+[ 126.060632] 1 lock held by irq/24-eth0/1235:
+[ 126.060633] #0: (&p->pi_lock){-...-.}, at: [<ffffffff81074fdc>] try_to_wake_up+0x35/0x429
+[ 126.060638]
+[ 126.060638] stack backtrace:
+[ 126.060641] Pid: 1235, comm: irq/24-eth0 Not tainted 3.0.1-rt10+ #30
+[ 126.060643] Call Trace:
+[ 126.060644] <IRQ> [<ffffffff810acbde>] print_circular_bug+0x289/0x29a
+[ 126.060651] [<ffffffff810af1b8>] __lock_acquire+0x1157/0x1816
+[ 126.060655] [<ffffffff810ab3aa>] ? trace_hardirqs_off_caller+0x1f/0x99
+[ 126.060658] [<ffffffff81501c81>] ? rt_mutex_slowunlock+0x16/0x55
+[ 126.060661] [<ffffffff810afe9e>] lock_acquire+0x145/0x18a
+[ 126.060664] [<ffffffff81501c81>] ? rt_mutex_slowunlock+0x16/0x55
+[ 126.060668] [<ffffffff8150279e>] _raw_spin_lock+0x40/0x73
+[ 126.060671] [<ffffffff81501c81>] ? rt_mutex_slowunlock+0x16/0x55
+[ 126.060674] [<ffffffff810d9655>] ? rcu_report_qs_rsp+0x87/0x8c
+[ 126.060677] [<ffffffff81501c81>] rt_mutex_slowunlock+0x16/0x55
+[ 126.060680] [<ffffffff810d9ea3>] ? rcu_read_unlock_special+0x9b/0x1c4
+[ 126.060683] [<ffffffff81501ce7>] rt_mutex_unlock+0x27/0x29
+[ 126.060687] [<ffffffff810d9f86>] rcu_read_unlock_special+0x17e/0x1c4
+[ 126.060690] [<ffffffff810da014>] __rcu_read_unlock+0x48/0x89
+[ 126.060693] [<ffffffff8106847a>] select_task_rq_rt+0xc7/0xd5
+[ 126.060696] [<ffffffff810683da>] ? select_task_rq_rt+0x27/0xd5
+[ 126.060701] [<ffffffff810a852a>] ? clockevents_program_event+0x8e/0x90
+[ 126.060704] [<ffffffff8107511c>] try_to_wake_up+0x175/0x429
+[ 126.060708] [<ffffffff810a95dc>] ? tick_program_event+0x1f/0x21
+[ 126.060711] [<ffffffff81075425>] wake_up_process+0x15/0x17
+[ 126.060715] [<ffffffff81080a51>] wakeup_softirqd+0x24/0x26
+[ 126.060718] [<ffffffff81081df9>] irq_exit+0x49/0x55
+[ 126.060721] [<ffffffff8150a3bd>] smp_apic_timer_interrupt+0x8a/0x98
+[ 126.060724] [<ffffffff81509793>] apic_timer_interrupt+0x13/0x20
+[ 126.060726] <EOI> [<ffffffff81072855>] ? migrate_disable+0x75/0x12d
+[ 126.060733] [<ffffffff81080a61>] ? local_bh_disable+0xe/0x1f
+[ 126.060736] [<ffffffff81080a70>] ? local_bh_disable+0x1d/0x1f
+[ 126.060739] [<ffffffff810d5952>] irq_forced_thread_fn+0x1b/0x44
+[ 126.060742] [<ffffffff81502ac0>] ? _raw_spin_unlock_irq+0x3b/0x59
+[ 126.060745] [<ffffffff810d582c>] irq_thread+0xde/0x1af
+[ 126.060748] [<ffffffff810d5937>] ? irq_thread_fn+0x3a/0x3a
+[ 126.060751] [<ffffffff810d574e>] ? irq_finalize_oneshot+0xd1/0xd1
+[ 126.060754] [<ffffffff810d574e>] ? irq_finalize_oneshot+0xd1/0xd1
+[ 126.060757] [<ffffffff8109a760>] kthread+0x99/0xa1
+[ 126.060761] [<ffffffff81509b14>] kernel_thread_helper+0x4/0x10
+[ 126.060764] [<ffffffff81069ed7>] ? finish_task_switch+0x87/0x10a
+[ 126.060768] [<ffffffff81502ec4>] ? retint_restore_args+0xe/0xe
+[ 126.060771] [<ffffffff8109a6c7>] ? __init_kthread_worker+0x8c/0x8c
+[ 126.060774] [<ffffffff81509b10>] ? gs_change+0xb/0xb
+
+Because irq_exit() does:
+
+void irq_exit(void)
+{
+ account_system_vtime(current);
+ trace_hardirq_exit();
+ sub_preempt_count(IRQ_EXIT_OFFSET);
+ if (!in_interrupt() && local_softirq_pending())
+ invoke_softirq();
+
+ ...
+}
+
+Which triggers a wakeup, which uses RCU. Now if the interrupted task has
+t->rcu_read_unlock_special set, the rcu usage from the wakeup will end
+up in rcu_read_unlock_special(). rcu_read_unlock_special() will test
+for in_irq(), which will fail as we just decremented preempt_count
+with IRQ_EXIT_OFFSET, and in_serving_softirq(), which for
+PREEMPT_RT_FULL reads:
+
+int in_serving_softirq(void)
+{
+ int res;
+
+ preempt_disable();
+ res = __get_cpu_var(local_softirq_runner) == current;
+ preempt_enable();
+ return res;
+}
+
+Which will thus also fail, resulting in the above wreckage.
+
+The 'somewhat' ugly solution is to open-code the preempt_count() test
+in rcu_read_unlock_special().
+
+Also, we're not at all sure how ->rcu_read_unlock_special gets set
+here... so this is very likely a bandaid and more thought is required.
+
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+---
+ kernel/rcutree_plugin.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/kernel/rcutree_plugin.h
+===================================================================
+--- linux-stable.orig/kernel/rcutree_plugin.h
++++ linux-stable/kernel/rcutree_plugin.h
+@@ -331,7 +331,7 @@ void rcu_read_unlock_special(struct task
+ }
+
+ /* Hardware IRQ handlers cannot block. */
+- if (in_irq() || in_serving_softirq()) {
++ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
+ local_irq_restore(flags);
+ return;
+ }
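
The open-coded test works because of how mainline encodes context in preempt_count(); roughly (abridged from include/linux/hardirq.h, from memory):

#define hardirq_count()         (preempt_count() & HARDIRQ_MASK)
#define softirq_count()         (preempt_count() & SOFTIRQ_MASK)
#define in_irq()                (hardirq_count())
#define in_serving_softirq()    (softirq_count() & SOFTIRQ_OFFSET)

Testing HARDIRQ_MASK | SOFTIRQ_OFFSET therefore reproduces, directly from the raw preempt_count, what mainline's in_irq() || in_serving_softirq() checks, sidestepping the RT redefinition of in_serving_softirq() quoted in the changelog; SOFTIRQ_OFFSET rather than SOFTIRQ_MASK is used so that a mere local_bh_disable() section does not count as softirq context.
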
diff --git a/peterz-raw_pagefault_disable.patch b/peterz-raw_pagefault_disable.patch
new file mode 100644
index 0000000..0712081
--- /dev/null
+++ b/peterz-raw_pagefault_disable.patch
@@ -0,0 +1,151 @@
+Subject: mm: raw_pagefault_disable
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Fri Aug 05 17:16:58 CEST 2011
+
+Adding migrate_disable() to pagefault_disable() to preserve the
+per-cpu thing for kmap_atomic might not have been the best of choices.
+But short of adding preempt_disable/migrate_disable foo all over the
+kmap code it still seems the best way.
+
+It does however yield the below borkage as well as wreck !-rt builds
+since !-rt does rely on pagefault_disable() not preempting. So fix all
+that up by adding raw_pagefault_disable().
+
+ <NMI> [<ffffffff81076d5c>] warn_slowpath_common+0x85/0x9d
+ [<ffffffff81076e17>] warn_slowpath_fmt+0x46/0x48
+ [<ffffffff814f7fca>] ? _raw_spin_lock+0x6c/0x73
+ [<ffffffff810cac87>] ? watchdog_overflow_callback+0x9b/0xd0
+ [<ffffffff810caca3>] watchdog_overflow_callback+0xb7/0xd0
+ [<ffffffff810f51bb>] __perf_event_overflow+0x11c/0x1fe
+ [<ffffffff810f298f>] ? perf_event_update_userpage+0x149/0x151
+ [<ffffffff810f2846>] ? perf_event_task_disable+0x7c/0x7c
+ [<ffffffff810f5b7c>] perf_event_overflow+0x14/0x16
+ [<ffffffff81046e02>] x86_pmu_handle_irq+0xcb/0x108
+ [<ffffffff814f9a6b>] perf_event_nmi_handler+0x46/0x91
+ [<ffffffff814fb2ba>] notifier_call_chain+0x79/0xa6
+ [<ffffffff814fb34d>] __atomic_notifier_call_chain+0x66/0x98
+ [<ffffffff814fb2e7>] ? notifier_call_chain+0xa6/0xa6
+ [<ffffffff814fb393>] atomic_notifier_call_chain+0x14/0x16
+ [<ffffffff814fb3c3>] notify_die+0x2e/0x30
+ [<ffffffff814f8f75>] do_nmi+0x7e/0x22b
+ [<ffffffff814f8bca>] nmi+0x1a/0x2c
+ [<ffffffff814fb130>] ? sub_preempt_count+0x4b/0xaa
+ <<EOE>> <IRQ> [<ffffffff812d44cc>] delay_tsc+0xac/0xd1
+ [<ffffffff812d4399>] __delay+0xf/0x11
+ [<ffffffff812d95d9>] do_raw_spin_lock+0xd2/0x13c
+ [<ffffffff814f813e>] _raw_spin_lock_irqsave+0x6b/0x85
+ [<ffffffff8106772a>] ? task_rq_lock+0x35/0x8d
+ [<ffffffff8106772a>] task_rq_lock+0x35/0x8d
+ [<ffffffff8106fe2f>] migrate_disable+0x65/0x12c
+ [<ffffffff81114e69>] pagefault_disable+0xe/0x1f
+ [<ffffffff81039c73>] dump_trace+0x21f/0x2e2
+ [<ffffffff8103ad79>] show_trace_log_lvl+0x54/0x5d
+ [<ffffffff8103ad97>] show_trace+0x15/0x17
+ [<ffffffff814f4f5f>] dump_stack+0x77/0x80
+ [<ffffffff812d94b0>] spin_bug+0x9c/0xa3
+ [<ffffffff81067745>] ? task_rq_lock+0x50/0x8d
+ [<ffffffff812d954e>] do_raw_spin_lock+0x47/0x13c
+ [<ffffffff814f7fbe>] _raw_spin_lock+0x60/0x73
+ [<ffffffff81067745>] ? task_rq_lock+0x50/0x8d
+ [<ffffffff81067745>] task_rq_lock+0x50/0x8d
+ [<ffffffff8106fe2f>] migrate_disable+0x65/0x12c
+ [<ffffffff81114e69>] pagefault_disable+0xe/0x1f
+ [<ffffffff81039c73>] dump_trace+0x21f/0x2e2
+ [<ffffffff8104369b>] save_stack_trace+0x2f/0x4c
+ [<ffffffff810a7848>] save_trace+0x3f/0xaf
+ [<ffffffff810aa2bd>] mark_lock+0x228/0x530
+ [<ffffffff810aac27>] __lock_acquire+0x662/0x1812
+ [<ffffffff8103dad4>] ? native_sched_clock+0x37/0x6d
+ [<ffffffff810a790e>] ? trace_hardirqs_off_caller+0x1f/0x99
+ [<ffffffff810693f6>] ? sched_rt_period_timer+0xbd/0x218
+ [<ffffffff810ac403>] lock_acquire+0x145/0x18a
+ [<ffffffff810693f6>] ? sched_rt_period_timer+0xbd/0x218
+ [<ffffffff814f7f9e>] _raw_spin_lock+0x40/0x73
+ [<ffffffff810693f6>] ? sched_rt_period_timer+0xbd/0x218
+ [<ffffffff810693f6>] sched_rt_period_timer+0xbd/0x218
+ [<ffffffff8109aa39>] __run_hrtimer+0x1e4/0x347
+ [<ffffffff81069339>] ? can_migrate_task.clone.82+0x14a/0x14a
+ [<ffffffff8109b97c>] hrtimer_interrupt+0xee/0x1d6
+ [<ffffffff814fb23d>] ? add_preempt_count+0xae/0xb2
+ [<ffffffff814ffb38>] smp_apic_timer_interrupt+0x85/0x98
+ [<ffffffff814fef13>] apic_timer_interrupt+0x13/0x20
+
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/n/tip-31keae8mkjiv8esq4rl76cib@git.kernel.org
+---
+ include/linux/uaccess.h | 30 ++++++++++++++++++++++++++++--
+ mm/memory.c | 2 ++
+ 2 files changed, 30 insertions(+), 2 deletions(-)
+
+Index: linux-stable/include/linux/uaccess.h
+===================================================================
+--- linux-stable.orig/include/linux/uaccess.h
++++ linux-stable/include/linux/uaccess.h
+@@ -8,8 +8,34 @@
+ * These routines enable/disable the pagefault handler in that
+ * it will not take any MM locks and go straight to the fixup table.
+ */
++static inline void raw_pagefault_disable(void)
++{
++ inc_preempt_count();
++ barrier();
++}
++
++static inline void raw_pagefault_enable(void)
++{
++ barrier();
++ dec_preempt_count();
++ barrier();
++ preempt_check_resched();
++}
++
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline void pagefault_disable(void)
++{
++ raw_pagefault_disable();
++}
++
++static inline void pagefault_enable(void)
++{
++ raw_pagefault_enable();
++}
++#else
+ extern void pagefault_disable(void);
+ extern void pagefault_enable(void);
++#endif
+
+ #ifndef ARCH_HAS_NOCACHE_UACCESS
+
+@@ -50,9 +76,9 @@ static inline unsigned long __copy_from_
+ mm_segment_t old_fs = get_fs(); \
+ \
+ set_fs(KERNEL_DS); \
+- pagefault_disable(); \
++ raw_pagefault_disable(); \
+ ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
+- pagefault_enable(); \
++ raw_pagefault_enable(); \
+ set_fs(old_fs); \
+ ret; \
+ })
+Index: linux-stable/mm/memory.c
+===================================================================
+--- linux-stable.orig/mm/memory.c
++++ linux-stable/mm/memory.c
+@@ -3484,6 +3484,7 @@ unlock:
+ return 0;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
+ void pagefault_disable(void)
+ {
+ inc_preempt_count();
+@@ -3512,6 +3513,7 @@ void pagefault_enable(void)
+ preempt_check_resched();
+ }
+ EXPORT_SYMBOL_GPL(pagefault_enable);
++#endif
+
+ /*
+ * By the time we get here, we already hold the mm semaphore
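
As a usage illustration of the raw_ variant introduced above (demo_peek_kernel() is a made-up helper, not part of the patch; the backtrace in the changelog shows why the non-raw pagefault_disable() cannot be used from such contexts on RT):

#include <linux/uaccess.h>

static unsigned long demo_peek_kernel(const unsigned long *addr)
{
        mm_segment_t old_fs = get_fs();
        unsigned long val = 0;

        /*
         * raw_pagefault_disable() only bumps the preempt count, so it
         * stays usable in NMI/debug paths where the RT version of
         * pagefault_disable() -- which ends up in migrate_disable()
         * and the runqueue locks, per the trace above -- must be
         * avoided.
         */
        set_fs(KERNEL_DS);
        raw_pagefault_disable();
        if (__copy_from_user_inatomic(&val, (const void __user *)addr,
                                      sizeof(val)))
                val = 0;        /* faulted: report 0 in this sketch */
        raw_pagefault_enable();
        set_fs(old_fs);

        return val;
}
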
diff --git a/peterz-srcu-crypto-chain.patch b/peterz-srcu-crypto-chain.patch
new file mode 100644
index 0000000..d105962
--- /dev/null
+++ b/peterz-srcu-crypto-chain.patch
@@ -0,0 +1,196 @@
+Subject: crypto: Convert crypto notifier chain to SRCU
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 05 Oct 2012 09:03:24 +0100
+
+The crypto notifier deadlocks on RT. Though this can be a real deadlock
+on mainline as well due to fifo fair rwsems.
+
+The involved parties here are:
+
+[ 82.172678] swapper/0 S 0000000000000001 0 1 0 0x00000000
+[ 82.172682] ffff88042f18fcf0 0000000000000046 ffff88042f18fc80 ffffffff81491238
+[ 82.172685] 0000000000011cc0 0000000000011cc0 ffff88042f18c040 ffff88042f18ffd8
+[ 82.172688] 0000000000011cc0 0000000000011cc0 ffff88042f18ffd8 0000000000011cc0
+[ 82.172689] Call Trace:
+[ 82.172697] [<ffffffff81491238>] ? _raw_spin_unlock_irqrestore+0x6c/0x7a
+[ 82.172701] [<ffffffff8148fd3f>] schedule+0x64/0x66
+[ 82.172704] [<ffffffff8148ec6b>] schedule_timeout+0x27/0xd0
+[ 82.172708] [<ffffffff81043c0c>] ? unpin_current_cpu+0x1a/0x6c
+[ 82.172713] [<ffffffff8106e491>] ? migrate_enable+0x12f/0x141
+[ 82.172716] [<ffffffff8148fbbd>] wait_for_common+0xbb/0x11f
+[ 82.172719] [<ffffffff810709f2>] ? try_to_wake_up+0x182/0x182
+[ 82.172722] [<ffffffff8148fc96>] wait_for_completion_interruptible+0x1d/0x2e
+[ 82.172726] [<ffffffff811debfd>] crypto_wait_for_test+0x49/0x6b
+[ 82.172728] [<ffffffff811ded32>] crypto_register_alg+0x53/0x5a
+[ 82.172730] [<ffffffff811ded6c>] crypto_register_algs+0x33/0x72
+[ 82.172734] [<ffffffff81ad7686>] ? aes_init+0x12/0x12
+[ 82.172737] [<ffffffff81ad76ea>] aesni_init+0x64/0x66
+[ 82.172741] [<ffffffff81000318>] do_one_initcall+0x7f/0x13b
+[ 82.172744] [<ffffffff81ac4d34>] kernel_init+0x199/0x22c
+[ 82.172747] [<ffffffff81ac44ef>] ? loglevel+0x31/0x31
+[ 82.172752] [<ffffffff814987c4>] kernel_thread_helper+0x4/0x10
+[ 82.172755] [<ffffffff81491574>] ? retint_restore_args+0x13/0x13
+[ 82.172759] [<ffffffff81ac4b9b>] ? start_kernel+0x3ca/0x3ca
+[ 82.172761] [<ffffffff814987c0>] ? gs_change+0x13/0x13
+
+[ 82.174186] cryptomgr_test S 0000000000000001 0 41 2 0x00000000
+[ 82.174189] ffff88042c971980 0000000000000046 ffffffff81d74830 0000000000000292
+[ 82.174192] 0000000000011cc0 0000000000011cc0 ffff88042c96eb80 ffff88042c971fd8
+[ 82.174195] 0000000000011cc0 0000000000011cc0 ffff88042c971fd8 0000000000011cc0
+[ 82.174195] Call Trace:
+[ 82.174198] [<ffffffff8148fd3f>] schedule+0x64/0x66
+[ 82.174201] [<ffffffff8148ec6b>] schedule_timeout+0x27/0xd0
+[ 82.174204] [<ffffffff81043c0c>] ? unpin_current_cpu+0x1a/0x6c
+[ 82.174206] [<ffffffff8106e491>] ? migrate_enable+0x12f/0x141
+[ 82.174209] [<ffffffff8148fbbd>] wait_for_common+0xbb/0x11f
+[ 82.174212] [<ffffffff810709f2>] ? try_to_wake_up+0x182/0x182
+[ 82.174215] [<ffffffff8148fc96>] wait_for_completion_interruptible+0x1d/0x2e
+[ 82.174218] [<ffffffff811e4883>] cryptomgr_notify+0x280/0x385
+[ 82.174221] [<ffffffff814943de>] notifier_call_chain+0x6b/0x98
+[ 82.174224] [<ffffffff8108a11c>] ? rt_down_read+0x10/0x12
+[ 82.174227] [<ffffffff810677cd>] __blocking_notifier_call_chain+0x70/0x8d
+[ 82.174230] [<ffffffff810677fe>] blocking_notifier_call_chain+0x14/0x16
+[ 82.174234] [<ffffffff811dd272>] crypto_probing_notify+0x24/0x50
+[ 82.174236] [<ffffffff811dd7a1>] crypto_alg_mod_lookup+0x3e/0x74
+[ 82.174238] [<ffffffff811dd949>] crypto_alloc_base+0x36/0x8f
+[ 82.174241] [<ffffffff811e9408>] cryptd_alloc_ablkcipher+0x6e/0xb5
+[ 82.174243] [<ffffffff811dd591>] ? kzalloc.clone.5+0xe/0x10
+[ 82.174246] [<ffffffff8103085d>] ablk_init_common+0x1d/0x38
+[ 82.174249] [<ffffffff8103852a>] ablk_ecb_init+0x15/0x17
+[ 82.174251] [<ffffffff811dd8c6>] __crypto_alloc_tfm+0xc7/0x114
+[ 82.174254] [<ffffffff811e0caa>] ? crypto_lookup_skcipher+0x1f/0xe4
+[ 82.174256] [<ffffffff811e0dcf>] crypto_alloc_ablkcipher+0x60/0xa5
+[ 82.174258] [<ffffffff811e5bde>] alg_test_skcipher+0x24/0x9b
+[ 82.174261] [<ffffffff8106d96d>] ? finish_task_switch+0x3f/0xfa
+[ 82.174263] [<ffffffff811e6b8e>] alg_test+0x16f/0x1d7
+[ 82.174267] [<ffffffff811e45ac>] ? cryptomgr_probe+0xac/0xac
+[ 82.174269] [<ffffffff811e45d8>] cryptomgr_test+0x2c/0x47
+[ 82.174272] [<ffffffff81061161>] kthread+0x7e/0x86
+[ 82.174275] [<ffffffff8106d9dd>] ? finish_task_switch+0xaf/0xfa
+[ 82.174278] [<ffffffff814987c4>] kernel_thread_helper+0x4/0x10
+[ 82.174281] [<ffffffff81491574>] ? retint_restore_args+0x13/0x13
+[ 82.174284] [<ffffffff810610e3>] ? __init_kthread_worker+0x8c/0x8c
+[ 82.174287] [<ffffffff814987c0>] ? gs_change+0x13/0x13
+
+[ 82.174329] cryptomgr_probe D 0000000000000002 0 47 2 0x00000000
+[ 82.174332] ffff88042c991b70 0000000000000046 ffff88042c991bb0 0000000000000006
+[ 82.174335] 0000000000011cc0 0000000000011cc0 ffff88042c98ed00 ffff88042c991fd8
+[ 82.174338] 0000000000011cc0 0000000000011cc0 ffff88042c991fd8 0000000000011cc0
+[ 82.174338] Call Trace:
+[ 82.174342] [<ffffffff8148fd3f>] schedule+0x64/0x66
+[ 82.174344] [<ffffffff814901ad>] __rt_mutex_slowlock+0x85/0xbe
+[ 82.174347] [<ffffffff814902d2>] rt_mutex_slowlock+0xec/0x159
+[ 82.174351] [<ffffffff81089c4d>] rt_mutex_fastlock.clone.8+0x29/0x2f
+[ 82.174353] [<ffffffff81490372>] rt_mutex_lock+0x33/0x37
+[ 82.174356] [<ffffffff8108a0f2>] __rt_down_read+0x50/0x5a
+[ 82.174358] [<ffffffff8108a11c>] ? rt_down_read+0x10/0x12
+[ 82.174360] [<ffffffff8108a11c>] rt_down_read+0x10/0x12
+[ 82.174363] [<ffffffff810677b5>] __blocking_notifier_call_chain+0x58/0x8d
+[ 82.174366] [<ffffffff810677fe>] blocking_notifier_call_chain+0x14/0x16
+[ 82.174369] [<ffffffff811dd272>] crypto_probing_notify+0x24/0x50
+[ 82.174372] [<ffffffff811debd6>] crypto_wait_for_test+0x22/0x6b
+[ 82.174374] [<ffffffff811decd3>] crypto_register_instance+0xb4/0xc0
+[ 82.174377] [<ffffffff811e9b76>] cryptd_create+0x378/0x3b6
+[ 82.174379] [<ffffffff811de512>] ? __crypto_lookup_template+0x5b/0x63
+[ 82.174382] [<ffffffff811e4545>] cryptomgr_probe+0x45/0xac
+[ 82.174385] [<ffffffff811e4500>] ? crypto_alloc_pcomp+0x1b/0x1b
+[ 82.174388] [<ffffffff81061161>] kthread+0x7e/0x86
+[ 82.174391] [<ffffffff8106d9dd>] ? finish_task_switch+0xaf/0xfa
+[ 82.174394] [<ffffffff814987c4>] kernel_thread_helper+0x4/0x10
+[ 82.174398] [<ffffffff81491574>] ? retint_restore_args+0x13/0x13
+[ 82.174401] [<ffffffff810610e3>] ? __init_kthread_worker+0x8c/0x8c
+[ 82.174403] [<ffffffff814987c0>] ? gs_change+0x13/0x13
+
+cryptomgr_test spawns the cryptomgr_probe thread from the notifier
+call. The probe thread fires the same notifier as the test thread and
+deadlocks on the rwsem on RT.
+
+Now this is a potential deadlock in mainline as well, because we have
+fifo fair rwsems. If another thread blocks with a down_write() on the
+notifier chain before the probe thread issues the down_read(), it will
+block the probe thread and the whole party is deadlocked.
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ crypto/algapi.c | 5 +++--
+ crypto/api.c | 6 +++---
+ crypto/internal.h | 4 ++--
+ 3 files changed, 8 insertions(+), 7 deletions(-)
+
+Index: linux-stable/crypto/algapi.c
+===================================================================
+--- linux-stable.orig/crypto/algapi.c
++++ linux-stable/crypto/algapi.c
+@@ -683,13 +683,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+
+ int crypto_register_notifier(struct notifier_block *nb)
+ {
+- return blocking_notifier_chain_register(&crypto_chain, nb);
++ return srcu_notifier_chain_register(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_register_notifier);
+
+ int crypto_unregister_notifier(struct notifier_block *nb)
+ {
+- return blocking_notifier_chain_unregister(&crypto_chain, nb);
++ return srcu_notifier_chain_unregister(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
+
+@@ -956,6 +956,7 @@ EXPORT_SYMBOL_GPL(crypto_xor);
+
+ static int __init crypto_algapi_init(void)
+ {
++ srcu_init_notifier_head(&crypto_chain);
+ crypto_init_proc();
+ return 0;
+ }
+Index: linux-stable/crypto/api.c
+===================================================================
+--- linux-stable.orig/crypto/api.c
++++ linux-stable/crypto/api.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
+ DECLARE_RWSEM(crypto_alg_sem);
+ EXPORT_SYMBOL_GPL(crypto_alg_sem);
+
+-BLOCKING_NOTIFIER_HEAD(crypto_chain);
++struct srcu_notifier_head crypto_chain;
+ EXPORT_SYMBOL_GPL(crypto_chain);
+
+ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
+@@ -237,10 +237,10 @@ int crypto_probing_notify(unsigned long
+ {
+ int ok;
+
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ if (ok == NOTIFY_DONE) {
+ request_module("cryptomgr");
+- ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++ ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ return ok;
+Index: linux-stable/crypto/internal.h
+===================================================================
+--- linux-stable.orig/crypto/internal.h
++++ linux-stable/crypto/internal.h
+@@ -48,7 +48,7 @@ struct crypto_larval {
+
+ extern struct list_head crypto_alg_list;
+ extern struct rw_semaphore crypto_alg_sem;
+-extern struct blocking_notifier_head crypto_chain;
++extern struct srcu_notifier_head crypto_chain;
+
+ #ifdef CONFIG_PROC_FS
+ void __init crypto_init_proc(void);
+@@ -136,7 +136,7 @@ static inline int crypto_is_moribund(str
+
+ static inline void crypto_notify(unsigned long val, void *v)
+ {
+- blocking_notifier_call_chain(&crypto_chain, val, v);
++ srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ #endif /* _CRYPTO_INTERNAL_H */
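
For reference, the generic SRCU notifier pattern the crypto chain is being switched to (all demo_* names are invented for illustration and are not part of the patch):

#include <linux/init.h>
#include <linux/notifier.h>

static struct srcu_notifier_head demo_chain;

static int demo_event(struct notifier_block *nb, unsigned long val, void *v)
{
        return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_event,
};

static int __init demo_init(void)
{
        /*
         * Unlike BLOCKING_NOTIFIER_HEAD(), an SRCU head needs runtime
         * initialization -- hence the srcu_init_notifier_head() call
         * added to crypto_algapi_init() above.
         */
        srcu_init_notifier_head(&demo_chain);
        srcu_notifier_chain_register(&demo_chain, &demo_nb);

        /*
         * Callers traverse the chain under srcu_read_lock() instead of
         * down_read() on an rwsem, which is what removes the recursive
         * down_read() deadlock described in the changelog.
         */
        srcu_notifier_call_chain(&demo_chain, 0, NULL);
        return 0;
}
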
diff --git a/pid-h-include-atomic-h.patch b/pid-h-include-atomic-h.patch
new file mode 100644
index 0000000..6be9892
--- /dev/null
+++ b/pid-h-include-atomic-h.patch
@@ -0,0 +1,21 @@
+Subject: rwsem-include-fix.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Jul 2011 21:24:27 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/pid.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/include/linux/pid.h
+===================================================================
+--- linux-stable.orig/include/linux/pid.h
++++ linux-stable/include/linux/pid.h
+@@ -2,6 +2,7 @@
+ #define _LINUX_PID_H
+
+ #include <linux/rcupdate.h>
++#include <linux/atomic.h>
+
+ enum pid_type
+ {
diff --git a/ping-sysrq.patch b/ping-sysrq.patch
new file mode 100644
index 0000000..c72785a
--- /dev/null
+++ b/ping-sysrq.patch
@@ -0,0 +1,129 @@
+Subject: net: sysrq via icmp
+From: Carsten Emde <C.Emde@osadl.org>
+Date: Tue, 19 Jul 2011 13:51:17 +0100
+
+There are (probably rare) situations when a system has crashed and the system
+console becomes unresponsive but the network ICMP layer is still alive.
+Wouldn't it be wonderful if we then could submit a sysrq command via ping?
+
+This patch provides this facility. Please consult the updated documentation
+Documentation/sysrq.txt for details.
+
+Signed-off-by: Carsten Emde <C.Emde@osadl.org>
+
+---
+ Documentation/sysrq.txt | 11 +++++++++--
+ include/net/netns/ipv4.h | 1 +
+ net/ipv4/icmp.c | 30 ++++++++++++++++++++++++++++++
+ net/ipv4/sysctl_net_ipv4.c | 7 +++++++
+ 4 files changed, 47 insertions(+), 2 deletions(-)
+
+Index: linux-stable/Documentation/sysrq.txt
+===================================================================
+--- linux-stable.orig/Documentation/sysrq.txt
++++ linux-stable/Documentation/sysrq.txt
+@@ -57,10 +57,17 @@ On PowerPC - Press 'ALT - Print Screen (
+ On other - If you know of the key combos for other architectures, please
+ let me know so I can add them to this section.
+
+-On all - write a character to /proc/sysrq-trigger. e.g.:
+-
++On all - write a character to /proc/sysrq-trigger, e.g.:
+ echo t > /proc/sysrq-trigger
+
++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
++ Send an ICMP echo request with this pattern plus the particular
++ SysRq command key. Example:
++ # ping -c1 -s57 -p0102030468
++ will trigger the SysRq-H (help) command.
++
++
+ * What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'b' - Will immediately reboot the system without syncing or unmounting
+Index: linux-stable/include/net/netns/ipv4.h
+===================================================================
+--- linux-stable.orig/include/net/netns/ipv4.h
++++ linux-stable/include/net/netns/ipv4.h
+@@ -57,6 +57,7 @@ struct netns_ipv4 {
+
+ int sysctl_icmp_echo_ignore_all;
+ int sysctl_icmp_echo_ignore_broadcasts;
++ int sysctl_icmp_echo_sysrq;
+ int sysctl_icmp_ignore_bogus_error_responses;
+ int sysctl_icmp_ratelimit;
+ int sysctl_icmp_ratemask;
+Index: linux-stable/net/ipv4/icmp.c
+===================================================================
+--- linux-stable.orig/net/ipv4/icmp.c
++++ linux-stable/net/ipv4/icmp.c
+@@ -69,6 +69,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/kernel.h>
+ #include <linux/fcntl.h>
++#include <linux/sysrq.h>
+ #include <linux/socket.h>
+ #include <linux/in.h>
+ #include <linux/inet.h>
+@@ -767,6 +768,30 @@ static void icmp_redirect(struct sk_buff
+ }
+
+ /*
++ * 32bit and 64bit have different timestamp length, so we check for
++ * the cookie at offset 20 and verify it is repeated at offset 50
++ */
++#define CO_POS0 20
++#define CO_POS1 50
++#define CO_SIZE sizeof(int)
++#define ICMP_SYSRQ_SIZE 57
++
++/*
++ * We got an ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
++ * pattern and if it matches send the next byte as a trigger to sysrq.
++ */
++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
++{
++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
++ char *p = skb->data;
++
++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
++ handle_sysrq(p[CO_POS0 + CO_SIZE]);
++}
++
++/*
+ * Handle ICMP_ECHO ("ping") requests.
+ *
+ * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
+@@ -793,6 +818,11 @@ static void icmp_echo(struct sk_buff *sk
+ icmp_param.data_len = skb->len;
+ icmp_param.head_len = sizeof(struct icmphdr);
+ icmp_reply(&icmp_param, skb);
++
++ if (skb->len == ICMP_SYSRQ_SIZE &&
++ net->ipv4.sysctl_icmp_echo_sysrq) {
++ icmp_check_sysrq(net, skb);
++ }
+ }
+ }
+
+Index: linux-stable/net/ipv4/sysctl_net_ipv4.c
+===================================================================
+--- linux-stable.orig/net/ipv4/sysctl_net_ipv4.c
++++ linux-stable/net/ipv4/sysctl_net_ipv4.c
+@@ -756,6 +756,13 @@ static struct ctl_table ipv4_net_table[]
+ .proc_handler = proc_dointvec
+ },
+ {
++ .procname = "icmp_echo_sysrq",
++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {
+ .procname = "icmp_ignore_bogus_error_responses",
+ .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
+ .maxlen = sizeof(int),
diff --git a/posix-timers-avoid-wakeups-when-no-timers-are-active.patch b/posix-timers-avoid-wakeups-when-no-timers-are-active.patch
new file mode 100644
index 0000000..edf4ecc
--- /dev/null
+++ b/posix-timers-avoid-wakeups-when-no-timers-are-active.patch
@@ -0,0 +1,59 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:44 -0500
+Subject: posix-timers: Avoid wakeups when no timers are active
+
+Waking the thread even when no timers are scheduled is useless.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/posix-cpu-timers.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+Index: linux-stable/kernel/posix-cpu-timers.c
+===================================================================
+--- linux-stable.orig/kernel/posix-cpu-timers.c
++++ linux-stable/kernel/posix-cpu-timers.c
+@@ -1408,6 +1408,21 @@ wait_to_die:
+ return 0;
+ }
+
++static inline int __fastpath_timer_check(struct task_struct *tsk)
++{
++ /* tsk == current, ensure it is safe to use ->signal/sighand */
++ if (unlikely(tsk->exit_state))
++ return 0;
++
++ if (!task_cputime_zero(&tsk->cputime_expires))
++ return 1;
++
++ if (!task_cputime_zero(&tsk->signal->cputime_expires))
++ return 1;
++
++ return 0;
++}
++
+ void run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ unsigned long cpu = smp_processor_id();
+@@ -1420,7 +1435,7 @@ void run_posix_cpu_timers(struct task_st
+ tasklist = per_cpu(posix_timer_tasklist, cpu);
+
+ /* check to see if we're already queued */
+- if (!tsk->posix_timer_list) {
++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
+ get_task_struct(tsk);
+ if (tasklist) {
+ tsk->posix_timer_list = tasklist;
+@@ -1432,9 +1447,9 @@ void run_posix_cpu_timers(struct task_st
+ tsk->posix_timer_list = tsk;
+ }
+ per_cpu(posix_timer_tasklist, cpu) = tsk;
++
++ wake_up_process(per_cpu(posix_timer_task, cpu));
+ }
+- /* XXX signal the thread somehow */
+- wake_up_process(per_cpu(posix_timer_task, cpu));
+ }
+
+ /*
diff --git a/posix-timers-no-broadcast.patch b/posix-timers-no-broadcast.patch
new file mode 100644
index 0000000..46f0801
--- /dev/null
+++ b/posix-timers-no-broadcast.patch
@@ -0,0 +1,35 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:29:20 -0500
+Subject: posix-timers: Prevent broadcast signals
+
+Posix timers should not send broadcast signals and kernel-only
+signals. Prevent it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/posix-timers.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+Index: linux-stable/kernel/posix-timers.c
+===================================================================
+--- linux-stable.orig/kernel/posix-timers.c
++++ linux-stable/kernel/posix-timers.c
+@@ -439,6 +439,7 @@ static enum hrtimer_restart posix_timer_
+ static struct pid *good_sigevent(sigevent_t * event)
+ {
+ struct task_struct *rtn = current->group_leader;
++ int sig = event->sigev_signo;
+
+ if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
+ (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
+@@ -447,7 +448,8 @@ static struct pid *good_sigevent(sigeven
+ return NULL;
+
+ if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
+- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
++ sig_kernel_coredump(sig)))
+ return NULL;
+
+ return task_pid(rtn);
diff --git a/posix-timers-shorten-cpu-timers-thread.patch b/posix-timers-shorten-cpu-timers-thread.patch
new file mode 100644
index 0000000..f0b8a69
--- /dev/null
+++ b/posix-timers-shorten-cpu-timers-thread.patch
@@ -0,0 +1,28 @@
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Fri, 3 Jul 2009 08:30:00 -0500
+Subject: posix-timers: Shorten posix_cpu_timers/<CPU> kernel thread names
+
+Shorten the softirq kernel thread names because they always overflow the
+limited comm length, appearing as "posix_cpu_timer" CPU# times.
+
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/posix-cpu-timers.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/kernel/posix-cpu-timers.c
+===================================================================
+--- linux-stable.orig/kernel/posix-cpu-timers.c
++++ linux-stable/kernel/posix-cpu-timers.c
+@@ -1451,7 +1451,7 @@ static int posix_cpu_thread_call(struct
+ switch (action) {
+ case CPU_UP_PREPARE:
+ p = kthread_create(posix_cpu_timers_thread, hcpu,
+- "posix_cpu_timers/%d",cpu);
++ "posixcputmr/%d",cpu);
+ if (IS_ERR(p))
+ return NOTIFY_BAD;
+ p->flags |= PF_NOFREEZE;
diff --git a/posix-timers-thread-posix-cpu-timers-on-rt.patch b/posix-timers-thread-posix-cpu-timers-on-rt.patch
new file mode 100644
index 0000000..ffb6d43
--- /dev/null
+++ b/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -0,0 +1,313 @@
+From: John Stultz <johnstul@us.ibm.com>
+Date: Fri, 3 Jul 2009 08:29:58 -0500
+Subject: posix-timers: thread posix-cpu-timers on -rt
+
+posix-cpu-timer code takes non-rt-safe locks in hard irq
+context. Move it to a thread.
+
+[ 3.0 fixes from Peter Zijlstra <peterz@infradead.org> ]
+
+Signed-off-by: John Stultz <johnstul@us.ibm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/init_task.h | 7 +
+ include/linux/sched.h | 3
+ init/main.c | 1
+ kernel/fork.c | 3
+ kernel/posix-cpu-timers.c | 182 ++++++++++++++++++++++++++++++++++++++++++++--
+ 5 files changed, 190 insertions(+), 6 deletions(-)
+
+Index: linux-stable/include/linux/init_task.h
+===================================================================
+--- linux-stable.orig/include/linux/init_task.h
++++ linux-stable/include/linux/init_task.h
+@@ -141,6 +141,12 @@ extern struct task_group root_task_group
+ # define INIT_PERF_EVENTS(tsk)
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++# define INIT_TIMER_LIST .posix_timer_list = NULL,
++#else
++# define INIT_TIMER_LIST
++#endif
++
+ #define INIT_TASK_COMM "swapper"
+
+ /*
+@@ -196,6 +202,7 @@ extern struct task_group root_task_group
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
++ INIT_TIMER_LIST \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1373,6 +1373,9 @@ struct task_struct {
+
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct task_struct *posix_timer_list;
++#endif
+
+ /* process credentials */
+ const struct cred __rcu *real_cred; /* objective and real subjective task
+Index: linux-stable/init/main.c
+===================================================================
+--- linux-stable.orig/init/main.c
++++ linux-stable/init/main.c
+@@ -69,6 +69,7 @@
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
+ #include <linux/file.h>
++#include <linux/posix-timers.h>
+
+ #include <asm/io.h>
+ #include <asm/bugs.h>
+Index: linux-stable/kernel/fork.c
+===================================================================
+--- linux-stable.orig/kernel/fork.c
++++ linux-stable/kernel/fork.c
+@@ -1130,6 +1130,9 @@ void mm_init_owner(struct mm_struct *mm,
+ */
+ static void posix_cpu_timers_init(struct task_struct *tsk)
+ {
++#ifdef CONFIG_PREEMPT_RT_BASE
++ tsk->posix_timer_list = NULL;
++#endif
+ tsk->cputime_expires.prof_exp = 0;
+ tsk->cputime_expires.virt_exp = 0;
+ tsk->cputime_expires.sched_exp = 0;
+Index: linux-stable/kernel/posix-cpu-timers.c
+===================================================================
+--- linux-stable.orig/kernel/posix-cpu-timers.c
++++ linux-stable/kernel/posix-cpu-timers.c
+@@ -682,7 +682,7 @@ static int posix_cpu_timer_set(struct k_
+ /*
+ * Disarm any old timer after extracting its expiry time.
+ */
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+
+ ret = 0;
+ old_incr = timer->it.cpu.incr;
+@@ -1198,7 +1198,7 @@ void posix_cpu_timer_schedule(struct k_i
+ /*
+ * Now re-arm for the new expiry time.
+ */
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+ arm_timer(timer);
+ spin_unlock(&p->sighand->siglock);
+
+@@ -1262,10 +1262,11 @@ static inline int fastpath_timer_check(s
+ sig = tsk->signal;
+ if (sig->cputimer.running) {
+ struct task_cputime group_sample;
++ unsigned long flags;
+
+- raw_spin_lock(&sig->cputimer.lock);
++ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
+ group_sample = sig->cputimer.cputime;
+- raw_spin_unlock(&sig->cputimer.lock);
++ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
+
+ if (task_cputime_expired(&group_sample, &sig->cputime_expires))
+ return 1;
+@@ -1279,13 +1280,13 @@ static inline int fastpath_timer_check(s
+ * already updated our counts. We need to check if any timers fire now.
+ * Interrupts are disabled.
+ */
+-void run_posix_cpu_timers(struct task_struct *tsk)
++static void __run_posix_cpu_timers(struct task_struct *tsk)
+ {
+ LIST_HEAD(firing);
+ struct k_itimer *timer, *next;
+ unsigned long flags;
+
+- BUG_ON(!irqs_disabled());
++ BUG_ON_NONRT(!irqs_disabled());
+
+ /*
+ * The fast path checks that there are no expired thread or thread
+@@ -1343,6 +1344,175 @@ void run_posix_cpu_timers(struct task_st
+ }
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++#include <linux/kthread.h>
++#include <linux/cpu.h>
++DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++
++static int posix_cpu_timers_thread(void *data)
++{
++ int cpu = (long)data;
++
++ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
++
++ while (!kthread_should_stop()) {
++ struct task_struct *tsk = NULL;
++ struct task_struct *next = NULL;
++
++ if (cpu_is_offline(cpu))
++ goto wait_to_die;
++
++ /* grab task list */
++ raw_local_irq_disable();
++ tsk = per_cpu(posix_timer_tasklist, cpu);
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++ raw_local_irq_enable();
++
++ /* it's possible the list is empty, just return */
++ if (!tsk) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule();
++ __set_current_state(TASK_RUNNING);
++ continue;
++ }
++
++ /* Process task list */
++ while (1) {
++ /* save next */
++ next = tsk->posix_timer_list;
++
++ /* run the task timers, clear its ptr and
++ * unreference it
++ */
++ __run_posix_cpu_timers(tsk);
++ tsk->posix_timer_list = NULL;
++ put_task_struct(tsk);
++
++ /* check if this is the last on the list */
++ if (next == tsk)
++ break;
++ tsk = next;
++ }
++ }
++ return 0;
++
++wait_to_die:
++ /* Wait for kthread_stop */
++ set_current_state(TASK_INTERRUPTIBLE);
++ while (!kthread_should_stop()) {
++ schedule();
++ set_current_state(TASK_INTERRUPTIBLE);
++ }
++ __set_current_state(TASK_RUNNING);
++ return 0;
++}
++
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ unsigned long cpu = smp_processor_id();
++ struct task_struct *tasklist;
++
++ BUG_ON(!irqs_disabled());
++ if(!per_cpu(posix_timer_task, cpu))
++ return;
++ /* get per-cpu references */
++ tasklist = per_cpu(posix_timer_tasklist, cpu);
++
++ /* check to see if we're already queued */
++ if (!tsk->posix_timer_list) {
++ get_task_struct(tsk);
++ if (tasklist) {
++ tsk->posix_timer_list = tasklist;
++ } else {
++ /*
++ * The list is terminated by a self-pointing
++ * task_struct
++ */
++ tsk->posix_timer_list = tsk;
++ }
++ per_cpu(posix_timer_tasklist, cpu) = tsk;
++ }
++ /* XXX signal the thread somehow */
++ wake_up_process(per_cpu(posix_timer_task, cpu));
++}
++
++/*
++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
++ * Here we can start up the necessary migration thread for the new CPU.
++ */
++static int posix_cpu_thread_call(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int cpu = (long)hcpu;
++ struct task_struct *p;
++ struct sched_param param;
++
++ switch (action) {
++ case CPU_UP_PREPARE:
++ p = kthread_create(posix_cpu_timers_thread, hcpu,
++ "posix_cpu_timers/%d",cpu);
++ if (IS_ERR(p))
++ return NOTIFY_BAD;
++ p->flags |= PF_NOFREEZE;
++ kthread_bind(p, cpu);
++ /* Must be high prio to avoid getting starved */
++ param.sched_priority = MAX_RT_PRIO-1;
++ sched_setscheduler(p, SCHED_FIFO, &param);
++ per_cpu(posix_timer_task,cpu) = p;
++ break;
++ case CPU_ONLINE:
++ /* Strictly unneccessary, as first user will wake it. */
++ wake_up_process(per_cpu(posix_timer_task,cpu));
++ break;
++#ifdef CONFIG_HOTPLUG_CPU
++ case CPU_UP_CANCELED:
++ /* Unbind it from offline cpu so it can run. Fall thru. */
++ kthread_bind(per_cpu(posix_timer_task, cpu),
++ cpumask_any(cpu_online_mask));
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++ case CPU_DEAD:
++ kthread_stop(per_cpu(posix_timer_task,cpu));
++ per_cpu(posix_timer_task,cpu) = NULL;
++ break;
++#endif
++ }
++ return NOTIFY_OK;
++}
++
++/* Register at highest priority so that task migration (migrate_all_tasks)
++ * happens before everything else.
++ */
++static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
++ .notifier_call = posix_cpu_thread_call,
++ .priority = 10
++};
++
++static int __init posix_cpu_thread_init(void)
++{
++ void *hcpu = (void *)(long)smp_processor_id();
++ /* Start one for boot CPU. */
++ unsigned long cpu;
++
++ /* init the per-cpu posix_timer_tasklets */
++ for_each_possible_cpu(cpu)
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
++ register_cpu_notifier(&posix_cpu_thread_notifier);
++ return 0;
++}
++early_initcall(posix_cpu_thread_init);
++#else /* CONFIG_PREEMPT_RT_BASE */
++void run_posix_cpu_timers(struct task_struct *tsk)
++{
++ __run_posix_cpu_timers(tsk);
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ /*
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
+ * The tsk->sighand->siglock must be held by the caller.
diff --git a/power-disable-highmem-on-rt.patch b/power-disable-highmem-on-rt.patch
new file mode 100644
index 0000000..481bfaf
--- /dev/null
+++ b/power-disable-highmem-on-rt.patch
@@ -0,0 +1,22 @@
+Subject: power-disable-highmem-on-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Jul 2011 17:08:34 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/powerpc/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/arch/powerpc/Kconfig
+===================================================================
+--- linux-stable.orig/arch/powerpc/Kconfig
++++ linux-stable/arch/powerpc/Kconfig
+@@ -278,7 +278,7 @@ menu "Kernel options"
+
+ config HIGHMEM
+ bool "High memory support"
+- depends on PPC32
++ depends on PPC32 && !PREEMPT_RT_FULL
+
+ source kernel/Kconfig.hz
+ source kernel/Kconfig.preempt
diff --git a/power-use-generic-rwsem-on-rt.patch b/power-use-generic-rwsem-on-rt.patch
new file mode 100644
index 0000000..91771f6
--- /dev/null
+++ b/power-use-generic-rwsem-on-rt.patch
@@ -0,0 +1,23 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/powerpc/Kconfig | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: linux-stable/arch/powerpc/Kconfig
+===================================================================
+--- linux-stable.orig/arch/powerpc/Kconfig
++++ linux-stable/arch/powerpc/Kconfig
+@@ -60,10 +60,11 @@ config LOCKDEP_SUPPORT
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
++ default y if PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
+
+ config GENERIC_LOCKBREAK
+ bool
diff --git a/ppc-mark-low-level-handlers-no-thread.patch b/ppc-mark-low-level-handlers-no-thread.patch
new file mode 100644
index 0000000..cd00a61
--- /dev/null
+++ b/ppc-mark-low-level-handlers-no-thread.patch
@@ -0,0 +1,39 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Jun 2012 19:53:17 +0200
+Subject: powerpc: Mark low level irq handlers NO_THREAD
+
+These low level handlers cannot be threaded. Mark them IRQF_NO_THREAD.
+
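+For handlers registered through request_irq() instead of a static
+struct irqaction, the equivalent (illustrative only, not part of this
+patch) is to pass the flag at setup time:
+
+  ret = request_irq(irq, timebase_interrupt, IRQF_NO_THREAD,
+                    "tbint", NULL);
+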
+Reported-by: leroy christophe <christophe.leroy@c-s.fr>
+Tested-by: leroy christophe <christophe.leroy@c-s.fr>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/powerpc/platforms/8xx/m8xx_setup.c | 1 +
+ arch/powerpc/sysdev/cpm1.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+Index: linux-stable/arch/powerpc/platforms/8xx/m8xx_setup.c
+===================================================================
+--- linux-stable.orig/arch/powerpc/platforms/8xx/m8xx_setup.c
++++ linux-stable/arch/powerpc/platforms/8xx/m8xx_setup.c
+@@ -43,6 +43,7 @@ static irqreturn_t timebase_interrupt(in
+
+ static struct irqaction tbint_irqaction = {
+ .handler = timebase_interrupt,
++ .flags = IRQF_NO_THREAD,
+ .name = "tbint",
+ };
+
+Index: linux-stable/arch/powerpc/sysdev/cpm1.c
+===================================================================
+--- linux-stable.orig/arch/powerpc/sysdev/cpm1.c
++++ linux-stable/arch/powerpc/sysdev/cpm1.c
+@@ -120,6 +120,7 @@ static irqreturn_t cpm_error_interrupt(i
+
+ static struct irqaction cpm_error_irqaction = {
+ .handler = cpm_error_interrupt,
++ .flags = IRQF_NO_THREAD,
+ .name = "error",
+ };
+
diff --git a/preempt-nort-rt-variants.patch b/preempt-nort-rt-variants.patch
new file mode 100644
index 0000000..94549dd
--- /dev/null
+++ b/preempt-nort-rt-variants.patch
@@ -0,0 +1,54 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 24 Jul 2009 12:38:56 +0200
+Subject: preempt: Provide preempt_*_(no)rt variants
+
+RT needs a few preempt_disable/enable points which are not necessary
+otherwise. Implement variants to avoid #ifdeffery.
+
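+As an illustration (not part of this patch; the function and per-cpu
+counter names are invented), a caller that only needs to stay
+non-preemptible on mainline can use the _nort() pair instead of an
+#ifdef block:
+
+  static DEFINE_PER_CPU(unsigned long, nr_events);
+
+  static void stats_account(void)
+  {
+          preempt_disable_nort();  /* preempt_disable() on !RT, no-op on RT */
+          __this_cpu_inc(nr_events);
+          preempt_enable_nort();   /* preempt_enable() on !RT, no-op on RT */
+  }
+
+Conversely, preempt_disable_rt()/preempt_enable_rt() mark sections that
+only have to be non-preemptible when PREEMPT_RT_FULL is enabled.
+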
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/preempt.h | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+Index: linux-stable/include/linux/preempt.h
+===================================================================
+--- linux-stable.orig/include/linux/preempt.h
++++ linux-stable/include/linux/preempt.h
+@@ -54,11 +54,15 @@ do { \
+ dec_preempt_count(); \
+ } while (0)
+
+-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++#ifndef CONFIG_PREEMPT_RT_BASE
++# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++#else
++# define preempt_enable_no_resched() preempt_enable()
++#endif
+
+ #define preempt_enable() \
+ do { \
+- preempt_enable_no_resched(); \
++ sched_preempt_enable_no_resched(); \
+ barrier(); \
+ preempt_check_resched(); \
+ } while (0)
+@@ -104,6 +108,18 @@ do { \
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define preempt_disable_rt() preempt_disable()
++# define preempt_enable_rt() preempt_enable()
++# define preempt_disable_nort() do { } while (0)
++# define preempt_enable_nort() do { } while (0)
++#else
++# define preempt_disable_rt() do { } while (0)
++# define preempt_enable_rt() do { } while (0)
++# define preempt_disable_nort() preempt_disable()
++# define preempt_enable_nort() preempt_enable()
++#endif
++
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+
+ struct preempt_notifier;
diff --git a/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
new file mode 100644
index 0000000..6abf87e
--- /dev/null
+++ b/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -0,0 +1,37 @@
+Subject: printk: 'force_early_printk' boot param to help with debugging
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 02 Sep 2011 14:41:29 +0200
+
+Subject: printk: 'force_early_printk' boot param to help with debugging
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Fri Sep 02 14:29:33 CEST 2011
+
+Gives me an option to screw printk and actually see what the machine
+says.
+
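+Example usage (assuming CONFIG_EARLY_PRINTK and a working early console
+such as earlyprintk=serial): adding the parameter to the kernel command
+line routes all printk output through early_printk() from boot on:
+
+  earlyprintk=serial,ttyS0,115200 force_early_printk
+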
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/1314967289.1301.11.camel@twins
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org
+---
+ kernel/printk.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+Index: linux-stable/kernel/printk.c
+===================================================================
+--- linux-stable.orig/kernel/printk.c
++++ linux-stable/kernel/printk.c
+@@ -1255,6 +1255,13 @@ asmlinkage void early_printk(const char
+ */
+ static bool __read_mostly printk_killswitch;
+
++static int __init force_early_printk_setup(char *str)
++{
++ printk_killswitch = true;
++ return 0;
++}
++early_param("force_early_printk", force_early_printk_setup);
++
+ void printk_kill(void)
+ {
+ printk_killswitch = true;
diff --git a/printk-kill.patch b/printk-kill.patch
new file mode 100644
index 0000000..fedc1ef
--- /dev/null
+++ b/printk-kill.patch
@@ -0,0 +1,121 @@
+Subject: printk-kill.patch
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 22 Jul 2011 17:58:40 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/printk.h | 3 ++-
+ kernel/printk.c | 33 +++++++++++++++++++++++++++++++++
+ kernel/watchdog.c | 15 +++++++++++++--
+ 3 files changed, 48 insertions(+), 3 deletions(-)
+
+Index: linux-stable/include/linux/printk.h
+===================================================================
+--- linux-stable.orig/include/linux/printk.h
++++ linux-stable/include/linux/printk.h
+@@ -98,9 +98,11 @@ int no_printk(const char *fmt, ...)
+ #ifdef CONFIG_EARLY_PRINTK
+ extern asmlinkage __printf(1, 2)
+ void early_printk(const char *fmt, ...);
++extern void printk_kill(void);
+ #else
+ static inline __printf(1, 2) __cold
+ void early_printk(const char *s, ...) { }
++static inline void printk_kill(void) { }
+ #endif
+
+ extern int printk_needs_cpu(int cpu);
+@@ -137,7 +139,6 @@ extern int __printk_ratelimit(const char
+ #define printk_ratelimit() __printk_ratelimit(__func__)
+ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+ unsigned int interval_msec);
+-
+ extern int printk_delay_msec;
+ extern int dmesg_restrict;
+ extern int kptr_restrict;
+Index: linux-stable/kernel/printk.c
+===================================================================
+--- linux-stable.orig/kernel/printk.c
++++ linux-stable/kernel/printk.c
+@@ -1246,6 +1246,32 @@ asmlinkage void early_printk(const char
+ early_vprintk(fmt, ap);
+ va_end(ap);
+ }
++
++/*
++ * This is independent of any log levels - a global
++ * kill switch that turns off all of printk.
++ *
++ * Used by the NMI watchdog if early-printk is enabled.
++ */
++static bool __read_mostly printk_killswitch;
++
++void printk_kill(void)
++{
++ printk_killswitch = true;
++}
++
++static int forced_early_printk(const char *fmt, va_list ap)
++{
++ if (!printk_killswitch)
++ return 0;
++ early_vprintk(fmt, ap);
++ return 1;
++}
++#else
++static inline int forced_early_printk(const char *fmt, va_list ap)
++{
++ return 0;
++}
+ #endif
+
+ static bool __read_mostly ignore_loglevel;
+@@ -1508,6 +1534,13 @@ asmlinkage int vprintk_emit(int facility
+ int this_cpu;
+ int printed_len = 0;
+
++ /*
++ * Fall back to early_printk if a debugging subsystem has
++ * killed printk output
++ */
++ if (unlikely(forced_early_printk(fmt, args)))
++ return 1;
++
+ boot_delay_msec();
+ printk_delay();
+
+Index: linux-stable/kernel/watchdog.c
+===================================================================
+--- linux-stable.orig/kernel/watchdog.c
++++ linux-stable/kernel/watchdog.c
+@@ -202,6 +202,8 @@ static int is_softlockup(unsigned long t
+
+ #ifdef CONFIG_HARDLOCKUP_DETECTOR
+
++static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
++
+ static struct perf_event_attr wd_hw_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+@@ -236,10 +238,19 @@ static void watchdog_overflow_callback(s
+ if (__this_cpu_read(hard_watchdog_warn) == true)
+ return;
+
+- if (hardlockup_panic)
++ /*
++ * If early-printk is enabled then make sure we do not
++ * lock up in printk() and kill console logging:
++ */
++ printk_kill();
++
++ if (hardlockup_panic) {
+ panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+- else
++ } else {
++ raw_spin_lock(&watchdog_output_lock);
+ WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
++ raw_spin_unlock(&watchdog_output_lock);
++ }
+
+ __this_cpu_write(hard_watchdog_warn, true);
+ return;
diff --git a/printk-rt-aware.patch b/printk-rt-aware.patch
new file mode 100644
index 0000000..2529fcd
--- /dev/null
+++ b/printk-rt-aware.patch
@@ -0,0 +1,103 @@
+Subject: printk-rt-aware.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 19 Sep 2012 14:50:37 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/printk.c | 33 +++++++++++++++++++++++++++++----
+ 1 file changed, 29 insertions(+), 4 deletions(-)
+
+Index: linux-stable/kernel/printk.c
+===================================================================
+--- linux-stable.orig/kernel/printk.c
++++ linux-stable/kernel/printk.c
+@@ -1312,6 +1312,7 @@ static void call_console_drivers(int lev
+ if (!console_drivers)
+ return;
+
++ migrate_disable();
+ for_each_console(con) {
+ if (exclusive_console && con != exclusive_console)
+ continue;
+@@ -1324,6 +1325,7 @@ static void call_console_drivers(int lev
+ continue;
+ con->write(con, text, len);
+ }
++ migrate_enable();
+ }
+
+ /*
+@@ -1383,12 +1385,18 @@ static inline int can_use_console(unsign
+ * interrupts disabled. It should return with 'lockbuf_lock'
+ * released but interrupts still disabled.
+ */
+-static int console_trylock_for_printk(unsigned int cpu)
++static int console_trylock_for_printk(unsigned int cpu, unsigned long flags)
+ __releases(&logbuf_lock)
+ {
+ int retval = 0, wake = 0;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ int lock = !early_boot_irqs_disabled && !irqs_disabled_flags(flags) &&
++ (preempt_count() <= 1);
++#else
++ int lock = 1;
++#endif
+
+- if (console_trylock()) {
++ if (lock && console_trylock()) {
+ retval = 1;
+
+ /*
+@@ -1667,8 +1675,15 @@ asmlinkage int vprintk_emit(int facility
+ * The console_trylock_for_printk() function will release 'logbuf_lock'
+ * regardless of whether it actually gets the console semaphore or not.
+ */
+- if (console_trylock_for_printk(this_cpu))
++ if (console_trylock_for_printk(this_cpu, flags)) {
++#ifndef CONFIG_PREEMPT_RT_FULL
++ console_unlock();
++#else
++ raw_local_irq_restore(flags);
+ console_unlock();
++ raw_local_irq_save(flags);
++#endif
++ }
+
+ lockdep_on();
+ out_restore_irqs:
+@@ -2057,11 +2072,16 @@ static void console_cont_flush(char *tex
+ goto out;
+
+ len = cont_print_text(text, size);
++#ifndef CONFIG_PREEMPT_RT_FULL
+ raw_spin_unlock(&logbuf_lock);
+ stop_critical_timings();
+ call_console_drivers(cont.level, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
++#else
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(cont.level, text, len);
++#endif
+ return;
+ out:
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+@@ -2144,12 +2164,17 @@ skip:
+ console_idx = log_next(console_idx);
+ console_seq++;
+ console_prev = msg->flags;
+- raw_spin_unlock(&logbuf_lock);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++ raw_spin_unlock(&logbuf_lock);
+ stop_critical_timings(); /* don't trace print latency */
+ call_console_drivers(level, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
++#else
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(level, text, len);
++#endif
+ }
+ console_locked = 0;
+
diff --git a/radix-tree-rt-aware.patch b/radix-tree-rt-aware.patch
new file mode 100644
index 0000000..102f0b6
--- /dev/null
+++ b/radix-tree-rt-aware.patch
@@ -0,0 +1,72 @@
+Subject: radix-tree-rt-aware.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 21:33:18 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/radix-tree.h | 8 +++++++-
+ lib/radix-tree.c | 5 ++++-
+ 2 files changed, 11 insertions(+), 2 deletions(-)
+
+Index: linux-stable/include/linux/radix-tree.h
+===================================================================
+--- linux-stable.orig/include/linux/radix-tree.h
++++ linux-stable/include/linux/radix-tree.h
+@@ -230,7 +230,13 @@ unsigned long radix_tree_next_hole(struc
+ unsigned long index, unsigned long max_scan);
+ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
+ unsigned long index, unsigned long max_scan);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ int radix_tree_preload(gfp_t gfp_mask);
++#else
++static inline int radix_tree_preload(gfp_t gm) { return 0; }
++#endif
++
+ void radix_tree_init(void);
+ void *radix_tree_tag_set(struct radix_tree_root *root,
+ unsigned long index, unsigned int tag);
+@@ -255,7 +261,7 @@ unsigned long radix_tree_locate_item(str
+
+ static inline void radix_tree_preload_end(void)
+ {
+- preempt_enable();
++ preempt_enable_nort();
+ }
+
+ /**
+Index: linux-stable/lib/radix-tree.c
+===================================================================
+--- linux-stable.orig/lib/radix-tree.c
++++ linux-stable/lib/radix-tree.c
+@@ -215,12 +215,13 @@ radix_tree_node_alloc(struct radix_tree_
+ * succeed in getting a node here (and never reach
+ * kmem_cache_alloc)
+ */
+- rtp = &__get_cpu_var(radix_tree_preloads);
++ rtp = &get_cpu_var(radix_tree_preloads);
+ if (rtp->nr) {
+ ret = rtp->nodes[rtp->nr - 1];
+ rtp->nodes[rtp->nr - 1] = NULL;
+ rtp->nr--;
+ }
++ put_cpu_var(radix_tree_preloads);
+ }
+ if (ret == NULL)
+ ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
+@@ -255,6 +256,7 @@ radix_tree_node_free(struct radix_tree_n
+ call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Load up this CPU's radix_tree_node buffer with sufficient objects to
+ * ensure that the addition of a single element in the tree cannot fail. On
+@@ -289,6 +291,7 @@ out:
+ return ret;
+ }
+ EXPORT_SYMBOL(radix_tree_preload);
++#endif
+
+ /*
+ * Return the maximum key which can be store into a
diff --git a/random-make-it-work-on-rt.patch b/random-make-it-work-on-rt.patch
new file mode 100644
index 0000000..edade40
--- /dev/null
+++ b/random-make-it-work-on-rt.patch
@@ -0,0 +1,124 @@
+Subject: random: Make it work on rt
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 21 Aug 2012 20:38:50 +0200
+
+Delegate the random insertion to the forced threaded interrupt
+handler. Store the return IP of the hard interrupt handler in the irq
+descriptor and feed it into the random generator as a source of
+entropy.
+
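+The resulting split, sketched (simplified from the hunks below, not a
+complete function):
+
+  /* hard interrupt context: only record the return IP, no pool locks */
+  desc->random_ip = regs ? instruction_pointer(regs) : 0;
+
+  /* forced-threaded handler: feed it to the entropy pool, which may
+   * take sleeping locks on RT */
+  add_interrupt_randomness(action->irq, 0,
+                           desc->random_ip ^ (u64) action);
+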
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ drivers/char/random.c | 10 ++++++----
+ include/linux/irqdesc.h | 1 +
+ include/linux/random.h | 2 +-
+ kernel/irq/handle.c | 8 +++++++-
+ kernel/irq/manage.c | 6 ++++++
+ 5 files changed, 21 insertions(+), 6 deletions(-)
+
+Index: linux-stable/drivers/char/random.c
+===================================================================
+--- linux-stable.orig/drivers/char/random.c
++++ linux-stable/drivers/char/random.c
+@@ -745,18 +745,16 @@ EXPORT_SYMBOL_GPL(add_input_randomness);
+
+ static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+-void add_interrupt_randomness(int irq, int irq_flags)
++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
+ {
+ struct entropy_store *r;
+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+- struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ __u32 input[4], cycles = get_cycles();
+
+ input[0] = cycles ^ jiffies;
+ input[1] = irq;
+- if (regs) {
+- __u64 ip = instruction_pointer(regs);
++ if (ip) {
+ input[2] = ip;
+ input[3] = ip >> 32;
+ }
+@@ -770,7 +768,11 @@ void add_interrupt_randomness(int irq, i
+ fast_pool->last = now;
+
+ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
++#ifndef CONFIG_PREEMPT_RT_FULL
+ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
++#else
++ mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
++#endif
+ /*
+ * If we don't have a valid cycle counter, and we see
+ * back-to-back timer interrupts, then skip giving credit for
+Index: linux-stable/include/linux/irqdesc.h
+===================================================================
+--- linux-stable.orig/include/linux/irqdesc.h
++++ linux-stable/include/linux/irqdesc.h
+@@ -52,6 +52,7 @@ struct irq_desc {
+ unsigned int irq_count; /* For detecting broken IRQs */
+ unsigned long last_unhandled; /* Aging timer for unhandled count */
+ unsigned int irqs_unhandled;
++ u64 random_ip;
+ raw_spinlock_t lock;
+ struct cpumask *percpu_enabled;
+ #ifdef CONFIG_SMP
+Index: linux-stable/include/linux/random.h
+===================================================================
+--- linux-stable.orig/include/linux/random.h
++++ linux-stable/include/linux/random.h
+@@ -51,7 +51,7 @@ struct rnd_state {
+ extern void add_device_randomness(const void *, unsigned int);
+ extern void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value);
+-extern void add_interrupt_randomness(int irq, int irq_flags);
++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
+
+ extern void get_random_bytes(void *buf, int nbytes);
+ extern void get_random_bytes_arch(void *buf, int nbytes);
+Index: linux-stable/kernel/irq/handle.c
+===================================================================
+--- linux-stable.orig/kernel/irq/handle.c
++++ linux-stable/kernel/irq/handle.c
+@@ -132,6 +132,8 @@ static void irq_wake_thread(struct irq_d
+ irqreturn_t
+ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+ {
++ struct pt_regs *regs = get_irq_regs();
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+ irqreturn_t retval = IRQ_NONE;
+ unsigned int flags = 0, irq = desc->irq_data.irq;
+
+@@ -172,7 +174,11 @@ handle_irq_event_percpu(struct irq_desc
+ action = action->next;
+ } while (action);
+
+- add_interrupt_randomness(irq, flags);
++#ifndef CONFIG_PREEMPT_RT_FULL
++ add_interrupt_randomness(irq, flags, ip);
++#else
++ desc->random_ip = ip;
++#endif
+
+ if (!noirqdebug)
+ note_interrupt(irq, desc, retval);
+Index: linux-stable/kernel/irq/manage.c
+===================================================================
+--- linux-stable.orig/kernel/irq/manage.c
++++ linux-stable/kernel/irq/manage.c
+@@ -852,6 +852,12 @@ static int irq_thread(void *data)
+ if (!noirqdebug)
+ note_interrupt(action->irq, desc, action_ret);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_disable();
++ add_interrupt_randomness(action->irq, 0,
++ desc->random_ip ^ (u64) action);
++ migrate_enable();
++#endif
+ wake_threads_waitq(desc);
+ }
+
diff --git a/rcu-fix-build-break.patch b/rcu-fix-build-break.patch
new file mode 100644
index 0000000..ecb6d96
--- /dev/null
+++ b/rcu-fix-build-break.patch
@@ -0,0 +1,60 @@
+Subject: rcu: Fix build break
+From: John Kacur <jkacur@redhat.com>
+Date: Fri, 13 Apr 2012 12:54:21 +0200
+
+A build break can occur with the following config options enabled
+
+CONFIG_PREEMPT_RT_FULL
+CONFIG_RCU_FAST_NO_HZ
+CONFIG_RCU_CPU_STALL_INFO
+
+This occurs because symbols in print_cpu_stall_fast_no_hz(), such as
+rcu_idle_gp_timer, are not accessible with CONFIG_PREEMPT_RT_FULL,
+which results in the following type of build error:
+
+kernel/rcutree_plugin.h: In function ‘print_cpu_stall_fast_no_hz’:
+kernel/rcutree_plugin.h:2195: error: ‘rcu_idle_gp_timer’ undeclared (first use in this function)
+
+This patch fixes the build break by limiting the PREEMPT_RT_FULL
+section to the function rcu_needs_cpu() instead of to the entire
+!defined(CONFIG_RCU_FAST_NO_HZ) section as was intended in the
+original "rcu: Make ksoftirqd do RCU quiescent states" patch.
+
+Signed-off-by: John Kacur <jkacur@redhat.com>
+Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
+Link: http://lkml.kernel.org/r/1334314461-8937-1-git-send-email-jkacur@redhat.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/rcutree_plugin.h | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+Index: linux-stable/kernel/rcutree_plugin.h
+===================================================================
+--- linux-stable.orig/kernel/rcutree_plugin.h
++++ linux-stable/kernel/rcutree_plugin.h
+@@ -1743,6 +1743,9 @@ int rcu_needs_cpu(int cpu, unsigned long
+ *delta_jiffies = ULONG_MAX;
+ return rcu_cpu_has_callbacks(cpu);
+ }
++#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
++
++#if !defined(CONFIG_RCU_FAST_NO_HZ)
+
+ /*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
+@@ -1859,6 +1862,7 @@ static bool rcu_cpu_has_nonlazy_callback
+ rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+@@ -1902,6 +1906,7 @@ int rcu_needs_cpu(int cpu, unsigned long
+ }
+ return 0;
+ }
++#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
+
+ /*
+ * Handler for smp_call_function_single(). The only point of this
diff --git a/rcu-force-preempt-rcu-for-rt.patch b/rcu-force-preempt-rcu-for-rt.patch
new file mode 100644
index 0000000..5de6462
--- /dev/null
+++ b/rcu-force-preempt-rcu-for-rt.patch
@@ -0,0 +1,28 @@
+Subject: RCU: Force PREEMPT_RCU for PREEMPT-RT
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:30 -0500
+
+PREEMPT_RT relies on PREEMPT_RCU - only allow RCU to be configured
+interactively in the !PREEMPT_RT case.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/n/tip-j1y0phicu6s6pu8guku2vca0@git.kernel.org
+---
+ init/Kconfig | 1 -
+ 1 file changed, 1 deletion(-)
+
+Index: linux-stable/init/Kconfig
+===================================================================
+--- linux-stable.orig/init/Kconfig
++++ linux-stable/init/Kconfig
+@@ -806,7 +806,6 @@ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on CGROUP_SCHED
+- depends on !PREEMPT_RT_FULL
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
diff --git a/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
new file mode 100644
index 0000000..a0eea94
--- /dev/null
+++ b/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -0,0 +1,266 @@
+Subject: rcu: Merge RCU-bh into RCU-preempt
+Date: Wed, 5 Oct 2011 11:59:38 -0700
+From: Thomas Gleixner <tglx@linutronix.de>
+
+The Linux kernel has long RCU-bh read-side critical sections that
+intolerably increase scheduling latency under mainline's RCU-bh rules,
+which include RCU-bh read-side critical sections being non-preemptible.
+This patch therefore arranges for RCU-bh to be implemented in terms of
+RCU-preempt for CONFIG_PREEMPT_RT_FULL=y.
+
+This has the downside of defeating the purpose of RCU-bh, namely,
+handling the case where the system is subjected to a network-based
+denial-of-service attack that keeps at least one CPU doing full-time
+softirq processing. This issue will be fixed by a later commit.
+
+The current commit will need some work to make it appropriate for
+mainline use, for example, it needs to be extended to cover Tiny RCU.
+
+[ paulmck: Added a useful changelog ]
+
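+At the source level (a sketch, not part of the patch; the pointer and
+callback names are invented) an RCU-bh user is unchanged; on RT the
+read side simply nests a preemptible RCU section inside
+local_bh_disable(), and the update side is redirected to the
+RCU-preempt primitives:
+
+  rcu_read_lock_bh();              /* RT: local_bh_disable() + rcu_read_lock() */
+  p = rcu_dereference_bh(head);
+  /* ... use p ... */
+  rcu_read_unlock_bh();            /* RT: rcu_read_unlock() + local_bh_enable() */
+
+  call_rcu_bh(&p->rcu, free_cb);   /* RT: #defined to call_rcu() */
+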
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/rcupdate.h | 25 +++++++++++++++++++++++++
+ include/linux/rcutree.h | 18 ++++++++++++++++--
+ kernel/rcupdate.c | 2 ++
+ kernel/rcutree.c | 10 ++++++++++
+ 4 files changed, 53 insertions(+), 2 deletions(-)
+
+Index: linux-stable/include/linux/rcupdate.h
+===================================================================
+--- linux-stable.orig/include/linux/rcupdate.h
++++ linux-stable/include/linux/rcupdate.h
+@@ -101,6 +101,9 @@ extern void call_rcu(struct rcu_head *he
+
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#define call_rcu_bh call_rcu
++#else
+ /**
+ * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+@@ -121,6 +124,7 @@ extern void call_rcu(struct rcu_head *he
+ */
+ extern void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *head));
++#endif
+
+ /**
+ * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
+@@ -191,7 +195,13 @@ static inline int rcu_preempt_depth(void
+
+ /* Internal to kernel */
+ extern void rcu_sched_qs(int cpu);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void rcu_bh_qs(int cpu);
++#else
++static inline void rcu_bh_qs(int cpu) { }
++#endif
++
+ extern void rcu_check_callbacks(int cpu, int user);
+ struct notifier_block;
+ extern void rcu_idle_enter(void);
+@@ -328,7 +338,14 @@ static inline int rcu_read_lock_held(voi
+ * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+ * hell.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++static inline int rcu_read_lock_bh_held(void)
++{
++ return rcu_read_lock_held();
++}
++#else
+ extern int rcu_read_lock_bh_held(void);
++#endif
+
+ /**
+ * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
+@@ -776,10 +793,14 @@ static inline void rcu_read_unlock(void)
+ static inline void rcu_read_lock_bh(void)
+ {
+ local_bh_disable();
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_lock();
++#else
+ __acquire(RCU_BH);
+ rcu_lock_acquire(&rcu_bh_lock_map);
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "rcu_read_lock_bh() used illegally while idle");
++#endif
+ }
+
+ /*
+@@ -789,10 +810,14 @@ static inline void rcu_read_lock_bh(void
+ */
+ static inline void rcu_read_unlock_bh(void)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ rcu_read_unlock();
++#else
+ rcu_lockdep_assert(!rcu_is_cpu_idle(),
+ "rcu_read_unlock_bh() used illegally while idle");
+ rcu_lock_release(&rcu_bh_lock_map);
+ __release(RCU_BH);
++#endif
+ local_bh_enable();
+ }
+
+Index: linux-stable/include/linux/rcutree.h
+===================================================================
+--- linux-stable.orig/include/linux/rcutree.h
++++ linux-stable/include/linux/rcutree.h
+@@ -45,7 +45,11 @@ static inline void rcu_virt_note_context
+ rcu_note_context_switch(cpu);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define synchronize_rcu_bh synchronize_rcu
++#else
+ extern void synchronize_rcu_bh(void);
++#endif
+ extern void synchronize_sched_expedited(void);
+ extern void synchronize_rcu_expedited(void);
+
+@@ -73,20 +77,30 @@ static inline void synchronize_rcu_bh_ex
+ }
+
+ extern void rcu_barrier(void);
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define rcu_barrier_bh rcu_barrier
++#else
+ extern void rcu_barrier_bh(void);
++#endif
+ extern void rcu_barrier_sched(void);
+
+ extern unsigned long rcutorture_testseq;
+ extern unsigned long rcutorture_vernum;
+ extern long rcu_batches_completed(void);
+-extern long rcu_batches_completed_bh(void);
+ extern long rcu_batches_completed_sched(void);
+
+ extern void rcu_force_quiescent_state(void);
+-extern void rcu_bh_force_quiescent_state(void);
+ extern void rcu_sched_force_quiescent_state(void);
+
+ extern void rcu_scheduler_starting(void);
+ extern int rcu_scheduler_active __read_mostly;
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++extern void rcu_bh_force_quiescent_state(void);
++extern long rcu_batches_completed_bh(void);
++#else
++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state
++# define rcu_batches_completed_bh rcu_batches_completed
++#endif
++
+ #endif /* __LINUX_RCUTREE_H */
+Index: linux-stable/kernel/rcupdate.c
+===================================================================
+--- linux-stable.orig/kernel/rcupdate.c
++++ linux-stable/kernel/rcupdate.c
+@@ -149,6 +149,7 @@ int debug_lockdep_rcu_enabled(void)
+ }
+ EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
+ *
+@@ -175,6 +176,7 @@ int rcu_read_lock_bh_held(void)
+ return in_softirq() || irqs_disabled();
+ }
+ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
++#endif
+
+ #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+Index: linux-stable/kernel/rcutree.c
+===================================================================
+--- linux-stable.orig/kernel/rcutree.c
++++ linux-stable/kernel/rcutree.c
+@@ -182,6 +182,7 @@ void rcu_sched_qs(int cpu)
+ rdp->passed_quiesce = 1;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void rcu_bh_qs(int cpu)
+ {
+ struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
+@@ -192,6 +193,7 @@ void rcu_bh_qs(int cpu)
+ trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
+ rdp->passed_quiesce = 1;
+ }
++#endif
+
+ /*
+ * Note a context switch. This is a quiescent state for RCU-sched,
+@@ -238,6 +240,7 @@ long rcu_batches_completed_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Return the number of RCU BH batches processed thus far for debug & stats.
+ */
+@@ -255,6 +258,7 @@ void rcu_bh_force_quiescent_state(void)
+ force_quiescent_state(&rcu_bh_state, 0);
+ }
+ EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
++#endif
+
+ /*
+ * Record the number of times rcutorture tests have been initiated and
+@@ -1970,6 +1974,7 @@ void call_rcu_sched(struct rcu_head *hea
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Queue an RCU callback for invocation after a quicker grace period.
+ */
+@@ -1978,6 +1983,7 @@ void call_rcu_bh(struct rcu_head *head,
+ __call_rcu(head, func, &rcu_bh_state, 0);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
+
+ /*
+ * Because a context switch is a grace period for RCU-sched and RCU-bh,
+@@ -2034,6 +2040,7 @@ void synchronize_sched(void)
+ }
+ EXPORT_SYMBOL_GPL(synchronize_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
+ *
+@@ -2054,6 +2061,7 @@ void synchronize_rcu_bh(void)
+ wait_rcu_gp(call_rcu_bh);
+ }
+ EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
++#endif
+
+ static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
+ static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
+@@ -2460,6 +2468,7 @@ static void _rcu_barrier(struct rcu_stat
+ destroy_rcu_head_on_stack(&rd.barrier_head);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+@@ -2468,6 +2477,7 @@ void rcu_barrier_bh(void)
+ _rcu_barrier(&rcu_bh_state);
+ }
+ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
++#endif
+
+ /**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
diff --git a/rcu-tiny-merge-bh.patch b/rcu-tiny-merge-bh.patch
new file mode 100644
index 0000000..c697500
--- /dev/null
+++ b/rcu-tiny-merge-bh.patch
@@ -0,0 +1,26 @@
+Subject: rcu-more-fallout.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 14 Nov 2011 10:57:54 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/rcutiny.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: linux-stable/kernel/rcutiny.c
+===================================================================
+--- linux-stable.orig/kernel/rcutiny.c
++++ linux-stable/kernel/rcutiny.c
+@@ -368,6 +368,7 @@ void call_rcu_sched(struct rcu_head *hea
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_sched);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Post an RCU bottom-half callback to be invoked after any subsequent
+ * quiescent state.
+@@ -377,3 +378,4 @@ void call_rcu_bh(struct rcu_head *head,
+ __call_rcu(head, func, &rcu_bh_ctrlblk);
+ }
+ EXPORT_SYMBOL_GPL(call_rcu_bh);
++#endif
diff --git a/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/re-migrate_disable-race-with-cpu-hotplug-3f.patch
new file mode 100644
index 0000000..6851af9
--- /dev/null
+++ b/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -0,0 +1,36 @@
+From: Yong Zhang <yong.zhang0@gmail.com>
+Date: Thu, 28 Jul 2011 11:16:00 +0800
+Subject: hotplug: Reread hotplug_pcp on pin_current_cpu() retry
+
+When a retry happens, it is likely that the task has been migrated to
+another cpu (unless the unplug failed), but it still dereferences the
+original hotplug_pcp per-cpu data.
+
+Update the pointer to hotplug_pcp in the retry path, so it points to
+the current cpu.
+
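+In other words (a sketch of the change below): the per-cpu pointer has
+to be (re)loaded after the label the retry jumps back to, so it always
+refers to the CPU the task is currently running on:
+
+  retry:
+          hp = &__get_cpu_var(hotplug_pcp);  /* re-evaluated on every retry */
+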
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/20110728031600.GA338@windriver.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/cpu.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+Index: linux-stable/kernel/cpu.c
+===================================================================
+--- linux-stable.orig/kernel/cpu.c
++++ linux-stable/kernel/cpu.c
+@@ -81,9 +81,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+ */
+ void pin_current_cpu(void)
+ {
+- struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
++ struct hotplug_pcp *hp;
+
+ retry:
++ hp = &__get_cpu_var(hotplug_pcp);
++
+ if (!hp->unplug || hp->refcount || preempt_count() > 1 ||
+ hp->unplug == current || (current->flags & PF_STOMPER)) {
+ hp->refcount++;
diff --git a/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
new file mode 100644
index 0000000..5b9d311
--- /dev/null
+++ b/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -0,0 +1,71 @@
+Subject: ARM: Initialize ptl->lock for vector page
+From: Frank Rowand <frank.rowand@am.sony.com>
+Date: Sat, 1 Oct 2011 18:58:13 -0700
+
+Without this patch, ARM cannot use SPLIT_PTLOCK_CPUS if
+PREEMPT_RT_FULL=y because vectors_user_mapping() creates a
+VM_ALWAYSDUMP mapping of the vector page (address 0xffff0000), but no
+ptl->lock has been allocated for the page. An attempt to coredump
+that page will result in a kernel NULL pointer dereference when
+follow_page() attempts to lock the page.
+
+The call tree to the NULL pointer dereference is:
+
+ do_notify_resume()
+ get_signal_to_deliver()
+ do_coredump()
+ elf_core_dump()
+ get_dump_page()
+ __get_user_pages()
+ follow_page()
+ pte_offset_map_lock() <----- a #define
+ ...
+ rt_spin_lock()
+
+The underlying problem is exposed by mm-shrink-the-page-frame-to-rt-size.patch.
+
+Signed-off-by: Frank Rowand <frank.rowand@am.sony.com>
+Cc: Frank <Frank_Rowand@sonyusa.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/4E87C535.2030907@am.sony.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/arm/kernel/process.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+Index: linux-stable/arch/arm/kernel/process.c
+===================================================================
+--- linux-stable.orig/arch/arm/kernel/process.c
++++ linux-stable/arch/arm/kernel/process.c
+@@ -508,6 +508,31 @@ unsigned long arch_randomize_brk(struct
+ }
+
+ #ifdef CONFIG_MMU
++
++/*
++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
++ * initialized by pgtable_page_ctor() then a coredump of the vector page will
++ * fail.
++ */
++static int __init vectors_user_mapping_init_page(void)
++{
++ struct page *page;
++ unsigned long addr = 0xffff0000;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ page = pmd_page(*(pmd));
++
++ pgtable_page_ctor(page);
++
++ return 0;
++}
++late_initcall(vectors_user_mapping_init_page);
++
+ /*
+ * The vectors page is always readable from user space for the
+ * atomic helpers and the signal restart code. Insert it into the
diff --git a/relay-fix-timer-madness.patch b/relay-fix-timer-madness.patch
new file mode 100644
index 0000000..8ce2eec
--- /dev/null
+++ b/relay-fix-timer-madness.patch
@@ -0,0 +1,54 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:44:07 -0500
+Subject: relay: fix timer madness
+
+Remove timer calls (!!!) from deep within the tracing infrastructure.
+This was totally bogus code that can cause lockups and worse. Poll
+the buffer every 2 jiffies for now.
+
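+The resulting scheme (a sketch of the code below): the timer callback
+wakes any readers and immediately re-arms itself, so notification is
+plain periodic polling rather than a mod_timer() call from the
+scheduler path:
+
+  static void wakeup_readers(unsigned long data)
+  {
+          struct rchan_buf *buf = (struct rchan_buf *)data;
+
+          wake_up_interruptible(&buf->read_wait);
+          mod_timer(&buf->timer, jiffies + 1);  /* poll again next jiffy */
+  }
+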
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/relay.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+Index: linux-stable/kernel/relay.c
+===================================================================
+--- linux-stable.orig/kernel/relay.c
++++ linux-stable/kernel/relay.c
+@@ -340,6 +340,10 @@ static void wakeup_readers(unsigned long
+ {
+ struct rchan_buf *buf = (struct rchan_buf *)data;
+ wake_up_interruptible(&buf->read_wait);
++ /*
++ * Stupid polling for now:
++ */
++ mod_timer(&buf->timer, jiffies + 1);
+ }
+
+ /**
+@@ -357,6 +361,7 @@ static void __relay_reset(struct rchan_b
+ init_waitqueue_head(&buf->read_wait);
+ kref_init(&buf->kref);
+ setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
++ mod_timer(&buf->timer, jiffies + 1);
+ } else
+ del_timer_sync(&buf->timer);
+
+@@ -739,15 +744,6 @@ size_t relay_switch_subbuf(struct rchan_
+ else
+ buf->early_bytes += buf->chan->subbuf_size -
+ buf->padding[old_subbuf];
+- smp_mb();
+- if (waitqueue_active(&buf->read_wait))
+- /*
+- * Calling wake_up_interruptible() from here
+- * will deadlock if we happen to be logging
+- * from the scheduler (trying to re-grab
+- * rq->lock), so defer it.
+- */
+- mod_timer(&buf->timer, jiffies + 1);
+ }
+
+ old = buf->data;
diff --git a/resource-counters-use-localirq-nort.patch b/resource-counters-use-localirq-nort.patch
new file mode 100644
index 0000000..bcb79ce
--- /dev/null
+++ b/resource-counters-use-localirq-nort.patch
@@ -0,0 +1,86 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:44:33 -0500
+Subject: core: Do not disable interrupts on RT in res_counter.c
+
+Frederic Weisbecker reported this warning:
+
+[ 45.228562] BUG: sleeping function called from invalid context at kernel/rtmutex.c:683
+[ 45.228571] in_atomic(): 0, irqs_disabled(): 1, pid: 4290, name: ntpdate
+[ 45.228576] INFO: lockdep is turned off.
+[ 45.228580] irq event stamp: 0
+[ 45.228583] hardirqs last enabled at (0): [<(null)>] (null)
+[ 45.228589] hardirqs last disabled at (0): [<ffffffff8025449d>] copy_process+0x68d/0x1500
+[ 45.228602] softirqs last enabled at (0): [<ffffffff8025449d>] copy_process+0x68d/0x1500
+[ 45.228609] softirqs last disabled at (0): [<(null)>] (null)
+[ 45.228617] Pid: 4290, comm: ntpdate Tainted: G W 2.6.29-rc4-rt1-tip #1
+[ 45.228622] Call Trace:
+[ 45.228632] [<ffffffff8027dfb0>] ? print_irqtrace_events+0xd0/0xe0
+[ 45.228639] [<ffffffff8024cd73>] __might_sleep+0x113/0x130
+[ 45.228646] [<ffffffff8077c811>] rt_spin_lock+0xa1/0xb0
+[ 45.228653] [<ffffffff80296a3d>] res_counter_charge+0x5d/0x130
+[ 45.228660] [<ffffffff802fb67f>] __mem_cgroup_try_charge+0x7f/0x180
+[ 45.228667] [<ffffffff802fc407>] mem_cgroup_charge_common+0x57/0x90
+[ 45.228674] [<ffffffff80212096>] ? ftrace_call+0x5/0x2b
+[ 45.228680] [<ffffffff802fc49d>] mem_cgroup_newpage_charge+0x5d/0x60
+[ 45.228688] [<ffffffff802d94ce>] __do_fault+0x29e/0x4c0
+[ 45.228694] [<ffffffff8077c843>] ? rt_spin_unlock+0x23/0x80
+[ 45.228700] [<ffffffff802db8b5>] handle_mm_fault+0x205/0x890
+[ 45.228707] [<ffffffff80212096>] ? ftrace_call+0x5/0x2b
+[ 45.228714] [<ffffffff8023495e>] do_page_fault+0x11e/0x2a0
+[ 45.228720] [<ffffffff8077e5a5>] page_fault+0x25/0x30
+[ 45.228727] [<ffffffff8043e1ed>] ? __clear_user+0x3d/0x70
+[ 45.228733] [<ffffffff8043e1d1>] ? __clear_user+0x21/0x70
+
+The reason is the raw IRQ flag use of kernel/res_counter.c.
+
+The irq flags tricks there seem a bit pointless: it cannot protect the
+c->parent linkage because local_irq_save() is only per CPU.
+
+So replace it with _nort(). This code needs a second look.
+
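+With the _nort() variants (defined elsewhere in this series) the code
+keeps disabling interrupts on mainline but relies solely on the
+per-counter spinlock, a sleeping lock, on RT. The resulting pattern,
+sketched:
+
+  local_irq_save_nort(flags);      /* real irq-off only on !RT */
+  for (c = counter; c != NULL; c = c->parent) {
+          spin_lock(&c->lock);     /* sleeping lock on RT */
+          /* charge or uncharge c */
+          spin_unlock(&c->lock);
+  }
+  local_irq_restore_nort(flags);
+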
+Reported-by: Frederic Weisbecker <fweisbec@gmail.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/res_counter.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+Index: linux-stable/kernel/res_counter.c
+===================================================================
+--- linux-stable.orig/kernel/res_counter.c
++++ linux-stable/kernel/res_counter.c
+@@ -49,7 +49,7 @@ static int __res_counter_charge(struct r
+
+ r = ret = 0;
+ *limit_fail_at = NULL;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ for (c = counter; c != NULL; c = c->parent) {
+ spin_lock(&c->lock);
+ r = res_counter_charge_locked(c, val, force);
+@@ -69,7 +69,7 @@ static int __res_counter_charge(struct r
+ spin_unlock(&u->lock);
+ }
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+
+ return ret;
+ }
+@@ -101,13 +101,13 @@ void res_counter_uncharge_until(struct r
+ unsigned long flags;
+ struct res_counter *c;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ for (c = counter; c != top; c = c->parent) {
+ spin_lock(&c->lock);
+ res_counter_uncharge_locked(c, val);
+ spin_unlock(&c->lock);
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
diff --git a/rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch b/rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch
new file mode 100644
index 0000000..973b055
--- /dev/null
+++ b/rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch
@@ -0,0 +1,47 @@
+Subject: printk: Don't call printk_tick in printk_needs_cpu() on RT
+From: Yong Zhang <yong.zhang0@gmail.com>
+Date: Sun, 16 Oct 2011 18:56:45 +0800
+
+printk_tick() can't be called in atomic context when RT is enabled,
+otherwise the warning below will show up:
+
+[ 117.597095] BUG: sleeping function called from invalid context at kernel/rtmutex.c:645
+[ 117.597102] in_atomic(): 1, irqs_disabled(): 1, pid: 0, name: kworker/0:0
+[ 117.597111] Pid: 0, comm: kworker/0:0 Not tainted 3.0.6-rt17-00284-gb76d419-dirty #7
+[ 117.597116] Call Trace:
+[ 117.597131] [<c06e3b61>] ? printk+0x1d/0x24
+[ 117.597142] [<c01390b6>] __might_sleep+0xe6/0x110
+[ 117.597151] [<c06e634c>] rt_spin_lock+0x1c/0x30
+[ 117.597158] [<c0142f26>] __wake_up+0x26/0x60
+[ 117.597166] [<c014c78e>] printk_tick+0x3e/0x40
+[ 117.597173] [<c014c7b4>] printk_needs_cpu+0x24/0x30
+[ 117.597181] [<c017ecc8>] tick_nohz_stop_sched_tick+0x2e8/0x410
+[ 117.597191] [<c017305a>] ? sched_clock_idle_wakeup_event+0x1a/0x20
+[ 117.597201] [<c010182a>] cpu_idle+0x4a/0xb0
+[ 117.597209] [<c06e0b97>] start_secondary+0xd3/0xd7
+
+Now this is a really rare case and it's very unlikely that we starve
+a logbuf waiter that way.
+
+Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
+Link: http://lkml.kernel.org/r/1318762607-2261-4-git-send-email-yong.zhang0@gmail.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/printk.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/kernel/printk.c
+===================================================================
+--- linux-stable.orig/kernel/printk.c
++++ linux-stable/kernel/printk.c
+@@ -2027,8 +2027,8 @@ void printk_tick(void)
+
+ int printk_needs_cpu(int cpu)
+ {
+- if (cpu_is_offline(cpu))
+- printk_tick();
++ if (unlikely(cpu_is_offline(cpu)))
++ __this_cpu_write(printk_pending, 0);
+ return __this_cpu_read(printk_pending);
+ }
+
diff --git a/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch b/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch
new file mode 100644
index 0000000..f93d9a2
--- /dev/null
+++ b/rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch
@@ -0,0 +1,48 @@
+Subject: sched/rt: Fix wait_task_interactive() to test rt_spin_lock state
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Thu, 01 Mar 2012 13:55:33 -0500
+
+The wait_task_inactive() function makes a task sleep while waiting for
+another task to reach a certain state. But it ignores the rt_spin_lock
+state and can return an incorrect result if the task it is waiting
+for is blocked on an rt_spin_lock() and is waking up.
+
+The rt_spin_locks save the task's state in the saved_state field,
+and wait_task_inactive() must also test that state.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Carsten Emde <C.Emde@osadl.org>
+Cc: John Kacur <jkacur@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Clark Williams <clark.williams@gmail.com>
+Cc: stable-rt@vger.kernel.org
+Link: http://lkml.kernel.org/r/20120301190345.979435764@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/sched/core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -1172,7 +1172,8 @@ unsigned long wait_task_inactive(struct
+ * is actually now running somewhere else!
+ */
+ while (task_running(rq, p)) {
+- if (match_state && unlikely(p->state != match_state))
++ if (match_state && unlikely(p->state != match_state)
++ && unlikely(p->saved_state != match_state))
+ return 0;
+ cpu_relax();
+ }
+@@ -1187,7 +1188,8 @@ unsigned long wait_task_inactive(struct
+ running = task_running(rq, p);
+ on_rq = p->on_rq;
+ ncsw = 0;
+- if (!match_state || p->state == match_state)
++ if (!match_state || p->state == match_state
++ || p->saved_state == match_state)
+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+ task_rq_unlock(rq, p, &flags);
+
diff --git a/rt-add-rt-locks.patch b/rt-add-rt-locks.patch
new file mode 100644
index 0000000..fdbc412
--- /dev/null
+++ b/rt-add-rt-locks.patch
@@ -0,0 +1,910 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 26 Jul 2009 19:39:56 +0200
+Subject: rt: Add the preempt-rt lock replacement APIs
+
+Map spinlocks, rwlocks, rw_semaphores and semaphores to the rt_mutex
+based locking functions for preempt-rt.
+
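+A usage note (illustrative, not taken from this patch; the lock and
+function names are invented): with these mappings a plain spinlock_t
+becomes a sleeping, rt_mutex based lock on RT, while raw_spinlock_t
+keeps the true spinning, interrupt-disabling semantics, so code that
+really must run with interrupts off has to use the raw type:
+
+  static DEFINE_SPINLOCK(data_lock);       /* rt_mutex based on RT */
+  static DEFINE_RAW_SPINLOCK(hw_lock);     /* always a real spinlock */
+
+  static void touch_device(void)
+  {
+          unsigned long flags;
+
+          spin_lock(&data_lock);           /* may sleep on RT */
+          /* ... update driver data ... */
+          spin_unlock(&data_lock);
+
+          raw_spin_lock_irqsave(&hw_lock, flags);  /* hard irq-off, also on RT */
+          /* ... poke hardware registers ... */
+          raw_spin_unlock_irqrestore(&hw_lock, flags);
+  }
+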
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/rwlock_rt.h | 123 ++++++++++
+ include/linux/spinlock.h | 12 -
+ include/linux/spinlock_api_smp.h | 4
+ include/linux/spinlock_rt.h | 158 +++++++++++++
+ kernel/Makefile | 9
+ kernel/rt.c | 442 +++++++++++++++++++++++++++++++++++++++
+ kernel/spinlock.c | 7
+ lib/spinlock_debug.c | 5
+ 8 files changed, 756 insertions(+), 4 deletions(-)
+
+Index: linux-stable/include/linux/rwlock_rt.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/linux/rwlock_rt.h
+@@ -0,0 +1,123 @@
++#ifndef __LINUX_RWLOCK_RT_H
++#define __LINUX_RWLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++#define rwlock_init(rwl) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(rwl)->lock); \
++ __rt_rwlock_init(rwl, #rwl, &__key); \
++} while (0)
++
++extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
++
++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
++
++#define write_trylock_irqsave(lock, flags) \
++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
++
++#define read_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ migrate_disable(); \
++ flags = rt_read_lock_irqsave(lock); \
++ } while (0)
++
++#define write_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ migrate_disable(); \
++ flags = rt_write_lock_irqsave(lock); \
++ } while (0)
++
++#define read_lock(lock) \
++ do { \
++ migrate_disable(); \
++ rt_read_lock(lock); \
++ } while (0)
++
++#define read_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ migrate_disable(); \
++ rt_read_lock(lock); \
++ } while (0)
++
++#define read_lock_irq(lock) read_lock(lock)
++
++#define write_lock(lock) \
++ do { \
++ migrate_disable(); \
++ rt_write_lock(lock); \
++ } while (0)
++
++#define write_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ migrate_disable(); \
++ rt_write_lock(lock); \
++ } while (0)
++
++#define write_lock_irq(lock) write_lock(lock)
++
++#define read_unlock(lock) \
++ do { \
++ rt_read_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define read_unlock_bh(lock) \
++ do { \
++ rt_read_unlock(lock); \
++ migrate_enable(); \
++ local_bh_enable(); \
++ } while (0)
++
++#define read_unlock_irq(lock) read_unlock(lock)
++
++#define write_unlock(lock) \
++ do { \
++ rt_write_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define write_unlock_bh(lock) \
++ do { \
++ rt_write_unlock(lock); \
++ migrate_enable(); \
++ local_bh_enable(); \
++ } while (0)
++
++#define write_unlock_irq(lock) write_unlock(lock)
++
++#define read_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_read_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define write_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ rt_write_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#endif
+Index: linux-stable/include/linux/spinlock.h
+===================================================================
+--- linux-stable.orig/include/linux/spinlock.h
++++ linux-stable/include/linux/spinlock.h
+@@ -254,7 +254,11 @@ static inline void do_raw_spin_unlock(ra
+ #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
+
+ /* Include rwlock functions */
+-#include <linux/rwlock.h>
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_rt.h>
++#else
++# include <linux/rwlock.h>
++#endif
+
+ /*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+@@ -265,6 +269,10 @@ static inline void do_raw_spin_unlock(ra
+ # include <linux/spinlock_api_up.h>
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ /*
+ * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
+ */
+@@ -394,4 +402,6 @@ extern int _atomic_dec_and_lock(atomic_t
+ #define atomic_dec_and_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* __LINUX_SPINLOCK_H */
+Index: linux-stable/include/linux/spinlock_api_smp.h
+===================================================================
+--- linux-stable.orig/include/linux/spinlock_api_smp.h
++++ linux-stable/include/linux/spinlock_api_smp.h
+@@ -191,6 +191,8 @@ static inline int __raw_spin_trylock_bh(
+ return 0;
+ }
+
+-#include <linux/rwlock_api_smp.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/rwlock_api_smp.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_API_SMP_H */
+Index: linux-stable/include/linux/spinlock_rt.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/linux/spinlock_rt.h
+@@ -0,0 +1,158 @@
++#ifndef __LINUX_SPINLOCK_RT_H
++#define __LINUX_SPINLOCK_RT_H
++
++#ifndef __LINUX_SPINLOCK_H
++#error Do not include directly. Use spinlock.h
++#endif
++
++#include <linux/bug.h>
++
++extern void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
++
++#define spin_lock_init(slock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(slock)->lock); \
++ __rt_spin_lock_init(slock, #slock, &__key); \
++} while (0)
++
++extern void __lockfunc rt_spin_lock(spinlock_t *lock);
++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
++extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
++extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
++
++/*
++ * lockdep-less calls, for derived types like rwlock:
++ * (for trylock they can use rt_mutex_trylock() directly).
++ */
++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
++
++#define spin_lock_local(lock) rt_spin_lock(lock)
++#define spin_unlock_local(lock) rt_spin_unlock(lock)
++
++#define spin_lock(lock) \
++ do { \
++ migrate_disable(); \
++ rt_spin_lock(lock); \
++ } while (0)
++
++#define spin_lock_bh(lock) \
++ do { \
++ local_bh_disable(); \
++ migrate_disable(); \
++ rt_spin_lock(lock); \
++ } while (0)
++
++#define spin_lock_irq(lock) spin_lock(lock)
++
++#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
++
++#ifdef CONFIG_LOCKDEP
++# define spin_lock_nested(lock, subclass) \
++ do { \
++ migrate_disable(); \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ migrate_disable(); \
++ rt_spin_lock_nested(lock, subclass); \
++ } while (0)
++#else
++# define spin_lock_nested(lock, subclass) spin_lock(lock)
++
++# define spin_lock_irqsave_nested(lock, flags, subclass) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(lock); \
++ } while (0)
++#endif
++
++#define spin_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ spin_lock(lock); \
++ } while (0)
++
++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
++{
++ unsigned long flags = 0;
++#ifdef CONFIG_TRACE_IRQFLAGS
++ flags = rt_spin_lock_trace_flags(lock);
++#else
++ spin_lock(lock); /* lock_local */
++#endif
++ return flags;
++}
++
++/* FIXME: we need rt_spin_lock_nest_lock */
++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
++
++#define spin_unlock(lock) \
++ do { \
++ rt_spin_unlock(lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define spin_unlock_bh(lock) \
++ do { \
++ rt_spin_unlock(lock); \
++ migrate_enable(); \
++ local_bh_enable(); \
++ } while (0)
++
++#define spin_unlock_irq(lock) spin_unlock(lock)
++
++#define spin_unlock_irqrestore(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ (void) flags; \
++ spin_unlock(lock); \
++ } while (0)
++
++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock))
++#define spin_trylock_irq(lock) spin_trylock(lock)
++
++#define spin_trylock_irqsave(lock, flags) \
++ rt_spin_trylock_irqsave(lock, &(flags))
++
++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock)
++
++#ifdef CONFIG_GENERIC_LOCKBREAK
++# define spin_is_contended(lock) ((lock)->break_lock)
++#else
++# define spin_is_contended(lock) (((void)(lock), 0))
++#endif
++
++static inline int spin_can_lock(spinlock_t *lock)
++{
++ return !rt_mutex_is_locked(&lock->lock);
++}
++
++static inline int spin_is_locked(spinlock_t *lock)
++{
++ return rt_mutex_is_locked(&lock->lock);
++}
++
++static inline void assert_spin_locked(spinlock_t *lock)
++{
++ BUG_ON(!spin_is_locked(lock));
++}
++
++#define atomic_dec_and_lock(atomic, lock) \
++ atomic_dec_and_spin_lock(atomic, lock)
++
++#endif
+Index: linux-stable/kernel/Makefile
+===================================================================
+--- linux-stable.orig/kernel/Makefile
++++ linux-stable/kernel/Makefile
+@@ -7,8 +7,8 @@ obj-y = fork.o exec_domain.o panic.o
+ sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
+ signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
+ rcupdate.o extable.o params.o posix-timers.o \
+- kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
+- hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
++ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o \
++ hrtimer.o nsproxy.o srcu.o semaphore.o \
+ notifier.o ksysfs.o cred.o \
+ async.o range.o groups.o lglock.o
+
+@@ -32,7 +32,11 @@ obj-$(CONFIG_FREEZER) += freezer.o
+ obj-$(CONFIG_PROFILING) += profile.o
+ obj-$(CONFIG_STACKTRACE) += stacktrace.o
+ obj-y += time/
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
++obj-y += mutex.o
+ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
++obj-y += rwsem.o
++endif
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+@@ -44,6 +48,7 @@ endif
+ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+ obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
+ obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
+ obj-$(CONFIG_SMP) += smp.o
+ obj-$(CONFIG_SMP) += smpboot.o
+Index: linux-stable/kernel/rt.c
+===================================================================
+--- /dev/null
++++ linux-stable/kernel/rt.c
+@@ -0,0 +1,442 @@
++/*
++ * kernel/rt.c
++ *
++ * Real-Time Preemption Support
++ *
++ * started by Ingo Molnar:
++ *
++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ *
++ * historic credit for proving that Linux spinlocks can be implemented via
++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
++ * and others) who prototyped it on 2.4 and did lots of comparative
++ * research and analysis; TimeSys, for proving that you can implement a
++ * fully preemptible kernel via the use of IRQ threading and mutexes;
++ * Bill Huey for persuasively arguing on lkml that the mutex model is the
++ * right one; and to MontaVista, who ported pmutexes to 2.6.
++ *
++ * This code is a from-scratch implementation and is not based on pmutexes,
++ * but the idea of converting spinlocks to mutexes is used here too.
++ *
++ * lock debugging, locking tree, deadlock detection:
++ *
++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
++ * Released under the General Public License (GPL).
++ *
++ * Includes portions of the generic R/W semaphore implementation from:
++ *
++ * Copyright (c) 2001 David Howells (dhowells@redhat.com).
++ * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
++ * - Derived also from comments by Linus
++ *
++ * Pending ownership of locks and ownership stealing:
++ *
++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
++ *
++ * (also by Steven Rostedt)
++ * - Converted single pi_lock to individual task locks.
++ *
++ * By Esben Nielsen:
++ * Doing priority inheritance with help of the scheduler.
++ *
++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
++ * - major rework based on Esben Nielsen's initial patch
++ * - replaced thread_info references by task_struct refs
++ * - removed task->pending_owner dependency
++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks
++ * in the scheduler return path as discussed with Steven Rostedt
++ *
++ * Copyright (C) 2006, Kihon Technologies Inc.
++ * Steven Rostedt <rostedt@goodmis.org>
++ * - debugged and patched Thomas Gleixner's rework.
++ * - added back the cmpxchg to the rework.
++ * - turned atomic require back on for SMP.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/rtmutex.h>
++#include <linux/sched.h>
++#include <linux/delay.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/syscalls.h>
++#include <linux/interrupt.h>
++#include <linux/plist.h>
++#include <linux/fs.h>
++#include <linux/futex.h>
++#include <linux/hrtimer.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * struct mutex functions
++ */
++void __mutex_do_init(struct mutex *mutex, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
++ lockdep_init_map(&mutex->dep_map, name, key, 0);
++#endif
++ mutex->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__mutex_do_init);
++
++void __lockfunc _mutex_lock(struct mutex *lock)
++{
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock);
++
++int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible);
++
++int __lockfunc _mutex_lock_killable(struct mutex *lock)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&lock->lock, 0);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
++{
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nested);
++
++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
++{
++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_lock_nest_lock);
++
++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ ret = rt_mutex_lock_interruptible(&lock->lock, 0);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
++
++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
++{
++ int ret;
++
++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ ret = rt_mutex_lock_killable(&lock->lock, 0);
++ if (ret)
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_lock_killable_nested);
++#endif
++
++int __lockfunc _mutex_trylock(struct mutex *lock)
++{
++ int ret = rt_mutex_trylock(&lock->lock);
++
++ if (ret)
++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++
++ return ret;
++}
++EXPORT_SYMBOL(_mutex_trylock);
++
++void __lockfunc _mutex_unlock(struct mutex *lock)
++{
++ mutex_release(&lock->dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&lock->lock);
++}
++EXPORT_SYMBOL(_mutex_unlock);
++
++/*
++ * rwlock_t functions
++ */
++int __lockfunc rt_write_trylock(rwlock_t *rwlock)
++{
++ int ret = rt_mutex_trylock(&rwlock->lock);
++
++ migrate_disable();
++ if (ret)
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock);
++
++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
++{
++ int ret;
++
++ *flags = 0;
++ migrate_disable();
++ ret = rt_write_trylock(rwlock);
++ if (!ret)
++ migrate_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_write_trylock_irqsave);
++
++int __lockfunc rt_read_trylock(rwlock_t *rwlock)
++{
++ struct rt_mutex *lock = &rwlock->lock;
++ int ret = 1;
++
++ /*
++ * recursive read locks succeed when current owns the lock,
++ * but not when read_depth == 0 which means that the lock is
++ * write locked.
++ */
++ migrate_disable();
++ if (rt_mutex_owner(lock) != current)
++ ret = rt_mutex_trylock(lock);
++ else if (!rwlock->read_depth)
++ ret = 0;
++
++ if (ret) {
++ rwlock->read_depth++;
++ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
++ } else
++ migrate_enable();
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_read_trylock);
++
++void __lockfunc rt_write_lock(rwlock_t *rwlock)
++{
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
++ __rt_spin_lock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_write_lock);
++
++void __lockfunc rt_read_lock(rwlock_t *rwlock)
++{
++ struct rt_mutex *lock = &rwlock->lock;
++
++ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
++
++ /*
++ * recursive read locks succeed when current owns the lock
++ */
++ if (rt_mutex_owner(lock) != current)
++ __rt_spin_lock(lock);
++ rwlock->read_depth++;
++}
++
++EXPORT_SYMBOL(rt_read_lock);
++
++void __lockfunc rt_write_unlock(rwlock_t *rwlock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++ __rt_spin_unlock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_write_unlock);
++
++void __lockfunc rt_read_unlock(rwlock_t *rwlock)
++{
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
++
++ /* Release the lock only when read_depth is down to 0 */
++ if (--rwlock->read_depth == 0)
++ __rt_spin_unlock(&rwlock->lock);
++}
++EXPORT_SYMBOL(rt_read_unlock);
++
++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
++{
++ rt_write_lock(rwlock);
++
++ return 0;
++}
++EXPORT_SYMBOL(rt_write_lock_irqsave);
++
++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
++{
++ rt_read_lock(rwlock);
++
++ return 0;
++}
++EXPORT_SYMBOL(rt_read_lock_irqsave);
++
++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
++ lockdep_init_map(&rwlock->dep_map, name, key, 0);
++#endif
++ rwlock->lock.save_state = 1;
++ rwlock->read_depth = 0;
++}
++EXPORT_SYMBOL(__rt_rwlock_init);
++
++/*
++ * rw_semaphores
++ */
++
++void rt_up_write(struct rw_semaphore *rwsem)
++{
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ rt_mutex_unlock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_up_write);
++
++void rt_up_read(struct rw_semaphore *rwsem)
++{
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
++ if (--rwsem->read_depth == 0)
++ rt_mutex_unlock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_up_read);
++
++/*
++ * downgrade a write lock into a read lock
++ * - just wake up any readers at the front of the queue
++ */
++void rt_downgrade_write(struct rw_semaphore *rwsem)
++{
++ BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
++ rwsem->read_depth = 1;
++}
++EXPORT_SYMBOL(rt_downgrade_write);
++
++int rt_down_write_trylock(struct rw_semaphore *rwsem)
++{
++ int ret = rt_mutex_trylock(&rwsem->lock);
++
++ if (ret)
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_write_trylock);
++
++void rt_down_write(struct rw_semaphore *rwsem)
++{
++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write);
++
++void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
++ rt_mutex_lock(&rwsem->lock);
++}
++EXPORT_SYMBOL(rt_down_write_nested);
++
++int rt_down_read_trylock(struct rw_semaphore *rwsem)
++{
++ struct rt_mutex *lock = &rwsem->lock;
++ int ret = 1;
++
++ /*
++ * recursive read locks succeed when current owns the rwsem,
++ * but not when read_depth == 0 which means that the rwsem is
++ * write locked.
++ */
++ if (rt_mutex_owner(lock) != current)
++ ret = rt_mutex_trylock(&rwsem->lock);
++ else if (!rwsem->read_depth)
++ ret = 0;
++
++ if (ret) {
++ rwsem->read_depth++;
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(rt_down_read_trylock);
++
++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
++{
++ struct rt_mutex *lock = &rwsem->lock;
++
++ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
++
++ if (rt_mutex_owner(lock) != current)
++ rt_mutex_lock(&rwsem->lock);
++ rwsem->read_depth++;
++}
++
++void rt_down_read(struct rw_semaphore *rwsem)
++{
++ __rt_down_read(rwsem, 0);
++}
++EXPORT_SYMBOL(rt_down_read);
++
++void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
++{
++ __rt_down_read(rwsem, subclass);
++}
++EXPORT_SYMBOL(rt_down_read_nested);
++
++void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
++ lockdep_init_map(&rwsem->dep_map, name, key, 0);
++#endif
++ rwsem->read_depth = 0;
++ rwsem->lock.save_state = 0;
++}
++EXPORT_SYMBOL(__rt_rwsem_init);
++
++/**
++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
++ * @cnt: the atomic which we are to dec
++ * @lock: the mutex to return holding if we dec to 0
++ *
++ * return true and hold lock if we dec to 0, return false otherwise
++ */
++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
++{
++ /* dec if we can't possibly hit 0 */
++ if (atomic_add_unless(cnt, -1, 1))
++ return 0;
++ /* we might hit 0, so take the lock */
++ mutex_lock(lock);
++ if (!atomic_dec_and_test(cnt)) {
++ /* when we actually did the dec, we didn't hit 0 */
++ mutex_unlock(lock);
++ return 0;
++ }
++ /* we hit 0, and we hold the lock */
++ return 1;
++}
++EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+Index: linux-stable/kernel/spinlock.c
+===================================================================
+--- linux-stable.orig/kernel/spinlock.c
++++ linux-stable/kernel/spinlock.c
+@@ -110,8 +110,11 @@ void __lockfunc __raw_##op##_lock_bh(loc
+ * __[spin|read|write]_lock_bh()
+ */
+ BUILD_LOCK_OPS(spin, raw_spinlock);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ BUILD_LOCK_OPS(read, rwlock);
+ BUILD_LOCK_OPS(write, rwlock);
++#endif
+
+ #endif
+
+@@ -195,6 +198,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_
+ EXPORT_SYMBOL(_raw_spin_unlock_bh);
+ #endif
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++
+ #ifndef CONFIG_INLINE_READ_TRYLOCK
+ int __lockfunc _raw_read_trylock(rwlock_t *lock)
+ {
+@@ -339,6 +344,8 @@ void __lockfunc _raw_write_unlock_bh(rwl
+ EXPORT_SYMBOL(_raw_write_unlock_bh);
+ #endif
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+ void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
+Index: linux-stable/lib/spinlock_debug.c
+===================================================================
+--- linux-stable.orig/lib/spinlock_debug.c
++++ linux-stable/lib/spinlock_debug.c
+@@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t
+
+ EXPORT_SYMBOL(__raw_spin_lock_init);
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key)
+ {
+@@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const
+ }
+
+ EXPORT_SYMBOL(__rwlock_init);
++#endif
+
+ static void spin_dump(raw_spinlock_t *lock, const char *msg)
+ {
+@@ -155,6 +157,7 @@ void do_raw_spin_unlock(raw_spinlock_t *
+ arch_spin_unlock(&lock->raw_lock);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static void rwlock_bug(rwlock_t *lock, const char *msg)
+ {
+ if (!debug_locks_off())
+@@ -296,3 +299,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+ debug_write_unlock(lock);
+ arch_write_unlock(&lock->raw_lock);
+ }
++
++#endif
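
Taken together, the hunks above make spinlock_t and rwlock_t sleeping, rtmutex-backed locks when PREEMPT_RT_FULL is enabled while keeping the caller-visible API: spin_lock() expands to migrate_disable() plus rt_spin_lock(), and spin_unlock() to the reverse pair. A minimal sketch of an unchanged caller under this mapping (my_lock and my_count are invented names, not part of the patch):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* rtmutex-backed when PREEMPT_RT_FULL=y */
static unsigned long my_count;

static void my_count_inc(void)
{
	spin_lock(&my_lock);	/* RT: migrate_disable() + rt_spin_lock(), may sleep */
	my_count++;
	spin_unlock(&my_lock);	/* RT: rt_spin_unlock() + migrate_enable() */
}

As spinlock_rt.h above shows, spin_lock_irqsave() on RT no longer disables hardware interrupts (flags is forced to 0), so code that genuinely needs interrupt exclusion is expected to stay on raw_spinlock_t, which keeps the original semantics.
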
diff --git a/rt-add-rt-spinlock-to-headers.patch b/rt-add-rt-spinlock-to-headers.patch
new file mode 100644
index 0000000..25de953
--- /dev/null
+++ b/rt-add-rt-spinlock-to-headers.patch
@@ -0,0 +1,124 @@
+Subject: rt-add-rt-spinlocks.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 29 Jun 2011 19:43:35 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rwlock_types_rt.h | 33 +++++++++++++++++++++++++
+ include/linux/spinlock_types.h | 11 ++++++--
+ include/linux/spinlock_types_rt.h | 49 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 90 insertions(+), 3 deletions(-)
+
+Index: linux-stable/include/linux/rwlock_types_rt.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/linux/rwlock_types_rt.h
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_RWLOCK_TYPES_RT_H
++#define __LINUX_RWLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * rwlocks - rtmutex which allows single reader recursion
++ */
++typedef struct {
++ struct rt_mutex lock;
++ int read_depth;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} rwlock_t;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define RW_DEP_MAP_INIT(lockname)
++#endif
++
++#define __RW_LOCK_UNLOCKED(name) \
++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \
++ RW_DEP_MAP_INIT(name) }
++
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
++
++#endif
+Index: linux-stable/include/linux/spinlock_types.h
+===================================================================
+--- linux-stable.orig/include/linux/spinlock_types.h
++++ linux-stable/include/linux/spinlock_types.h
+@@ -11,8 +11,13 @@
+
+ #include <linux/spinlock_types_raw.h>
+
+-#include <linux/spinlock_types_nort.h>
+-
+-#include <linux/rwlock_types.h>
++#ifndef CONFIG_PREEMPT_RT_FULL
++# include <linux/spinlock_types_nort.h>
++# include <linux/rwlock_types.h>
++#else
++# include <linux/rtmutex.h>
++# include <linux/spinlock_types_rt.h>
++# include <linux/rwlock_types_rt.h>
++#endif
+
+ #endif /* __LINUX_SPINLOCK_TYPES_H */
+Index: linux-stable/include/linux/spinlock_types_rt.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/linux/spinlock_types_rt.h
+@@ -0,0 +1,49 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RT_H
++#define __LINUX_SPINLOCK_TYPES_RT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
++ */
++typedef struct spinlock {
++ struct rt_mutex lock;
++ unsigned int break_lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} spinlock_t;
++
++#ifdef CONFIG_DEBUG_RT_MUTEXES
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ .file = __FILE__, \
++ .line = __LINE__ , \
++ }
++#else
++# define __RT_SPIN_INITIALIZER(name) \
++ { \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
++ .save_state = 1, \
++ }
++#endif
++
++/*
++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
++*/
++
++#define __SPIN_LOCK_UNLOCKED(name) \
++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \
++ SPIN_DEP_MAP_INIT(name) }
++
++#define __DEFINE_SPINLOCK(name) \
++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
++
++#define DEFINE_SPINLOCK(name) \
++ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
++
++#endif
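
As the comment in rwlock_types_rt.h notes, the RT rwlock is an rtmutex that allows single-reader recursion: read_depth counts recursive read acquisitions by the owning task, while unrelated readers serialize on the mutex (see rt_read_lock()/rt_read_unlock() in kernel/rt.c above). A sketch of the recursion case, with my_rwlock an invented name:

static DEFINE_RWLOCK(my_rwlock);

static void my_reader(void)
{
	read_lock(&my_rwlock);		/* takes the rtmutex, read_depth = 1 */
	read_lock(&my_rwlock);		/* same owner: only read_depth++ */
	read_unlock(&my_rwlock);	/* read_depth back to 1 */
	read_unlock(&my_rwlock);	/* read_depth = 0, rtmutex released */
}
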
diff --git a/rt-add-rt-to-mutex-headers.patch b/rt-add-rt-to-mutex-headers.patch
new file mode 100644
index 0000000..2370949
--- /dev/null
+++ b/rt-add-rt-to-mutex-headers.patch
@@ -0,0 +1,145 @@
+Subject: rt-add-rt-to-mutex-headers.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 29 Jun 2011 20:56:22 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/mutex.h | 21 +++++++----
+ include/linux/mutex_rt.h | 84 +++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 98 insertions(+), 7 deletions(-)
+
+Index: linux-stable/include/linux/mutex.h
+===================================================================
+--- linux-stable.orig/include/linux/mutex.h
++++ linux-stable/include/linux/mutex.h
+@@ -17,6 +17,17 @@
+
+ #include <linux/atomic.h>
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
++ , .dep_map = { .name = #lockname }
++#else
++# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
++#endif
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++# include <linux/mutex_rt.h>
++#else
++
+ /*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+@@ -95,13 +106,6 @@ do { \
+ static inline void mutex_destroy(struct mutex *lock) {}
+ #endif
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+- , .dep_map = { .name = #lockname }
+-#else
+-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+-#endif
+-
+ #define __MUTEX_INITIALIZER(lockname) \
+ { .count = ATOMIC_INIT(1) \
+ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+@@ -167,6 +171,9 @@ extern int __must_check mutex_lock_killa
+ */
+ extern int mutex_trylock(struct mutex *lock);
+ extern void mutex_unlock(struct mutex *lock);
++
++#endif /* !PREEMPT_RT_FULL */
++
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
+ #ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
+Index: linux-stable/include/linux/mutex_rt.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/linux/mutex_rt.h
+@@ -0,0 +1,84 @@
++#ifndef __LINUX_MUTEX_RT_H
++#define __LINUX_MUTEX_RT_H
++
++#ifndef __LINUX_MUTEX_H
++#error "Please include mutex.h"
++#endif
++
++#include <linux/rtmutex.h>
++
++/* FIXME: Just for __lockfunc */
++#include <linux/spinlock.h>
++
++struct mutex {
++ struct rt_mutex lock;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __MUTEX_INITIALIZER(mutexname) \
++ { \
++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \
++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \
++ }
++
++#define DEFINE_MUTEX(mutexname) \
++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
++
++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
++extern void __lockfunc _mutex_lock(struct mutex *lock);
++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
++extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
++extern int __lockfunc _mutex_trylock(struct mutex *lock);
++extern void __lockfunc _mutex_unlock(struct mutex *lock);
++
++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock)
++#define mutex_lock(l) _mutex_lock(l)
++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l)
++#define mutex_lock_killable(l) _mutex_lock_killable(l)
++#define mutex_trylock(l) _mutex_trylock(l)
++#define mutex_unlock(l) _mutex_unlock(l)
++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible_nested(l, s)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable_nested(l, s)
++
++# define mutex_lock_nest_lock(lock, nest_lock) \
++do { \
++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
++} while (0)
++
++#else
++# define mutex_lock_nested(l, s) _mutex_lock(l)
++# define mutex_lock_interruptible_nested(l, s) \
++ _mutex_lock_interruptible(l)
++# define mutex_lock_killable_nested(l, s) \
++ _mutex_lock_killable(l)
++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
++#endif
++
++# define mutex_init(mutex) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), #mutex, &__key); \
++} while (0)
++
++# define __mutex_init(mutex, name, key) \
++do { \
++ rt_mutex_init(&(mutex)->lock); \
++ __mutex_do_init((mutex), name, key); \
++} while (0)
++
++#endif
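
With mutex_rt.h in place, ordinary mutex users compile unchanged on RT: struct mutex wraps an rt_mutex and the mutex_*() calls resolve to the _mutex_*() wrappers declared above (implemented in kernel/rt.c). A sketch under invented names (my_mutex, my_state):

#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_mutex);	/* uses __RT_MUTEX_INITIALIZER on RT */
static int my_state;

static int my_state_set(int val)
{
	if (mutex_lock_interruptible(&my_mutex))	/* _mutex_lock_interruptible() on RT */
		return -ERESTARTSYS;
	my_state = val;
	mutex_unlock(&my_mutex);	/* _mutex_unlock() -> rt_mutex_unlock() */
	return 0;
}
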
diff --git a/rt-disable-rt-group-sched.patch b/rt-disable-rt-group-sched.patch
new file mode 100644
index 0000000..418dd9d
--- /dev/null
+++ b/rt-disable-rt-group-sched.patch
@@ -0,0 +1,29 @@
+From f9e7eb3419db82245b3396074c137b687b42df06 Mon Sep 17 00:00:00 2001
+From: Carsten Emde <C.Emde@osadl.org>
+Date: Wed, 11 Jul 2012 22:05:18 +0000
+Subject: Disable RT_GROUP_SCHED in PREEMPT_RT_FULL
+
+Strange CPU stalls have been observed in RT when RT_GROUP_SCHED
+was configured.
+
+Disable it for now.
+
+Signed-off-by: Carsten Emde <C.Emde@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ init/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/init/Kconfig
+===================================================================
+--- linux-stable.orig/init/Kconfig
++++ linux-stable/init/Kconfig
+@@ -806,6 +806,7 @@ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on CGROUP_SCHED
++ depends on !PREEMPT_RT_FULL
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
diff --git a/rt-introduce-cpu-chill.patch b/rt-introduce-cpu-chill.patch
new file mode 100644
index 0000000..1fa2818
--- /dev/null
+++ b/rt-introduce-cpu-chill.patch
@@ -0,0 +1,30 @@
+Subject: rt: Introduce cpu_chill()
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 07 Mar 2012 20:51:03 +0100
+
+Retry loops on RT might loop forever when the modifying side was
+preempted. Add cpu_chill() to replace cpu_relax(). cpu_chill()
+defaults to cpu_relax() on non-RT. On RT it puts the looping task to
+sleep for a tick so the preempted task can make progress.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ include/linux/delay.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+Index: linux-stable/include/linux/delay.h
+===================================================================
+--- linux-stable.orig/include/linux/delay.h
++++ linux-stable/include/linux/delay.h
+@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int s
+ msleep(seconds * 1000);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define cpu_chill() msleep(1)
++#else
++# define cpu_chill() cpu_relax()
++#endif
++
+ #endif /* defined(_LINUX_DELAY_H) */
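
A sketch of the intended call site (the two-lock retry loop is invented for illustration, not taken from a specific patch): wherever such a loop previously spun with cpu_relax(), cpu_chill() lets a preempted lock holder run on RT instead of livelocking:

#include <linux/delay.h>
#include <linux/spinlock.h>

static void my_lock_both(spinlock_t *a, spinlock_t *b)
{
	for (;;) {
		spin_lock(a);
		if (spin_trylock(b))
			return;		/* both locks held */
		spin_unlock(a);
		cpu_chill();		/* msleep(1) on RT, cpu_relax() otherwise */
	}
}
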
diff --git a/rt-local-irq-lock.patch b/rt-local-irq-lock.patch
new file mode 100644
index 0000000..b33ccec
--- /dev/null
+++ b/rt-local-irq-lock.patch
@@ -0,0 +1,244 @@
+Subject: rt-local-irq-lock.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 20 Jun 2011 09:03:47 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/locallock.h | 230 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 230 insertions(+)
+
+Index: linux-stable/include/linux/locallock.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/linux/locallock.h
+@@ -0,0 +1,230 @@
++#ifndef _LINUX_LOCALLOCK_H
++#define _LINUX_LOCALLOCK_H
++
++#include <linux/spinlock.h>
++
++#ifdef CONFIG_PREEMPT_RT_BASE
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define LL_WARN(cond) WARN_ON(cond)
++#else
++# define LL_WARN(cond) do { } while (0)
++#endif
++
++/*
++ * per cpu lock based substitute for local_irq_*()
++ */
++struct local_irq_lock {
++ spinlock_t lock;
++ struct task_struct *owner;
++ int nestcnt;
++ unsigned long flags;
++};
++
++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \
++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \
++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
++
++#define local_irq_lock_init(lvar) \
++ do { \
++ int __cpu; \
++ for_each_possible_cpu(__cpu) \
++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
++ } while (0)
++
++static inline void __local_lock(struct local_irq_lock *lv)
++{
++ if (lv->owner != current) {
++ spin_lock(&lv->lock);
++ LL_WARN(lv->owner);
++ LL_WARN(lv->nestcnt);
++ lv->owner = current;
++ }
++ lv->nestcnt++;
++}
++
++#define local_lock(lvar) \
++ do { __local_lock(&get_local_var(lvar)); } while (0)
++
++static inline int __local_trylock(struct local_irq_lock *lv)
++{
++ if (lv->owner != current && spin_trylock(&lv->lock)) {
++ LL_WARN(lv->owner);
++ LL_WARN(lv->nestcnt);
++ lv->owner = current;
++ lv->nestcnt = 1;
++ return 1;
++ }
++ return 0;
++}
++
++#define local_trylock(lvar) \
++ ({ \
++ int __locked; \
++ __locked = __local_trylock(&get_local_var(lvar)); \
++ if (!__locked) \
++ put_local_var(lvar); \
++ __locked; \
++ })
++
++static inline void __local_unlock(struct local_irq_lock *lv)
++{
++ LL_WARN(lv->nestcnt == 0);
++ LL_WARN(lv->owner != current);
++ if (--lv->nestcnt)
++ return;
++
++ lv->owner = NULL;
++ spin_unlock(&lv->lock);
++}
++
++#define local_unlock(lvar) \
++ do { \
++ __local_unlock(&__get_cpu_var(lvar)); \
++ put_local_var(lvar); \
++ } while (0)
++
++static inline void __local_lock_irq(struct local_irq_lock *lv)
++{
++ spin_lock_irqsave(&lv->lock, lv->flags);
++ LL_WARN(lv->owner);
++ LL_WARN(lv->nestcnt);
++ lv->owner = current;
++ lv->nestcnt = 1;
++}
++
++#define local_lock_irq(lvar) \
++ do { __local_lock_irq(&get_local_var(lvar)); } while (0)
++
++static inline void __local_unlock_irq(struct local_irq_lock *lv)
++{
++ LL_WARN(!lv->nestcnt);
++ LL_WARN(lv->owner != current);
++ lv->owner = NULL;
++ lv->nestcnt = 0;
++ spin_unlock_irq(&lv->lock);
++}
++
++#define local_unlock_irq(lvar) \
++ do { \
++ __local_unlock_irq(&__get_cpu_var(lvar)); \
++ put_local_var(lvar); \
++ } while (0)
++
++static inline int __local_lock_irqsave(struct local_irq_lock *lv)
++{
++ if (lv->owner != current) {
++ __local_lock_irq(lv);
++ return 0;
++ } else {
++ lv->nestcnt++;
++ return 1;
++ }
++}
++
++#define local_lock_irqsave(lvar, _flags) \
++ do { \
++ if (__local_lock_irqsave(&get_local_var(lvar))) \
++ put_local_var(lvar); \
++ _flags = __get_cpu_var(lvar).flags; \
++ } while (0)
++
++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
++ unsigned long flags)
++{
++ LL_WARN(!lv->nestcnt);
++ LL_WARN(lv->owner != current);
++ if (--lv->nestcnt)
++ return 0;
++
++ lv->owner = NULL;
++ spin_unlock_irqrestore(&lv->lock, lv->flags);
++ return 1;
++}
++
++#define local_unlock_irqrestore(lvar, flags) \
++ do { \
++ if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \
++ put_local_var(lvar); \
++ } while (0)
++
++#define local_spin_trylock_irq(lvar, lock) \
++ ({ \
++ int __locked; \
++ local_lock_irq(lvar); \
++ __locked = spin_trylock(lock); \
++ if (!__locked) \
++ local_unlock_irq(lvar); \
++ __locked; \
++ })
++
++#define local_spin_lock_irq(lvar, lock) \
++ do { \
++ local_lock_irq(lvar); \
++ spin_lock(lock); \
++ } while (0)
++
++#define local_spin_unlock_irq(lvar, lock) \
++ do { \
++ spin_unlock(lock); \
++ local_unlock_irq(lvar); \
++ } while (0)
++
++#define local_spin_lock_irqsave(lvar, lock, flags) \
++ do { \
++ local_lock_irqsave(lvar, flags); \
++ spin_lock(lock); \
++ } while (0)
++
++#define local_spin_unlock_irqrestore(lvar, lock, flags) \
++ do { \
++ spin_unlock(lock); \
++ local_unlock_irqrestore(lvar, flags); \
++ } while (0)
++
++#define get_locked_var(lvar, var) \
++ (*({ \
++ local_lock(lvar); \
++ &__get_cpu_var(var); \
++ }))
++
++#define put_locked_var(lvar, var) local_unlock(lvar)
++
++#define local_lock_cpu(lvar) \
++ ({ \
++ local_lock(lvar); \
++ smp_processor_id(); \
++ })
++
++#define local_unlock_cpu(lvar) local_unlock(lvar)
++
++#else /* PREEMPT_RT_BASE */
++
++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar
++
++static inline void local_irq_lock_init(int lvar) { }
++
++#define local_lock(lvar) preempt_disable()
++#define local_unlock(lvar) preempt_enable()
++#define local_lock_irq(lvar) local_irq_disable()
++#define local_unlock_irq(lvar) local_irq_enable()
++#define local_lock_irqsave(lvar, flags) local_irq_save(flags)
++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags)
++
++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock)
++#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock)
++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock)
++#define local_spin_lock_irqsave(lvar, lock, flags) \
++ spin_lock_irqsave(lock, flags)
++#define local_spin_unlock_irqrestore(lvar, lock, flags) \
++ spin_unlock_irqrestore(lock, flags)
++
++#define get_locked_var(lvar, var) get_cpu_var(var)
++#define put_locked_var(lvar, var) put_cpu_var(var)
++
++#define local_lock_cpu(lvar) get_cpu()
++#define local_unlock_cpu(lvar) put_cpu()
++
++#endif
++
++#endif
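
Usage sketch (the per-CPU counter and its names are invented): a local lock replaces a local_irq_save() section around per-CPU data, so the critical section stays preemptible on PREEMPT_RT_BASE while non-RT kernels fall back to the plain local_irq_save()/restore() pair via the #else branch above:

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(my_stats_lock);
static DEFINE_PER_CPU(unsigned long, my_stats);

static void my_stats_inc(void)
{
	unsigned long flags;

	local_lock_irqsave(my_stats_lock, flags);
	__get_cpu_var(my_stats)++;	/* serialized per CPU by the local lock */
	local_unlock_irqrestore(my_stats_lock, flags);
}
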
diff --git a/rt-mutex-add-sleeping-spinlocks-support.patch b/rt-mutex-add-sleeping-spinlocks-support.patch
new file mode 100644
index 0000000..566f529
--- /dev/null
+++ b/rt-mutex-add-sleeping-spinlocks-support.patch
@@ -0,0 +1,627 @@
+Subject: rt-mutex-add-sleeping-spinlocks-support.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 10 Jun 2011 11:21:25 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rtmutex.h | 35 +++-
+ kernel/futex.c | 3
+ kernel/rtmutex.c | 384 +++++++++++++++++++++++++++++++++++++++++++++---
+ kernel/rtmutex_common.h | 9 +
+ 4 files changed, 404 insertions(+), 27 deletions(-)
+
+Index: linux-stable/include/linux/rtmutex.h
+===================================================================
+--- linux-stable.orig/include/linux/rtmutex.h
++++ linux-stable/include/linux/rtmutex.h
+@@ -29,9 +29,10 @@ struct rt_mutex {
+ raw_spinlock_t wait_lock;
+ struct plist_head wait_list;
+ struct task_struct *owner;
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+ int save_state;
+- const char *name, *file;
++#ifdef CONFIG_DEBUG_RT_MUTEXES
++ const char *file;
++ const char *name;
+ int line;
+ void *magic;
+ #endif
+@@ -56,19 +57,39 @@ struct hrtimer_sleeper;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ , .name = #mutexname, .file = __FILE__, .line = __LINE__
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
++
++# define rt_mutex_init(mutex) \
++ do { \
++ raw_spin_lock_init(&(mutex)->wait_lock); \
++ __rt_mutex_init(mutex, #mutex); \
++ } while (0)
++
+ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+ #else
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
++
++# define rt_mutex_init(mutex) \
++ do { \
++ raw_spin_lock_init(&(mutex)->wait_lock); \
++ __rt_mutex_init(mutex, #mutex); \
++ } while (0)
++
+ # define rt_mutex_debug_task_free(t) do { } while (0)
+ #endif
+
+-#define __RT_MUTEX_INITIALIZER(mutexname) \
+- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \
+ , .owner = NULL \
+- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
++
++
++#define __RT_MUTEX_INITIALIZER(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
++
++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ , .save_state = 1 }
+
+ #define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+Index: linux-stable/kernel/futex.c
+===================================================================
+--- linux-stable.orig/kernel/futex.c
++++ linux-stable/kernel/futex.c
+@@ -2302,8 +2302,7 @@ static int futex_wait_requeue_pi(u32 __u
+ * The waiter is allocated on our stack, manipulated by the requeue
+ * code while we sleep on uaddr.
+ */
+- debug_rt_mutex_init_waiter(&rt_waiter);
+- rt_waiter.task = NULL;
++ rt_mutex_init_waiter(&rt_waiter, false);
+
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
+ if (unlikely(ret != 0))
+Index: linux-stable/kernel/rtmutex.c
+===================================================================
+--- linux-stable.orig/kernel/rtmutex.c
++++ linux-stable/kernel/rtmutex.c
+@@ -8,6 +8,12 @@
+ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
+ * Copyright (C) 2006 Esben Nielsen
+ *
++ * Adaptive Spinlocks:
++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
++ * and Peter Morreale,
++ * Adaptive Spinlocks simplification:
++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
++ *
+ * See Documentation/rt-mutex-design.txt for details.
+ */
+ #include <linux/spinlock.h>
+@@ -96,6 +102,12 @@ static inline void mark_rt_mutex_waiters
+ }
+ #endif
+
++static inline void init_lists(struct rt_mutex *lock)
++{
++ if (unlikely(!lock->wait_list.node_list.prev))
++ plist_head_init(&lock->wait_list);
++}
++
+ /*
+ * Calculate task priority from the waiter list priority
+ *
+@@ -142,6 +154,14 @@ static void rt_mutex_adjust_prio(struct
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ }
+
++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
++{
++ if (waiter->savestate)
++ wake_up_lock_sleeper(waiter->task);
++ else
++ wake_up_process(waiter->task);
++}
++
+ /*
+ * Max number of times we'll walk the boosting chain:
+ */
+@@ -253,13 +273,15 @@ static int rt_mutex_adjust_prio_chain(st
+ /* Release the task */
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ if (!rt_mutex_owner(lock)) {
++ struct rt_mutex_waiter *lock_top_waiter;
++
+ /*
+ * If the requeue above changed the top waiter, then we need
+ * to wake the new top waiter up to try to get the lock.
+ */
+-
+- if (top_waiter != rt_mutex_top_waiter(lock))
+- wake_up_process(rt_mutex_top_waiter(lock)->task);
++ lock_top_waiter = rt_mutex_top_waiter(lock);
++ if (top_waiter != lock_top_waiter)
++ rt_mutex_wake_waiter(lock_top_waiter);
+ raw_spin_unlock(&lock->wait_lock);
+ goto out_put_task;
+ }
+@@ -304,6 +326,25 @@ static int rt_mutex_adjust_prio_chain(st
+ return ret;
+ }
+
++
++#define STEAL_NORMAL 0
++#define STEAL_LATERAL 1
++
++/*
++ * Note that RT tasks are excluded from lateral-steals to prevent the
++ * introduction of an unbounded latency
++ */
++static inline int lock_is_stealable(struct task_struct *task,
++ struct task_struct *pendowner, int mode)
++{
++ if (mode == STEAL_NORMAL || rt_task(task)) {
++ if (task->prio >= pendowner->prio)
++ return 0;
++ } else if (task->prio > pendowner->prio)
++ return 0;
++ return 1;
++}
++
+ /*
+ * Try to take an rt-mutex
+ *
+@@ -313,8 +354,9 @@ static int rt_mutex_adjust_prio_chain(st
+ * @task: the task which wants to acquire the lock
+ * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+ */
+-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+- struct rt_mutex_waiter *waiter)
++static int
++__try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
++ struct rt_mutex_waiter *waiter, int mode)
+ {
+ /*
+ * We have to be careful here if the atomic speedups are
+@@ -347,12 +389,14 @@ static int try_to_take_rt_mutex(struct r
+ * 3) it is top waiter
+ */
+ if (rt_mutex_has_waiters(lock)) {
+- if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
+- if (!waiter || waiter != rt_mutex_top_waiter(lock))
+- return 0;
+- }
++ struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
++
++ if (task != pown && !lock_is_stealable(task, pown, mode))
++ return 0;
+ }
+
++ /* We got the lock. */
++
+ if (waiter || rt_mutex_has_waiters(lock)) {
+ unsigned long flags;
+ struct rt_mutex_waiter *top;
+@@ -377,7 +421,6 @@ static int try_to_take_rt_mutex(struct r
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ }
+
+- /* We got the lock. */
+ debug_rt_mutex_lock(lock);
+
+ rt_mutex_set_owner(lock, task);
+@@ -387,6 +430,13 @@ static int try_to_take_rt_mutex(struct r
+ return 1;
+ }
+
++static inline int
++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
++ struct rt_mutex_waiter *waiter)
++{
++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
++}
++
+ /*
+ * Task blocks on lock.
+ *
+@@ -501,7 +551,7 @@ static void wakeup_next_waiter(struct rt
+
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+
+- wake_up_process(waiter->task);
++ rt_mutex_wake_waiter(waiter);
+ }
+
+ /*
+@@ -580,18 +630,315 @@ void rt_mutex_adjust_pi(struct task_stru
+ return;
+ }
+
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+-
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(task);
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * preemptible spin_lock functions:
++ */
++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
++ void (*slowfn)(struct rt_mutex *lock))
++{
++ might_sleep();
++
++ if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
++ rt_mutex_deadlock_account_lock(lock, current);
++ else
++ slowfn(lock);
++}
++
++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
++ void (*slowfn)(struct rt_mutex *lock))
++{
++ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
++ rt_mutex_deadlock_account_unlock(current);
++ else
++ slowfn(lock);
++}
++
++#ifdef CONFIG_SMP
++/*
++ * Note that owner is a speculative pointer and dereferencing relies
++ * on rcu_read_lock() and the check against the lock owner.
++ */
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *owner)
++{
++ int res = 0;
++
++ rcu_read_lock();
++ for (;;) {
++ if (owner != rt_mutex_owner(lock))
++ break;
++ /*
++ * Ensure that owner->on_cpu is dereferenced _after_
++ * checking the above to be valid.
++ */
++ barrier();
++ if (!owner->on_cpu) {
++ res = 1;
++ break;
++ }
++ cpu_relax();
++ }
++ rcu_read_unlock();
++ return res;
++}
++#else
++static int adaptive_wait(struct rt_mutex *lock,
++ struct task_struct *orig_owner)
++{
++ return 1;
++}
++#endif
++
++# define pi_lock(lock) raw_spin_lock_irq(lock)
++# define pi_unlock(lock) raw_spin_unlock_irq(lock)
++
++/*
++ * Slow path lock function spin_lock style: this variant is very
++ * careful not to miss any non-lock wakeups.
++ *
++ * We store the current state under p->pi_lock in p->saved_state and
++ * the try_to_wake_up() code handles this accordingly.
++ */
++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
++{
++ struct task_struct *lock_owner, *self = current;
++ struct rt_mutex_waiter waiter, *top_waiter;
++ int ret;
++
++ rt_mutex_init_waiter(&waiter, true);
++
++ raw_spin_lock(&lock->wait_lock);
++ init_lists(lock);
++
++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
++ raw_spin_unlock(&lock->wait_lock);
++ return;
++ }
++
++ BUG_ON(rt_mutex_owner(lock) == self);
++
++ /*
++ * We save whatever state the task is in and we'll restore it
++ * after acquiring the lock taking real wakeups into account
++ * as well. We are serialized via pi_lock against wakeups. See
++ * try_to_wake_up().
++ */
++ pi_lock(&self->pi_lock);
++ self->saved_state = self->state;
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ pi_unlock(&self->pi_lock);
++
++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
++ BUG_ON(ret);
++
++ for (;;) {
++ /* Try to acquire the lock again. */
++ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
++ break;
++
++ top_waiter = rt_mutex_top_waiter(lock);
++ lock_owner = rt_mutex_owner(lock);
++
++ raw_spin_unlock(&lock->wait_lock);
++
++ debug_rt_mutex_print_deadlock(&waiter);
++
++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
++ schedule_rt_mutex(lock);
++
++ raw_spin_lock(&lock->wait_lock);
++
++ pi_lock(&self->pi_lock);
++ __set_current_state(TASK_UNINTERRUPTIBLE);
++ pi_unlock(&self->pi_lock);
++ }
++
++ /*
++ * Restore the task state to current->saved_state. We set it
++ * to the original state above and the try_to_wake_up() code
++ * has possibly updated it when a real (non-rtmutex) wakeup
++ * happened while we were blocked. Clear saved_state so
++ * try_to_wake_up() does not get confused.
++ */
++ pi_lock(&self->pi_lock);
++ __set_current_state(self->saved_state);
++ self->saved_state = TASK_RUNNING;
++ pi_unlock(&self->pi_lock);
++
++ /*
++ * try_to_take_rt_mutex() sets the waiter bit
++ * unconditionally. We might have to fix that up:
++ */
++ fixup_rt_mutex_waiters(lock);
++
++ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
++ BUG_ON(!plist_node_empty(&waiter.list_entry));
++
++ raw_spin_unlock(&lock->wait_lock);
++
++ debug_rt_mutex_free_waiter(&waiter);
++}
++
++/*
++ * Slow path to release a rt_mutex spin_lock style
++ */
++static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
++{
++ raw_spin_lock(&lock->wait_lock);
++
++ debug_rt_mutex_unlock(lock);
++
++ rt_mutex_deadlock_account_unlock(current);
++
++ if (!rt_mutex_has_waiters(lock)) {
++ lock->owner = NULL;
++ raw_spin_unlock(&lock->wait_lock);
++ return;
++ }
++
++ wakeup_next_waiter(lock);
++
++ raw_spin_unlock(&lock->wait_lock);
++
++	/* Undo pi boosting when necessary */
++ rt_mutex_adjust_prio(current);
++}
++
++void __lockfunc rt_spin_lock(spinlock_t *lock)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock);
++
++void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++}
++EXPORT_SYMBOL(__rt_spin_lock);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock_nested);
++#endif
++
++void __lockfunc rt_spin_unlock(spinlock_t *lock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, 1, _RET_IP_);
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(rt_spin_unlock);
++
++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(__rt_spin_unlock);
++
++/*
++ * Wait for the lock to get unlocked: instead of polling for an unlock
++ * (like raw spinlocks do), we lock and unlock, to force the kernel to
++ * schedule if there's contention:
++ */
++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
++{
++ spin_lock(lock);
++ spin_unlock(lock);
++}
++EXPORT_SYMBOL(rt_spin_unlock_wait);
++
++int __lockfunc rt_spin_trylock(spinlock_t *lock)
++{
++ int ret;
++
++ migrate_disable();
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret)
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock);
++
++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
++{
++ int ret;
++
++ local_bh_disable();
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret) {
++ migrate_disable();
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ } else
++ local_bh_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_bh);
++
++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
++{
++ int ret;
++
++ *flags = 0;
++ migrate_disable();
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret)
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock_irqsave);
++
++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
++{
++ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
++ if (atomic_add_unless(atomic, -1, 1))
++ return 0;
++ migrate_disable();
++ rt_spin_lock(lock);
++ if (atomic_dec_and_test(atomic))
++ return 1;
++ rt_spin_unlock(lock);
++ migrate_enable();
++ return 0;
++}
++EXPORT_SYMBOL(atomic_dec_and_spin_lock);
++
++void
++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held lock:
++ */
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
++ lockdep_init_map(&lock->dep_map, name, key, 0);
++#endif
++}
++EXPORT_SYMBOL(__rt_spin_lock_init);
++
++#endif /* PREEMPT_RT_FULL */
++
+ /**
+ * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * @lock: the rt_mutex to take
+ * @state: the state the task should block in (TASK_INTERRUPTIBLE
+- * or TASK_UNINTERRUPTIBLE)
++ * or TASK_UNINTERRUPTIBLE)
+ * @timeout: the pre-initialized and started timer, or NULL for none
+ * @waiter: the pre-initialized rt_mutex_waiter
+ *
+@@ -647,9 +994,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ struct rt_mutex_waiter waiter;
+ int ret = 0;
+
+- debug_rt_mutex_init_waiter(&waiter);
++ rt_mutex_init_waiter(&waiter, false);
+
+ raw_spin_lock(&lock->wait_lock);
++ init_lists(lock);
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
+@@ -702,6 +1050,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
+ int ret = 0;
+
+ raw_spin_lock(&lock->wait_lock);
++ init_lists(lock);
+
+ if (likely(rt_mutex_owner(lock) != current)) {
+
+@@ -934,12 +1283,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+ void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+ {
+ lock->owner = NULL;
+- raw_spin_lock_init(&lock->wait_lock);
+ plist_head_init(&lock->wait_list);
+
+ debug_rt_mutex_init(lock, name);
+ }
+-EXPORT_SYMBOL_GPL(__rt_mutex_init);
++EXPORT_SYMBOL(__rt_mutex_init);
+
+ /**
+ * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
+@@ -954,7 +1302,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner)
+ {
+- __rt_mutex_init(lock, NULL);
++ rt_mutex_init(lock);
+ debug_rt_mutex_proxy_lock(lock, proxy_owner);
+ rt_mutex_set_owner(lock, proxy_owner);
+ rt_mutex_deadlock_account_lock(lock, proxy_owner);
+Index: linux-stable/kernel/rtmutex_common.h
+===================================================================
+--- linux-stable.orig/kernel/rtmutex_common.h
++++ linux-stable/kernel/rtmutex_common.h
+@@ -49,6 +49,7 @@ struct rt_mutex_waiter {
+ struct plist_node pi_list_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
++ bool savestate;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ unsigned long ip;
+ struct pid *deadlock_task_pid;
+@@ -126,4 +127,12 @@ extern int rt_mutex_finish_proxy_lock(st
+ # include "rtmutex.h"
+ #endif
+
++static inline void
++rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
++{
++ debug_rt_mutex_init_waiter(waiter);
++ waiter->task = NULL;
++ waiter->savestate = savestate;
++}
++
+ #endif
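
One caller-visible piece of the machinery above is atomic_dec_and_spin_lock(), which spinlock_rt.h maps atomic_dec_and_lock() onto, so refcount-drop code keeps its usual shape and only takes the (now sleeping) lock when the count can actually reach zero. A sketch with invented names (struct my_obj, my_list_lock):

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_obj {
	atomic_t		refcnt;
	struct list_head	node;
};

static DEFINE_SPINLOCK(my_list_lock);

static void my_obj_put(struct my_obj *obj)
{
	if (!atomic_dec_and_lock(&obj->refcnt, &my_list_lock))
		return;
	list_del(&obj->node);
	spin_unlock(&my_list_lock);	/* rt_spin_unlock() + migrate_enable() on RT */
	kfree(obj);
}
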
diff --git a/rt-preempt-base-config.patch b/rt-preempt-base-config.patch
new file mode 100644
index 0000000..cbff745
--- /dev/null
+++ b/rt-preempt-base-config.patch
@@ -0,0 +1,51 @@
+Subject: rt-preempt-base-config.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 17 Jun 2011 12:39:57 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/Kconfig.preempt | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+Index: linux-stable/kernel/Kconfig.preempt
+===================================================================
+--- linux-stable.orig/kernel/Kconfig.preempt
++++ linux-stable/kernel/Kconfig.preempt
+@@ -1,3 +1,10 @@
++config PREEMPT
++ bool
++ select PREEMPT_COUNT
++
++config PREEMPT_RT_BASE
++ bool
++ select PREEMPT
+
+ choice
+ prompt "Preemption Model"
+@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY
+
+ Select this if you are building a kernel for a desktop system.
+
+-config PREEMPT
++config PREEMPT__LL
+ bool "Preemptible Kernel (Low-Latency Desktop)"
+- select PREEMPT_COUNT
++ select PREEMPT
+ select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
+ help
+ This option reduces the latency of the kernel by making
+@@ -52,6 +59,14 @@ config PREEMPT
+ embedded system with latency requirements in the milliseconds
+ range.
+
++config PREEMPT_RTB
++ bool "Preemptible Kernel (Basic RT)"
++ select PREEMPT_RT_BASE
++ help
++ This option is basically the same as (Low-Latency Desktop) but
++	  enables changes which are preliminary for the full preemptible
++ RT kernel.
++
+ endchoice
+
+ config PREEMPT_COUNT
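
For reference, a sketch of the configuration fragment that results from choosing the new "Preemptible Kernel (Basic RT)" option; the values simply follow the select chain above (PREEMPT_RTB selects PREEMPT_RT_BASE, which selects PREEMPT, which selects PREEMPT_COUNT) and are not taken from a real .config:

CONFIG_PREEMPT_RTB=y
CONFIG_PREEMPT_RT_BASE=y
CONFIG_PREEMPT=y
CONFIG_PREEMPT_COUNT=y
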
diff --git a/rt-rcutree-warn-fix.patch b/rt-rcutree-warn-fix.patch
new file mode 100644
index 0000000..8b82a75
--- /dev/null
+++ b/rt-rcutree-warn-fix.patch
@@ -0,0 +1,44 @@
+Subject: rt/rcutree: Move misplaced prototype
+From: Ingo Molnar <mingo@elte.hu>
+Date: Wed Dec 14 12:51:28 CET 2011
+
+Fix this warning on x86 defconfig:
+
+ kernel/rcutree.h:433:13: warning: ‘rcu_preempt_qs’ declared ‘static’ but never defined [-Wunused-function]
+
+The #ifdefs and prototypes here are a maze; move it closer to the
+usage site that needs it.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+---
+ kernel/rcutree.c | 2 ++
+ kernel/rcutree.h | 1 -
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+Index: linux-stable/kernel/rcutree.c
+===================================================================
+--- linux-stable.orig/kernel/rcutree.c
++++ linux-stable/kernel/rcutree.c
+@@ -183,6 +183,8 @@ void rcu_sched_qs(int cpu)
+ }
+
+ #ifdef CONFIG_PREEMPT_RT_FULL
++static void rcu_preempt_qs(int cpu);
++
+ void rcu_bh_qs(int cpu)
+ {
+ rcu_preempt_qs(cpu);
+Index: linux-stable/kernel/rcutree.h
+===================================================================
+--- linux-stable.orig/kernel/rcutree.h
++++ linux-stable/kernel/rcutree.h
+@@ -463,7 +463,6 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
+ /* Forward declarations for rcutree_plugin.h */
+ static void rcu_bootup_announce(void);
+ long rcu_batches_completed(void);
+-static void rcu_preempt_qs(int cpu);
+ static void rcu_preempt_note_context_switch(int cpu);
+ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
+ #ifdef CONFIG_HOTPLUG_CPU
diff --git a/rt-rw-lockdep-annotations.patch b/rt-rw-lockdep-annotations.patch
new file mode 100644
index 0000000..5e85861
--- /dev/null
+++ b/rt-rw-lockdep-annotations.patch
@@ -0,0 +1,121 @@
+Subject: rt: rwsem/rwlock: lockdep annotations
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 28 Sep 2012 10:49:42 +0100
+
+rwlocks and rwsems on RT do not allow multiple readers. Annotate the
+lockdep acquire functions accordingly.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ kernel/rt.c | 46 +++++++++++++++++++++++++---------------------
+ 1 file changed, 25 insertions(+), 21 deletions(-)
+
+Index: linux-stable/kernel/rt.c
+===================================================================
+--- linux-stable.orig/kernel/rt.c
++++ linux-stable/kernel/rt.c
+@@ -216,15 +216,17 @@ int __lockfunc rt_read_trylock(rwlock_t
+ * write locked.
+ */
+ migrate_disable();
+- if (rt_mutex_owner(lock) != current)
++ if (rt_mutex_owner(lock) != current) {
+ ret = rt_mutex_trylock(lock);
+- else if (!rwlock->read_depth)
++ if (ret)
++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
++ } else if (!rwlock->read_depth) {
+ ret = 0;
++ }
+
+- if (ret) {
++ if (ret)
+ rwlock->read_depth++;
+- rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+- } else
++ else
+ migrate_enable();
+
+ return ret;
+@@ -242,13 +244,13 @@ void __lockfunc rt_read_lock(rwlock_t *r
+ {
+ struct rt_mutex *lock = &rwlock->lock;
+
+- rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+-
+ /*
+ * recursive read locks succeed when current owns the lock
+ */
+- if (rt_mutex_owner(lock) != current)
++ if (rt_mutex_owner(lock) != current) {
++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(lock);
++ }
+ rwlock->read_depth++;
+ }
+
+@@ -264,11 +266,11 @@ EXPORT_SYMBOL(rt_write_unlock);
+
+ void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+ {
+- rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+-
+ /* Release the lock only when read_depth is down to 0 */
+- if (--rwlock->read_depth == 0)
++ if (--rwlock->read_depth == 0) {
++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+ __rt_spin_unlock(&rwlock->lock);
++ }
+ }
+ EXPORT_SYMBOL(rt_read_unlock);
+
+@@ -315,9 +317,10 @@ EXPORT_SYMBOL(rt_up_write);
+
+ void rt_up_read(struct rw_semaphore *rwsem)
+ {
+- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+- if (--rwsem->read_depth == 0)
++ if (--rwsem->read_depth == 0) {
++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+ rt_mutex_unlock(&rwsem->lock);
++ }
+ }
+ EXPORT_SYMBOL(rt_up_read);
+
+@@ -366,15 +369,16 @@ int rt_down_read_trylock(struct rw_sema
+ * but not when read_depth == 0 which means that the rwsem is
+ * write locked.
+ */
+- if (rt_mutex_owner(lock) != current)
++ if (rt_mutex_owner(lock) != current) {
+ ret = rt_mutex_trylock(&rwsem->lock);
+- else if (!rwsem->read_depth)
++ if (ret)
++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
++ } else if (!rwsem->read_depth) {
+ ret = 0;
++ }
+
+- if (ret) {
++ if (ret)
+ rwsem->read_depth++;
+- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+- }
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_down_read_trylock);
+@@ -383,10 +387,10 @@ static void __rt_down_read(struct rw_sem
+ {
+ struct rt_mutex *lock = &rwsem->lock;
+
+- rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
+-
+- if (rt_mutex_owner(lock) != current)
++ if (rt_mutex_owner(lock) != current) {
++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+ rt_mutex_lock(&rwsem->lock);
++ }
+ rwsem->read_depth++;
+ }
+
diff --git a/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch b/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch
new file mode 100644
index 0000000..a5ca035
--- /dev/null
+++ b/rt-sched-do-not-compare-cpu-masks-in-scheduler.patch
@@ -0,0 +1,40 @@
+Subject: sched: Do not compare cpu masks in scheduler
+Date: Tue, 27 Sep 2011 08:40:24 -0400
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Clark Williams <williams@redhat.com>
+Link: http://lkml.kernel.org/r/20110927124423.128129033@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/sched/core.c | 14 +++++---------
+ 1 file changed, 5 insertions(+), 9 deletions(-)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -3398,16 +3398,12 @@ static inline void update_migrate_disabl
+ */
+ mask = tsk_cpus_allowed(p);
+
+- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->nr_cpus_allowed = cpumask_weight(mask);
+
+- if (!cpumask_equal(&p->cpus_allowed, mask)) {
+- if (p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, mask);
+- p->nr_cpus_allowed = cpumask_weight(mask);
+-
+- /* Let migrate_enable know to fix things back up */
+- p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+- }
++ /* Let migrate_enable know to fix things back up */
++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
+ }
+
+ void migrate_disable(void)
diff --git a/rt-sched-have-migrate_disable-ignore-bounded-threads.patch b/rt-sched-have-migrate_disable-ignore-bounded-threads.patch
new file mode 100644
index 0000000..405477b
--- /dev/null
+++ b/rt-sched-have-migrate_disable-ignore-bounded-threads.patch
@@ -0,0 +1,70 @@
+Subject: sched: Have migrate_disable ignore bounded threads
+Date: Tue, 27 Sep 2011 08:40:25 -0400
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Clark Williams <williams@redhat.com>
+Link: http://lkml.kernel.org/r/20110927124423.567944215@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/sched/core.c | 23 +++++++++--------------
+ 1 file changed, 9 insertions(+), 14 deletions(-)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -3410,7 +3410,7 @@ void migrate_disable(void)
+ {
+ struct task_struct *p = current;
+
+- if (in_atomic()) {
++ if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+ #endif
+@@ -3441,7 +3441,7 @@ void migrate_enable(void)
+ unsigned long flags;
+ struct rq *rq;
+
+- if (in_atomic()) {
++ if (in_atomic() || p->flags & PF_THREAD_BOUND) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+ #endif
+@@ -3462,26 +3462,21 @@ void migrate_enable(void)
+
+ if (unlikely(migrate_disabled_updated(p))) {
+ /*
+- * See comment in update_migrate_disable() about locking.
++ * Undo whatever update_migrate_disable() did, also see there
++ * about locking.
+ */
+ rq = this_rq();
+ raw_spin_lock_irqsave(&rq->lock, flags);
+- mask = tsk_cpus_allowed(p);
++
+ /*
+ * Clearing migrate_disable causes tsk_cpus_allowed to
+ * show the tasks original cpu affinity.
+ */
+ p->migrate_disable = 0;
+-
+- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+-
+- if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
+- /* Get the mask now that migration is enabled */
+- mask = tsk_cpus_allowed(p);
+- if (p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, mask);
+- p->nr_cpus_allowed = cpumask_weight(mask);
+- }
++ mask = tsk_cpus_allowed(p);
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->nr_cpus_allowed = cpumask_weight(mask);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ } else
+ p->migrate_disable = 0;
diff --git a/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch b/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch
new file mode 100644
index 0000000..49e1e4f
--- /dev/null
+++ b/rt-sched-postpone-actual-migration-disalbe-to-schedule.patch
@@ -0,0 +1,307 @@
+Subject: sched: Postpone actual migration disable to schedule
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Tue, 27 Sep 2011 08:40:23 -0400
+
+The migrate_disable() call can add a bit of overhead to the RT kernel,
+as changing the affinity is expensive to do at every lock encountered.
+Since a running task cannot migrate, the actual disabling of migration
+does not need to occur until the task is about to schedule out.
+
+In most cases, a task that disables migration will enable it again
+before it schedules, so this change improves performance tremendously.
+
+[ Frank Rowand: UP compile fix ]
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Clark Williams <williams@redhat.com>
+Link: http://lkml.kernel.org/r/20110927124422.779693167@goodmis.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/sched/core.c | 251 +++++++++++++++++++++++++++-------------------------
+ 1 file changed, 132 insertions(+), 119 deletions(-)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -3370,6 +3370,135 @@ static inline void schedule_debug(struct
+ schedstat_inc(this_rq(), sched_count);
+ }
+
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
++#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
++#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
++#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
++
++static inline void update_migrate_disable(struct task_struct *p)
++{
++ const struct cpumask *mask;
++
++ if (likely(!p->migrate_disable))
++ return;
++
++ /* Did we already update affinity? */
++ if (unlikely(migrate_disabled_updated(p)))
++ return;
++
++ /*
++ * Since this is always current we can get away with only locking
++ * rq->lock, the ->cpus_allowed value can normally only be changed
++ * while holding both p->pi_lock and rq->lock, but seeing that this
++ * is current, we cannot actually be waking up, so all code that
++ * relies on serialization against p->pi_lock is out of scope.
++ *
++ * Having rq->lock serializes us against things like
++ * set_cpus_allowed_ptr() that can still happen concurrently.
++ */
++ mask = tsk_cpus_allowed(p);
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->nr_cpus_allowed = cpumask_weight(mask);
++
++ /* Let migrate_enable know to fix things back up */
++ p->migrate_disable |= MIGRATE_DISABLE_SET_AFFIN;
++ }
++}
++
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++ preempt_disable();
++ if (p->migrate_disable) {
++ p->migrate_disable++;
++ preempt_enable();
++ return;
++ }
++
++ pin_current_cpu();
++ p->migrate_disable = 1;
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++ const struct cpumask *mask;
++ unsigned long flags;
++ struct rq *rq;
++
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++
++ preempt_disable();
++ if (migrate_disable_count(p) > 1) {
++ p->migrate_disable--;
++ preempt_enable();
++ return;
++ }
++
++ if (unlikely(migrate_disabled_updated(p))) {
++ /*
++ * See comment in update_migrate_disable() about locking.
++ */
++ rq = this_rq();
++ raw_spin_lock_irqsave(&rq->lock, flags);
++ mask = tsk_cpus_allowed(p);
++ /*
++ * Clearing migrate_disable causes tsk_cpus_allowed to
++ * show the tasks original cpu affinity.
++ */
++ p->migrate_disable = 0;
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (unlikely(!cpumask_equal(&p->cpus_allowed, mask))) {
++ /* Get the mask now that migration is enabled */
++ mask = tsk_cpus_allowed(p);
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->nr_cpus_allowed = cpumask_weight(mask);
++ }
++ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ } else
++ p->migrate_disable = 0;
++
++ unpin_current_cpu();
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_enable);
++#else
++static inline void update_migrate_disable(struct task_struct *p) { }
++#define migrate_disabled_updated(p) 0
++#endif
++
+ static void put_prev_task(struct rq *rq, struct task_struct *prev)
+ {
+ if (prev->on_rq || rq->skip_clock_update < 0)
+@@ -3429,6 +3558,8 @@ need_resched:
+
+ raw_spin_lock_irq(&rq->lock);
+
++ update_migrate_disable(prev);
++
+ switch_count = &prev->nivcsw;
+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
+ if (unlikely(signal_pending_state(prev->state, prev))) {
+@@ -5203,7 +5334,7 @@ void __cpuinit init_idle(struct task_str
+ #ifdef CONFIG_SMP
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- if (!__migrate_disabled(p)) {
++ if (!migrate_disabled_updated(p)) {
+ if (p->sched_class && p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, new_mask);
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+@@ -5278,124 +5409,6 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
+-#ifdef CONFIG_PREEMPT_RT_FULL
+-void migrate_disable(void)
+-{
+- struct task_struct *p = current;
+- const struct cpumask *mask;
+- unsigned long flags;
+- struct rq *rq;
+-
+- if (in_atomic()) {
+-#ifdef CONFIG_SCHED_DEBUG
+- p->migrate_disable_atomic++;
+-#endif
+- return;
+- }
+-
+-#ifdef CONFIG_SCHED_DEBUG
+- WARN_ON_ONCE(p->migrate_disable_atomic);
+-#endif
+-
+- preempt_disable();
+- if (p->migrate_disable) {
+- p->migrate_disable++;
+- preempt_enable();
+- return;
+- }
+-
+- pin_current_cpu();
+- if (unlikely(!scheduler_running)) {
+- p->migrate_disable = 1;
+- preempt_enable();
+- return;
+- }
+-
+- /*
+- * Since this is always current we can get away with only locking
+- * rq->lock, the ->cpus_allowed value can normally only be changed
+- * while holding both p->pi_lock and rq->lock, but seeing that this
+- * it current, we cannot actually be waking up, so all code that
+- * relies on serialization against p->pi_lock is out of scope.
+- *
+- * Taking rq->lock serializes us against things like
+- * set_cpus_allowed_ptr() that can still happen concurrently.
+- */
+- rq = this_rq();
+- raw_spin_lock_irqsave(&rq->lock, flags);
+- p->migrate_disable = 1;
+- mask = tsk_cpus_allowed(p);
+-
+- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+-
+- if (!cpumask_equal(&p->cpus_allowed, mask)) {
+- if (p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, mask);
+- p->nr_cpus_allowed = cpumask_weight(mask);
+- }
+- raw_spin_unlock_irqrestore(&rq->lock, flags);
+- preempt_enable();
+-}
+-EXPORT_SYMBOL(migrate_disable);
+-
+-void migrate_enable(void)
+-{
+- struct task_struct *p = current;
+- const struct cpumask *mask;
+- unsigned long flags;
+- struct rq *rq;
+-
+- if (in_atomic()) {
+-#ifdef CONFIG_SCHED_DEBUG
+- p->migrate_disable_atomic--;
+-#endif
+- return;
+- }
+-
+-#ifdef CONFIG_SCHED_DEBUG
+- WARN_ON_ONCE(p->migrate_disable_atomic);
+-#endif
+- WARN_ON_ONCE(p->migrate_disable <= 0);
+-
+- preempt_disable();
+- if (p->migrate_disable > 1) {
+- p->migrate_disable--;
+- preempt_enable();
+- return;
+- }
+-
+- if (unlikely(!scheduler_running)) {
+- p->migrate_disable = 0;
+- unpin_current_cpu();
+- preempt_enable();
+- return;
+- }
+-
+- /*
+- * See comment in migrate_disable().
+- */
+- rq = this_rq();
+- raw_spin_lock_irqsave(&rq->lock, flags);
+- mask = tsk_cpus_allowed(p);
+- p->migrate_disable = 0;
+-
+- WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+-
+- if (!cpumask_equal(&p->cpus_allowed, mask)) {
+- /* Get the mask now that migration is enabled */
+- mask = tsk_cpus_allowed(p);
+- if (p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, mask);
+- p->nr_cpus_allowed = cpumask_weight(mask);
+- }
+-
+- raw_spin_unlock_irqrestore(&rq->lock, flags);
+- unpin_current_cpu();
+- preempt_enable();
+-}
+-EXPORT_SYMBOL(migrate_enable);
+-#endif /* CONFIG_PREEMPT_RT_FULL */
+-
+ /*
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
diff --git a/rt-serial-warn-fix.patch b/rt-serial-warn-fix.patch
new file mode 100644
index 0000000..1dec8a3
--- /dev/null
+++ b/rt-serial-warn-fix.patch
@@ -0,0 +1,54 @@
+Subject: rt: Improve the serial console PASS_LIMIT
+From: Ingo Molnar <mingo@elte.hu>
+Date: Wed Dec 14 13:05:54 CET 2011
+
+Beyond the warning:
+
+ drivers/tty/serial/8250/8250.c:1613:6: warning: unused variable ‘pass_counter’ [-Wunused-variable]
+
+the solution of just looping infinitely was ugly - up the limit to 1 million
+to give it a chance to continue in some really ugly situations.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/tty/serial/8250/8250.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+Index: linux-stable/drivers/tty/serial/8250/8250.c
+===================================================================
+--- linux-stable.orig/drivers/tty/serial/8250/8250.c
++++ linux-stable/drivers/tty/serial/8250/8250.c
+@@ -80,7 +80,16 @@ static unsigned int skip_txen_test; /* f
+ #define DEBUG_INTR(fmt...) do { } while (0)
+ #endif
+
+-#define PASS_LIMIT 512
++/*
++ * On -rt we can have more delays, and legitimately
++ * so - don't drop work spuriously and spam the
++ * syslog:
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define PASS_LIMIT 1000000
++#else
++# define PASS_LIMIT 512
++#endif
+
+ #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+@@ -1549,14 +1558,12 @@ static irqreturn_t serial8250_interrupt(
+
+ l = l->next;
+
+-#ifndef CONFIG_PREEMPT_RT_FULL
+ if (l == i->head && pass_counter++ > PASS_LIMIT) {
+ /* If we hit this, we're dead. */
+ printk_ratelimited(KERN_ERR
+ "serial8250: too much work for irq%d\n", irq);
+ break;
+ }
+-#endif
+ } while (l != end);
+
+ spin_unlock(&i->lock);
diff --git a/rt-tracing-show-padding-as-unsigned-short.patch b/rt-tracing-show-padding-as-unsigned-short.patch
new file mode 100644
index 0000000..4d0434c
--- /dev/null
+++ b/rt-tracing-show-padding-as-unsigned-short.patch
@@ -0,0 +1,48 @@
+Subject: tracing: Show padding as unsigned short
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Wed, 16 Nov 2011 13:19:35 -0500
+
+RT added a two-byte migrate_disable counter to the common trace event
+fields and used two bytes of the existing padding to make the change.
+The structures were all updated correctly, but the display in the event
+format files was not:
+
+cat /debug/tracing/events/sched/sched_switch/format
+
+name: sched_switch
+ID: 51
+format:
+ field:unsigned short common_type; offset:0; size:2; signed:0;
+ field:unsigned char common_flags; offset:2; size:1; signed:0;
+ field:unsigned char common_preempt_count; offset:3; size:1; signed:0;
+ field:int common_pid; offset:4; size:4; signed:1;
+ field:unsigned short common_migrate_disable; offset:8; size:2; signed:0;
+ field:int common_padding; offset:10; size:2; signed:0;
+
+
+The field for common_padding has the correct size and offset, but the
+use of "int" might confuse some parsers (and people that are reading
+it). This needs to be changed to "unsigned short".
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/1321467575.4181.36.camel@frodo
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/trace/trace_events.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/kernel/trace/trace_events.c
+===================================================================
+--- linux-stable.orig/kernel/trace/trace_events.c
++++ linux-stable/kernel/trace/trace_events.c
+@@ -117,7 +117,7 @@ static int trace_define_common_fields(vo
+ __common_field(unsigned char, preempt_count);
+ __common_field(int, pid);
+ __common_field(unsigned short, migrate_disable);
+- __common_field(int, padding);
++ __common_field(unsigned short, padding);
+
+ return ret;
+ }
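For reference, the common-field layout implied by the format dump above, once the padding field is shown as unsigned short, written out as a struct (the struct name is invented for illustration; offsets and sizes are taken from the format file):

	struct example_trace_common_fields {
		unsigned short	common_type;		/* offset 0,  size 2 */
		unsigned char	common_flags;		/* offset 2,  size 1 */
		unsigned char	common_preempt_count;	/* offset 3,  size 1 */
		int		common_pid;		/* offset 4,  size 4 */
		unsigned short	common_migrate_disable;	/* offset 8,  size 2 */
		unsigned short	common_padding;		/* offset 10, size 2 */
	};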
diff --git a/rtmutex-avoid-include-hell.patch b/rtmutex-avoid-include-hell.patch
new file mode 100644
index 0000000..9f141dc
--- /dev/null
+++ b/rtmutex-avoid-include-hell.patch
@@ -0,0 +1,22 @@
+Subject: rtmutex-avoid-include-hell.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 29 Jun 2011 20:06:39 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rtmutex.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/include/linux/rtmutex.h
+===================================================================
+--- linux-stable.orig/include/linux/rtmutex.h
++++ linux-stable/include/linux/rtmutex.h
+@@ -14,7 +14,7 @@
+
+ #include <linux/linkage.h>
+ #include <linux/plist.h>
+-#include <linux/spinlock_types.h>
++#include <linux/spinlock_types_raw.h>
+
+ extern int max_lock_depth; /* for sysctl */
+
diff --git a/rtmutex-futex-prepare-rt.patch b/rtmutex-futex-prepare-rt.patch
new file mode 100644
index 0000000..2c0a84d
--- /dev/null
+++ b/rtmutex-futex-prepare-rt.patch
@@ -0,0 +1,221 @@
+Subject: rtmutex-futex-prepare-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 10 Jun 2011 11:04:15 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/futex.c | 77 ++++++++++++++++++++++++++++++++++++++----------
+ kernel/rtmutex.c | 31 ++++++++++++++++---
+ kernel/rtmutex_common.h | 2 +
+ 3 files changed, 91 insertions(+), 19 deletions(-)
+
+Index: linux-stable/kernel/futex.c
+===================================================================
+--- linux-stable.orig/kernel/futex.c
++++ linux-stable/kernel/futex.c
+@@ -1423,6 +1423,16 @@ retry_private:
+ requeue_pi_wake_futex(this, &key2, hb2);
+ drop_count++;
+ continue;
++ } else if (ret == -EAGAIN) {
++ /*
++ * Waiter was woken by timeout or
++ * signal and has set pi_blocked_on to
++ * PI_WAKEUP_INPROGRESS before we
++ * tried to enqueue it on the rtmutex.
++ */
++ this->pi_state = NULL;
++ free_pi_state(pi_state);
++ continue;
+ } else if (ret) {
+ /* -EDEADLK */
+ this->pi_state = NULL;
+@@ -2267,7 +2277,7 @@ static int futex_wait_requeue_pi(u32 __u
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct rt_mutex *pi_mutex = NULL;
+- struct futex_hash_bucket *hb;
++ struct futex_hash_bucket *hb, *hb2;
+ union futex_key key2 = FUTEX_KEY_INIT;
+ struct futex_q q = futex_q_init;
+ int res, ret;
+@@ -2314,20 +2324,55 @@ static int futex_wait_requeue_pi(u32 __u
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+- spin_lock(&hb->lock);
+- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+- spin_unlock(&hb->lock);
+- if (ret)
+- goto out_put_keys;
++ /*
++ * On RT we must avoid races with requeue and trying to block
++ * on two mutexes (hb->lock and uaddr2's rtmutex) by
++ * serializing access to pi_blocked_on with pi_lock.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ if (current->pi_blocked_on) {
++ /*
++ * We have been requeued or are in the process of
++ * being requeued.
++ */
++ raw_spin_unlock_irq(&current->pi_lock);
++ } else {
++ /*
++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
++ * prevents a concurrent requeue from moving us to the
++ * uaddr2 rtmutex. After that we can safely acquire
++ * (and possibly block on) hb->lock.
++ */
++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ spin_lock(&hb->lock);
++
++ /*
++ * Clean up pi_blocked_on. We might leak it otherwise
++ * when we succeeded with the hb->lock in the fast
++ * path.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ current->pi_blocked_on = NULL;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
++ spin_unlock(&hb->lock);
++ if (ret)
++ goto out_put_keys;
++ }
+
+ /*
+- * In order for us to be here, we know our q.key == key2, and since
+- * we took the hb->lock above, we also know that futex_requeue() has
+- * completed and we no longer have to concern ourselves with a wakeup
+- * race with the atomic proxy lock acquisition by the requeue code. The
+- * futex_requeue dropped our key1 reference and incremented our key2
+- * reference count.
++ * In order to be here, we have either been requeued, are in
++ * the process of being requeued, or requeue successfully
++ * acquired uaddr2 on our behalf. If pi_blocked_on was
++ * non-null above, we may be racing with a requeue. Do not
++ * rely on q->lock_ptr to be hb2->lock until after blocking on
++ * hb->lock or hb2->lock. The futex_requeue dropped our key1
++ * reference and incremented our key2 reference count.
+ */
++ hb2 = hash_futex(&key2);
+
+ /* Check if the requeue code acquired the second futex for us. */
+ if (!q.rt_waiter) {
+@@ -2336,9 +2381,10 @@ static int futex_wait_requeue_pi(u32 __u
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (q.pi_state && (q.pi_state->owner != current)) {
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+- spin_unlock(q.lock_ptr);
++ spin_unlock(&hb2->lock);
+ }
+ } else {
+ /*
+@@ -2351,7 +2397,8 @@ static int futex_wait_requeue_pi(u32 __u
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+ * haven't already.
+Index: linux-stable/kernel/rtmutex.c
+===================================================================
+--- linux-stable.orig/kernel/rtmutex.c
++++ linux-stable/kernel/rtmutex.c
+@@ -67,6 +67,11 @@ static void fixup_rt_mutex_waiters(struc
+ clear_rt_mutex_waiters(lock);
+ }
+
++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
++{
++ return waiter && waiter != PI_WAKEUP_INPROGRESS;
++}
++
+ /*
+ * We can speed up the acquire/release, if the architecture
+ * supports cmpxchg and if there's no debugging state to be set up
+@@ -196,7 +201,7 @@ static int rt_mutex_adjust_prio_chain(st
+ * reached or the state of the chain has changed while we
+ * dropped the locks.
+ */
+- if (!waiter)
++ if (!rt_mutex_real_waiter(waiter))
+ goto out_unlock_pi;
+
+ /*
+@@ -399,6 +404,23 @@ static int task_blocks_on_rt_mutex(struc
+ int chain_walk = 0, res;
+
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
++
++ /*
++ * In the case of futex requeue PI, this will be a proxy
++	 * lock. The task will wake unaware that it is enqueued on
++ * this lock. Avoid blocking on two locks and corrupting
++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS
++ * flag. futex_wait_requeue_pi() sets this when it wakes up
++ * before requeue (due to a signal or timeout). Do not enqueue
++ * the task if PI_WAKEUP_INPROGRESS is set.
++ */
++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ return -EAGAIN;
++ }
++
++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
++
+ __rt_mutex_adjust_prio(task);
+ waiter->task = task;
+ waiter->lock = lock;
+@@ -423,7 +445,7 @@ static int task_blocks_on_rt_mutex(struc
+ plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+
+ __rt_mutex_adjust_prio(owner);
+- if (owner->pi_blocked_on)
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ chain_walk = 1;
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+@@ -517,7 +539,7 @@ static void remove_waiter(struct rt_mute
+ }
+ __rt_mutex_adjust_prio(owner);
+
+- if (owner->pi_blocked_on)
++ if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ chain_walk = 1;
+
+ raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+@@ -551,7 +573,8 @@ void rt_mutex_adjust_pi(struct task_stru
+ raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+- if (!waiter || waiter->list_entry.prio == task->prio) {
++ if (!rt_mutex_real_waiter(waiter) ||
++ waiter->list_entry.prio == task->prio) {
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+Index: linux-stable/kernel/rtmutex_common.h
+===================================================================
+--- linux-stable.orig/kernel/rtmutex_common.h
++++ linux-stable/kernel/rtmutex_common.h
+@@ -103,6 +103,8 @@ static inline struct task_struct *rt_mut
+ /*
+ * PI-futex support (proxy locking functions, etc.):
+ */
++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
++
+ extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+ extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
diff --git a/rtmutex-lock-killable.patch b/rtmutex-lock-killable.patch
new file mode 100644
index 0000000..828da23
--- /dev/null
+++ b/rtmutex-lock-killable.patch
@@ -0,0 +1,84 @@
+Subject: rtmutex-lock-killable.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 09 Jun 2011 11:43:52 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rtmutex.h | 1 +
+ kernel/rtmutex.c | 33 +++++++++++++++++++++++++++------
+ 2 files changed, 28 insertions(+), 6 deletions(-)
+
+Index: linux-stable/include/linux/rtmutex.h
+===================================================================
+--- linux-stable.orig/include/linux/rtmutex.h
++++ linux-stable/include/linux/rtmutex.h
+@@ -90,6 +90,7 @@ extern void rt_mutex_destroy(struct rt_m
+ extern void rt_mutex_lock(struct rt_mutex *lock);
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
+ int detect_deadlock);
++extern int rt_mutex_lock_killable(struct rt_mutex *lock, int detect_deadlock);
+ extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock);
+Index: linux-stable/kernel/rtmutex.c
+===================================================================
+--- linux-stable.orig/kernel/rtmutex.c
++++ linux-stable/kernel/rtmutex.c
+@@ -791,12 +791,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+ /**
+ * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
+ *
+- * @lock: the rt_mutex to be locked
++ * @lock: the rt_mutex to be locked
+ * @detect_deadlock: deadlock detection on/off
+ *
+ * Returns:
+- * 0 on success
+- * -EINTR when interrupted by a signal
++ * 0 on success
++ * -EINTR when interrupted by a signal
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
+ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
+@@ -810,17 +810,38 @@ int __sched rt_mutex_lock_interruptible(
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+ /**
++ * rt_mutex_lock_killable - lock a rt_mutex killable
++ *
++ * @lock: the rt_mutex to be locked
++ * @detect_deadlock: deadlock detection on/off
++ *
++ * Returns:
++ * 0 on success
++ * -EINTR when interrupted by a signal
++ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
++ */
++int __sched rt_mutex_lock_killable(struct rt_mutex *lock,
++ int detect_deadlock)
++{
++ might_sleep();
++
++ return rt_mutex_fastlock(lock, TASK_KILLABLE,
++ detect_deadlock, rt_mutex_slowlock);
++}
++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
++
++/**
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible
+ * the timeout structure is provided
+ * by the caller
+ *
+- * @lock: the rt_mutex to be locked
++ * @lock: the rt_mutex to be locked
+ * @timeout: timeout structure or NULL (no timeout)
+ * @detect_deadlock: deadlock detection on/off
+ *
+ * Returns:
+- * 0 on success
+- * -EINTR when interrupted by a signal
++ * 0 on success
++ * -EINTR when interrupted by a signal
+ * -ETIMEDOUT when the timeout expired
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
diff --git a/rwsem-add-rt-variant.patch b/rwsem-add-rt-variant.patch
new file mode 100644
index 0000000..e192641
--- /dev/null
+++ b/rwsem-add-rt-variant.patch
@@ -0,0 +1,159 @@
+Subject: rwsem-add-rt-variant.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 29 Jun 2011 21:02:53 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rwsem.h | 6 ++
+ include/linux/rwsem_rt.h | 105 +++++++++++++++++++++++++++++++++++++++++++++++
+ lib/Makefile | 3 +
+ 3 files changed, 114 insertions(+)
+
+Index: linux-stable/include/linux/rwsem.h
+===================================================================
+--- linux-stable.orig/include/linux/rwsem.h
++++ linux-stable/include/linux/rwsem.h
+@@ -16,6 +16,10 @@
+
+ #include <linux/atomic.h>
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++#include <linux/rwsem_rt.h>
++#else /* PREEMPT_RT_FULL */
++
+ struct rw_semaphore;
+
+ #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+@@ -130,4 +134,6 @@ extern void down_write_nested(struct rw_
+ # define down_write_nested(sem, subclass) down_write(sem)
+ #endif
+
++#endif /* !PREEMPT_RT_FULL */
++
+ #endif /* _LINUX_RWSEM_H */
+Index: linux-stable/include/linux/rwsem_rt.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/linux/rwsem_rt.h
+@@ -0,0 +1,105 @@
++#ifndef _LINUX_RWSEM_RT_H
++#define _LINUX_RWSEM_RT_H
++
++#ifndef _LINUX_RWSEM_H
++#error "Include rwsem.h"
++#endif
++
++/*
++ * RW-semaphores are a spinlock plus a reader-depth count.
++ *
++ * Note that the semantics are different from the usual
++ * Linux rw-sems, in PREEMPT_RT mode we do not allow
++ * multiple readers to hold the lock at once, we only allow
++ * a read-lock owner to read-lock recursively. This is
++ * better for latency, makes the implementation inherently
++ * fair and makes it simpler as well.
++ */
++
++#include <linux/rtmutex.h>
++
++struct rw_semaphore {
++ struct rt_mutex lock;
++ int read_depth;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++};
++
++#define __RWSEM_INITIALIZER(name) \
++ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
++ RW_DEP_MAP_INIT(name) }
++
++#define DECLARE_RWSEM(lockname) \
++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
++
++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, char *name,
++ struct lock_class_key *key);
++
++# define rt_init_rwsem(sem) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_init(&(sem)->lock); \
++ __rt_rwsem_init((sem), #sem, &__key); \
++} while (0)
++
++extern void rt_down_write(struct rw_semaphore *rwsem);
++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
++extern void rt_down_read(struct rw_semaphore *rwsem);
++extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
++extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
++extern void rt_up_read(struct rw_semaphore *rwsem);
++extern void rt_up_write(struct rw_semaphore *rwsem);
++extern void rt_downgrade_write(struct rw_semaphore *rwsem);
++
++#define init_rwsem(sem) rt_init_rwsem(sem)
++#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
++
++static inline void down_read(struct rw_semaphore *sem)
++{
++ rt_down_read(sem);
++}
++
++static inline int down_read_trylock(struct rw_semaphore *sem)
++{
++ return rt_down_read_trylock(sem);
++}
++
++static inline void down_write(struct rw_semaphore *sem)
++{
++ rt_down_write(sem);
++}
++
++static inline int down_write_trylock(struct rw_semaphore *sem)
++{
++ return rt_down_write_trylock(sem);
++}
++
++static inline void up_read(struct rw_semaphore *sem)
++{
++ rt_up_read(sem);
++}
++
++static inline void up_write(struct rw_semaphore *sem)
++{
++ rt_up_write(sem);
++}
++
++static inline void downgrade_write(struct rw_semaphore *sem)
++{
++ rt_downgrade_write(sem);
++}
++
++static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
++{
++ return rt_down_read_nested(sem, subclass);
++}
++
++static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
++{
++ rt_down_write_nested(sem, subclass);
++}
++
++#endif
+Index: linux-stable/lib/Makefile
+===================================================================
+--- linux-stable.orig/lib/Makefile
++++ linux-stable/lib/Makefile
+@@ -38,8 +38,11 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o
+ obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
+ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
++
++ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
++endif
+
+ CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
+ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
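A short usage sketch of the RT rwsem semantics described in rwsem_rt.h above (the semaphore and function names are invented for illustration; only the standard rwsem API is assumed):

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(example_sem);

	static void example_reader(void)
	{
		down_read(&example_sem);
		/* On PREEMPT_RT_FULL only this task holds the read side; a
		 * second reader would block until up_read().  A recursive
		 * read by the owner is still allowed: */
		down_read(&example_sem);
		up_read(&example_sem);
		up_read(&example_sem);
	}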
diff --git a/sched-better-debug-output-for-might-sleep.patch b/sched-better-debug-output-for-might-sleep.patch
new file mode 100644
index 0000000..471a05c
--- /dev/null
+++ b/sched-better-debug-output-for-might-sleep.patch
@@ -0,0 +1,76 @@
+Subject: sched: Better debug output for might sleep
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 05 Oct 2012 08:56:15 +0100
+
+might_sleep() can tell us where interrupts have been disabled, but we
+have no idea what disabled preemption. Add some debug infrastructure.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 4 ++++
+ kernel/sched/core.c | 23 +++++++++++++++++++++--
+ 2 files changed, 25 insertions(+), 2 deletions(-)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1624,6 +1624,10 @@ struct task_struct {
+ int kmap_idx;
+ pte_t kmap_pte[KM_TYPE_NR];
+ #endif
++
++#ifdef CONFIG_DEBUG_PREEMPT
++ unsigned long preempt_disable_ip;
++#endif
+ };
+
+ #ifdef CONFIG_PREEMPT_RT_FULL
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -3311,8 +3311,13 @@ void __kprobes add_preempt_count(int val
+ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
+ PREEMPT_MASK - 10);
+ #endif
+- if (preempt_count() == val)
+- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
++ if (preempt_count() == val) {
++ unsigned long ip = get_parent_ip(CALLER_ADDR1);
++#ifdef CONFIG_DEBUG_PREEMPT
++ current->preempt_disable_ip = ip;
++#endif
++ trace_preempt_off(CALLER_ADDR0, ip);
++ }
+ }
+ EXPORT_SYMBOL(add_preempt_count);
+
+@@ -3355,6 +3360,13 @@ static noinline void __schedule_bug(stru
+ print_modules();
+ if (irqs_disabled())
+ print_irqtrace_events(prev);
++#ifdef DEBUG_PREEMPT
++ if (in_atomic_preempt_off()) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(current->preempt_disable_ip);
++ pr_cont("\n");
++ }
++#endif
+ dump_stack();
+ add_taint(TAINT_WARN);
+ }
+@@ -7700,6 +7712,13 @@ void __might_sleep(const char *file, int
+ debug_show_held_locks(current);
+ if (irqs_disabled())
+ print_irqtrace_events(current);
++#ifdef DEBUG_PREEMPT
++ if (!preempt_count_equals(preempt_offset)) {
++ pr_err("Preemption disabled at:");
++ print_ip_sym(current->preempt_disable_ip);
++ pr_cont("\n");
++ }
++#endif
+ dump_stack();
+ }
+ EXPORT_SYMBOL(__might_sleep);
diff --git a/sched-clear-pf-thread-bound-on-fallback-rq.patch b/sched-clear-pf-thread-bound-on-fallback-rq.patch
new file mode 100644
index 0000000..82f7bf2
--- /dev/null
+++ b/sched-clear-pf-thread-bound-on-fallback-rq.patch
@@ -0,0 +1,26 @@
+Subject: sched-clear-pf-thread-bound-on-fallback-rq.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 04 Nov 2011 20:48:36 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/sched/core.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -1323,6 +1323,12 @@ out:
+ }
+ }
+
++ /*
++	 * Clear PF_THREAD_BOUND, otherwise we wreck
++ * migrate_disable/enable. See optimization for
++ * PF_THREAD_BOUND tasks there.
++ */
++ p->flags &= ~PF_THREAD_BOUND;
+ return dest_cpu;
+ }
+
diff --git a/sched-cond-resched.patch b/sched-cond-resched.patch
new file mode 100644
index 0000000..01b1f84
--- /dev/null
+++ b/sched-cond-resched.patch
@@ -0,0 +1,34 @@
+Subject: sched-cond-resched.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 07 Jun 2011 11:25:03 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/sched/core.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -4781,9 +4781,17 @@ static inline int should_resched(void)
+
+ static void __cond_resched(void)
+ {
+- add_preempt_count(PREEMPT_ACTIVE);
+- __schedule();
+- sub_preempt_count(PREEMPT_ACTIVE);
++ do {
++ add_preempt_count(PREEMPT_ACTIVE);
++ __schedule();
++ sub_preempt_count(PREEMPT_ACTIVE);
++ /*
++ * Check again in case we missed a preemption
++ * opportunity between schedule and now.
++ */
++ barrier();
++
++ } while (need_resched());
+ }
+
+ int __sched _cond_resched(void)
diff --git a/sched-delay-put-task.patch b/sched-delay-put-task.patch
new file mode 100644
index 0000000..96ebac0
--- /dev/null
+++ b/sched-delay-put-task.patch
@@ -0,0 +1,71 @@
+Subject: sched-delay-put-task.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 31 May 2011 16:59:16 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 13 +++++++++++++
+ kernel/fork.c | 11 +++++++++++
+ 2 files changed, 24 insertions(+)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1607,6 +1607,9 @@ struct task_struct {
+ #ifdef CONFIG_UPROBES
+ struct uprobe_task *utask;
+ #endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head put_rcu;
++#endif
+ };
+
+ /* Future-safe accessor for struct task_struct's cpus_allowed. */
+@@ -1791,6 +1794,15 @@ extern struct pid *cad_pid;
+ extern void free_task(struct task_struct *tsk);
+ #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __put_task_struct_cb(struct rcu_head *rhp);
++
++static inline void put_task_struct(struct task_struct *t)
++{
++ if (atomic_dec_and_test(&t->usage))
++ call_rcu(&t->put_rcu, __put_task_struct_cb);
++}
++#else
+ extern void __put_task_struct(struct task_struct *t);
+
+ static inline void put_task_struct(struct task_struct *t)
+@@ -1798,6 +1810,7 @@ static inline void put_task_struct(struc
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
+ }
++#endif
+
+ extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
+Index: linux-stable/kernel/fork.c
+===================================================================
+--- linux-stable.orig/kernel/fork.c
++++ linux-stable/kernel/fork.c
+@@ -244,7 +244,18 @@ void __put_task_struct(struct task_struc
+ if (!profile_handoff_task(tsk))
+ free_task(tsk);
+ }
++#ifndef CONFIG_PREEMPT_RT_BASE
+ EXPORT_SYMBOL_GPL(__put_task_struct);
++#else
++void __put_task_struct_cb(struct rcu_head *rhp)
++{
++ struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
++
++ __put_task_struct(tsk);
++
++}
++EXPORT_SYMBOL_GPL(__put_task_struct_cb);
++#endif
+
+ void __init __weak arch_task_cache_init(void) { }
+
diff --git a/sched-disable-rt-group-sched-on-rt.patch b/sched-disable-rt-group-sched-on-rt.patch
new file mode 100644
index 0000000..da5e093
--- /dev/null
+++ b/sched-disable-rt-group-sched-on-rt.patch
@@ -0,0 +1,30 @@
+Subject: sched: Disable CONFIG_RT_GROUP_SCHED on RT
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Jul 2011 17:03:52 +0200
+
+Carsten reported problems when running:
+
+ taskset 01 chrt -f 1 sleep 1
+
+from within rc.local on a F15 machine. The task stays running and
+never gets on the run queue because some of the run queues have
+rt_throttled=1 which does not go away. It works fine from an ssh login
+shell. Disabling CONFIG_RT_GROUP_SCHED solves the problem as well.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ init/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/init/Kconfig
+===================================================================
+--- linux-stable.orig/init/Kconfig
++++ linux-stable/init/Kconfig
+@@ -806,6 +806,7 @@ config RT_GROUP_SCHED
+ bool "Group scheduling for SCHED_RR/FIFO"
+ depends on EXPERIMENTAL
+ depends on CGROUP_SCHED
++ depends on !PREEMPT_RT_FULL
+ default n
+ help
+ This feature lets you explicitly allocate real CPU bandwidth
diff --git a/sched-disable-ttwu-queue.patch b/sched-disable-ttwu-queue.patch
new file mode 100644
index 0000000..276b75e
--- /dev/null
+++ b/sched-disable-ttwu-queue.patch
@@ -0,0 +1,29 @@
+Subject: sched-disable-ttwu-queue.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 13 Sep 2011 16:42:35 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/sched/features.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+Index: linux-stable/kernel/sched/features.h
+===================================================================
+--- linux-stable.orig/kernel/sched/features.h
++++ linux-stable/kernel/sched/features.h
+@@ -60,11 +60,15 @@ SCHED_FEAT(OWNER_SPIN, true)
+ */
+ SCHED_FEAT(NONTASK_POWER, true)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * Queue remote wakeups on the target CPU and process them
+ * using the scheduler IPI. Reduces rq->lock contention/bounces.
+ */
+ SCHED_FEAT(TTWU_QUEUE, true)
++#else
++SCHED_FEAT(TTWU_QUEUE, false)
++#endif
+
+ SCHED_FEAT(FORCE_SD_OVERLAP, false)
+ SCHED_FEAT(RT_RUNTIME_SHARE, true)
diff --git a/sched-limit-nr-migrate.patch b/sched-limit-nr-migrate.patch
new file mode 100644
index 0000000..2eca354
--- /dev/null
+++ b/sched-limit-nr-migrate.patch
@@ -0,0 +1,25 @@
+Subject: sched-limit-nr-migrate.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 06 Jun 2011 12:12:51 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/sched/core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -263,7 +263,11 @@ late_initcall(sched_init_debug);
+ * Number of tasks to iterate in a single balance run.
+ * Limited because this is done with IRQs disabled.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ const_debug unsigned int sysctl_sched_nr_migrate = 32;
++#else
++const_debug unsigned int sysctl_sched_nr_migrate = 8;
++#endif
+
+ /*
+ * period over which we average the RT time consumption, measured
diff --git a/sched-might-sleep-do-not-account-rcu-depth.patch b/sched-might-sleep-do-not-account-rcu-depth.patch
new file mode 100644
index 0000000..12c5373
--- /dev/null
+++ b/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -0,0 +1,49 @@
+Subject: sched-might-sleep-do-not-account-rcu-depth.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 07 Jun 2011 09:19:06 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rcupdate.h | 7 +++++++
+ kernel/sched/core.c | 3 ++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+Index: linux-stable/include/linux/rcupdate.h
+===================================================================
+--- linux-stable.orig/include/linux/rcupdate.h
++++ linux-stable/include/linux/rcupdate.h
+@@ -157,6 +157,11 @@ void synchronize_rcu(void);
+ * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
+ */
+ #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
++#ifndef CONFIG_PREEMPT_RT_FULL
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++#else
++static inline int sched_rcu_preempt_depth(void) { return 0; }
++#endif
+
+ #else /* #ifdef CONFIG_PREEMPT_RCU */
+
+@@ -180,6 +185,8 @@ static inline int rcu_preempt_depth(void
+ return 0;
+ }
+
++#define sched_rcu_preempt_depth() rcu_preempt_depth()
++
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+ /* Internal to kernel */
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -7450,7 +7450,8 @@ void __init sched_init(void)
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ static inline int preempt_count_equals(int preempt_offset)
+ {
+- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
++ int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
++ sched_rcu_preempt_depth();
+
+ return (nested == preempt_offset);
+ }
diff --git a/sched-migrate-disable.patch b/sched-migrate-disable.patch
new file mode 100644
index 0000000..cdd15f1
--- /dev/null
+++ b/sched-migrate-disable.patch
@@ -0,0 +1,200 @@
+Subject: sched-migrate-disable.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 16 Jun 2011 13:26:08 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/preempt.h | 8 ++++
+ include/linux/sched.h | 13 +++++--
+ kernel/sched/core.c | 88 +++++++++++++++++++++++++++++++++++++++++++++---
+ lib/smp_processor_id.c | 6 +--
+ 4 files changed, 104 insertions(+), 11 deletions(-)
+
+Index: linux-stable/include/linux/preempt.h
+===================================================================
+--- linux-stable.orig/include/linux/preempt.h
++++ linux-stable/include/linux/preempt.h
+@@ -108,6 +108,14 @@ do { \
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
++#ifdef CONFIG_SMP
++extern void migrate_disable(void);
++extern void migrate_enable(void);
++#else
++# define migrate_disable() do { } while (0)
++# define migrate_enable() do { } while (0)
++#endif
++
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ # define preempt_disable_rt() preempt_disable()
+ # define preempt_enable_rt() preempt_enable()
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1275,6 +1275,7 @@ struct task_struct {
+ #endif
+
+ unsigned int policy;
++ int migrate_disable;
+ int nr_cpus_allowed;
+ cpumask_t cpus_allowed;
+
+@@ -1614,9 +1615,6 @@ struct task_struct {
+ #endif
+ };
+
+-/* Future-safe accessor for struct task_struct's cpus_allowed. */
+-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
+-
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
+ #else
+@@ -2773,6 +2771,15 @@ static inline void set_task_cpu(struct t
+
+ #endif /* CONFIG_SMP */
+
++/* Future-safe accessor for struct task_struct's cpus_allowed. */
++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
++{
++ if (p->migrate_disable)
++ return cpumask_of(task_cpu(p));
++
++ return &p->cpus_allowed;
++}
++
+ extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -5203,11 +5203,12 @@ void __cpuinit init_idle(struct task_str
+ #ifdef CONFIG_SMP
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- if (p->sched_class && p->sched_class->set_cpus_allowed)
+- p->sched_class->set_cpus_allowed(p, new_mask);
+-
++ if (!p->migrate_disable) {
++ if (p->sched_class && p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, new_mask);
++ p->nr_cpus_allowed = cpumask_weight(new_mask);
++ }
+ cpumask_copy(&p->cpus_allowed, new_mask);
+- p->nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+
+ /*
+@@ -5258,7 +5259,7 @@ int set_cpus_allowed_ptr(struct task_str
+ do_set_cpus_allowed(p, new_mask);
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask))
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || p->migrate_disable)
+ goto out;
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+@@ -5277,6 +5278,83 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
+
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++ const struct cpumask *mask;
++ unsigned long flags;
++ struct rq *rq;
++
++ preempt_disable();
++ if (p->migrate_disable) {
++ p->migrate_disable++;
++ preempt_enable();
++ return;
++ }
++
++ pin_current_cpu();
++ if (unlikely(!scheduler_running)) {
++ p->migrate_disable = 1;
++ preempt_enable();
++ return;
++ }
++ rq = task_rq_lock(p, &flags);
++ p->migrate_disable = 1;
++ mask = tsk_cpus_allowed(p);
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->nr_cpus_allowed = cpumask_weight(mask);
++ }
++ task_rq_unlock(rq, p, &flags);
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++ const struct cpumask *mask;
++ unsigned long flags;
++ struct rq *rq;
++
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++
++ preempt_disable();
++ if (p->migrate_disable > 1) {
++ p->migrate_disable--;
++ preempt_enable();
++ return;
++ }
++
++ if (unlikely(!scheduler_running)) {
++ p->migrate_disable = 0;
++ unpin_current_cpu();
++ preempt_enable();
++ return;
++ }
++
++ rq = task_rq_lock(p, &flags);
++ p->migrate_disable = 0;
++ mask = tsk_cpus_allowed(p);
++
++ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
++
++ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ if (p->sched_class->set_cpus_allowed)
++ p->sched_class->set_cpus_allowed(p, mask);
++ p->nr_cpus_allowed = cpumask_weight(mask);
++ }
++
++ task_rq_unlock(rq, p, &flags);
++ unpin_current_cpu();
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_enable);
++
+ /*
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
+Index: linux-stable/lib/smp_processor_id.c
+===================================================================
+--- linux-stable.orig/lib/smp_processor_id.c
++++ linux-stable/lib/smp_processor_id.c
+@@ -39,9 +39,9 @@ notrace unsigned int debug_smp_processor
+ if (!printk_ratelimit())
+ goto out_enable;
+
+- printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] "
+- "code: %s/%d\n",
+- preempt_count() - 1, current->comm, current->pid);
++ printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x %08x] "
++ "code: %s/%d\n", preempt_count() - 1,
++ current->migrate_disable, current->comm, current->pid);
+ print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ dump_stack();
+
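A quick usage sketch of the migrate_disable()/migrate_enable() pair introduced above (the function below is invented for illustration):

	#include <linux/preempt.h>
	#include <linux/smp.h>
	#include <linux/printk.h>

	static void example_cpu_local_section(void)
	{
		migrate_disable();	/* pins the task to this CPU; calls nest */
		/* smp_processor_id() is stable here, although other tasks may
		 * still preempt us on this CPU */
		pr_info("running on CPU %d\n", smp_processor_id());
		migrate_enable();
	}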
diff --git a/sched-mmdrop-delayed.patch b/sched-mmdrop-delayed.patch
new file mode 100644
index 0000000..67565fa
--- /dev/null
+++ b/sched-mmdrop-delayed.patch
@@ -0,0 +1,153 @@
+Subject: sched-mmdrop-delayed.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 06 Jun 2011 12:20:33 +0200
+
+Needs thread context (pgd_lock) -> ifdeffed. Workqueues won't work
+with RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/mm_types.h | 4 ++++
+ include/linux/sched.h | 12 ++++++++++++
+ kernel/fork.c | 15 ++++++++++++++-
+ kernel/sched/core.c | 21 +++++++++++++++++++--
+ 4 files changed, 49 insertions(+), 3 deletions(-)
+
+Index: linux-stable/include/linux/mm_types.h
+===================================================================
+--- linux-stable.orig/include/linux/mm_types.h
++++ linux-stable/include/linux/mm_types.h
+@@ -12,6 +12,7 @@
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+ #include <linux/page-debug-flags.h>
++#include <linux/rcupdate.h>
+ #include <linux/uprobes.h>
+ #include <asm/page.h>
+ #include <asm/mmu.h>
+@@ -409,6 +410,9 @@ struct mm_struct {
+ struct cpumask cpumask_allocation;
+ #endif
+ struct uprobes_state uprobes_state;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ struct rcu_head delayed_drop;
++#endif
+ };
+
+ static inline void mm_init_cpumask(struct mm_struct *mm)
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -2324,12 +2324,24 @@ extern struct mm_struct * mm_alloc(void)
+
+ /* mmdrop drops the mm and the page tables */
+ extern void __mmdrop(struct mm_struct *);
++
+ static inline void mmdrop(struct mm_struct * mm)
+ {
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++extern void __mmdrop_delayed(struct rcu_head *rhp);
++static inline void mmdrop_delayed(struct mm_struct *mm)
++{
++ if (atomic_dec_and_test(&mm->mm_count))
++ call_rcu(&mm->delayed_drop, __mmdrop_delayed);
++}
++#else
++# define mmdrop_delayed(mm) mmdrop(mm)
++#endif
++
+ /* mmput gets rid of the mappings and all user-space */
+ extern void mmput(struct mm_struct *);
+ /* Grab a reference to a task's mm, if it is not already going away */
+Index: linux-stable/kernel/fork.c
+===================================================================
+--- linux-stable.orig/kernel/fork.c
++++ linux-stable/kernel/fork.c
+@@ -249,7 +249,7 @@ EXPORT_SYMBOL_GPL(__put_task_struct);
+ #else
+ void __put_task_struct_cb(struct rcu_head *rhp)
+ {
+- struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
+
+ __put_task_struct(tsk);
+
+@@ -608,6 +608,19 @@ void __mmdrop(struct mm_struct *mm)
+ }
+ EXPORT_SYMBOL_GPL(__mmdrop);
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++/*
++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't
++ * want another facility to make this work.
++ */
++void __mmdrop_delayed(struct rcu_head *rhp)
++{
++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
++
++ __mmdrop(mm);
++}
++#endif
++
+ /*
+ * Decrement the use count and release all resources for an mm.
+ */
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -1970,8 +1970,12 @@ static void finish_task_switch(struct rq
+ finish_arch_post_lock_switch();
+
+ fire_sched_in_preempt_notifiers(current);
++ /*
++ * We use mmdrop_delayed() here so we don't have to do the
++ * full __mmdrop() when we are the last user.
++ */
+ if (mm)
+- mmdrop(mm);
++ mmdrop_delayed(mm);
+ if (unlikely(prev_state == TASK_DEAD)) {
+ /*
+ * Remove function-return probe instances associated with this
+@@ -5302,6 +5306,8 @@ static int migration_cpu_stop(void *data
+
+ #ifdef CONFIG_HOTPLUG_CPU
+
++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
++
+ /*
+ * Ensures that the idle task is using init_mm right before its cpu goes
+ * offline.
+@@ -5314,7 +5320,12 @@ void idle_task_exit(void)
+
+ if (mm != &init_mm)
+ switch_mm(mm, &init_mm, current);
+- mmdrop(mm);
++
++ /*
++ * Defer the cleanup to an alive cpu. On RT we can neither
++ * call mmdrop() nor mmdrop_delayed() from here.
++ */
++ per_cpu(idle_last_mm, smp_processor_id()) = mm;
+ }
+
+ /*
+@@ -5621,6 +5632,12 @@ migration_call(struct notifier_block *nf
+
+ calc_load_migrate(rq);
+ break;
++ case CPU_DEAD:
++ if (per_cpu(idle_last_mm, cpu)) {
++ mmdrop(per_cpu(idle_last_mm, cpu));
++ per_cpu(idle_last_mm, cpu) = NULL;
++ }
++ break;
+ #endif
+ }
+
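The point of mmdrop_delayed() above is that on RT the final __mmdrop() may need to take sleeping locks, so the last reference holder hands the mm to call_rcu() for a later, safe context instead of freeing it inline (and idle_task_exit() defers even further via the per-cpu idle_last_mm slot). The compilable userspace sketch below shows the same defer-the-last-put pattern; struct obj, defer_free() and run_deferred_frees() are stand-ins for mm_struct, call_rcu() and the RCU callback context, so treat the details as assumptions rather than kernel API.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	struct obj *deferred_next;	/* stands in for struct rcu_head */
};

/* Objects waiting for the safe context; single-threaded demo, no locking. */
static struct obj *deferred_list;

/* Stand-in for call_rcu(): queue the object for later release. */
static void defer_free(struct obj *o)
{
	o->deferred_next = deferred_list;
	deferred_list = o;
}

/* mmdrop_delayed()-style put: the last put queues, it never frees inline. */
static void obj_put_delayed(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		defer_free(o);
}

/* Runs later, from a context where the real __mmdrop() would be legal. */
static void run_deferred_frees(void)
{
	while (deferred_list) {
		struct obj *o = deferred_list;

		deferred_list = o->deferred_next;
		free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->refcount, 2);
	obj_put_delayed(o);		/* one reference still left */
	obj_put_delayed(o);		/* last put: queued, not freed here */
	run_deferred_frees();		/* "later", in the safe context */
	printf("deferred drop model ok\n");
	return 0;
}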
diff --git a/sched-rt-fix-migrate_enable-thinko.patch b/sched-rt-fix-migrate_enable-thinko.patch
new file mode 100644
index 0000000..76fabf4
--- /dev/null
+++ b/sched-rt-fix-migrate_enable-thinko.patch
@@ -0,0 +1,67 @@
+Subject: sched, rt: Fix migrate_enable() thinko
+From: Mike Galbraith <efault@gmx.de>
+Date: Tue, 23 Aug 2011 16:12:43 +0200
+
+Assigning mask = tsk_cpus_allowed(p) after p->migrate_disable = 0 ensures
+that we won't see a mask change: no push/pull, we stack tasks on one CPU.
+
+Also add a couple of fields to sched_debug for the next guy.
+
+[ Build fix from Stratos Psomadakis <psomas@gentoo.org> ]
+
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Cc: Paul E. McKenney <paulmck@us.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1314108763.6689.4.camel@marge.simson.net
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/sched/core.c | 4 +++-
+ kernel/sched/debug.c | 7 +++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -5355,12 +5355,14 @@ void migrate_enable(void)
+ */
+ rq = this_rq();
+ raw_spin_lock_irqsave(&rq->lock, flags);
+- p->migrate_disable = 0;
+ mask = tsk_cpus_allowed(p);
++ p->migrate_disable = 0;
+
+ WARN_ON(!cpumask_test_cpu(smp_processor_id(), mask));
+
+ if (!cpumask_equal(&p->cpus_allowed, mask)) {
++ /* Get the mask now that migration is enabled */
++ mask = tsk_cpus_allowed(p);
+ if (p->sched_class->set_cpus_allowed)
+ p->sched_class->set_cpus_allowed(p, mask);
+ p->nr_cpus_allowed = cpumask_weight(mask);
+Index: linux-stable/kernel/sched/debug.c
+===================================================================
+--- linux-stable.orig/kernel/sched/debug.c
++++ linux-stable/kernel/sched/debug.c
+@@ -237,6 +237,9 @@ void print_rt_rq(struct seq_file *m, int
+ P(rt_throttled);
+ PN(rt_time);
+ PN(rt_runtime);
++#ifdef CONFIG_SMP
++ P(rt_nr_migratory);
++#endif
+
+ #undef PN
+ #undef P
+@@ -491,6 +494,10 @@ void proc_sched_show_task(struct task_st
+ P(se.load.weight);
+ P(policy);
+ P(prio);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ P(migrate_disable);
++#endif
++ P(nr_cpus_allowed);
+ #undef PN
+ #undef __PN
+ #undef P
diff --git a/sched-rt-mutex-wakeup.patch b/sched-rt-mutex-wakeup.patch
new file mode 100644
index 0000000..b7ee8e2
--- /dev/null
+++ b/sched-rt-mutex-wakeup.patch
@@ -0,0 +1,88 @@
+Subject: sched-rt-mutex-wakeup.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 25 Jun 2011 09:21:04 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 3 +++
+ kernel/sched/core.c | 31 ++++++++++++++++++++++++++++++-
+ 2 files changed, 33 insertions(+), 1 deletion(-)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1080,6 +1080,7 @@ struct sched_domain;
+ #define WF_SYNC 0x01 /* waker goes to sleep after wakup */
+ #define WF_FORK 0x02 /* child wakeup after fork */
+ #define WF_MIGRATED 0x04 /* internal use, task got migrated */
++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */
+
+ #define ENQUEUE_WAKEUP 1
+ #define ENQUEUE_HEAD 2
+@@ -1234,6 +1235,7 @@ enum perf_event_task_context {
+
+ struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
++ volatile long saved_state; /* saved state for "spinlock sleepers" */
+ void *stack;
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+@@ -2220,6 +2222,7 @@ extern void xtime_update(unsigned long t
+
+ extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+ extern int wake_up_process(struct task_struct *tsk);
++extern int wake_up_lock_sleeper(struct task_struct * tsk);
+ extern void wake_up_new_task(struct task_struct *tsk);
+ #ifdef CONFIG_SMP
+ extern void kick_process(struct task_struct *tsk);
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -1588,8 +1588,25 @@ try_to_wake_up(struct task_struct *p, un
+
+ smp_wmb();
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+- if (!(p->state & state))
++ if (!(p->state & state)) {
++ /*
++ * The task might be running due to a spinlock sleeper
++ * wakeup. Check the saved state and set it to running
++ * if the wakeup condition is true.
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER)) {
++ if (p->saved_state & state)
++ p->saved_state = TASK_RUNNING;
++ }
+ goto out;
++ }
++
++ /*
++ * If this is a regular wakeup, then we can unconditionally
++ * clear the saved state of a "lock sleeper".
++ */
++ if (!(wake_flags & WF_LOCK_SLEEPER))
++ p->saved_state = TASK_RUNNING;
+
+ success = 1; /* we're going to change ->state */
+ cpu = task_cpu(p);
+@@ -1695,6 +1712,18 @@ int wake_up_process(struct task_struct *
+ }
+ EXPORT_SYMBOL(wake_up_process);
+
++/**
++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock"
++ * @p: The process to be woken up.
++ *
++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate
++ * the nature of the wakeup.
++ */
++int wake_up_lock_sleeper(struct task_struct *p)
++{
++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER);
++}
++
+ int wake_up_state(struct task_struct *p, unsigned int state)
+ {
+ return try_to_wake_up(p, state, 0);
diff --git a/sched-teach-migrate_disable-about-atomic-contexts.patch b/sched-teach-migrate_disable-about-atomic-contexts.patch
new file mode 100644
index 0000000..0b3bd74
--- /dev/null
+++ b/sched-teach-migrate_disable-about-atomic-contexts.patch
@@ -0,0 +1,92 @@
+Subject: sched: Teach migrate_disable about atomic contexts
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 02 Sep 2011 14:41:37 +0200
+
+Subject: sched: teach migrate_disable about atomic contexts
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Fri Sep 02 14:29:27 CEST 2011
+
+ <NMI> [<ffffffff812dafd8>] spin_bug+0x94/0xa8
+ [<ffffffff812db07f>] do_raw_spin_lock+0x43/0xea
+ [<ffffffff814fa9be>] _raw_spin_lock_irqsave+0x6b/0x85
+ [<ffffffff8106ff9e>] ? migrate_disable+0x75/0x12d
+ [<ffffffff81078aaf>] ? pin_current_cpu+0x36/0xb0
+ [<ffffffff8106ff9e>] migrate_disable+0x75/0x12d
+ [<ffffffff81115b9d>] pagefault_disable+0xe/0x1f
+ [<ffffffff81047027>] copy_from_user_nmi+0x74/0xe6
+ [<ffffffff810489d7>] perf_callchain_user+0xf3/0x135
+
+Now clearly we can't go around taking locks from NMI context; cure
+this by short-circuiting migrate_disable() when we're in an atomic
+context already.
+
+Add some extra debugging to avoid things like:
+
+ preempt_disable()
+ migrate_disable();
+
+ preempt_enable();
+ migrate_enable();
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/1314967297.1301.14.camel@twins
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/n/tip-wbot4vsmwhi8vmbf83hsclk6@git.kernel.org
+---
+ include/linux/sched.h | 3 +++
+ kernel/sched/core.c | 21 +++++++++++++++++++++
+ 2 files changed, 24 insertions(+)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1277,6 +1277,9 @@ struct task_struct {
+ unsigned int policy;
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ int migrate_disable;
++# ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable_atomic;
++# endif
+ #endif
+ int nr_cpus_allowed;
+ cpumask_t cpus_allowed;
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -5286,6 +5286,17 @@ void migrate_disable(void)
+ unsigned long flags;
+ struct rq *rq;
+
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
+ preempt_disable();
+ if (p->migrate_disable) {
+ p->migrate_disable++;
+@@ -5334,6 +5345,16 @@ void migrate_enable(void)
+ unsigned long flags;
+ struct rq *rq;
+
++ if (in_atomic()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
+ WARN_ON_ONCE(p->migrate_disable <= 0);
+
+ preempt_disable();
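A compilable userspace model of the in_atomic() short-circuit above follows: from atomic context migrate_disable()/migrate_enable() only adjust a debug counter and touch nothing that needs the rq lock, and the counter exposes unbalanced nesting such as preempt_disable(); migrate_disable(); preempt_enable(); migrate_enable();. struct task_model and the model_* helpers are illustration-only, and assert() stands in for the WARN_ON_ONCE() checks.

#include <assert.h>
#include <stdio.h>

struct task_model {
	int migrate_disable;		/* nesting depth */
	int migrate_disable_atomic;	/* models the CONFIG_SCHED_DEBUG counter */
};

static void model_migrate_disable(struct task_model *t, int in_atomic)
{
	if (in_atomic) {
		t->migrate_disable_atomic++;	/* no locks, no pinning */
		return;
	}
	assert(t->migrate_disable_atomic == 0);	/* catch unbalanced atomic usage */
	t->migrate_disable++;
}

static void model_migrate_enable(struct task_model *t, int in_atomic)
{
	if (in_atomic) {
		t->migrate_disable_atomic--;
		return;
	}
	assert(t->migrate_disable_atomic == 0);
	t->migrate_disable--;
}

int main(void)
{
	struct task_model t = { 0, 0 };

	/* Balanced use from atomic context: only the debug counter moves. */
	model_migrate_disable(&t, 1);
	model_migrate_enable(&t, 1);
	assert(t.migrate_disable_atomic == 0);

	/* Normal use from sleepable context. */
	model_migrate_disable(&t, 0);
	model_migrate_enable(&t, 0);
	printf("atomic short-circuit model ok\n");
	return 0;
}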
diff --git a/sched-ttwu-ensure-success-return-is-correct.patch b/sched-ttwu-ensure-success-return-is-correct.patch
new file mode 100644
index 0000000..2299dd8
--- /dev/null
+++ b/sched-ttwu-ensure-success-return-is-correct.patch
@@ -0,0 +1,36 @@
+Subject: sched: ttwu: Return success when only changing the saved_state value
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 13 Dec 2011 21:42:19 +0100
+
+When a task blocks on a rt lock, it saves the current state in
+p->saved_state, so a lock related wake up will not destroy the
+original state.
+
+When a real wakeup happens while the task is already running due to a
+lock wakeup, we update p->saved_state to TASK_RUNNING, but we do not
+return success. That might cause another wakeup in the waitqueue code,
+and the task remains in the waitqueue list. Return success in that
+case as well.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ kernel/sched/core.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -1595,8 +1595,10 @@ try_to_wake_up(struct task_struct *p, un
+ * if the wakeup condition is true.
+ */
+ if (!(wake_flags & WF_LOCK_SLEEPER)) {
+- if (p->saved_state & state)
++ if (p->saved_state & state) {
+ p->saved_state = TASK_RUNNING;
++ success = 1;
++ }
+ }
+ goto out;
+ }
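Taken together, sched-rt-mutex-wakeup.patch and the fix above give p->saved_state the following behaviour: a WF_LOCK_SLEEPER wakeup wakes the task without touching the saved state, while a regular wakeup either wakes the task and clears the saved state, or, if the task is already running off a lock wakeup, updates saved_state and still reports success. The compilable userspace model below mirrors that logic; the TASK_* values, struct task_model and the model_* names are simplified stand-ins, not kernel code.

#include <assert.h>
#include <stdio.h>

#define TASK_RUNNING		0x0000
#define TASK_INTERRUPTIBLE	0x0001
#define TASK_UNINTERRUPTIBLE	0x0002

struct task_model {
	long state;		/* what the scheduler acts on */
	long saved_state;	/* original state of a "spinlock sleeper" */
};

/* The task blocks on an rtmutex-backed spinlock: stash its real state. */
static void model_block_on_lock(struct task_model *p)
{
	p->saved_state = p->state;
	p->state = TASK_UNINTERRUPTIBLE;
}

/*
 * Returns 1 when the wakeup counts as successful, mirroring try_to_wake_up()
 * plus the "ensure success return" fix; lock_sleeper models WF_LOCK_SLEEPER.
 */
static int model_try_to_wake_up(struct task_model *p, long state, int lock_sleeper)
{
	if (!(p->state & state)) {
		/*
		 * The task may already be running because of a lock wakeup;
		 * a regular wakeup is still recorded in saved_state so it is
		 * not lost, and it counts as success.
		 */
		if (!lock_sleeper && (p->saved_state & state)) {
			p->saved_state = TASK_RUNNING;
			return 1;
		}
		return 0;
	}
	/* A regular wakeup also clears the saved state of a lock sleeper. */
	if (!lock_sleeper)
		p->saved_state = TASK_RUNNING;
	p->state = TASK_RUNNING;
	return 1;
}

int main(void)
{
	struct task_model p = { TASK_INTERRUPTIBLE, TASK_RUNNING };

	model_block_on_lock(&p);	/* now sleeping for the lock itself */
	assert(model_try_to_wake_up(&p, TASK_UNINTERRUPTIBLE, 1)); /* lock wakeup */
	assert(p.saved_state == TASK_INTERRUPTIBLE);	/* original state kept */

	/* A real wakeup arrives while the task runs off the lock wakeup. */
	assert(model_try_to_wake_up(&p, TASK_INTERRUPTIBLE, 0));
	assert(p.saved_state == TASK_RUNNING);
	printf("saved_state model ok\n");
	return 0;
}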
diff --git a/scsi-fcoe-rt-aware.patch b/scsi-fcoe-rt-aware.patch
new file mode 100644
index 0000000..4713348
--- /dev/null
+++ b/scsi-fcoe-rt-aware.patch
@@ -0,0 +1,117 @@
+Subject: scsi-fcoe-rt-aware.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 12 Nov 2011 14:00:48 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/scsi/fcoe/fcoe.c | 18 +++++++++---------
+ drivers/scsi/fcoe/fcoe_ctlr.c | 4 ++--
+ drivers/scsi/libfc/fc_exch.c | 4 ++--
+ 3 files changed, 13 insertions(+), 13 deletions(-)
+
+Index: linux-stable/drivers/scsi/fcoe/fcoe.c
+===================================================================
+--- linux-stable.orig/drivers/scsi/fcoe/fcoe.c
++++ linux-stable/drivers/scsi/fcoe/fcoe.c
+@@ -1272,7 +1272,7 @@ static void fcoe_percpu_thread_destroy(u
+ struct sk_buff *skb;
+ #ifdef CONFIG_SMP
+ struct fcoe_percpu_s *p0;
+- unsigned targ_cpu = get_cpu();
++ unsigned targ_cpu = get_cpu_light();
+ #endif /* CONFIG_SMP */
+
+ FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
+@@ -1328,7 +1328,7 @@ static void fcoe_percpu_thread_destroy(u
+ kfree_skb(skb);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ }
+- put_cpu();
++ put_cpu_light();
+ #else
+ /*
+ * This a non-SMP scenario where the singular Rx thread is
+@@ -1546,11 +1546,11 @@ err2:
+ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
+ {
+ struct fcoe_percpu_s *fps;
+- int rc;
++ int rc, cpu = get_cpu_light();
+
+- fps = &get_cpu_var(fcoe_percpu);
++ fps = &per_cpu(fcoe_percpu, cpu);
+ rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
+- put_cpu_var(fcoe_percpu);
++ put_cpu_light();
+
+ return rc;
+ }
+@@ -1745,11 +1745,11 @@ static inline int fcoe_filter_frames(str
+ return 0;
+ }
+
+- stats = per_cpu_ptr(lport->stats, get_cpu());
++ stats = per_cpu_ptr(lport->stats, get_cpu_light());
+ stats->InvalidCRCCount++;
+ if (stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+- put_cpu();
++ put_cpu_light();
+ return -EINVAL;
+ }
+
+@@ -1825,13 +1825,13 @@ static void fcoe_recv_frame(struct sk_bu
+ goto drop;
+
+ if (!fcoe_filter_frames(lport, fp)) {
+- put_cpu();
++ put_cpu_light();
+ fc_exch_recv(lport, fp);
+ return;
+ }
+ drop:
+ stats->ErrorFrames++;
+- put_cpu();
++ put_cpu_light();
+ kfree_skb(skb);
+ }
+
+Index: linux-stable/drivers/scsi/fcoe/fcoe_ctlr.c
+===================================================================
+--- linux-stable.orig/drivers/scsi/fcoe/fcoe_ctlr.c
++++ linux-stable/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -792,7 +792,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+
+ INIT_LIST_HEAD(&del_list);
+
+- stats = per_cpu_ptr(fip->lp->stats, get_cpu());
++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
+
+ list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
+ deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
+@@ -828,7 +828,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+ sel_time = fcf->time;
+ }
+ }
+- put_cpu();
++ put_cpu_light();
+
+ list_for_each_entry_safe(fcf, next, &del_list, list) {
+ /* Removes fcf from current list */
+Index: linux-stable/drivers/scsi/libfc/fc_exch.c
+===================================================================
+--- linux-stable.orig/drivers/scsi/libfc/fc_exch.c
++++ linux-stable/drivers/scsi/libfc/fc_exch.c
+@@ -730,10 +730,10 @@ static struct fc_exch *fc_exch_em_alloc(
+ }
+ memset(ep, 0, sizeof(*ep));
+
+- cpu = get_cpu();
++ cpu = get_cpu_light();
+ pool = per_cpu_ptr(mp->pool, cpu);
+ spin_lock_bh(&pool->lock);
+- put_cpu();
++ put_cpu_light();
+
+ /* peek cache of free slot */
+ if (pool->left != FC_XID_UNKNOWN) {
diff --git a/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch b/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
new file mode 100644
index 0000000..575a5a7
--- /dev/null
+++ b/scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
@@ -0,0 +1,49 @@
+Subject: scsi: qla2xxx: Use local_irq_save_nort() in qla2x00_poll
+From: John Kacur <jkacur@redhat.com>
+Date: Fri, 27 Apr 2012 12:48:46 +0200
+
+RT triggers the following:
+
+[ 11.307652] [<ffffffff81077b27>] __might_sleep+0xe7/0x110
+[ 11.307663] [<ffffffff8150e524>] rt_spin_lock+0x24/0x60
+[ 11.307670] [<ffffffff8150da78>] ? rt_spin_lock_slowunlock+0x78/0x90
+[ 11.307703] [<ffffffffa0272d83>] qla24xx_intr_handler+0x63/0x2d0 [qla2xxx]
+[ 11.307736] [<ffffffffa0262307>] qla2x00_poll+0x67/0x90 [qla2xxx]
+
+Function qla2x00_poll does local_irq_save() before calling qla24xx_intr_handler,
+which takes a spinlock. Since spinlocks are sleepable on RT, they must not be
+taken with interrupts disabled. Therefore we use local_irq_save_nort() instead,
+which saves the flags without disabling interrupts.
+
+This fix needs to be applied to v3.0-rt, v3.2-rt and v3.4-rt
+
+Suggested-by: Thomas Gleixner
+Signed-off-by: John Kacur <jkacur@redhat.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: David Sommerseth <davids@redhat.com>
+Link: http://lkml.kernel.org/r/1335523726-10024-1-git-send-email-jkacur@redhat.com
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/scsi/qla2xxx/qla_inline.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/drivers/scsi/qla2xxx/qla_inline.h
+===================================================================
+--- linux-stable.orig/drivers/scsi/qla2xxx/qla_inline.h
++++ linux-stable/drivers/scsi/qla2xxx/qla_inline.h
+@@ -36,12 +36,12 @@ qla2x00_poll(struct rsp_que *rsp)
+ {
+ unsigned long flags;
+ struct qla_hw_data *ha = rsp->hw;
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (IS_QLA82XX(ha))
+ qla82xx_poll(0, rsp);
+ else
+ ha->isp_ops->intr_handler(0, rsp);
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ static inline uint8_t *
diff --git a/seqlock-prevent-rt-starvation.patch b/seqlock-prevent-rt-starvation.patch
new file mode 100644
index 0000000..3ea39fb
--- /dev/null
+++ b/seqlock-prevent-rt-starvation.patch
@@ -0,0 +1,168 @@
+Subject: seqlock: Prevent rt starvation
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 22 Feb 2012 12:03:30 +0100
+
+If a low prio writer gets preempted while holding the seqlock write
+locked, a high prio reader spins forever on RT.
+
+To prevent this, let the reader grab the spinlock, so it blocks and
+eventually boosts the writer. This way the writer can proceed and
+endless spinning is prevented.
+
+For seqcount writers we disable preemption over the update code
+path. Thanks to Al Viro for disentangling some VFS code to make that
+possible.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+
+---
+ include/linux/seqlock.h | 55 +++++++++++++++++++++++++++++++++++++++---------
+ include/net/neighbour.h | 2 -
+ 2 files changed, 46 insertions(+), 11 deletions(-)
+
+Index: linux-stable/include/linux/seqlock.h
+===================================================================
+--- linux-stable.orig/include/linux/seqlock.h
++++ linux-stable/include/linux/seqlock.h
+@@ -146,18 +146,30 @@ static inline int read_seqcount_retry(co
+ * Sequence counter only version assumes that callers are using their
+ * own mutexing.
+ */
+-static inline void write_seqcount_begin(seqcount_t *s)
++static inline void __write_seqcount_begin(seqcount_t *s)
+ {
+ s->sequence++;
+ smp_wmb();
+ }
+
+-static inline void write_seqcount_end(seqcount_t *s)
++static inline void write_seqcount_begin(seqcount_t *s)
++{
++ preempt_disable_rt();
++ __write_seqcount_begin(s);
++}
++
++static inline void __write_seqcount_end(seqcount_t *s)
+ {
+ smp_wmb();
+ s->sequence++;
+ }
+
++static inline void write_seqcount_end(seqcount_t *s)
++{
++ __write_seqcount_end(s);
++ preempt_enable_rt();
++}
++
+ /**
+ * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * @s: pointer to seqcount_t
+@@ -198,10 +210,33 @@ typedef struct {
+ /*
+ * Read side functions for starting and finalizing a read side section.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline unsigned read_seqbegin(const seqlock_t *sl)
+ {
+ return read_seqcount_begin(&sl->seqcount);
+ }
++#else
++/*
++ * Starvation safe read side for RT
++ */
++static inline unsigned read_seqbegin(seqlock_t *sl)
++{
++ unsigned ret;
++
++repeat:
++ ret = sl->seqcount.sequence;
++ if (unlikely(ret & 1)) {
++ /*
++ * Take the lock and let the writer proceed (i.e. evtl
++ * boost it), otherwise we could loop here forever.
++ */
++ spin_lock(&sl->lock);
++ spin_unlock(&sl->lock);
++ goto repeat;
++ }
++ return ret;
++}
++#endif
+
+ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ {
+@@ -216,36 +251,36 @@ static inline unsigned read_seqretry(con
+ static inline void write_seqlock(seqlock_t *sl)
+ {
+ spin_lock(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __write_seqcount_end(&sl->seqcount);
+ spin_unlock(&sl->lock);
+ }
+
+ static inline void write_seqlock_bh(seqlock_t *sl)
+ {
+ spin_lock_bh(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock_bh(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __write_seqcount_end(&sl->seqcount);
+ spin_unlock_bh(&sl->lock);
+ }
+
+ static inline void write_seqlock_irq(seqlock_t *sl)
+ {
+ spin_lock_irq(&sl->lock);
+- write_seqcount_begin(&sl->seqcount);
++ __write_seqcount_begin(&sl->seqcount);
+ }
+
+ static inline void write_sequnlock_irq(seqlock_t *sl)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __write_seqcount_end(&sl->seqcount);
+ spin_unlock_irq(&sl->lock);
+ }
+
+@@ -254,7 +289,7 @@ static inline unsigned long __write_seql
+ unsigned long flags;
+
+ spin_lock_irqsave(&sl->lock, flags);
+- write_seqcount_begin(&sl->seqcount);
++ __write_seqcount_begin(&sl->seqcount);
+ return flags;
+ }
+
+@@ -264,7 +299,7 @@ static inline unsigned long __write_seql
+ static inline void
+ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ {
+- write_seqcount_end(&sl->seqcount);
++ __write_seqcount_end(&sl->seqcount);
+ spin_unlock_irqrestore(&sl->lock, flags);
+ }
+
+Index: linux-stable/include/net/neighbour.h
+===================================================================
+--- linux-stable.orig/include/net/neighbour.h
++++ linux-stable/include/net/neighbour.h
+@@ -385,7 +385,7 @@ struct neighbour_cb {
+
+ #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
+
+-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
+ const struct net_device *dev)
+ {
+ unsigned int seq;
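A compilable userspace sketch of the RT read_seqbegin() strategy above follows: instead of spinning on an odd sequence, the reader briefly acquires the writer's lock, which blocks and (on RT, via priority inheritance) boosts a preempted writer, so the retry loop cannot live-lock. pthread mutexes and the demo_* names stand in for the kernel primitives, and the memory ordering is simplified, so read it as an illustration of the idea rather than the real implementation. Build with something like cc -pthread; the single-threaded main() only exercises the API, the interesting case is a reader observing an odd sequence while a writer holds the lock.

#include <pthread.h>
#include <stdio.h>

struct demo_seqlock {
	unsigned int sequence;
	pthread_mutex_t lock;
};

static unsigned int demo_read_seqbegin(struct demo_seqlock *sl)
{
	unsigned int ret;

	for (;;) {
		ret = __atomic_load_n(&sl->sequence, __ATOMIC_ACQUIRE);
		if (!(ret & 1))
			return ret;
		/*
		 * A writer is in its critical section: block on its lock
		 * (boosting it on RT) instead of spinning forever, then retry.
		 */
		pthread_mutex_lock(&sl->lock);
		pthread_mutex_unlock(&sl->lock);
	}
}

static int demo_read_seqretry(struct demo_seqlock *sl, unsigned int start)
{
	return __atomic_load_n(&sl->sequence, __ATOMIC_ACQUIRE) != start;
}

static void demo_write_seqlock(struct demo_seqlock *sl)
{
	pthread_mutex_lock(&sl->lock);
	__atomic_store_n(&sl->sequence, sl->sequence + 1, __ATOMIC_RELEASE);
}

static void demo_write_sequnlock(struct demo_seqlock *sl)
{
	__atomic_store_n(&sl->sequence, sl->sequence + 1, __ATOMIC_RELEASE);
	pthread_mutex_unlock(&sl->lock);
}

int main(void)
{
	struct demo_seqlock sl = { 0, PTHREAD_MUTEX_INITIALIZER };
	unsigned int seq;
	int data = 0;

	demo_write_seqlock(&sl);
	data = 42;
	demo_write_sequnlock(&sl);

	do {
		seq = demo_read_seqbegin(&sl);
		printf("read %d\n", data);
	} while (demo_read_seqretry(&sl, seq));
	return 0;
}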
diff --git a/seqlock-remove-unused-functions.patch b/seqlock-remove-unused-functions.patch
new file mode 100644
index 0000000..7bbaaac
--- /dev/null
+++ b/seqlock-remove-unused-functions.patch
@@ -0,0 +1,46 @@
+Subject: seqlock: Remove unused functions
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Jul 2011 18:38:22 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/seqlock.h | 21 ---------------------
+ 1 file changed, 21 deletions(-)
+
+Index: linux-stable/include/linux/seqlock.h
+===================================================================
+--- linux-stable.orig/include/linux/seqlock.h
++++ linux-stable/include/linux/seqlock.h
+@@ -69,17 +69,6 @@ static inline void write_sequnlock(seqlo
+ spin_unlock(&sl->lock);
+ }
+
+-static inline int write_tryseqlock(seqlock_t *sl)
+-{
+- int ret = spin_trylock(&sl->lock);
+-
+- if (ret) {
+- ++sl->sequence;
+- smp_wmb();
+- }
+- return ret;
+-}
+-
+ /* Start of read calculation -- fetch last complete writer token */
+ static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
+ {
+@@ -269,14 +258,4 @@ static inline void write_seqcount_barrie
+ #define write_sequnlock_bh(lock) \
+ do { write_sequnlock(lock); local_bh_enable(); } while(0)
+
+-#define read_seqbegin_irqsave(lock, flags) \
+- ({ local_irq_save(flags); read_seqbegin(lock); })
+-
+-#define read_seqretry_irqrestore(lock, iv, flags) \
+- ({ \
+- int ret = read_seqretry(lock, iv); \
+- local_irq_restore(flags); \
+- ret; \
+- })
+-
+ #endif /* __LINUX_SEQLOCK_H */
diff --git a/seqlock-use-seqcount.patch b/seqlock-use-seqcount.patch
new file mode 100644
index 0000000..c86cc9b
--- /dev/null
+++ b/seqlock-use-seqcount.patch
@@ -0,0 +1,220 @@
+Subject: seqlock: Use seqcount
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 16 Jul 2011 18:40:26 +0200
+
+No point in having different implementations for the same thing.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/seqlock.h | 176 +++++++++++++++++++++++++-----------------------
+ 1 file changed, 93 insertions(+), 83 deletions(-)
+
+Index: linux-stable/include/linux/seqlock.h
+===================================================================
+--- linux-stable.orig/include/linux/seqlock.h
++++ linux-stable/include/linux/seqlock.h
+@@ -30,81 +30,12 @@
+ #include <linux/preempt.h>
+ #include <asm/processor.h>
+
+-typedef struct {
+- unsigned sequence;
+- spinlock_t lock;
+-} seqlock_t;
+-
+-/*
+- * These macros triggered gcc-3.x compile-time problems. We think these are
+- * OK now. Be cautious.
+- */
+-#define __SEQLOCK_UNLOCKED(lockname) \
+- { 0, __SPIN_LOCK_UNLOCKED(lockname) }
+-
+-#define seqlock_init(x) \
+- do { \
+- (x)->sequence = 0; \
+- spin_lock_init(&(x)->lock); \
+- } while (0)
+-
+-#define DEFINE_SEQLOCK(x) \
+- seqlock_t x = __SEQLOCK_UNLOCKED(x)
+-
+-/* Lock out other writers and update the count.
+- * Acts like a normal spin_lock/unlock.
+- * Don't need preempt_disable() because that is in the spin_lock already.
+- */
+-static inline void write_seqlock(seqlock_t *sl)
+-{
+- spin_lock(&sl->lock);
+- ++sl->sequence;
+- smp_wmb();
+-}
+-
+-static inline void write_sequnlock(seqlock_t *sl)
+-{
+- smp_wmb();
+- sl->sequence++;
+- spin_unlock(&sl->lock);
+-}
+-
+-/* Start of read calculation -- fetch last complete writer token */
+-static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
+-{
+- unsigned ret;
+-
+-repeat:
+- ret = ACCESS_ONCE(sl->sequence);
+- if (unlikely(ret & 1)) {
+- cpu_relax();
+- goto repeat;
+- }
+- smp_rmb();
+-
+- return ret;
+-}
+-
+-/*
+- * Test if reader processed invalid data.
+- *
+- * If sequence value changed then writer changed data while in section.
+- */
+-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
+-{
+- smp_rmb();
+-
+- return unlikely(sl->sequence != start);
+-}
+-
+-
+ /*
+ * Version using sequence counter only.
+ * This can be used when code has its own mutex protecting the
+ * updating starting before the write_seqcountbeqin() and ending
+ * after the write_seqcount_end().
+ */
+-
+ typedef struct seqcount {
+ unsigned sequence;
+ } seqcount_t;
+@@ -207,7 +138,6 @@ static inline int __read_seqcount_retry(
+ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+ {
+ smp_rmb();
+-
+ return __read_seqcount_retry(s, start);
+ }
+
+@@ -241,21 +171,101 @@ static inline void write_seqcount_barrie
+ s->sequence+=2;
+ }
+
++typedef struct {
++ struct seqcount seqcount;
++ spinlock_t lock;
++} seqlock_t;
++
+ /*
+- * Possible sw/hw IRQ protected versions of the interfaces.
++ * These macros triggered gcc-3.x compile-time problems. We think these are
++ * OK now. Be cautious.
+ */
++#define __SEQLOCK_UNLOCKED(lockname) \
++ { \
++ .seqcount = SEQCNT_ZERO, \
++ .lock = __SPIN_LOCK_UNLOCKED(lockname) \
++ }
++
++#define seqlock_init(x) \
++ do { \
++ seqcount_init(&(x)->seqcount); \
++ spin_lock_init(&(x)->lock); \
++ } while (0)
++
++#define DEFINE_SEQLOCK(x) \
++ seqlock_t x = __SEQLOCK_UNLOCKED(x)
++
++/*
++ * Read side functions for starting and finalizing a read side section.
++ */
++static inline unsigned read_seqbegin(const seqlock_t *sl)
++{
++ return read_seqcount_begin(&sl->seqcount);
++}
++
++static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
++{
++ return read_seqcount_retry(&sl->seqcount, start);
++}
++
++/*
++ * Lock out other writers and update the count.
++ * Acts like a normal spin_lock/unlock.
++ * Don't need preempt_disable() because that is in the spin_lock already.
++ */
++static inline void write_seqlock(seqlock_t *sl)
++{
++ spin_lock(&sl->lock);
++ write_seqcount_begin(&sl->seqcount);
++}
++
++static inline void write_sequnlock(seqlock_t *sl)
++{
++ write_seqcount_end(&sl->seqcount);
++ spin_unlock(&sl->lock);
++}
++
++static inline void write_seqlock_bh(seqlock_t *sl)
++{
++ spin_lock_bh(&sl->lock);
++ write_seqcount_begin(&sl->seqcount);
++}
++
++static inline void write_sequnlock_bh(seqlock_t *sl)
++{
++ write_seqcount_end(&sl->seqcount);
++ spin_unlock_bh(&sl->lock);
++}
++
++static inline void write_seqlock_irq(seqlock_t *sl)
++{
++ spin_lock_irq(&sl->lock);
++ write_seqcount_begin(&sl->seqcount);
++}
++
++static inline void write_sequnlock_irq(seqlock_t *sl)
++{
++ write_seqcount_end(&sl->seqcount);
++ spin_unlock_irq(&sl->lock);
++}
++
++static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&sl->lock, flags);
++ write_seqcount_begin(&sl->seqcount);
++ return flags;
++}
++
+ #define write_seqlock_irqsave(lock, flags) \
+- do { local_irq_save(flags); write_seqlock(lock); } while (0)
+-#define write_seqlock_irq(lock) \
+- do { local_irq_disable(); write_seqlock(lock); } while (0)
+-#define write_seqlock_bh(lock) \
+- do { local_bh_disable(); write_seqlock(lock); } while (0)
+-
+-#define write_sequnlock_irqrestore(lock, flags) \
+- do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
+-#define write_sequnlock_irq(lock) \
+- do { write_sequnlock(lock); local_irq_enable(); } while(0)
+-#define write_sequnlock_bh(lock) \
+- do { write_sequnlock(lock); local_bh_enable(); } while(0)
++ do { flags = __write_seqlock_irqsave(lock); } while (0)
++
++static inline void
++write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
++{
++ write_seqcount_end(&sl->seqcount);
++ spin_unlock_irqrestore(&sl->lock, flags);
++}
+
+ #endif /* __LINUX_SEQLOCK_H */
diff --git a/series b/series
new file mode 100644
index 0000000..dfaa057
--- /dev/null
+++ b/series
@@ -0,0 +1,598 @@
+###########################################################
+# DELTA against a known Linus release
+###########################################################
+
+############################################################
+# UPSTREAM changes queued
+############################################################
+
+############################################################
+# UPSTREAM FIXES, patches pending
+############################################################
+
+############################################################
+# Stuff broken upstream, patches submitted
+############################################################
+x86-kprobes-remove-bogus-preempt-enable.patch
+
+############################################################
+# Stuff which needs addressing upstream, but requires more
+# information
+############################################################
+x86-hpet-disable-msi-on-lenovo-w510.patch
+
+############################################################
+# Stuff broken upstream, need to be sent
+############################################################
+
+############################################################
+# Submitted on LKML
+############################################################
+# SCHED BLOCK/WQ
+block-shorten-interrupt-disabled-regions.patch
+
+# CHECKME sched-distangle-worker-accounting-from-rq-3elock.patch
+
+############################################################
+# Submitted to mips ML
+############################################################
+mips-enable-interrupts-in-signal.patch
+
+############################################################
+# Submitted to ARM ML
+############################################################
+
+############################################################
+# Submitted on LKML
+############################################################
+
+# JBD
+
+# SCHED
+
+############################################################
+# Submitted on ppc-devel
+############################################################
+
+############################################################
+# Submitted to net-dev
+############################################################
+
+############################################################
+# Pending in tip
+############################################################
+
+# WATCHDOG
+
+# CLOCKSOURCE
+
+# RTMUTEX CLEANUP
+
+# RAW SPINLOCKS
+
+# X86
+
+############################################################
+# Pending in peterz's scheduler queue
+############################################################
+
+
+############################################################
+# Stuff which should go upstream ASAP
+############################################################
+
+# GENIRQ
+genirq-add-default-mask-cmdline-option.patch
+
+# PPC
+ppc-mark-low-level-handlers-no-thread.patch
+
+# Timekeeping / VDSO
+
+# SEQLOCK
+seqlock-remove-unused-functions.patch
+seqlock-use-seqcount.patch
+
+# RAW SPINLOCKS
+timekeeping-split-xtime-lock.patch
+intel_idle-convert-i7300_idle_lock-to-raw-spinlock.patch
+ntp-make-ntp-lock-raw-sigh.patch
+
+# MM memcg
+
+# Tracing
+tracing-account-for-preempt-off-in-preempt_schedule.patch
+
+# PTRACE/SIGNAL crap
+signal-revert-ptrace-preempt-magic.patch
+
+# ARM IRQF_NO_TRHEAD / IRQ THREADING SUPPORT
+arm-mark-pmu-interupt-no-thread.patch
+arm-allow-irq-threading.patch
+arm-convert-boot-lock-to-raw.patch
+arm-omap-make-wakeupgen_lock-raw.patch
+
+# PREEMPT_ENABLE_NO_RESCHED
+
+# SIGNALS / POSIXTIMERS
+signals-do-not-wake-self.patch
+posix-timers-no-broadcast.patch
+signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+oleg-signal-rt-fix.patch
+
+# SCHED
+
+# GENERIC CMPXCHG
+generic-cmpxchg-use-raw-local-irq.patch
+
+# SHORTEN PREEMPT DISABLED
+drivers-random-reduce-preempt-disabled-region.patch
+
+# CLOCKSOURCE
+arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
+clocksource-tclib-allow-higher-clockrates.patch
+
+# HW LATENCY DETECTOR - this really wants a rewrite
+#hw-latency-detector.patch
+
+# DRIVERS NET
+drivers-net-tulip-add-missing-pci-disable.patch
+drivers-net-8139-disable-irq-nosync.patch
+drivers-net-ehea-mark-rx-irq-no-thread.patch
+drivers-net-at91-make-mdio-protection-rt-safe.patch
+
+# PREEMPT
+
+# PAGEFAULT DISABLE
+mm-prepare-pf-disable-discoupling.patch
+arch-use-pagefault-disabled.patch
+peter_zijlstra-frob-pagefault_disable.patch
+peterz-raw_pagefault_disable.patch
+# highmem-explicitly-disable-preemption.patch -- peterz
+filemap-fix-up.patch
+mm-remove-preempt-count-from-pf.patch
+
+# HIGHMEM
+x86-highmem-warn.patch
+
+# PM
+suspend-prevernt-might-sleep-splats.patch
+
+# DEVICE TREE
+of-fixup-recursive-locking.patch
+of-convert-devtree-lock.patch
+
+# MM/LISTS
+list-add-list-last-entry.patch
+mm-page-alloc-use-list-last-entry.patch
+mm-slab-move-debug-out.patch
+
+# INCLUDE MESS
+pid-h-include-atomic-h.patch
+sysctl-include-atomic-h.patch
+
+# NETWORKING
+net-flip-lock-dep-thingy.patch
+
+# SOFTIRQ
+softirq-thread-do-softirq.patch
+softirq-split-out-code.patch
+
+# X86
+x86-io-apic-migra-no-unmask.patch
+fix-rt-int3-x86_32-3.2-rt.patch
+
+# RCU
+
+# LOCKING INIT FIXES
+locking-various-init-fixes.patch
+
+# PCI
+pci-access-use-__wake_up_all_locked.patch
+
+#####################################################
+# Stuff which should go mainline, but wants some care
+#####################################################
+
+# SEQLOCK
+
+# ANON RW SEMAPHORES
+
+# TRACING
+latency-hist.patch
+
+# HW latency detector
+hwlatdetect.patch
+
+##################################################
+# REAL RT STUFF starts here
+##################################################
+
+# Add RT to version
+localversion.patch
+
+# PRINTK
+early-printk-consolidate.patch
+printk-kill.patch
+printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+
+# BASE RT CONFIG
+rt-preempt-base-config.patch
+
+# WARN/BUG_ON_RT
+bug-rt-dependend-variants.patch
+
+# LOCAL_IRQ_RT/NON_RT
+local-irq-rt-depending-variants.patch
+
+# PREEMPT NORT
+preempt-nort-rt-variants.patch
+
+# ANNOTATE local_irq_disable sites
+ata-disable-interrupts-if-non-rt.patch
+ide-use-nort-local-irq-variants.patch
+infiniband-mellanox-ib-use-nort-irq.patch
+inpt-gameport-use-local-irq-nort.patch
+acpi-use-local-irq-nort.patch
+user-use-local-irq-nort.patch
+resource-counters-use-localirq-nort.patch
+usb-hcd-use-local-irq-nort.patch
+tty-use-local-irq-nort.patch
+mm-scatterlist-dont-disable-irqs-on-RT.patch
+
+# Sigh
+signal-fix-up-rcu-wreckage.patch
+
+# ANNOTATE BUG/WARNON
+net-wireless-warn-nort.patch
+
+# BIT SPINLOCKS - SIGH
+mm-cgroup-page-bit-spinlock.patch
+fs-replace-bh_uptodate_lock-for-rt.patch
+fs-jbd-replace-bh_state-lock.patch
+
+# GENIRQ
+genirq-nodebug-shirq.patch
+genirq-disable-irqpoll-on-rt.patch
+genirq-force-threading.patch
+
+# DRIVERS NET
+drivers-net-fix-livelock-issues.patch
+drivers-net-vortex-fix-locking-issues.patch
+drivers-net-gianfar-make-rt-aware.patch
+
+# DRIVERS USB
+# Revisit. Looks weird
+#usb-rt-support.patch
+usb-fix-mouse-problem-copying-large-data.patch
+
+# LOCAL_IRQ_LOCKS
+local-var.patch
+rt-local-irq-lock.patch
+cpu-rt-variants.patch
+
+# MM SLAB
+mm-slab-wrap-functions.patch
+mm-slab-more-lock-breaks.patch
+
+# MM PAGE_ALLOC
+mm-page_alloc-rt-friendly-per-cpu-pages.patch
+mm-page_alloc-reduce-lock-sections-further.patch
+mm-page-alloc-fix.patch
+
+# MM SWAP
+mm-convert-swap-to-percpu-locked.patch
+
+# MM vmstat
+mm-make-vmstat-rt-aware.patch
+
+# MM memory
+#mm-memory-rt.patch - ZAP... is unused
+mm-shrink-the-page-frame-to-rt-size.patch
+re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+
+# MM SLAB only
+mm-allow-slab-rt.patch
+
+# Revisit for avr/frv/ia64/mn10300/sh/sparc ...
+#mm-quicklists-percpu-locked.patch
+
+# RADIX TREE
+radix-tree-rt-aware.patch
+
+# PANIC
+panic-disable-random-on-rt.patch
+
+# IPC
+ipc-make-rt-aware.patch
+ipc-mqueue-add-a-critical-section-to-avoid-a-deadlock.patch
+
+# RELAY
+relay-fix-timer-madness.patch
+
+# NETWORKING
+
+# WORKQUEUE SIGH
+
+# TIMERS
+timers-prepare-for-full-preemption.patch
+timers-preempt-rt-support.patch
+timers-mov-printk_tick-to-soft-interrupt.patch
+timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+timers-avoid-the-base-null-otptimization-on-rt.patch
+
+# More PRINTK
+rfc-printk-don-27t-call-printk_tick-in-printk_needs_cpu.patch
+
+# HRTIMERS
+hrtimers-prepare-full-preemption.patch
+hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+peter_zijlstra-frob-hrtimer.patch
+hrtimer-add-missing-debug_activate-aid-was-re-announce-3-0-6-rt17.patch
+hrtimer-fix-reprogram-madness.patch
+timer-fd-avoid-live-lock.patch
+
+# POSIX-CPU-TIMERS
+posix-timers-thread-posix-cpu-timers-on-rt.patch
+posix-timers-shorten-cpu-timers-thread.patch
+posix-timers-avoid-wakeups-when-no-timers-are-active.patch
+
+# SCHEDULER
+sched-delay-put-task.patch
+sched-limit-nr-migrate.patch
+sched-mmdrop-delayed.patch
+sched-rt-mutex-wakeup.patch
+sched-might-sleep-do-not-account-rcu-depth.patch
+# CHECKME sched-load-balance-break-on-rq-contention.patch
+sched-cond-resched.patch
+cond-resched-softirq-rt.patch
+cond-resched-lock-rt-tweak.patch
+sched-disable-ttwu-queue.patch
+sched-disable-rt-group-sched-on-rt.patch
+sched-ttwu-ensure-success-return-is-correct.patch
+
+# STOP MACHINE
+stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+stomp-machine-mark-stomper-thread.patch
+stomp-machine-raw-lock.patch
+
+# MIGRATE DISABLE AND PER CPU
+hotplug-light-get-online-cpus.patch
+hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+re-migrate_disable-race-with-cpu-hotplug-3f.patch
+sched-migrate-disable.patch
+hotplug-use-migrate-disable.patch
+hotplug-call-cpu_unplug_begin-a-little-early.patch
+
+ftrace-migrate-disable-tracing.patch
+rt-tracing-show-padding-as-unsigned-short.patch
+
+migrate-disable-rt-variant.patch
+peter_zijlstra-frob-migrate_disable.patch
+peter_zijlstra-frob-migrate_disable-2.patch
+sched-rt-fix-migrate_enable-thinko.patch
+sched-teach-migrate_disable-about-atomic-contexts.patch
+rt-sched-postpone-actual-migration-disalbe-to-schedule.patch
+rt-sched-do-not-compare-cpu-masks-in-scheduler.patch
+rt-sched-have-migrate_disable-ignore-bounded-threads.patch
+sched-clear-pf-thread-bound-on-fallback-rq.patch
+
+# FTRACE
+ftrace-crap.patch
+# CHECKME rt-ring-buffer-convert-reader_lock-from-raw_spin_lock-into-spin_lock.patch
+# CHECKME rfc-ring-buffer-rt-check-for-irqs-disabled-before-grabbing-reader-lock.patch
+
+# NETWORKING
+net-netif_rx_ni-migrate-disable.patch
+
+# NOHZ
+softirq-sanitize-softirq-pending.patch
+
+# LOCKDEP
+lockdep-no-softirq-accounting-on-rt.patch
+
+# SOFTIRQ local lock
+mutex-no-spin-on-rt.patch
+softirq-local-lock.patch
+softirq-export-in-serving-softirq.patch
+harirq-h-define-softirq_count-as-oul-to-kill-build-warning.patch
+softirq-fix-unplug-deadlock.patch
+softirq-disable-softirq-stacks-for-rt.patch
+softirq-make-fifo.patch
+tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+
+# LOCAL VARS and GETCPU STUFF
+local-vars-migrate-disable.patch
+
+# RAID5
+md-raid5-percpu-handling-rt-aware.patch
+
+# RTMUTEX
+rtmutex-lock-killable.patch
+
+# FUTEX/RTMUTEX
+rtmutex-futex-prepare-rt.patch
+futex-requeue-pi-fix.patch
+
+# RTMUTEX
+rt-mutex-add-sleeping-spinlocks-support.patch
+spinlock-types-separate-raw.patch
+rtmutex-avoid-include-hell.patch
+rt-add-rt-spinlock-to-headers.patch
+rt-add-rt-to-mutex-headers.patch
+rwsem-add-rt-variant.patch
+rt-add-rt-locks.patch
+
+# RTMUTEX Fallout
+tasklist-lock-fix-section-conflict.patch
+
+# NOHZ/RTMUTEX
+timer-handle-idle-trylock-in-get-next-timer-irq.patch
+
+# RCU
+rcu-force-preempt-rcu-for-rt.patch
+peter_zijlstra-frob-rcu.patch
+rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+rcu-tiny-merge-bh.patch
+patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+rcu-fix-build-break.patch
+rt-rcutree-warn-fix.patch
+
+# LGLOCKS - lovely
+lglocks-rt.patch
+
+# DRIVERS SERIAL
+drivers-serial-cleanup-locking-for-rt.patch
+drivers-serial-call-flush_to_ldisc-when-the-irq-is-t.patch
+drivers-tty-fix-omap-lock-crap.patch
+rt-serial-warn-fix.patch
+
+# FS
+fs-namespace-preemption-fix.patch
+mm-protect-activate-switch-mm.patch
+mm-protect-activate_mm-by-preempt_-disable-7cenable-_rt.patch
+fs-block-rt-support.patch
+fs-ntfs-disable-interrupt-non-rt.patch
+
+# X86
+x86-mce-timer-hrtimer.patch
+x86-stackprot-no-random-on-rt.patch
+# x86-no-highmem-with-rt.patch -- peterz
+x86-use-gen-rwsem-spinlocks-rt.patch
+x86-disable-debug-stack.patch
+
+# CPU get light
+workqueue-use-get-cpu-light.patch
+epoll-use-get-cpu-light.patch
+mm-vmalloc-use-get-cpu-light.patch
+
+# WORKQUEUE more fixes
+# CHECKME workqueue-sanity.patch
+# CHECKME workqueue-fix-PF_THREAD_BOUND.patch
+# CHECKME workqueue-hotplug-fix.patch
+# CHECKME workqueue-more-hotplug-fallout.patch
+
+# DEBUGOBJECTS
+debugobjects-rt.patch
+
+# JUMPLABEL
+jump-label-rt.patch
+
+# NET
+skbufhead-raw-lock.patch
+
+# PERF
+perf-move-irq-work-to-softirq-in-rt.patch
+
+# CONSOLE. NEEDS more thought !!!
+printk-rt-aware.patch
+
+# POWERC
+power-use-generic-rwsem-on-rt.patch
+power-disable-highmem-on-rt.patch
+
+# ARM
+arm-disable-highmem-on-rt.patch
+arm-at91-tclib-default-to-tclib-timer-for-rt.patch
+
+# MIPS
+mips-disable-highmem-on-rt.patch
+
+# NETWORK livelock fix
+net-tx-action-avoid-livelock-on-rt.patch
+
+# NETWORK DEBUGGING AID
+ping-sysrq.patch
+
+# KGDB
+kgb-serial-hackaround.patch
+
+# SYSFS - RT indicator
+sysfs-realtime-entry.patch
+
+# KMAP/HIGHMEM
+mm-rt-kmap-atomic-scheduling.patch
+
+# IPC
+ipc-sem-rework-semaphore-wakeups.patch
+
+# SYSRQ
+
+# KVM require constant freq TSC (smp function call -> cpufreq)
+x86-kvm-require-const-tsc-for-rt.patch
+
+# SCSI/FCOE
+scsi-fcoe-rt-aware.patch
+
+# X86 crypto
+x86-crypto-reduce-preempt-disabled-regions.patch
+
+# Device mapper
+dm-make-rt-aware.patch
+
+# ACPI
+# Dropped those two as they cause a scheduling in atomic failure and
+# we have no clue why we made those locks raw in the first place.
+# acpi-make-gbl-hardware-lock-raw.patch
+# acpi-make-ec-lock-raw-as-well.patch
+
+# This one is just a follow up to the raw spin locks
+# Simple raw spinlock based waitqueue
+# wait-simple-version.patch
+# acpi-gpe-use-wait-simple.patch
+
+# CPUMASK OFFSTACK
+cpumask-disable-offstack-on-rt.patch
+
+# Various fixes - fold them back
+seqlock-prevent-rt-starvation.patch
+#fs-protect-opencoded-isize-seqcount.patch
+#net-u64-stat-protect-seqcount.patch
+rfc-sched-rt-fix-wait_task_interactive-to-test-rt_spin_lock-state.patch
+
+cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+
+softirq-preempt-fix-3-re.txt
+scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
+upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+
+# FS LIVELOCK PREVENTION
+rt-introduce-cpu-chill.patch
+fs-dcache-use-cpu-chill-in-trylock-loops.patch
+net-use-cpu-chill.patch
+
+# LOCKDEP
+lockdep-selftest-convert-spinlock-to-raw-spinlock.patch
+lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
+
+rt-disable-rt-group-sched.patch
+fs-jbd-pull-plug-when-waiting-for-space.patch
+perf-make-swevent-hrtimer-irqsafe.patch
+cpu-rt-rework-cpu-down.patch
+
+# Stable-rt stuff: Fold back when Steve grabbed it
+random-make-it-work-on-rt.patch
+softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch
+mm-slab-fix-potential-deadlock.patch
+mm-page-alloc-use-local-lock-on-target-cpu.patch
+rt-rw-lockdep-annotations.patch
+sched-better-debug-output-for-might-sleep.patch
+stomp-machine-deal-clever-with-stopper-lock.patch
+
+# 3.6 specific updates
+net-another-local-irq-disable-alloc-atomic-headache.patch
+net-use-cpu-light-in-ip-send-unicast-reply.patch
+peterz-srcu-crypto-chain.patch
+x86-perf-uncore-deal-with-kfree.patch
+softirq-make-serving-softirqs-a-task-flag.patch
+softirq-split-handling-function.patch
+softirq-split-locks.patch
+
+# Needs more thought
+# block-wrap-raise-softirq-in-local-bh-to-avoid-context-switches.patch
+# nohz-fix-sirq-fallout.patch
+
+# Enable full RT
+kconfig-disable-a-few-options-rt.patch
+kconfig-preempt-rt-full.patch
diff --git a/signal-fix-up-rcu-wreckage.patch b/signal-fix-up-rcu-wreckage.patch
new file mode 100644
index 0000000..5171fd5
--- /dev/null
+++ b/signal-fix-up-rcu-wreckage.patch
@@ -0,0 +1,37 @@
+Subject: signal-fix-up-rcu-wreckage.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 22 Jul 2011 08:07:08 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/signal.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+Index: linux-stable/kernel/signal.c
+===================================================================
+--- linux-stable.orig/kernel/signal.c
++++ linux-stable/kernel/signal.c
+@@ -1394,12 +1394,12 @@ struct sighand_struct *__lock_task_sigha
+ struct sighand_struct *sighand;
+
+ for (;;) {
+- local_irq_save(*flags);
++ local_irq_save_nort(*flags);
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
+ break;
+ }
+
+@@ -1410,7 +1410,7 @@ struct sighand_struct *__lock_task_sigha
+ }
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+- local_irq_restore(*flags);
++ local_irq_restore_nort(*flags);
+ }
+
+ return sighand;
diff --git a/signal-revert-ptrace-preempt-magic.patch b/signal-revert-ptrace-preempt-magic.patch
new file mode 100644
index 0000000..87a663b
--- /dev/null
+++ b/signal-revert-ptrace-preempt-magic.patch
@@ -0,0 +1,29 @@
+Subject: signal-revert-ptrace-preempt-magic.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 21 Sep 2011 19:57:12 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/signal.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+Index: linux-stable/kernel/signal.c
+===================================================================
+--- linux-stable.orig/kernel/signal.c
++++ linux-stable/kernel/signal.c
+@@ -1898,15 +1898,7 @@ static void ptrace_stop(int exit_code, i
+ if (gstop_done && ptrace_reparented(current))
+ do_notify_parent_cldstop(current, false, why);
+
+- /*
+- * Don't want to allow preemption here, because
+- * sys_ptrace() needs this task to be inactive.
+- *
+- * XXX: implement read_unlock_no_resched().
+- */
+- preempt_disable();
+ read_unlock(&tasklist_lock);
+- preempt_enable_no_resched();
+ schedule();
+ } else {
+ /*
diff --git a/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
new file mode 100644
index 0000000..8f230db
--- /dev/null
+++ b/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -0,0 +1,215 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:56 -0500
+Subject: signals: Allow rt tasks to cache one sigqueue struct
+
+To avoid allocation, allow rt tasks to cache one sigqueue struct in
+the task struct.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/sched.h | 1
+ include/linux/signal.h | 1
+ kernel/exit.c | 2 -
+ kernel/fork.c | 1
+ kernel/signal.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++---
+ 5 files changed, 83 insertions(+), 5 deletions(-)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1403,6 +1403,7 @@ struct task_struct {
+ /* signal handlers */
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
++ struct sigqueue *sigqueue_cache;
+
+ sigset_t blocked, real_blocked;
+ sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
+Index: linux-stable/include/linux/signal.h
+===================================================================
+--- linux-stable.orig/include/linux/signal.h
++++ linux-stable/include/linux/signal.h
+@@ -229,6 +229,7 @@ static inline void init_sigpending(struc
+ }
+
+ extern void flush_sigqueue(struct sigpending *queue);
++extern void flush_task_sigqueue(struct task_struct *tsk);
+
+ /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
+ static inline int valid_signal(unsigned long sig)
+Index: linux-stable/kernel/exit.c
+===================================================================
+--- linux-stable.orig/kernel/exit.c
++++ linux-stable/kernel/exit.c
+@@ -155,7 +155,7 @@ static void __exit_signal(struct task_st
+ * Do this under ->siglock, we can race with another thread
+ * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+ */
+- flush_sigqueue(&tsk->pending);
++ flush_task_sigqueue(tsk);
+ tsk->sighand = NULL;
+ spin_unlock(&sighand->siglock);
+
+Index: linux-stable/kernel/fork.c
+===================================================================
+--- linux-stable.orig/kernel/fork.c
++++ linux-stable/kernel/fork.c
+@@ -1239,6 +1239,7 @@ static struct task_struct *copy_process(
+ spin_lock_init(&p->alloc_lock);
+
+ init_sigpending(&p->pending);
++ p->sigqueue_cache = NULL;
+
+ p->utime = p->stime = p->gtime = 0;
+ p->utimescaled = p->stimescaled = 0;
+Index: linux-stable/kernel/signal.c
+===================================================================
+--- linux-stable.orig/kernel/signal.c
++++ linux-stable/kernel/signal.c
+@@ -346,13 +346,45 @@ static bool task_participate_group_stop(
+ return false;
+ }
+
++#ifdef __HAVE_ARCH_CMPXCHG
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++ struct sigqueue *q = t->sigqueue_cache;
++
++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q)
++ return NULL;
++ return q;
++}
++
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL)
++ return 0;
++ return 1;
++}
++
++#else
++
++static inline struct sigqueue *get_task_cache(struct task_struct *t)
++{
++ return NULL;
++}
++
++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
++{
++ return 1;
++}
++
++#endif
++
+ /*
+ * allocate a new signal queue record
+ * - this may be called without locks if and only if t == current, otherwise an
+ * appropriate lock must be held to stop the target task from exiting
+ */
+ static struct sigqueue *
+-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit, int fromslab)
+ {
+ struct sigqueue *q = NULL;
+ struct user_struct *user;
+@@ -369,7 +401,10 @@ __sigqueue_alloc(int sig, struct task_st
+ if (override_rlimit ||
+ atomic_read(&user->sigpending) <=
+ task_rlimit(t, RLIMIT_SIGPENDING)) {
+- q = kmem_cache_alloc(sigqueue_cachep, flags);
++ if (!fromslab)
++ q = get_task_cache(t);
++ if (!q)
++ q = kmem_cache_alloc(sigqueue_cachep, flags);
+ } else {
+ print_dropped_signal(sig);
+ }
+@@ -386,6 +421,13 @@ __sigqueue_alloc(int sig, struct task_st
+ return q;
+ }
+
++static struct sigqueue *
++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags,
++ int override_rlimit)
++{
++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0);
++}
++
+ static void __sigqueue_free(struct sigqueue *q)
+ {
+ if (q->flags & SIGQUEUE_PREALLOC)
+@@ -395,6 +437,21 @@ static void __sigqueue_free(struct sigqu
+ kmem_cache_free(sigqueue_cachep, q);
+ }
+
++static void sigqueue_free_current(struct sigqueue *q)
++{
++ struct user_struct *up;
++
++ if (q->flags & SIGQUEUE_PREALLOC)
++ return;
++
++ up = q->user;
++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) {
++ atomic_dec(&up->sigpending);
++ free_uid(up);
++ } else
++ __sigqueue_free(q);
++}
++
+ void flush_sigqueue(struct sigpending *queue)
+ {
+ struct sigqueue *q;
+@@ -408,6 +465,21 @@ void flush_sigqueue(struct sigpending *q
+ }
+
+ /*
++ * Called from __exit_signal. Flush tsk->pending and
++ * tsk->sigqueue_cache
++ */
++void flush_task_sigqueue(struct task_struct *tsk)
++{
++ struct sigqueue *q;
++
++ flush_sigqueue(&tsk->pending);
++
++ q = get_task_cache(tsk);
++ if (q)
++ kmem_cache_free(sigqueue_cachep, q);
++}
++
++/*
+ * Flush all pending signals for a task.
+ */
+ void __flush_signals(struct task_struct *t)
+@@ -556,7 +628,7 @@ static void collect_signal(int sig, stru
+ still_pending:
+ list_del_init(&first->list);
+ copy_siginfo(info, &first->info);
+- __sigqueue_free(first);
++ sigqueue_free_current(first);
+ } else {
+ /*
+ * Ok, it wasn't in the queue. This must be
+@@ -602,6 +674,8 @@ int dequeue_signal(struct task_struct *t
+ {
+ int signr;
+
++ WARN_ON_ONCE(tsk != current);
++
+ /* We only dequeue private signals from ourselves, we don't let
+ * signalfd steal them
+ */
+@@ -1548,7 +1622,8 @@ EXPORT_SYMBOL(kill_pid);
+ */
+ struct sigqueue *sigqueue_alloc(void)
+ {
+- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
++ /* Preallocated sigqueue objects always from the slabcache ! */
++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1);
+
+ if (q)
+ q->flags |= SIGQUEUE_PREALLOC;
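The hunk above gives every task a one-element sigqueue cache: get_task_cache() claims the slot with cmpxchg() and put_task_cache() refills it only when the slot is empty, so an RT-priority task that keeps signalling itself can recycle a single sigqueue without going back to the slab allocator. Below is a minimal user-space sketch of the same single-slot pattern, using C11 atomics in place of the kernel's cmpxchg(); all names are invented for illustration, this is not kernel code.

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct slot_cache {
		_Atomic(void *) slot;
	};

	/* Analog of get_task_cache(): claim the cached object or return NULL. */
	static void *cache_get(struct slot_cache *c)
	{
		void *q = atomic_load(&c->slot);

		if (!q || !atomic_compare_exchange_strong(&c->slot, &q, (void *)NULL))
			return NULL;
		return q;
	}

	/* Analog of put_task_cache(): park the object only if the slot is empty. */
	static int cache_put(struct slot_cache *c, void *q)
	{
		void *expected = NULL;

		return atomic_compare_exchange_strong(&c->slot, &expected, q) ? 0 : 1;
	}

	int main(void)
	{
		struct slot_cache c = { .slot = NULL };
		void *obj = malloc(32);

		if (cache_put(&c, obj) == 0)
			printf("object parked in the cache\n");
		if (cache_get(&c) == obj)
			printf("object reused from the cache\n");

		free(obj);
		return 0;
	}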
diff --git a/signals-do-not-wake-self.patch b/signals-do-not-wake-self.patch
new file mode 100644
index 0000000..411045e
--- /dev/null
+++ b/signals-do-not-wake-self.patch
@@ -0,0 +1,27 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:44 -0500
+Subject: signals: Do not wakeup self
+
+Signals which are delivered by current to current can do without
+waking up current :)
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/signal.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+Index: linux-stable/kernel/signal.c
+===================================================================
+--- linux-stable.orig/kernel/signal.c
++++ linux-stable/kernel/signal.c
+@@ -684,6 +684,9 @@ void signal_wake_up(struct task_struct *
+
+ set_tsk_thread_flag(t, TIF_SIGPENDING);
+
++ if (unlikely(t == current))
++ return;
++
+ /*
+ * For SIGKILL, we want to wake it up in the stopped/traced/killable
+ * case. We don't check t->state here because there is a race with it
diff --git a/skbufhead-raw-lock.patch b/skbufhead-raw-lock.patch
new file mode 100644
index 0000000..28a52ce
--- /dev/null
+++ b/skbufhead-raw-lock.patch
@@ -0,0 +1,133 @@
+Subject: skbufhead-raw-lock.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 12 Jul 2011 15:38:34 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/netdevice.h | 1 +
+ include/linux/skbuff.h | 7 +++++++
+ net/core/dev.c | 26 ++++++++++++++++++++------
+ 3 files changed, 28 insertions(+), 6 deletions(-)
+
+Index: linux-stable/include/linux/netdevice.h
+===================================================================
+--- linux-stable.orig/include/linux/netdevice.h
++++ linux-stable/include/linux/netdevice.h
+@@ -1765,6 +1765,7 @@ struct softnet_data {
+ unsigned int dropped;
+ struct sk_buff_head input_pkt_queue;
+ struct napi_struct backlog;
++ struct sk_buff_head tofree_queue;
+ };
+
+ static inline void input_queue_head_incr(struct softnet_data *sd)
+Index: linux-stable/include/linux/skbuff.h
+===================================================================
+--- linux-stable.orig/include/linux/skbuff.h
++++ linux-stable/include/linux/skbuff.h
+@@ -132,6 +132,7 @@ struct sk_buff_head {
+
+ __u32 qlen;
+ spinlock_t lock;
++ raw_spinlock_t raw_lock;
+ };
+
+ struct sk_buff;
+@@ -995,6 +996,12 @@ static inline void skb_queue_head_init(s
+ __skb_queue_head_init(list);
+ }
+
++static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
++{
++ raw_spin_lock_init(&list->raw_lock);
++ __skb_queue_head_init(list);
++}
++
+ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
+ struct lock_class_key *class)
+ {
+Index: linux-stable/net/core/dev.c
+===================================================================
+--- linux-stable.orig/net/core/dev.c
++++ linux-stable/net/core/dev.c
+@@ -221,14 +221,14 @@ static inline struct hlist_head *dev_ind
+ static inline void rps_lock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_lock(&sd->input_pkt_queue.lock);
++ raw_spin_lock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+ static inline void rps_unlock(struct softnet_data *sd)
+ {
+ #ifdef CONFIG_RPS
+- spin_unlock(&sd->input_pkt_queue.lock);
++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock);
+ #endif
+ }
+
+@@ -3399,7 +3399,7 @@ static void flush_backlog(void *arg)
+ skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->input_pkt_queue);
+- kfree_skb(skb);
++ __skb_queue_tail(&sd->tofree_queue, skb);
+ input_queue_head_incr(sd);
+ }
+ }
+@@ -3408,10 +3408,13 @@ static void flush_backlog(void *arg)
+ skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &sd->process_queue);
+- kfree_skb(skb);
++ __skb_queue_tail(&sd->tofree_queue, skb);
+ input_queue_head_incr(sd);
+ }
+ }
++
++ if (!skb_queue_empty(&sd->tofree_queue))
++ raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ }
+
+ static int napi_gro_complete(struct sk_buff *skb)
+@@ -3900,10 +3903,17 @@ static void net_rx_action(struct softirq
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ unsigned long time_limit = jiffies + 2;
+ int budget = netdev_budget;
++ struct sk_buff *skb;
+ void *have;
+
+ local_irq_disable();
+
++ while ((skb = __skb_dequeue(&sd->tofree_queue))) {
++ local_irq_enable();
++ kfree_skb(skb);
++ local_irq_disable();
++ }
++
+ while (!list_empty(&sd->poll_list)) {
+ struct napi_struct *n;
+ int work, weight;
+@@ -6334,6 +6344,9 @@ static int dev_cpu_callback(struct notif
+ netif_rx(skb);
+ input_queue_head_incr(oldsd);
+ }
++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) {
++ kfree_skb(skb);
++ }
+
+ return NOTIFY_OK;
+ }
+@@ -6598,8 +6611,9 @@ static int __init net_dev_init(void)
+ struct softnet_data *sd = &per_cpu(softnet_data, i);
+
+ memset(sd, 0, sizeof(*sd));
+- skb_queue_head_init(&sd->input_pkt_queue);
+- skb_queue_head_init(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->input_pkt_queue);
++ skb_queue_head_init_raw(&sd->process_queue);
++ skb_queue_head_init_raw(&sd->tofree_queue);
+ sd->completion_queue = NULL;
+ INIT_LIST_HEAD(&sd->poll_list);
+ sd->output_queue = NULL;
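Besides converting the backlog queues to a raw lock, the patch above defers the actual kfree_skb() calls: inside the raw-locked region flush_backlog() only moves skbs onto tofree_queue and raises NET_RX_SOFTIRQ, and the frees happen later in net_rx_action() with interrupts enabled, outside the section where sleeping on RT would be illegal. A small user-space sketch of that "unlink under the lock, free outside it" pattern follows; the names are made up and plain pthreads stand in for the raw lock.

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct pkt {
		struct pkt *next;
		int id;
	};

	struct backlog {
		pthread_mutex_t lock;	/* stands in for the raw lock */
		struct pkt *head;
		struct pkt *tofree;	/* analog of sd->tofree_queue */
	};

	/* Under the lock: unlink matching packets, never free them here. */
	static void flush_backlog(struct backlog *b, int dead_id)
	{
		struct pkt **pp;

		pthread_mutex_lock(&b->lock);
		for (pp = &b->head; *pp; ) {
			struct pkt *p = *pp;

			if (p->id == dead_id) {
				*pp = p->next;
				p->next = b->tofree;	/* defer the free */
				b->tofree = p;
			} else {
				pp = &p->next;
			}
		}
		pthread_mutex_unlock(&b->lock);
	}

	/* Outside the critical section: analog of net_rx_action() draining tofree_queue. */
	static void drain_tofree(struct backlog *b)
	{
		struct pkt *p, *next;

		pthread_mutex_lock(&b->lock);
		p = b->tofree;
		b->tofree = NULL;
		pthread_mutex_unlock(&b->lock);

		for (; p; p = next) {
			next = p->next;
			printf("freeing packet %d\n", p->id);
			free(p);
		}
	}

	int main(void)
	{
		struct backlog b = { .lock = PTHREAD_MUTEX_INITIALIZER };

		for (int i = 0; i < 3; i++) {
			struct pkt *p = malloc(sizeof(*p));
			p->id = i % 2;		/* packets for two "devices" */
			p->next = b.head;
			b.head = p;
		}
		flush_backlog(&b, 1);	/* "device" 1 goes away */
		drain_tofree(&b);

		for (struct pkt *p = b.head, *n; p; p = n) {	/* clean up the rest */
			n = p->next;
			free(p);
		}
		return 0;
	}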
diff --git a/softirq-disable-softirq-stacks-for-rt.patch b/softirq-disable-softirq-stacks-for-rt.patch
new file mode 100644
index 0000000..c1e3500
--- /dev/null
+++ b/softirq-disable-softirq-stacks-for-rt.patch
@@ -0,0 +1,191 @@
+Subject: softirq-disable-softirq-stacks-for-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 18 Jul 2011 13:59:17 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/powerpc/kernel/irq.c | 2 ++
+ arch/powerpc/kernel/misc_32.S | 2 ++
+ arch/powerpc/kernel/misc_64.S | 2 ++
+ arch/sh/kernel/irq.c | 2 ++
+ arch/sparc/kernel/irq_64.c | 2 ++
+ arch/x86/kernel/entry_64.S | 2 ++
+ arch/x86/kernel/irq_32.c | 2 ++
+ arch/x86/kernel/irq_64.c | 3 ++-
+ include/linux/interrupt.h | 3 +--
+ 9 files changed, 17 insertions(+), 3 deletions(-)
+
+Index: linux-stable/arch/powerpc/kernel/irq.c
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/irq.c
++++ linux-stable/arch/powerpc/kernel/irq.c
+@@ -584,6 +584,7 @@ void irq_ctx_init(void)
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline void do_softirq_onstack(void)
+ {
+ struct thread_info *curtp, *irqtp;
+@@ -620,6 +621,7 @@ void do_softirq(void)
+
+ local_irq_restore(flags);
+ }
++#endif
+
+ irq_hw_number_t virq_to_hw(unsigned int virq)
+ {
+Index: linux-stable/arch/powerpc/kernel/misc_32.S
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/misc_32.S
++++ linux-stable/arch/powerpc/kernel/misc_32.S
+@@ -36,6 +36,7 @@
+
+ .text
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ stw r0,4(r1)
+@@ -46,6 +47,7 @@ _GLOBAL(call_do_softirq)
+ lwz r0,4(r1)
+ mtlr r0
+ blr
++#endif
+
+ _GLOBAL(call_handle_irq)
+ mflr r0
+Index: linux-stable/arch/powerpc/kernel/misc_64.S
+===================================================================
+--- linux-stable.orig/arch/powerpc/kernel/misc_64.S
++++ linux-stable/arch/powerpc/kernel/misc_64.S
+@@ -29,6 +29,7 @@
+
+ .text
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ _GLOBAL(call_do_softirq)
+ mflr r0
+ std r0,16(r1)
+@@ -39,6 +40,7 @@ _GLOBAL(call_do_softirq)
+ ld r0,16(r1)
+ mtlr r0
+ blr
++#endif
+
+ _GLOBAL(call_handle_irq)
+ ld r8,0(r6)
+Index: linux-stable/arch/sh/kernel/irq.c
+===================================================================
+--- linux-stable.orig/arch/sh/kernel/irq.c
++++ linux-stable/arch/sh/kernel/irq.c
+@@ -149,6 +149,7 @@ void irq_ctx_exit(int cpu)
+ hardirq_ctx[cpu] = NULL;
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void)
+ {
+ unsigned long flags;
+@@ -191,6 +192,7 @@ asmlinkage void do_softirq(void)
+
+ local_irq_restore(flags);
+ }
++#endif
+ #else
+ static inline void handle_one_irq(unsigned int irq)
+ {
+Index: linux-stable/arch/sparc/kernel/irq_64.c
+===================================================================
+--- linux-stable.orig/arch/sparc/kernel/irq_64.c
++++ linux-stable/arch/sparc/kernel/irq_64.c
+@@ -698,6 +698,7 @@ void __irq_entry handler_irq(int pil, st
+ set_irq_regs(old_regs);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ void do_softirq(void)
+ {
+ unsigned long flags;
+@@ -723,6 +724,7 @@ void do_softirq(void)
+
+ local_irq_restore(flags);
+ }
++#endif
+
+ #ifdef CONFIG_HOTPLUG_CPU
+ void fixup_irqs(void)
+Index: linux-stable/arch/x86/kernel/entry_64.S
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/entry_64.S
++++ linux-stable/arch/x86/kernel/entry_64.S
+@@ -1252,6 +1252,7 @@ ENTRY(kernel_execve)
+ CFI_ENDPROC
+ END(kernel_execve)
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /* Call softirq on interrupt stack. Interrupts are off. */
+ ENTRY(call_softirq)
+ CFI_STARTPROC
+@@ -1271,6 +1272,7 @@ ENTRY(call_softirq)
+ ret
+ CFI_ENDPROC
+ END(call_softirq)
++#endif
+
+ #ifdef CONFIG_XEN
+ zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
+Index: linux-stable/arch/x86/kernel/irq_32.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/irq_32.c
++++ linux-stable/arch/x86/kernel/irq_32.c
+@@ -149,6 +149,7 @@ void __cpuinit irq_ctx_init(int cpu)
+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void)
+ {
+ unsigned long flags;
+@@ -179,6 +180,7 @@ asmlinkage void do_softirq(void)
+
+ local_irq_restore(flags);
+ }
++#endif
+
+ bool handle_irq(unsigned irq, struct pt_regs *regs)
+ {
+Index: linux-stable/arch/x86/kernel/irq_64.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/irq_64.c
++++ linux-stable/arch/x86/kernel/irq_64.c
+@@ -88,7 +88,7 @@ bool handle_irq(unsigned irq, struct pt_
+ return true;
+ }
+
+-
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern void call_softirq(void);
+
+ asmlinkage void do_softirq(void)
+@@ -108,3 +108,4 @@ asmlinkage void do_softirq(void)
+ }
+ local_irq_restore(flags);
+ }
++#endif
+Index: linux-stable/include/linux/interrupt.h
+===================================================================
+--- linux-stable.orig/include/linux/interrupt.h
++++ linux-stable/include/linux/interrupt.h
+@@ -448,10 +448,9 @@ struct softirq_action
+ void (*action)(struct softirq_action *);
+ };
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
+-
+-#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline void thread_do_softirq(void) { do_softirq(); }
+ #else
+ extern void thread_do_softirq(void);
diff --git a/softirq-export-in-serving-softirq.patch b/softirq-export-in-serving-softirq.patch
new file mode 100644
index 0000000..0ddf8ac
--- /dev/null
+++ b/softirq-export-in-serving-softirq.patch
@@ -0,0 +1,30 @@
+Subject: softirq: Export in_serving_softirq()
+From: John Kacur <jkacur@redhat.com>
+Date: Mon, 14 Nov 2011 02:44:43 +0100
+
+ERROR: "in_serving_softirq" [net/sched/cls_cgroup.ko] undefined!
+
+The above can be fixed by exporting in_serving_softirq
+
+Signed-off-by: John Kacur <jkacur@redhat.com>
+Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
+Cc: stable-rt@vger.kernel.org
+Link: http://lkml.kernel.org/r/1321235083-21756-2-git-send-email-jkacur@redhat.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/softirq.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -448,6 +448,7 @@ int in_serving_softirq(void)
+ preempt_enable();
+ return res;
+ }
++EXPORT_SYMBOL(in_serving_softirq);
+
+ /*
+ * Called with bh and local interrupts disabled. For full RT cpu must
diff --git a/softirq-fix-unplug-deadlock.patch b/softirq-fix-unplug-deadlock.patch
new file mode 100644
index 0000000..302ee53
--- /dev/null
+++ b/softirq-fix-unplug-deadlock.patch
@@ -0,0 +1,68 @@
+Subject: softirq: Fix unplug deadlock
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 30 Sep 2011 15:59:16 +0200
+
+Subject: [RT] softirq: Fix unplug deadlock
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Fri Sep 30 15:52:14 CEST 2011
+
+If ksoftirqd gets woken during hot-unplug, __thread_do_softirq() will
+call pin_current_cpu() which will block on the held cpu_hotplug.lock.
+Moving the offline check in __thread_do_softirq() before the
+pin_current_cpu() call doesn't work, since the wakeup can happen
+before we mark the cpu offline.
+
+So here we have the ksoftirq thread stuck until hotplug finishes, but
+then the ksoftirq CPU_DOWN notifier issues kthread_stop() which will
+wait for the ksoftirq thread to go away -- while holding the hotplug
+lock.
+
+Sort this by delaying the kthread_stop() until CPU_POST_DEAD, which is
+outside of the cpu_hotplug.lock, but still serialized by the
+cpu_add_remove_lock.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: rostedt <rostedt@goodmis.org>
+Cc: Clark Williams <williams@redhat.com>
+Link: http://lkml.kernel.org/r/1317391156.12973.3.camel@twins
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/softirq.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -1087,9 +1087,8 @@ static int __cpuinit cpu_callback(struct
+ int hotcpu = (unsigned long)hcpu;
+ struct task_struct *p;
+
+- switch (action) {
++ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+- case CPU_UP_PREPARE_FROZEN:
+ p = kthread_create_on_node(run_ksoftirqd,
+ hcpu,
+ cpu_to_node(hotcpu),
+@@ -1102,19 +1101,16 @@ static int __cpuinit cpu_callback(struct
+ per_cpu(ksoftirqd, hotcpu) = p;
+ break;
+ case CPU_ONLINE:
+- case CPU_ONLINE_FROZEN:
+ wake_up_process(per_cpu(ksoftirqd, hotcpu));
+ break;
+ #ifdef CONFIG_HOTPLUG_CPU
+ case CPU_UP_CANCELED:
+- case CPU_UP_CANCELED_FROZEN:
+ if (!per_cpu(ksoftirqd, hotcpu))
+ break;
+ /* Unbind so it can run. Fall thru. */
+ kthread_bind(per_cpu(ksoftirqd, hotcpu),
+ cpumask_any(cpu_online_mask));
+- case CPU_DEAD:
+- case CPU_DEAD_FROZEN: {
++ case CPU_POST_DEAD: {
+ static const struct sched_param param = {
+ .sched_priority = MAX_RT_PRIO-1
+ };
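The failure mode in the changelog is the classic one of stopping a thread while holding a lock that very thread is blocked on. The deliberately deadlocking user-space sketch below (illustrative only, not kernel code) shows the shape of it: the main thread holds "hotplug_lock" and then joins a worker that is waiting for exactly that lock, which is what kthread_stop() under cpu_hotplug.lock amounted to. The patch's answer is to perform the stop only after the lock is dropped, i.e. at CPU_POST_DEAD.

	#include <pthread.h>

	static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

	static void *worker(void *arg)
	{
		/* Analog of ksoftirqd blocking in pin_current_cpu(). */
		pthread_mutex_lock(&hotplug_lock);
		pthread_mutex_unlock(&hotplug_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_mutex_lock(&hotplug_lock);
		pthread_create(&t, NULL, worker, NULL);

		/*
		 * DEADLOCK: analog of calling kthread_stop() while still
		 * holding the lock the worker is blocked on. The fix is to
		 * move this join after the unlock (CPU_POST_DEAD).
		 */
		pthread_join(t, NULL);

		pthread_mutex_unlock(&hotplug_lock);
		return 0;
	}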
diff --git a/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch b/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch
new file mode 100644
index 0000000..5f0cdf3
--- /dev/null
+++ b/softirq-init-softirq-local-lock-after-per-cpu-section-is-set-up.patch
@@ -0,0 +1,135 @@
+Subject: softirq: Init softirq local lock after per cpu section is set up
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Thu, 04 Oct 2012 11:02:04 -0400
+
+I discovered this bug when booting 3.4-rt on my powerpc box. It crashed
+with the following report:
+
+------------[ cut here ]------------
+kernel BUG at /work/rt/stable-rt.git/kernel/rtmutex_common.h:75!
+Oops: Exception in kernel mode, sig: 5 [#1]
+PREEMPT SMP NR_CPUS=64 NUMA PA Semi PWRficient
+Modules linked in:
+NIP: c0000000004aa03c LR: c0000000004aa01c CTR: c00000000009b2ac
+REGS: c00000003e8d7950 TRAP: 0700 Not tainted (3.4.11-test-rt19)
+MSR: 9000000000029032 <SF,HV,EE,ME,IR,DR,RI> CR: 24000082 XER: 20000000
+SOFTE: 0
+TASK = c00000003e8fdcd0[11] 'ksoftirqd/1' THREAD: c00000003e8d4000 CPU: 1
+GPR00: 0000000000000001 c00000003e8d7bd0 c000000000d6cbb0 0000000000000000
+GPR04: c00000003e8fdcd0 0000000000000000 0000000024004082 c000000000011454
+GPR08: 0000000000000000 0000000080000001 c00000003e8fdcd1 0000000000000000
+GPR12: 0000000024000084 c00000000fff0280 ffffffffffffffff 000000003ffffad8
+GPR16: ffffffffffffffff 000000000072c798 0000000000000060 0000000000000000
+GPR20: 0000000000642741 000000000072c858 000000003ffffaf0 0000000000000417
+GPR24: 000000000072dcd0 c00000003e7ff990 0000000000000000 0000000000000001
+GPR28: 0000000000000000 c000000000792340 c000000000ccec78 c000000001182338
+NIP [c0000000004aa03c] .wakeup_next_waiter+0x44/0xb8
+LR [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8
+Call Trace:
+[c00000003e8d7bd0] [c0000000004aa01c] .wakeup_next_waiter+0x24/0xb8 (unreliable)
+[c00000003e8d7c60] [c0000000004a0320] .rt_spin_lock_slowunlock+0x8c/0xe4
+[c00000003e8d7ce0] [c0000000004a07cc] .rt_spin_unlock+0x54/0x64
+[c00000003e8d7d60] [c0000000000636bc] .__thread_do_softirq+0x130/0x174
+[c00000003e8d7df0] [c00000000006379c] .run_ksoftirqd+0x9c/0x1a4
+[c00000003e8d7ea0] [c000000000080b68] .kthread+0xa8/0xb4
+[c00000003e8d7f90] [c00000000001c2f8] .kernel_thread+0x54/0x70
+Instruction dump:
+60000000 e86d01c8 38630730 4bff7061 60000000 ebbf0008 7c7c1b78 e81d0040
+7fe00278 7c000074 7800d182 68000001 <0b000000> e88d01c8 387d0010 38840738
+
+The rtmutex_common.h:75 is:
+
+rt_mutex_top_waiter(struct rt_mutex *lock)
+{
+ struct rt_mutex_waiter *w;
+
+ w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
+ list_entry);
+ BUG_ON(w->lock != lock);
+
+ return w;
+}
+
+Where the waiter->lock is corrupted. I saw various other random bugs
+that all had to do with the softirq lock and plist. As plist needs to be
+initialized before it is used I investigated how this lock is
+initialized. It's initialized with:
+
+void __init softirq_early_init(void)
+{
+ local_irq_lock_init(local_softirq_lock);
+}
+
+Where:
+
+#define local_irq_lock_init(lvar) \
+ do { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) \
+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \
+ } while (0)
+
+As the softirq lock is a local_irq_lock, which is a per_cpu lock, the
+initialization is done to all per_cpu versions of the lock. But let's
+look at where the softirq_early_init() is called from.
+
+In init/main.c: start_kernel()
+
+/*
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them
+ */
+ softirq_early_init();
+ tick_init();
+ boot_cpu_init();
+ page_address_init();
+ printk(KERN_NOTICE "%s", linux_banner);
+ setup_arch(&command_line);
+ mm_init_owner(&init_mm, &init_task);
+ mm_init_cpumask(&init_mm);
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+
+One of the first things that is called is the initialization of the
+softirq lock. But if you look further down, we see the per_cpu areas
+have not been set up yet. Thus initializing a local_irq_lock() before
+the per_cpu section is set up may not work, as it initializes the
+per-cpu locks before the per-cpu areas exist.
+
+By moving the softirq_early_init() right after setup_per_cpu_areas(),
+the kernel boots fine.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Cc: Clark Williams <clark@redhat.com>
+Cc: John Kacur <jkacur@redhat.com>
+Cc: Carsten Emde <cbe@osadl.org>
+Cc: vomlehn@texas.net
+Link: http://lkml.kernel.org/r/1349362924.6755.18.camel@gandalf.local.home
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ init/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/init/main.c
+===================================================================
+--- linux-stable.orig/init/main.c
++++ linux-stable/init/main.c
+@@ -491,7 +491,6 @@ asmlinkage void __init start_kernel(void
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them
+ */
+- softirq_early_init();
+ tick_init();
+ boot_cpu_init();
+ page_address_init();
+@@ -502,6 +501,7 @@ asmlinkage void __init start_kernel(void
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
++ softirq_early_init();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
+
+ build_all_zonelists(NULL, NULL);
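The ordering bug boils down to initializing objects that live in storage which is not set up yet. A crude user-space analog (invented names, not kernel code): initializing a lock inside each element of an array before the array is allocated writes through an invalid pointer; doing the allocation first, as the patch does by moving softirq_early_init() after setup_per_cpu_areas(), is the fix.

	#include <pthread.h>
	#include <stdlib.h>

	struct percpu_data {
		pthread_mutex_t lock;
		/* ... */
	};

	static struct percpu_data *pcpu;	/* analog of the per-cpu area */
	static int nr_cpus = 4;

	static void lock_init_all(void)
	{
		for (int i = 0; i < nr_cpus; i++)
			pthread_mutex_init(&pcpu[i].lock, NULL);
	}

	int main(void)
	{
		/*
		 * Wrong order: calling lock_init_all() here would dereference
		 * NULL -- the analog of touching per-cpu memory before
		 * setup_per_cpu_areas() has run.
		 */
		pcpu = calloc(nr_cpus, sizeof(*pcpu));	/* "setup_per_cpu_areas()" */
		lock_init_all();			/* "softirq_early_init()" */

		free(pcpu);
		return 0;
	}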
diff --git a/softirq-local-lock.patch b/softirq-local-lock.patch
new file mode 100644
index 0000000..640d88c
--- /dev/null
+++ b/softirq-local-lock.patch
@@ -0,0 +1,325 @@
+Subject: softirq-local-lock.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 28 Jun 2011 15:57:18 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/hardirq.h | 16 +++-
+ include/linux/interrupt.h | 12 +++
+ include/linux/sched.h | 1
+ init/main.c | 1
+ kernel/softirq.c | 166 +++++++++++++++++++++++++++++++++++++++++++++-
+ 5 files changed, 191 insertions(+), 5 deletions(-)
+
+Index: linux-stable/include/linux/hardirq.h
+===================================================================
+--- linux-stable.orig/include/linux/hardirq.h
++++ linux-stable/include/linux/hardirq.h
+@@ -60,7 +60,11 @@
+ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+ #define NMI_OFFSET (1UL << NMI_SHIFT)
+
+-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++#else
++# define SOFTIRQ_DISABLE_OFFSET (0)
++#endif
+
+ #ifndef PREEMPT_ACTIVE
+ #define PREEMPT_ACTIVE_BITS 1
+@@ -73,10 +77,17 @@
+ #endif
+
+ #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+ #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
++#else
++# define softirq_count() (0U)
++extern int in_serving_softirq(void);
++#endif
++
+ /*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+@@ -86,7 +97,6 @@
+ #define in_irq() (hardirq_count())
+ #define in_softirq() (softirq_count())
+ #define in_interrupt() (irq_count())
+-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+
+ /*
+ * Are we in NMI context?
+Index: linux-stable/include/linux/interrupt.h
+===================================================================
+--- linux-stable.orig/include/linux/interrupt.h
++++ linux-stable/include/linux/interrupt.h
+@@ -450,7 +450,13 @@ struct softirq_action
+
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
++
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static inline void thread_do_softirq(void) { do_softirq(); }
++#else
++extern void thread_do_softirq(void);
++#endif
++
+ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+ extern void softirq_init(void);
+ extern void __raise_softirq_irqoff(unsigned int nr);
+@@ -637,6 +643,12 @@ void tasklet_hrtimer_cancel(struct taskl
+ tasklet_kill(&ttimer->tasklet);
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++extern void softirq_early_init(void);
++#else
++static inline void softirq_early_init(void) { }
++#endif
++
+ /*
+ * Autoprobing for irqs:
+ *
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1617,6 +1617,7 @@ struct task_struct {
+ #endif
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head put_rcu;
++ int softirq_nestcnt;
+ #endif
+ };
+
+Index: linux-stable/init/main.c
+===================================================================
+--- linux-stable.orig/init/main.c
++++ linux-stable/init/main.c
+@@ -491,6 +491,7 @@ asmlinkage void __init start_kernel(void
+ * Interrupts are still disabled. Do necessary setups, then
+ * enable them
+ */
++ softirq_early_init();
+ tick_init();
+ boot_cpu_init();
+ page_address_init();
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -24,6 +24,7 @@
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/tick.h>
++#include <linux/locallock.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -165,6 +166,7 @@ static void handle_pending_softirqs(u32
+ local_irq_disable();
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -377,6 +379,162 @@ asmlinkage void do_softirq(void)
+
+ #endif
+
++static inline void local_bh_disable_nort(void) { local_bh_disable(); }
++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++
++#else /* !PREEMPT_RT_FULL */
++
++/*
++ * On RT we serialize softirq execution with a cpu local lock
++ */
++static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
++static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
++
++static void __do_softirq(void);
++
++void __init softirq_early_init(void)
++{
++ local_irq_lock_init(local_softirq_lock);
++}
++
++void local_bh_disable(void)
++{
++ migrate_disable();
++ current->softirq_nestcnt++;
++}
++EXPORT_SYMBOL(local_bh_disable);
++
++void local_bh_enable(void)
++{
++ if (WARN_ON(current->softirq_nestcnt == 0))
++ return;
++
++ if ((current->softirq_nestcnt == 1) &&
++ local_softirq_pending() &&
++ local_trylock(local_softirq_lock)) {
++
++ local_irq_disable();
++ if (local_softirq_pending())
++ __do_softirq();
++ local_irq_enable();
++ local_unlock(local_softirq_lock);
++ WARN_ON(current->softirq_nestcnt != 1);
++ }
++ current->softirq_nestcnt--;
++ migrate_enable();
++}
++EXPORT_SYMBOL(local_bh_enable);
++
++void local_bh_enable_ip(unsigned long ip)
++{
++ local_bh_enable();
++}
++EXPORT_SYMBOL(local_bh_enable_ip);
++
++/* For tracing */
++int notrace __in_softirq(void)
++{
++ if (__get_cpu_var(local_softirq_lock).owner == current)
++ return __get_cpu_var(local_softirq_lock).nestcnt;
++ return 0;
++}
++
++int in_serving_softirq(void)
++{
++ int res;
++
++ preempt_disable();
++ res = __get_cpu_var(local_softirq_runner) == current;
++ preempt_enable();
++ return res;
++}
++
++/*
++ * Called with bh and local interrupts disabled. For full RT cpu must
++ * be pinned.
++ */
++static void __do_softirq(void)
++{
++ u32 pending = local_softirq_pending();
++ int cpu = smp_processor_id();
++
++ current->softirq_nestcnt++;
++
++ /* Reset the pending bitmask before enabling irqs */
++ set_softirq_pending(0);
++
++ __get_cpu_var(local_softirq_runner) = current;
++
++ lockdep_softirq_enter();
++
++ handle_pending_softirqs(pending, cpu);
++
++ pending = local_softirq_pending();
++ if (pending)
++ wakeup_softirqd();
++
++ lockdep_softirq_exit();
++ __get_cpu_var(local_softirq_runner) = NULL;
++
++ current->softirq_nestcnt--;
++}
++
++static int __thread_do_softirq(int cpu)
++{
++ /*
++ * Prevent the current cpu from going offline.
++ * pin_current_cpu() can reenable preemption and block on the
++ * hotplug mutex. When it returns, the current cpu is
++ * pinned. It might be the wrong one, but the offline check
++ * below catches that.
++ */
++ pin_current_cpu();
++ /*
++ * If called from ksoftirqd (cpu >= 0) we need to check
++ * whether we are on the wrong cpu due to cpu offlining. If
++ * called via thread_do_softirq() no action required.
++ */
++ if (cpu >= 0 && cpu_is_offline(cpu)) {
++ unpin_current_cpu();
++ return -1;
++ }
++ preempt_enable();
++ local_lock(local_softirq_lock);
++ local_irq_disable();
++ /*
++ * We cannot switch stacks on RT as we want to be able to
++ * schedule!
++ */
++ if (local_softirq_pending())
++ __do_softirq();
++ local_unlock(local_softirq_lock);
++ unpin_current_cpu();
++ preempt_disable();
++ local_irq_enable();
++ return 0;
++}
++
++/*
++ * Called from netif_rx_ni(). Preemption enabled.
++ */
++void thread_do_softirq(void)
++{
++ if (!in_serving_softirq()) {
++ preempt_disable();
++ __thread_do_softirq(-1);
++ preempt_enable();
++ }
++}
++
++static int ksoftirqd_do_softirq(int cpu)
++{
++ return __thread_do_softirq(cpu);
++}
++
++static inline void local_bh_disable_nort(void) { }
++static inline void _local_bh_enable_nort(void) { }
++
++#endif /* PREEMPT_RT_FULL */
+ /*
+ * Enter an interrupt context.
+ */
+@@ -390,9 +548,9 @@ void irq_enter(void)
+ * Prevent raise_softirq from needlessly waking up ksoftirqd
+ * here, as softirq will be serviced on return from interrupt.
+ */
+- local_bh_disable();
++ local_bh_disable_nort();
+ tick_check_idle(cpu);
+- _local_bh_enable();
++ _local_bh_enable_nort();
+ }
+
+ __irq_enter();
+@@ -400,6 +558,7 @@ void irq_enter(void)
+
+ static inline void invoke_softirq(void)
+ {
++#ifndef CONFIG_PREEMPT_RT_FULL
+ if (!force_irqthreads) {
+ #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+ __do_softirq();
+@@ -412,6 +571,9 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
++#else
++ wakeup_softirqd();
++#endif
+ }
+
+ /*
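On RT the patch above turns local_bh_disable()/local_bh_enable() into a per-task nesting counter plus a cpu-local lock: softirqs raised while the counter is non-zero are only processed when the outermost local_bh_enable() runs them. The toy, single-threaded sketch below shows just the nesting and deferral part; locking, migration control and per-cpu state are left out and all names are invented.

	#include <stdio.h>

	static int nestcnt;          /* analog of current->softirq_nestcnt */
	static unsigned int pending; /* analog of local_softirq_pending() */

	static void run_pending(void)
	{
		while (pending) {
			int nr = __builtin_ctz(pending);

			pending &= ~(1u << nr);
			printf("running softirq %d\n", nr); /* h->action(h) */
		}
	}

	static void bh_disable(void) { nestcnt++; }

	static void bh_enable(void)
	{
		/* Only the outermost enable processes deferred work. */
		if (nestcnt == 1 && pending)
			run_pending();
		nestcnt--;
	}

	static void raise_vec(int nr) { pending |= 1u << nr; }

	int main(void)
	{
		bh_disable();
		bh_disable();
		raise_vec(3);      /* deferred: we are nested */
		bh_enable();       /* still nested, nothing runs */
		bh_enable();       /* outermost enable: softirq 3 runs here */
		return 0;
	}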
diff --git a/softirq-make-fifo.patch b/softirq-make-fifo.patch
new file mode 100644
index 0000000..9286dc5
--- /dev/null
+++ b/softirq-make-fifo.patch
@@ -0,0 +1,60 @@
+Subject: softirq-make-fifo.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 21 Jul 2011 21:06:43 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/softirq.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -381,6 +381,8 @@ asmlinkage void do_softirq(void)
+
+ static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+ static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
++static inline void ksoftirqd_set_sched_params(void) { }
++static inline void ksoftirqd_clr_sched_params(void) { }
+
+ #else /* !PREEMPT_RT_FULL */
+
+@@ -535,6 +537,20 @@ static int ksoftirqd_do_softirq(int cpu)
+ static inline void local_bh_disable_nort(void) { }
+ static inline void _local_bh_enable_nort(void) { }
+
++static inline void ksoftirqd_set_sched_params(void)
++{
++ struct sched_param param = { .sched_priority = 1 };
++
++ sched_setscheduler(current, SCHED_FIFO, &param);
++}
++
++static inline void ksoftirqd_clr_sched_params(void)
++{
++ struct sched_param param = { .sched_priority = 0 };
++
++ sched_setscheduler(current, SCHED_NORMAL, &param);
++}
++
+ #endif /* PREEMPT_RT_FULL */
+ /*
+ * Enter an interrupt context.
+@@ -986,6 +1002,8 @@ void __init softirq_init(void)
+
+ static int run_ksoftirqd(void * __bind_cpu)
+ {
++ ksoftirqd_set_sched_params();
++
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (!kthread_should_stop()) {
+@@ -1011,6 +1029,7 @@ static int run_ksoftirqd(void * __bind_c
+
+ wait_to_die:
+ preempt_enable();
++ ksoftirqd_clr_sched_params();
+ /* Wait for kthread_stop */
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
diff --git a/softirq-make-serving-softirqs-a-task-flag.patch b/softirq-make-serving-softirqs-a-task-flag.patch
new file mode 100644
index 0000000..e290a67
--- /dev/null
+++ b/softirq-make-serving-softirqs-a-task-flag.patch
@@ -0,0 +1,78 @@
+Subject: softirq: Make serving softirqs a task flag
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 04 Oct 2012 14:30:25 +0100
+
+Avoid the percpu softirq_runner pointer magic by using a task flag.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 1 +
+ kernel/softirq.c | 20 +++-----------------
+ 2 files changed, 4 insertions(+), 17 deletions(-)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1845,6 +1845,7 @@ extern void thread_group_times(struct ta
+ #define PF_MEMALLOC 0x00000800 /* Allocating memory */
+ #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
+ #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
++#define PF_IN_SOFTIRQ 0x00004000 /* Task is serving softirq */
+ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
+ #define PF_FROZEN 0x00010000 /* frozen for system suspend */
+ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -392,7 +392,6 @@ static inline void ksoftirqd_clr_sched_p
+ * On RT we serialize softirq execution with a cpu local lock
+ */
+ static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
+-static DEFINE_PER_CPU(struct task_struct *, local_softirq_runner);
+
+ static void __do_softirq_common(int need_rcu_bh_qs);
+
+@@ -447,22 +446,9 @@ void _local_bh_enable(void)
+ }
+ EXPORT_SYMBOL(_local_bh_enable);
+
+-/* For tracing */
+-int notrace __in_softirq(void)
+-{
+- if (__get_cpu_var(local_softirq_lock).owner == current)
+- return __get_cpu_var(local_softirq_lock).nestcnt;
+- return 0;
+-}
+-
+ int in_serving_softirq(void)
+ {
+- int res;
+-
+- preempt_disable();
+- res = __get_cpu_var(local_softirq_runner) == current;
+- preempt_enable();
+- return res;
++ return current->flags & PF_IN_SOFTIRQ;
+ }
+ EXPORT_SYMBOL(in_serving_softirq);
+
+@@ -480,7 +466,7 @@ static void __do_softirq_common(int need
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
+
+- __get_cpu_var(local_softirq_runner) = current;
++ current->flags |= PF_IN_SOFTIRQ;
+
+ lockdep_softirq_enter();
+
+@@ -491,7 +477,7 @@ static void __do_softirq_common(int need
+ wakeup_softirqd();
+
+ lockdep_softirq_exit();
+- __get_cpu_var(local_softirq_runner) = NULL;
++ current->flags &= ~PF_IN_SOFTIRQ;
+
+ current->softirq_nestcnt--;
+ }
diff --git a/softirq-preempt-fix-3-re.txt b/softirq-preempt-fix-3-re.txt
new file mode 100644
index 0000000..040053e
--- /dev/null
+++ b/softirq-preempt-fix-3-re.txt
@@ -0,0 +1,153 @@
+Subject: softirq: Check preemption after reenabling interrupts
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 13 Nov 2011 17:17:09 +0100 (CET)
+
+raise_softirq_irqoff() disables interrupts and wakes the softirq
+daemon, but after reenabling interrupts there is no preemption check,
+so the execution of the softirq thread might be delayed arbitrarily.
+
+In principle we could add that check to local_irq_enable/restore, but
+that's overkill as the raise_softirq_irqoff() sections are the only
+ones which show this behaviour.
+
+Reported-by: Carsten Emde <cbe@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ block/blk-iopoll.c | 3 +++
+ block/blk-softirq.c | 3 +++
+ include/linux/preempt.h | 3 +++
+ net/core/dev.c | 6 ++++++
+ 4 files changed, 15 insertions(+)
+
+Index: linux-stable/block/blk-iopoll.c
+===================================================================
+--- linux-stable.orig/block/blk-iopoll.c
++++ linux-stable/block/blk-iopoll.c
+@@ -38,6 +38,7 @@ void blk_iopoll_sched(struct blk_iopoll
+ list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll));
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(blk_iopoll_sched);
+
+@@ -135,6 +136,7 @@ static void blk_iopoll_softirq(struct so
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ /**
+@@ -204,6 +206,7 @@ static int __cpuinit blk_iopoll_cpu_noti
+ &__get_cpu_var(blk_cpu_iopoll));
+ __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ return NOTIFY_OK;
+Index: linux-stable/block/blk-softirq.c
+===================================================================
+--- linux-stable.orig/block/blk-softirq.c
++++ linux-stable/block/blk-softirq.c
+@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /*
+@@ -93,6 +94,7 @@ static int __cpuinit blk_cpu_notify(stru
+ &__get_cpu_var(blk_cpu_done));
+ raise_softirq_irqoff(BLOCK_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ return NOTIFY_OK;
+@@ -150,6 +152,7 @@ do_local:
+ goto do_local;
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ /**
+Index: linux-stable/include/linux/preempt.h
+===================================================================
+--- linux-stable.orig/include/linux/preempt.h
++++ linux-stable/include/linux/preempt.h
+@@ -56,8 +56,10 @@ do { \
+
+ #ifndef CONFIG_PREEMPT_RT_BASE
+ # define preempt_enable_no_resched() sched_preempt_enable_no_resched()
++# define preempt_check_resched_rt() do { } while (0)
+ #else
+ # define preempt_enable_no_resched() preempt_enable()
++# define preempt_check_resched_rt() preempt_check_resched()
+ #endif
+
+ #define preempt_enable() \
+@@ -105,6 +107,7 @@ do { \
+ #define preempt_disable_notrace() do { } while (0)
+ #define preempt_enable_no_resched_notrace() do { } while (0)
+ #define preempt_enable_notrace() do { } while (0)
++#define preempt_check_resched_rt() do { } while (0)
+
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+Index: linux-stable/net/core/dev.c
+===================================================================
+--- linux-stable.orig/net/core/dev.c
++++ linux-stable/net/core/dev.c
+@@ -1836,6 +1836,7 @@ static inline void __netif_reschedule(st
+ sd->output_queue_tailp = &q->next_sched;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+
+ void __netif_schedule(struct Qdisc *q)
+@@ -1857,6 +1858,7 @@ void dev_kfree_skb_irq(struct sk_buff *s
+ sd->completion_queue = skb;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ }
+ EXPORT_SYMBOL(dev_kfree_skb_irq);
+@@ -2927,6 +2929,7 @@ enqueue:
+ rps_unlock(sd);
+
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+
+ atomic_long_inc(&skb->dev->rx_dropped);
+ kfree_skb(skb);
+@@ -3787,6 +3790,7 @@ static void net_rps_action_and_irq_enabl
+ } else
+ #endif
+ local_irq_enable();
++ preempt_check_resched_rt();
+ }
+
+ static int process_backlog(struct napi_struct *napi, int quota)
+@@ -3859,6 +3863,7 @@ void __napi_schedule(struct napi_struct
+ local_irq_save(flags);
+ ____napi_schedule(&__get_cpu_var(softnet_data), n);
+ local_irq_restore(flags);
++ preempt_check_resched_rt();
+ }
+ EXPORT_SYMBOL(__napi_schedule);
+
+@@ -6364,6 +6369,7 @@ static int dev_cpu_callback(struct notif
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_enable();
++ preempt_check_resched_rt();
+
+ /* Process offline CPU's input_pkt_queue */
+ while ((skb = __skb_dequeue(&oldsd->process_queue))) {
diff --git a/softirq-sanitize-softirq-pending.patch b/softirq-sanitize-softirq-pending.patch
new file mode 100644
index 0000000..ecc72b0
--- /dev/null
+++ b/softirq-sanitize-softirq-pending.patch
@@ -0,0 +1,116 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 13:16:38 -0500
+Subject: softirq: Sanitize softirq pending for NOHZ/RT
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/interrupt.h | 2 +
+ kernel/softirq.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++
+ kernel/time/tick-sched.c | 8 ------
+ 3 files changed, 64 insertions(+), 7 deletions(-)
+
+Index: linux-stable/include/linux/interrupt.h
+===================================================================
+--- linux-stable.orig/include/linux/interrupt.h
++++ linux-stable/include/linux/interrupt.h
+@@ -458,6 +458,8 @@ extern void __raise_softirq_irqoff(unsig
+ extern void raise_softirq_irqoff(unsigned int nr);
+ extern void raise_softirq(unsigned int nr);
+
++extern void softirq_check_pending_idle(void);
++
+ /* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_sendirq() adds work to these lists, and
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -61,6 +61,67 @@ char *softirq_to_name[NR_SOFTIRQS] = {
+ "TASKLET", "SCHED", "HRTIMER", "RCU"
+ };
+
++#ifdef CONFIG_NO_HZ
++# ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * On preempt-rt a softirq might be blocked on a lock. There might be
++ * no other runnable task on this CPU because the lock owner runs on
++ * some other CPU. So we have to go into idle with the pending bit
++ * set. Therefore we need to check this; otherwise we warn about false
++ * positives, which confuses users and defeats the whole purpose of
++ * this test.
++ *
++ * This code is called with interrupts disabled.
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++ u32 warnpending = 0, pending = local_softirq_pending();
++
++ if (rate_limit >= 10)
++ return;
++
++ if (pending) {
++ struct task_struct *tsk;
++
++ tsk = __get_cpu_var(ksoftirqd);
++ /*
++ * The wakeup code in rtmutex.c wakes up the task
++ * _before_ it sets pi_blocked_on to NULL under
++ * tsk->pi_lock. So we need to check for both: state
++ * and pi_blocked_on.
++ */
++ raw_spin_lock(&tsk->pi_lock);
++
++ if (!tsk->pi_blocked_on && !(tsk->state == TASK_RUNNING))
++ warnpending = 1;
++
++ raw_spin_unlock(&tsk->pi_lock);
++ }
++
++ if (warnpending) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ pending);
++ rate_limit++;
++ }
++}
++# else
++/*
++ * On !PREEMPT_RT we just printk rate limited:
++ */
++void softirq_check_pending_idle(void)
++{
++ static int rate_limit;
++
++ if (rate_limit < 10) {
++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
++ local_softirq_pending());
++ rate_limit++;
++ }
++}
++# endif
++#endif
++
+ /*
+ * we cannot loop indefinitely here to avoid userspace starvation,
+ * but we also don't want to introduce a worst case 1/HZ latency
+Index: linux-stable/kernel/time/tick-sched.c
+===================================================================
+--- linux-stable.orig/kernel/time/tick-sched.c
++++ linux-stable/kernel/time/tick-sched.c
+@@ -438,13 +438,7 @@ static bool can_stop_idle_tick(int cpu,
+ return false;
+
+ if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+- static int ratelimit;
+-
+- if (ratelimit < 10) {
+- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+- (unsigned int) local_softirq_pending());
+- ratelimit++;
+- }
++ softirq_check_pending_idle();
+ return false;
+ }
+
diff --git a/softirq-split-handling-function.patch b/softirq-split-handling-function.patch
new file mode 100644
index 0000000..f81c029
--- /dev/null
+++ b/softirq-split-handling-function.patch
@@ -0,0 +1,70 @@
+Subject: softirq: Split handling function
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 04 Oct 2012 15:33:53 +0100
+
+Split out the inner handling function, so RT can reuse it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/softirq.c | 43 +++++++++++++++++++++++--------------------
+ 1 file changed, 23 insertions(+), 20 deletions(-)
+
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -139,31 +139,34 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
+ }
+
+-static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
++static void handle_softirq(unsigned int vec_nr, int cpu, int need_rcu_bh_qs)
+ {
+- struct softirq_action *h = softirq_vec;
++ struct softirq_action *h = softirq_vec + vec_nr;
+ unsigned int prev_count = preempt_count();
+
+- local_irq_enable();
+- for ( ; pending; h++, pending >>= 1) {
+- unsigned int vec_nr = h - softirq_vec;
++ kstat_incr_softirqs_this_cpu(vec_nr);
++ trace_softirq_entry(vec_nr);
++ h->action(h);
++ trace_softirq_exit(vec_nr);
++
++ if (unlikely(prev_count != preempt_count())) {
++ pr_err("softirq %u %s %p preempt count leak: %08x -> %08x\n",
++ vec_nr, softirq_to_name[vec_nr], h->action,
++ prev_count, (unsigned int) preempt_count());
++ preempt_count() = prev_count;
++ }
++ if (need_rcu_bh_qs)
++ rcu_bh_qs(cpu);
++}
+
+- if (!(pending & 1))
+- continue;
++static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
++{
++ unsigned int vec_nr;
+
+- kstat_incr_softirqs_this_cpu(vec_nr);
+- trace_softirq_entry(vec_nr);
+- h->action(h);
+- trace_softirq_exit(vec_nr);
+- if (unlikely(prev_count != preempt_count())) {
+- printk(KERN_ERR
+- "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n",
+- vec_nr, softirq_to_name[vec_nr], h->action,
+- prev_count, (unsigned int) preempt_count());
+- preempt_count() = prev_count;
+- }
+- if (need_rcu_bh_qs)
+- rcu_bh_qs(cpu);
++ local_irq_enable();
++ for (vec_nr = 0; pending; vec_nr++, pending >>= 1) {
++ if (pending & 1)
++ handle_softirq(vec_nr, cpu, need_rcu_bh_qs);
+ }
+ local_irq_disable();
+ }
diff --git a/softirq-split-locks.patch b/softirq-split-locks.patch
new file mode 100644
index 0000000..81217d8
--- /dev/null
+++ b/softirq-split-locks.patch
@@ -0,0 +1,449 @@
+Subject: softirq: Split softirq locks
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 04 Oct 2012 14:20:47 +0100
+
+The 3.x RT series removed the split softirq implementation in favour
+of pushing softirq processing into the context of the thread which
+raised it. However, this prevents us from handling the various softirqs
+at different priorities. Now instead of reintroducing the split
+softirq threads we split the locks which serialize the softirq
+processing.
+
+If a softirq is raised in the context of a thread, then the softirq is
+noted in a per-thread field, provided the thread is in a bh disabled
+region. If the softirq is raised from hard interrupt context, then the
+bit is set in the flag field of ksoftirqd and ksoftirqd is invoked.
+When a thread leaves a bh disabled region, then it tries to execute
+the softirqs which have been raised in its own context. It acquires
+the per softirq / per cpu lock for the softirq and then checks
+whether the softirq is still pending in the per cpu
+local_softirq_pending() field. If yes, it runs the softirq. If no,
+then some other task executed it already. This allows for zero config
+softirq elevation in the context of user space tasks or interrupt
+threads.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 1
+ kernel/softirq.c | 287 ++++++++++++++++++++++++++++++++------------------
+ 2 files changed, 189 insertions(+), 99 deletions(-)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1619,6 +1619,7 @@ struct task_struct {
+ #ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head put_rcu;
+ int softirq_nestcnt;
++ unsigned int softirqs_raised;
+ #endif
+ #if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+ int kmap_idx;
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -159,6 +159,7 @@ static void handle_softirq(unsigned int
+ rcu_bh_qs(cpu);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ static void handle_pending_softirqs(u32 pending, int cpu, int need_rcu_bh_qs)
+ {
+ unsigned int vec_nr;
+@@ -171,7 +172,6 @@ static void handle_pending_softirqs(u32
+ local_irq_disable();
+ }
+
+-#ifndef CONFIG_PREEMPT_RT_FULL
+ /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -384,28 +384,123 @@ asmlinkage void do_softirq(void)
+
+ #endif
+
++/*
++ * This function must run with irqs disabled!
++ */
++void raise_softirq_irqoff(unsigned int nr)
++{
++ __raise_softirq_irqoff(nr);
++
++ /*
++ * If we're in an interrupt or softirq, we're done
++ * (this also catches softirq-disabled code). We will
++ * actually run the softirq once we return from
++ * the irq or softirq.
++ *
++ * Otherwise we wake up ksoftirqd to make sure we
++ * schedule the softirq soon.
++ */
++ if (!in_interrupt())
++ wakeup_softirqd();
++}
++
++void __raise_softirq_irqoff(unsigned int nr)
++{
++ trace_softirq_raise(nr);
++ or_softirq_pending(1UL << nr);
++}
++
+ static inline void local_bh_disable_nort(void) { local_bh_disable(); }
+ static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
+ static inline void ksoftirqd_set_sched_params(void) { }
+ static inline void ksoftirqd_clr_sched_params(void) { }
+
++static inline int ksoftirqd_softirq_pending(void)
++{
++ return local_softirq_pending();
++}
++
+ #else /* !PREEMPT_RT_FULL */
+
+ /*
+- * On RT we serialize softirq execution with a cpu local lock
++ * On RT we serialize softirq execution with a cpu local lock per softirq
+ */
+-static DEFINE_LOCAL_IRQ_LOCK(local_softirq_lock);
++static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
++
++void __init softirq_early_init(void)
++{
++ int i;
+
+-static void __do_softirq_common(int need_rcu_bh_qs);
++ for (i = 0; i < NR_SOFTIRQS; i++)
++ local_irq_lock_init(local_softirq_locks[i]);
++}
+
+-void __do_softirq(void)
++static void lock_softirq(int which)
+ {
+- __do_softirq_common(0);
++ __local_lock(&__get_cpu_var(local_softirq_locks[which]));
+ }
+
+-void __init softirq_early_init(void)
++static void unlock_softirq(int which)
+ {
+- local_irq_lock_init(local_softirq_lock);
++ __local_unlock(&__get_cpu_var(local_softirq_locks[which]));
++}
++
++static void do_single_softirq(int which, int need_rcu_bh_qs)
++{
++ unsigned long old_flags = current->flags;
++
++ current->flags &= ~PF_MEMALLOC;
++ account_system_vtime(current);
++ current->flags |= PF_IN_SOFTIRQ;
++ lockdep_softirq_enter();
++ local_irq_enable();
++ handle_softirq(which, smp_processor_id(), need_rcu_bh_qs);
++ local_irq_disable();
++ lockdep_softirq_exit();
++ current->flags &= ~PF_IN_SOFTIRQ;
++ account_system_vtime(current);
++ tsk_restore_flags(current, old_flags, PF_MEMALLOC);
++}
++
++/*
++ * Called with interrupts disabled. Process softirqs which were raised
++ * in current context (or on behalf of ksoftirqd).
++ */
++static void do_current_softirqs(int need_rcu_bh_qs)
++{
++ int retry = 0;
++
++ while (current->softirqs_raised) {
++ int i = __ffs(current->softirqs_raised);
++ unsigned int pending, mask = (1U << i);
++
++ current->softirqs_raised &= ~mask;
++ local_irq_enable();
++
++ /*
++ * If the lock is contended, we boost the owner to
++ * process the softirq or leave the critical section
++ * now.
++ */
++ lock_softirq(i);
++ local_irq_disable();
++ /*
++ * Check with the local_softirq_pending() bits,
++ * whether we need to process this still or if someone
++ * else took care of it.
++ */
++ pending = local_softirq_pending();
++ if (pending & mask) {
++ set_softirq_pending(pending & ~mask);
++ do_single_softirq(i, need_rcu_bh_qs);
++ }
++ unlock_softirq(i);
++ WARN_ON(current->softirq_nestcnt != 1);
++ if (++retry == 20) {
++ printk(KERN_ERR "Crap, %s looping forever in softirq\n",
++ current->comm);
++ }
++ }
+ }
+
+ void local_bh_disable(void)
+@@ -420,17 +515,11 @@ void local_bh_enable(void)
+ if (WARN_ON(current->softirq_nestcnt == 0))
+ return;
+
+- if ((current->softirq_nestcnt == 1) &&
+- local_softirq_pending() &&
+- local_trylock(local_softirq_lock)) {
++ local_irq_disable();
++ if (current->softirq_nestcnt == 1 && current->softirqs_raised)
++ do_current_softirqs(1);
++ local_irq_enable();
+
+- local_irq_disable();
+- if (local_softirq_pending())
+- __do_softirq();
+- local_irq_enable();
+- local_unlock(local_softirq_lock);
+- WARN_ON(current->softirq_nestcnt != 1);
+- }
+ current->softirq_nestcnt--;
+ migrate_enable();
+ }
+@@ -455,37 +544,8 @@ int in_serving_softirq(void)
+ }
+ EXPORT_SYMBOL(in_serving_softirq);
+
+-/*
+- * Called with bh and local interrupts disabled. For full RT cpu must
+- * be pinned.
+- */
+-static void __do_softirq_common(int need_rcu_bh_qs)
+-{
+- u32 pending = local_softirq_pending();
+- int cpu = smp_processor_id();
+-
+- current->softirq_nestcnt++;
+-
+- /* Reset the pending bitmask before enabling irqs */
+- set_softirq_pending(0);
+-
+- current->flags |= PF_IN_SOFTIRQ;
+-
+- lockdep_softirq_enter();
+-
+- handle_pending_softirqs(pending, cpu, need_rcu_bh_qs);
+-
+- pending = local_softirq_pending();
+- if (pending)
+- wakeup_softirqd();
+-
+- lockdep_softirq_exit();
+- current->flags &= ~PF_IN_SOFTIRQ;
+-
+- current->softirq_nestcnt--;
+-}
+-
+-static int __thread_do_softirq(int cpu)
++/* Called with preemption disabled */
++static int ksoftirqd_do_softirq(int cpu)
+ {
+ /*
+ * Prevent the current cpu from going offline.
+@@ -496,45 +556,90 @@ static int __thread_do_softirq(int cpu)
+ */
+ pin_current_cpu();
+ /*
+- * If called from ksoftirqd (cpu >= 0) we need to check
+- * whether we are on the wrong cpu due to cpu offlining. If
+- * called via thread_do_softirq() no action required.
++ * We need to check whether we are on the wrong cpu due to cpu
++ * offlining.
+ */
+- if (cpu >= 0 && cpu_is_offline(cpu)) {
++ if (cpu_is_offline(cpu)) {
+ unpin_current_cpu();
+ return -1;
+ }
+ preempt_enable();
+- local_lock(local_softirq_lock);
+ local_irq_disable();
+- /*
+- * We cannot switch stacks on RT as we want to be able to
+- * schedule!
+- */
+- if (local_softirq_pending())
+- __do_softirq_common(cpu >= 0);
+- local_unlock(local_softirq_lock);
+- unpin_current_cpu();
+- preempt_disable();
++ current->softirq_nestcnt++;
++ do_current_softirqs(1);
++ current->softirq_nestcnt--;
+ local_irq_enable();
++
++ preempt_disable();
++ unpin_current_cpu();
+ return 0;
+ }
+
+ /*
+- * Called from netif_rx_ni(). Preemption enabled.
++ * Called from netif_rx_ni(). Preemption enabled, but migration
++ * disabled. So the cpu can't go away under us.
+ */
+ void thread_do_softirq(void)
+ {
+- if (!in_serving_softirq()) {
+- preempt_disable();
+- __thread_do_softirq(-1);
+- preempt_enable();
++ if (!in_serving_softirq() && current->softirqs_raised) {
++ current->softirq_nestcnt++;
++ do_current_softirqs(0);
++ current->softirq_nestcnt--;
+ }
+ }
+
+-static int ksoftirqd_do_softirq(int cpu)
++void __raise_softirq_irqoff(unsigned int nr)
+ {
+- return __thread_do_softirq(cpu);
++ trace_softirq_raise(nr);
++ or_softirq_pending(1UL << nr);
++
++ /*
++ * If we are not in a hard interrupt and inside a bh disabled
++ * region, we simply raise the flag on current. local_bh_enable()
++ * will make sure that the softirq is executed. Otherwise we
++ * delegate it to ksoftirqd.
++ */
++ if (!in_irq() && current->softirq_nestcnt)
++ current->softirqs_raised |= (1U << nr);
++ else if (__this_cpu_read(ksoftirqd))
++ __this_cpu_read(ksoftirqd)->softirqs_raised |= (1U << nr);
++}
++
++/*
++ * This function must run with irqs disabled!
++ */
++void raise_softirq_irqoff(unsigned int nr)
++{
++ __raise_softirq_irqoff(nr);
++
++ /*
++ * If we're in a hard interrupt we let the irq return code deal
++ * with the wakeup of ksoftirqd.
++ */
++ if (in_irq())
++ return;
++
++ /*
++ * If we are in thread context but outside of a bh disabled
++ * region, we need to wake ksoftirqd as well.
++ *
++ * CHECKME: Some of the places which do that could be wrapped
++ * into local_bh_disable/enable pairs. Though it's unclear
++ * whether this is worth the effort. To find those places just
++ * raise a WARN() if the condition is met.
++ */
++ if (!current->softirq_nestcnt)
++ wakeup_softirqd();
++}
++
++void do_raise_softirq_irqoff(unsigned int nr)
++{
++ raise_softirq_irqoff(nr);
++}
++
++static inline int ksoftirqd_softirq_pending(void)
++{
++ return current->softirqs_raised;
+ }
+
+ static inline void local_bh_disable_nort(void) { }
+@@ -545,6 +650,10 @@ static inline void ksoftirqd_set_sched_p
+ struct sched_param param = { .sched_priority = 1 };
+
+ sched_setscheduler(current, SCHED_FIFO, &param);
++ /* Take over all pending softirqs when starting */
++ local_irq_disable();
++ current->softirqs_raised = local_softirq_pending();
++ local_irq_enable();
+ }
+
+ static inline void ksoftirqd_clr_sched_params(void)
+@@ -591,8 +700,14 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
+-#else
++#else /* PREEMPT_RT_FULL */
++ unsigned long flags;
++
++ local_irq_save(flags);
++ if (__this_cpu_read(ksoftirqd) &&
++ __this_cpu_read(ksoftirqd)->softirqs_raised)
+ wakeup_softirqd();
++ local_irq_restore(flags);
+ #endif
+ }
+
+@@ -616,26 +731,6 @@ void irq_exit(void)
+ sched_preempt_enable_no_resched();
+ }
+
+-/*
+- * This function must run with irqs disabled!
+- */
+-inline void raise_softirq_irqoff(unsigned int nr)
+-{
+- __raise_softirq_irqoff(nr);
+-
+- /*
+- * If we're in an interrupt or softirq, we're done
+- * (this also catches softirq-disabled code). We will
+- * actually run the softirq once we return from
+- * the irq or softirq.
+- *
+- * Otherwise we wake up ksoftirqd to make sure we
+- * schedule the softirq soon.
+- */
+- if (!in_interrupt())
+- wakeup_softirqd();
+-}
+-
+ void raise_softirq(unsigned int nr)
+ {
+ unsigned long flags;
+@@ -645,12 +740,6 @@ void raise_softirq(unsigned int nr)
+ local_irq_restore(flags);
+ }
+
+-void __raise_softirq_irqoff(unsigned int nr)
+-{
+- trace_softirq_raise(nr);
+- or_softirq_pending(1UL << nr);
+-}
+-
+ void open_softirq(int nr, void (*action)(struct softirq_action *))
+ {
+ softirq_vec[nr].action = action;
+@@ -1102,12 +1191,12 @@ static int run_ksoftirqd(void * __bind_c
+
+ while (!kthread_should_stop()) {
+ preempt_disable();
+- if (!local_softirq_pending())
++ if (!ksoftirqd_softirq_pending())
+ schedule_preempt_disabled();
+
+ __set_current_state(TASK_RUNNING);
+
+- while (local_softirq_pending()) {
++ while (ksoftirqd_softirq_pending()) {
+ if (ksoftirqd_do_softirq((long) __bind_cpu))
+ goto wait_to_die;
+ sched_preempt_enable_no_resched();
diff --git a/softirq-split-out-code.patch b/softirq-split-out-code.patch
new file mode 100644
index 0000000..4e5e405
--- /dev/null
+++ b/softirq-split-out-code.patch
@@ -0,0 +1,155 @@
+Subject: softirq-split-out-code.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 28 Jun 2011 15:46:49 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/softirq.c | 94 ++++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 52 insertions(+), 42 deletions(-)
+
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -76,6 +76,34 @@ static void wakeup_softirqd(void)
+ wake_up_process(tsk);
+ }
+
++static void handle_pending_softirqs(u32 pending, int cpu)
++{
++ struct softirq_action *h = softirq_vec;
++ unsigned int prev_count = preempt_count();
++
++ local_irq_enable();
++ for ( ; pending; h++, pending >>= 1) {
++ unsigned int vec_nr = h - softirq_vec;
++
++ if (!(pending & 1))
++ continue;
++
++ kstat_incr_softirqs_this_cpu(vec_nr);
++ trace_softirq_entry(vec_nr);
++ h->action(h);
++ trace_softirq_exit(vec_nr);
++ if (unlikely(prev_count != preempt_count())) {
++ printk(KERN_ERR
++ "huh, entered softirq %u %s %p with preempt_count %08x exited with %08x?\n",
++ vec_nr, softirq_to_name[vec_nr], h->action,
++ prev_count, (unsigned int) preempt_count());
++ preempt_count() = prev_count;
++ }
++ rcu_bh_qs(cpu);
++ }
++ local_irq_disable();
++}
++
+ /*
+ * preempt_count and SOFTIRQ_OFFSET usage:
+ * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
+@@ -206,7 +234,6 @@ EXPORT_SYMBOL(local_bh_enable_ip);
+
+ asmlinkage void __do_softirq(void)
+ {
+- struct softirq_action *h;
+ __u32 pending;
+ int max_restart = MAX_SOFTIRQ_RESTART;
+ int cpu;
+@@ -223,7 +250,7 @@ asmlinkage void __do_softirq(void)
+ account_system_vtime(current);
+
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+- SOFTIRQ_OFFSET);
++ SOFTIRQ_OFFSET);
+ lockdep_softirq_enter();
+
+ cpu = smp_processor_id();
+@@ -231,36 +258,7 @@ restart:
+ /* Reset the pending bitmask before enabling irqs */
+ set_softirq_pending(0);
+
+- local_irq_enable();
+-
+- h = softirq_vec;
+-
+- do {
+- if (pending & 1) {
+- unsigned int vec_nr = h - softirq_vec;
+- int prev_count = preempt_count();
+-
+- kstat_incr_softirqs_this_cpu(vec_nr);
+-
+- trace_softirq_entry(vec_nr);
+- h->action(h);
+- trace_softirq_exit(vec_nr);
+- if (unlikely(prev_count != preempt_count())) {
+- printk(KERN_ERR "huh, entered softirq %u %s %p"
+- "with preempt_count %08x,"
+- " exited with %08x?\n", vec_nr,
+- softirq_to_name[vec_nr], h->action,
+- prev_count, preempt_count());
+- preempt_count() = prev_count;
+- }
+-
+- rcu_bh_qs(cpu);
+- }
+- h++;
+- pending >>= 1;
+- } while (pending);
+-
+- local_irq_disable();
++ handle_pending_softirqs(pending, cpu);
+
+ pending = local_softirq_pending();
+ if (pending && --max_restart)
+@@ -276,6 +274,26 @@ restart:
+ tsk_restore_flags(current, old_flags, PF_MEMALLOC);
+ }
+
++/*
++ * Called with preemption disabled from run_ksoftirqd()
++ */
++static int ksoftirqd_do_softirq(int cpu)
++{
++ /*
++ * Preempt disable stops cpu going offline.
++ * If already offline, we'll be on wrong CPU:
++ * don't process.
++ */
++ if (cpu_is_offline(cpu))
++ return -1;
++
++ local_irq_disable();
++ if (local_softirq_pending())
++ __do_softirq();
++ local_irq_enable();
++ return 0;
++}
++
+ #ifndef __ARCH_HAS_DO_SOFTIRQ
+
+ asmlinkage void do_softirq(void)
+@@ -748,22 +766,14 @@ static int run_ksoftirqd(void * __bind_c
+
+ while (!kthread_should_stop()) {
+ preempt_disable();
+- if (!local_softirq_pending()) {
++ if (!local_softirq_pending())
+ schedule_preempt_disabled();
+- }
+
+ __set_current_state(TASK_RUNNING);
+
+ while (local_softirq_pending()) {
+- /* Preempt disable stops cpu going offline.
+- If already offline, we'll be on wrong CPU:
+- don't process */
+- if (cpu_is_offline((long)__bind_cpu))
++ if (ksoftirqd_do_softirq((long) __bind_cpu))
+ goto wait_to_die;
+- local_irq_disable();
+- if (local_softirq_pending())
+- __do_softirq();
+- local_irq_enable();
+ sched_preempt_enable_no_resched();
+ cond_resched();
+ preempt_disable();
diff --git a/softirq-thread-do-softirq.patch b/softirq-thread-do-softirq.patch
new file mode 100644
index 0000000..7746fe2
--- /dev/null
+++ b/softirq-thread-do-softirq.patch
@@ -0,0 +1,35 @@
+Subject: softirq-thread-do-softirq.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 28 Jun 2011 15:44:15 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/interrupt.h | 1 +
+ net/core/dev.c | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+Index: linux-stable/include/linux/interrupt.h
+===================================================================
+--- linux-stable.orig/include/linux/interrupt.h
++++ linux-stable/include/linux/interrupt.h
+@@ -446,6 +446,7 @@ struct softirq_action
+
+ asmlinkage void do_softirq(void);
+ asmlinkage void __do_softirq(void);
++static inline void thread_do_softirq(void) { do_softirq(); }
+ extern void open_softirq(int nr, void (*action)(struct softirq_action *));
+ extern void softirq_init(void);
+ extern void __raise_softirq_irqoff(unsigned int nr);
+Index: linux-stable/net/core/dev.c
+===================================================================
+--- linux-stable.orig/net/core/dev.c
++++ linux-stable/net/core/dev.c
+@@ -2993,7 +2993,7 @@ int netif_rx_ni(struct sk_buff *skb)
+ preempt_disable();
+ err = netif_rx(skb);
+ if (local_softirq_pending())
+- do_softirq();
++ thread_do_softirq();
+ preempt_enable();
+
+ return err;
diff --git a/spinlock-types-separate-raw.patch b/spinlock-types-separate-raw.patch
new file mode 100644
index 0000000..dff05b6
--- /dev/null
+++ b/spinlock-types-separate-raw.patch
@@ -0,0 +1,212 @@
+Subject: spinlock-types-separate-raw.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 29 Jun 2011 19:34:01 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rwlock_types.h | 4 +
+ include/linux/spinlock_types.h | 74 ------------------------------------
+ include/linux/spinlock_types_nort.h | 33 ++++++++++++++++
+ include/linux/spinlock_types_raw.h | 56 +++++++++++++++++++++++++++
+ 4 files changed, 95 insertions(+), 72 deletions(-)
+
+Index: linux-stable/include/linux/rwlock_types.h
+===================================================================
+--- linux-stable.orig/include/linux/rwlock_types.h
++++ linux-stable/include/linux/rwlock_types.h
+@@ -1,6 +1,10 @@
+ #ifndef __LINUX_RWLOCK_TYPES_H
+ #define __LINUX_RWLOCK_TYPES_H
+
++#if !defined(__LINUX_SPINLOCK_TYPES_H)
++# error "Do not include directly, include spinlock_types.h"
++#endif
++
+ /*
+ * include/linux/rwlock_types.h - generic rwlock type definitions
+ * and initializers
+Index: linux-stable/include/linux/spinlock_types.h
+===================================================================
+--- linux-stable.orig/include/linux/spinlock_types.h
++++ linux-stable/include/linux/spinlock_types.h
+@@ -9,79 +9,9 @@
+ * Released under the General Public License (GPL).
+ */
+
+-#if defined(CONFIG_SMP)
+-# include <asm/spinlock_types.h>
+-#else
+-# include <linux/spinlock_types_up.h>
+-#endif
++#include <linux/spinlock_types_raw.h>
+
+-#include <linux/lockdep.h>
+-
+-typedef struct raw_spinlock {
+- arch_spinlock_t raw_lock;
+-#ifdef CONFIG_GENERIC_LOCKBREAK
+- unsigned int break_lock;
+-#endif
+-#ifdef CONFIG_DEBUG_SPINLOCK
+- unsigned int magic, owner_cpu;
+- void *owner;
+-#endif
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- struct lockdep_map dep_map;
+-#endif
+-} raw_spinlock_t;
+-
+-#define SPINLOCK_MAGIC 0xdead4ead
+-
+-#define SPINLOCK_OWNER_INIT ((void *)-1L)
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+-#else
+-# define SPIN_DEP_MAP_INIT(lockname)
+-#endif
+-
+-#ifdef CONFIG_DEBUG_SPINLOCK
+-# define SPIN_DEBUG_INIT(lockname) \
+- .magic = SPINLOCK_MAGIC, \
+- .owner_cpu = -1, \
+- .owner = SPINLOCK_OWNER_INIT,
+-#else
+-# define SPIN_DEBUG_INIT(lockname)
+-#endif
+-
+-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+- { \
+- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+- SPIN_DEBUG_INIT(lockname) \
+- SPIN_DEP_MAP_INIT(lockname) }
+-
+-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+-
+-typedef struct spinlock {
+- union {
+- struct raw_spinlock rlock;
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+- struct {
+- u8 __padding[LOCK_PADSIZE];
+- struct lockdep_map dep_map;
+- };
+-#endif
+- };
+-} spinlock_t;
+-
+-#define __SPIN_LOCK_INITIALIZER(lockname) \
+- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+-
+-#define __SPIN_LOCK_UNLOCKED(lockname) \
+- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++#include <linux/spinlock_types_nort.h>
+
+ #include <linux/rwlock_types.h>
+
+Index: linux-stable/include/linux/spinlock_types_nort.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/linux/spinlock_types_nort.h
+@@ -0,0 +1,33 @@
++#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
++#define __LINUX_SPINLOCK_TYPES_NORT_H
++
++#ifndef __LINUX_SPINLOCK_TYPES_H
++#error "Do not include directly. Include spinlock_types.h instead"
++#endif
++
++/*
++ * The non RT version maps spinlocks to raw_spinlocks
++ */
++typedef struct spinlock {
++ union {
++ struct raw_spinlock rlock;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
++ struct {
++ u8 __padding[LOCK_PADSIZE];
++ struct lockdep_map dep_map;
++ };
++#endif
++ };
++} spinlock_t;
++
++#define __SPIN_LOCK_INITIALIZER(lockname) \
++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
++
++#define __SPIN_LOCK_UNLOCKED(lockname) \
++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
++
++#endif
+Index: linux-stable/include/linux/spinlock_types_raw.h
+===================================================================
+--- /dev/null
++++ linux-stable/include/linux/spinlock_types_raw.h
+@@ -0,0 +1,56 @@
++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
++#define __LINUX_SPINLOCK_TYPES_RAW_H
++
++#if defined(CONFIG_SMP)
++# include <asm/spinlock_types.h>
++#else
++# include <linux/spinlock_types_up.h>
++#endif
++
++#include <linux/lockdep.h>
++
++typedef struct raw_spinlock {
++ arch_spinlock_t raw_lock;
++#ifdef CONFIG_GENERIC_LOCKBREAK
++ unsigned int break_lock;
++#endif
++#ifdef CONFIG_DEBUG_SPINLOCK
++ unsigned int magic, owner_cpu;
++ void *owner;
++#endif
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ struct lockdep_map dep_map;
++#endif
++} raw_spinlock_t;
++
++#define SPINLOCK_MAGIC 0xdead4ead
++
++#define SPINLOCK_OWNER_INIT ((void *)-1L)
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
++#else
++# define SPIN_DEP_MAP_INIT(lockname)
++#endif
++
++#ifdef CONFIG_DEBUG_SPINLOCK
++# define SPIN_DEBUG_INIT(lockname) \
++ .magic = SPINLOCK_MAGIC, \
++ .owner_cpu = -1, \
++ .owner = SPINLOCK_OWNER_INIT,
++#else
++# define SPIN_DEBUG_INIT(lockname)
++#endif
++
++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
++ { \
++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
++ SPIN_DEBUG_INIT(lockname) \
++ SPIN_DEP_MAP_INIT(lockname) }
++
++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
++
++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
++
++#endif
diff --git a/stomp-machine-deal-clever-with-stopper-lock.patch b/stomp-machine-deal-clever-with-stopper-lock.patch
new file mode 100644
index 0000000..74388dd
--- /dev/null
+++ b/stomp-machine-deal-clever-with-stopper-lock.patch
@@ -0,0 +1,60 @@
+Subject: stomp_machine: Use mutex_trylock when called from inactive cpu
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 03 Oct 2012 17:21:53 +0100
+
+If the stop machinery is called from an inactive CPU we cannot use
+mutex_lock, because some other stomp machine invocation might be in
+progress and the mutex can be contended. We cannot schedule from this
+context, so trylock and loop.
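+
+A minimal sketch of the resulting trylock-and-spin idiom (using the
+stock mutex_trylock()/cpu_relax() primitives; error handling omitted):
+
+	/* We must not sleep here, so poll the mutex instead of blocking. */
+	while (!mutex_trylock(&stopper_lock))
+		cpu_relax();
+	/* ... queue the stop works on all cpus ... */
+	mutex_unlock(&stopper_lock);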
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ kernel/stop_machine.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+Index: linux-stable/kernel/stop_machine.c
+===================================================================
+--- linux-stable.orig/kernel/stop_machine.c
++++ linux-stable/kernel/stop_machine.c
+@@ -158,7 +158,7 @@ static DEFINE_PER_CPU(struct cpu_stop_wo
+
+ static void queue_stop_cpus_work(const struct cpumask *cpumask,
+ cpu_stop_fn_t fn, void *arg,
+- struct cpu_stop_done *done)
++ struct cpu_stop_done *done, bool inactive)
+ {
+ struct cpu_stop_work *work;
+ unsigned int cpu;
+@@ -175,7 +175,12 @@ static void queue_stop_cpus_work(const s
+ * Make sure that all work is queued on all cpus before
+ * any of the cpus can execute it.
+ */
+- mutex_lock(&stopper_lock);
++ if (!inactive) {
++ mutex_lock(&stopper_lock);
++ } else {
++ while (!mutex_trylock(&stopper_lock))
++ cpu_relax();
++ }
+ for_each_cpu(cpu, cpumask)
+ cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
+ &per_cpu(stop_cpus_work, cpu));
+@@ -188,7 +193,7 @@ static int __stop_cpus(const struct cpum
+ struct cpu_stop_done done;
+
+ cpu_stop_init_done(&done, cpumask_weight(cpumask));
+- queue_stop_cpus_work(cpumask, fn, arg, &done);
++ queue_stop_cpus_work(cpumask, fn, arg, &done, false);
+ wait_for_stop_done(&done);
+ return done.executed ? done.ret : -ENOENT;
+ }
+@@ -601,7 +606,7 @@ int stop_machine_from_inactive_cpu(int (
+ set_state(&smdata, STOPMACHINE_PREPARE);
+ cpu_stop_init_done(&done, num_active_cpus());
+ queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+- &done);
++ &done, true);
+ ret = stop_machine_cpu_stop(&smdata);
+
+ /* Busy wait for completion. */
diff --git a/stomp-machine-mark-stomper-thread.patch b/stomp-machine-mark-stomper-thread.patch
new file mode 100644
index 0000000..c933698
--- /dev/null
+++ b/stomp-machine-mark-stomper-thread.patch
@@ -0,0 +1,34 @@
+Subject: stomp-machine-mark-stomper-thread.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 19:53:19 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sched.h | 1 +
+ kernel/stop_machine.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+Index: linux-stable/include/linux/sched.h
+===================================================================
+--- linux-stable.orig/include/linux/sched.h
++++ linux-stable/include/linux/sched.h
+@@ -1836,6 +1836,7 @@ extern void thread_group_times(struct ta
+ #define PF_FROZEN 0x00010000 /* frozen for system suspend */
+ #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
+ #define PF_KSWAPD 0x00040000 /* I am kswapd */
++#define PF_STOMPER 0x00080000 /* I am a stomp machine thread */
+ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+ #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
+Index: linux-stable/kernel/stop_machine.c
+===================================================================
+--- linux-stable.orig/kernel/stop_machine.c
++++ linux-stable/kernel/stop_machine.c
+@@ -327,6 +327,7 @@ static int __cpuinit cpu_stop_cpu_callba
+ if (IS_ERR(p))
+ return notifier_from_errno(PTR_ERR(p));
+ get_task_struct(p);
++ p->flags |= PF_STOMPER;
+ kthread_bind(p, cpu);
+ sched_set_stop_task(cpu, p);
+ stopper->thread = p;
diff --git a/stomp-machine-raw-lock.patch b/stomp-machine-raw-lock.patch
new file mode 100644
index 0000000..51898f6
--- /dev/null
+++ b/stomp-machine-raw-lock.patch
@@ -0,0 +1,176 @@
+Subject: stomp-machine-raw-lock.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 29 Jun 2011 11:01:51 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/stop_machine.c | 58 +++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 41 insertions(+), 17 deletions(-)
+
+Index: linux-stable/kernel/stop_machine.c
+===================================================================
+--- linux-stable.orig/kernel/stop_machine.c
++++ linux-stable/kernel/stop_machine.c
+@@ -29,12 +29,12 @@ struct cpu_stop_done {
+ atomic_t nr_todo; /* nr left to execute */
+ bool executed; /* actually executed? */
+ int ret; /* collected return value */
+- struct completion completion; /* fired if nr_todo reaches 0 */
++ struct task_struct *waiter; /* woken when nr_todo reaches 0 */
+ };
+
+ /* the actual stopper, one per every possible cpu, enabled on online cpus */
+ struct cpu_stopper {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ bool enabled; /* is this stopper enabled? */
+ struct list_head works; /* list of pending works */
+ struct task_struct *thread; /* stopper thread */
+@@ -47,7 +47,7 @@ static void cpu_stop_init_done(struct cp
+ {
+ memset(done, 0, sizeof(*done));
+ atomic_set(&done->nr_todo, nr_todo);
+- init_completion(&done->completion);
++ done->waiter = current;
+ }
+
+ /* signal completion unless @done is NULL */
+@@ -56,8 +56,10 @@ static void cpu_stop_signal_done(struct
+ if (done) {
+ if (executed)
+ done->executed = true;
+- if (atomic_dec_and_test(&done->nr_todo))
+- complete(&done->completion);
++ if (atomic_dec_and_test(&done->nr_todo)) {
++ wake_up_process(done->waiter);
++ done->waiter = NULL;
++ }
+ }
+ }
+
+@@ -67,7 +69,7 @@ static void cpu_stop_queue_work(struct c
+ {
+ unsigned long flags;
+
+- spin_lock_irqsave(&stopper->lock, flags);
++ raw_spin_lock_irqsave(&stopper->lock, flags);
+
+ if (stopper->enabled) {
+ list_add_tail(&work->list, &stopper->works);
+@@ -75,7 +77,23 @@ static void cpu_stop_queue_work(struct c
+ } else
+ cpu_stop_signal_done(work->done, false);
+
+- spin_unlock_irqrestore(&stopper->lock, flags);
++ raw_spin_unlock_irqrestore(&stopper->lock, flags);
++}
++
++static void wait_for_stop_done(struct cpu_stop_done *done)
++{
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ while (atomic_read(&done->nr_todo)) {
++ schedule();
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ }
++ /*
++ * We need to wait until cpu_stop_signal_done() has cleared
++ * done->waiter.
++ */
++ while (done->waiter)
++ cpu_relax();
++ set_current_state(TASK_RUNNING);
+ }
+
+ /**
+@@ -109,7 +127,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
+
+ cpu_stop_init_done(&done, 1);
+ cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
+- wait_for_completion(&done.completion);
++ wait_for_stop_done(&done);
+ return done.executed ? done.ret : -ENOENT;
+ }
+
+@@ -171,7 +189,7 @@ static int __stop_cpus(const struct cpum
+
+ cpu_stop_init_done(&done, cpumask_weight(cpumask));
+ queue_stop_cpus_work(cpumask, fn, arg, &done);
+- wait_for_completion(&done.completion);
++ wait_for_stop_done(&done);
+ return done.executed ? done.ret : -ENOENT;
+ }
+
+@@ -259,13 +277,13 @@ repeat:
+ }
+
+ work = NULL;
+- spin_lock_irq(&stopper->lock);
++ raw_spin_lock_irq(&stopper->lock);
+ if (!list_empty(&stopper->works)) {
+ work = list_first_entry(&stopper->works,
+ struct cpu_stop_work, list);
+ list_del_init(&work->list);
+ }
+- spin_unlock_irq(&stopper->lock);
++ raw_spin_unlock_irq(&stopper->lock);
+
+ if (work) {
+ cpu_stop_fn_t fn = work->fn;
+@@ -299,7 +317,13 @@ repeat:
+ kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
+ ksym_buf), arg);
+
++ /*
++ * Make sure that the wakeup and setting done->waiter
++ * to NULL is atomic.
++ */
++ local_irq_disable();
+ cpu_stop_signal_done(done, true);
++ local_irq_enable();
+ } else
+ schedule();
+
+@@ -337,9 +361,9 @@ static int __cpuinit cpu_stop_cpu_callba
+ /* strictly unnecessary, as first user will wake it */
+ wake_up_process(stopper->thread);
+ /* mark enabled */
+- spin_lock_irq(&stopper->lock);
++ raw_spin_lock_irq(&stopper->lock);
+ stopper->enabled = true;
+- spin_unlock_irq(&stopper->lock);
++ raw_spin_unlock_irq(&stopper->lock);
+ break;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+@@ -352,11 +376,11 @@ static int __cpuinit cpu_stop_cpu_callba
+ /* kill the stopper */
+ kthread_stop(stopper->thread);
+ /* drain remaining works */
+- spin_lock_irq(&stopper->lock);
++ raw_spin_lock_irq(&stopper->lock);
+ list_for_each_entry(work, &stopper->works, list)
+ cpu_stop_signal_done(work->done, false);
+ stopper->enabled = false;
+- spin_unlock_irq(&stopper->lock);
++ raw_spin_unlock_irq(&stopper->lock);
+ /* release the stopper */
+ put_task_struct(stopper->thread);
+ stopper->thread = NULL;
+@@ -387,7 +411,7 @@ static int __init cpu_stop_init(void)
+ for_each_possible_cpu(cpu) {
+ struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+
+- spin_lock_init(&stopper->lock);
++ raw_spin_lock_init(&stopper->lock);
+ INIT_LIST_HEAD(&stopper->works);
+ }
+
+@@ -581,7 +605,7 @@ int stop_machine_from_inactive_cpu(int (
+ ret = stop_machine_cpu_stop(&smdata);
+
+ /* Busy wait for completion. */
+- while (!completion_done(&done.completion))
++ while (atomic_read(&done.nr_todo))
+ cpu_relax();
+
+ mutex_unlock(&stop_cpus_mutex);
diff --git a/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
new file mode 100644
index 0000000..f5f6c19
--- /dev/null
+++ b/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
@@ -0,0 +1,64 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:27 -0500
+Subject: stop_machine: convert stop_machine_run() to PREEMPT_RT
+
+Instead of playing with non-preemption, introduce explicit
+startup serialization. This is more robust and cleaner as
+well.
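+
+Roughly, the serialization works like this (names as in the hunk below;
+only an illustrative sketch): the queueing side holds stopper_lock while
+it queues work on every cpu, and each stopper thread takes and
+immediately drops the same mutex before running its callback, so no
+stopper can start before all of them have been queued.
+
+	/* queueing side */
+	mutex_lock(&stopper_lock);
+	for_each_cpu(cpu, cpumask)
+		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
+				    &per_cpu(stop_cpus_work, cpu));
+	mutex_unlock(&stopper_lock);
+
+	/* stopper thread, before invoking the callback */
+	mutex_lock(&stopper_lock);	/* waits until queueing is complete */
+	mutex_unlock(&stopper_lock);	/* and lets the other stoppers go */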
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/stop_machine.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+Index: linux-stable/kernel/stop_machine.c
+===================================================================
+--- linux-stable.orig/kernel/stop_machine.c
++++ linux-stable/kernel/stop_machine.c
+@@ -135,6 +135,7 @@ void stop_one_cpu_nowait(unsigned int cp
+
+ /* static data for stop_cpus */
+ static DEFINE_MUTEX(stop_cpus_mutex);
++static DEFINE_MUTEX(stopper_lock);
+ static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
+
+ static void queue_stop_cpus_work(const struct cpumask *cpumask,
+@@ -153,15 +154,14 @@ static void queue_stop_cpus_work(const s
+ }
+
+ /*
+- * Disable preemption while queueing to avoid getting
+- * preempted by a stopper which might wait for other stoppers
+- * to enter @fn which can lead to deadlock.
++ * Make sure that all work is queued on all cpus before
++ * any of the cpus can execute it.
+ */
+- preempt_disable();
++ mutex_lock(&stopper_lock);
+ for_each_cpu(cpu, cpumask)
+ cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
+ &per_cpu(stop_cpus_work, cpu));
+- preempt_enable();
++ mutex_unlock(&stopper_lock);
+ }
+
+ static int __stop_cpus(const struct cpumask *cpumask,
+@@ -275,6 +275,16 @@ repeat:
+
+ __set_current_state(TASK_RUNNING);
+
++ /*
++ * Wait until the stopper finished scheduling on all
++ * cpus
++ */
++ mutex_lock(&stopper_lock);
++ /*
++ * Let other cpu threads continue as well
++ */
++ mutex_unlock(&stopper_lock);
++
+ /* cpu stop callbacks are not allowed to sleep */
+ preempt_disable();
+
diff --git a/suspend-prevernt-might-sleep-splats.patch b/suspend-prevernt-might-sleep-splats.patch
new file mode 100644
index 0000000..3ae7192
--- /dev/null
+++ b/suspend-prevernt-might-sleep-splats.patch
@@ -0,0 +1,112 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 15 Jul 2010 10:29:00 +0200
+Subject: suspend: Prevent might sleep splats
+
+timekeeping suspend/resume calls read_persistent_clock() which takes
+rtc_lock. That results in might sleep warnings because at that point
+we run with interrupts disabled.
+
+We cannot convert rtc_lock to a raw spinlock as that would trigger
+other might sleep warnings.
+
+As a temporary workaround we disable the might sleep warnings by
+setting system_state to SYSTEM_SUSPEND before calling sysdev_suspend()
+and restoring it to SYSTEM_RUNNING after sysdev_resume().
+
+Needs to be revisited.
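+
+Why this silences the splats (a rough sketch of the existing bail-out in
+the might-sleep debug code, not anything this patch adds): the check
+already returns early whenever system_state is not SYSTEM_RUNNING, so
+flipping the state to SYSTEM_SUSPEND around the irqs-off region
+suppresses the warning.
+
+	/* simplified from the might-sleep debug check */
+	if (system_state != SYSTEM_RUNNING || oops_in_progress)
+		return;		/* no "sleeping while atomic" warning */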
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/kernel.h | 1 +
+ kernel/power/hibernate.c | 7 +++++++
+ kernel/power/suspend.c | 4 ++++
+ 3 files changed, 12 insertions(+)
+
+Index: linux-stable/include/linux/kernel.h
+===================================================================
+--- linux-stable.orig/include/linux/kernel.h
++++ linux-stable/include/linux/kernel.h
+@@ -385,6 +385,7 @@ extern enum system_states {
+ SYSTEM_HALT,
+ SYSTEM_POWER_OFF,
+ SYSTEM_RESTART,
++ SYSTEM_SUSPEND,
+ } system_state;
+
+ #define TAINT_PROPRIETARY_MODULE 0
+Index: linux-stable/kernel/power/hibernate.c
+===================================================================
+--- linux-stable.orig/kernel/power/hibernate.c
++++ linux-stable/kernel/power/hibernate.c
+@@ -275,6 +275,8 @@ static int create_image(int platform_mod
+
+ local_irq_disable();
+
++ system_state = SYSTEM_SUSPEND;
++
+ error = syscore_suspend();
+ if (error) {
+ printk(KERN_ERR "PM: Some system devices failed to power down, "
+@@ -302,6 +304,7 @@ static int create_image(int platform_mod
+ syscore_resume();
+
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+
+ Enable_cpus:
+@@ -427,6 +430,7 @@ static int resume_target_kernel(bool pla
+ goto Enable_cpus;
+
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
+
+ error = syscore_suspend();
+ if (error)
+@@ -460,6 +464,7 @@ static int resume_target_kernel(bool pla
+ syscore_resume();
+
+ Enable_irqs:
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+
+ Enable_cpus:
+@@ -542,6 +547,7 @@ int hibernation_platform_enter(void)
+ goto Platform_finish;
+
+ local_irq_disable();
++ system_state = SYSTEM_SUSPEND;
+ syscore_suspend();
+ if (pm_wakeup_pending()) {
+ error = -EAGAIN;
+@@ -554,6 +560,7 @@ int hibernation_platform_enter(void)
+
+ Power_up:
+ syscore_resume();
++ system_state = SYSTEM_RUNNING;
+ local_irq_enable();
+ enable_nonboot_cpus();
+
+Index: linux-stable/kernel/power/suspend.c
+===================================================================
+--- linux-stable.orig/kernel/power/suspend.c
++++ linux-stable/kernel/power/suspend.c
+@@ -165,6 +165,8 @@ static int suspend_enter(suspend_state_t
+ arch_suspend_disable_irqs();
+ BUG_ON(!irqs_disabled());
+
++ system_state = SYSTEM_SUSPEND;
++
+ error = syscore_suspend();
+ if (!error) {
+ *wakeup = pm_wakeup_pending();
+@@ -175,6 +177,8 @@ static int suspend_enter(suspend_state_t
+ syscore_resume();
+ }
+
++ system_state = SYSTEM_RUNNING;
++
+ arch_suspend_enable_irqs();
+ BUG_ON(irqs_disabled());
+
diff --git a/sysctl-include-atomic-h.patch b/sysctl-include-atomic-h.patch
new file mode 100644
index 0000000..426a6d2
--- /dev/null
+++ b/sysctl-include-atomic-h.patch
@@ -0,0 +1,21 @@
+Subject: sysctl-include-fix.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 14 Nov 2011 10:52:34 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/sysctl.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+Index: linux-stable/include/linux/sysctl.h
+===================================================================
+--- linux-stable.orig/include/linux/sysctl.h
++++ linux-stable/include/linux/sysctl.h
+@@ -933,6 +933,7 @@ enum
+ #include <linux/rcupdate.h>
+ #include <linux/wait.h>
+ #include <linux/rbtree.h>
++#include <linux/atomic.h>
+
+ /* For the /proc/sys support */
+ struct ctl_table;
diff --git a/sysfs-realtime-entry.patch b/sysfs-realtime-entry.patch
new file mode 100644
index 0000000..5645db2
--- /dev/null
+++ b/sysfs-realtime-entry.patch
@@ -0,0 +1,49 @@
+Subject: add /sys/kernel/realtime entry
+From: Clark Williams <williams@redhat.com>
+Date: Sat Jul 30 21:55:53 2011 -0500
+
+Add a /sys/kernel entry to indicate that the kernel is a
+realtime kernel.
+
+Clark says that he needs this for udev rules: udev needs to evaluate
+whether it's a PREEMPT_RT kernel a few thousand times, and parsing
+uname output is too slow for that.
+
+Are there better solutions? Should it exist and return 0 on !-rt?
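+
+For reference, a minimal userspace check of the new entry (just a
+sketch; error handling kept to a minimum):
+
+	#include <stdio.h>
+
+	/* Returns 1 on a PREEMPT_RT_FULL kernel, 0 otherwise. */
+	static int kernel_is_realtime(void)
+	{
+		FILE *f = fopen("/sys/kernel/realtime", "r");
+		int rt = 0;
+
+		if (f) {
+			if (fscanf(f, "%d", &rt) != 1)
+				rt = 0;
+			fclose(f);
+		}
+		return rt == 1;
+	}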
+
+Signed-off-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+---
+ kernel/ksysfs.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+Index: linux-stable/kernel/ksysfs.c
+===================================================================
+--- linux-stable.orig/kernel/ksysfs.c
++++ linux-stable/kernel/ksysfs.c
+@@ -133,6 +133,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
+
+ #endif /* CONFIG_KEXEC */
+
++#if defined(CONFIG_PREEMPT_RT_FULL)
++static ssize_t realtime_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", 1);
++}
++KERNEL_ATTR_RO(realtime);
++#endif
++
+ /* whether file capabilities are enabled */
+ static ssize_t fscaps_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+@@ -182,6 +191,9 @@ static struct attribute * kernel_attrs[]
+ &kexec_crash_size_attr.attr,
+ &vmcoreinfo_attr.attr,
+ #endif
++#ifdef CONFIG_PREEMPT_RT_FULL
++ &realtime_attr.attr,
++#endif
+ NULL
+ };
+
diff --git a/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
new file mode 100644
index 0000000..20d98fe
--- /dev/null
+++ b/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -0,0 +1,406 @@
+Subject: tasklet: Prevent tasklets from going into infinite spin in RT
+From: Ingo Molnar <mingo@elte.hu>
+Date: Tue Nov 29 20:18:22 2011 -0500
+
+When CONFIG_PREEMPT_RT_FULL is enabled, tasklets run as threads,
+and spinlocks turn into mutexes. But this can cause issues with
+tasks disabling tasklets. A tasklet runs under ksoftirqd, and
+if a tasklet is disabled with tasklet_disable(), the tasklet
+count is increased. When a tasklet runs, it checks this counter
+and if it is set, it adds itself back on the softirq queue and
+returns.
+
+The problem arises in RT because ksoftirqd will see that a softirq
+is ready to run (the tasklet softirq just re-armed itself), and will
+not sleep, but instead run the softirqs again. The tasklet softirq
+will still see that the count is non-zero, will not execute
+the tasklet, and will requeue it on the softirq again, which will
+cause ksoftirqd to run it again and again and again.
+
+It gets worse because ksoftirqd runs as a real-time thread.
+If it preempted the task that disabled tasklets, and that task
+has migration disabled, or can't run for other reasons, the tasklet
+softirq will never run because the count will never be zero, and
+ksoftirqd will go into an infinite loop. As an RT task, this
+becomes a big problem.
+
+This is a hack solution that has tasklet_disable() stop tasklets, and
+when a disabled tasklet runs, instead of requeueing it on the softirq,
+it is delayed. When tasklet_enable() is called and tasklets are
+waiting, tasklet_enable() kicks the waiting tasklets to continue.
+This prevents the lockup caused by ksoftirqd going into an infinite loop.
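+
+The triggering pattern, for illustration (hypothetical driver code;
+"mydev->tl" is made up):
+
+	tasklet_disable(&mydev->tl);	/* count > 0; tasklet may already be queued */
+	/* ... reconfigure hardware; may block or get preempted on RT ... */
+	tasklet_enable(&mydev->tl);	/* with this patch, kicks a PENDING tasklet */
+
+Between disable and enable, an already-scheduled tasklet keeps hitting
+the non-zero count; without the PENDING handling below, ksoftirqd would
+spin on it instead of sleeping.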
+
+[ rostedt@goodmis.org: ported to 3.0-rt ]
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/interrupt.h | 39 ++++----
+ kernel/softirq.c | 208 +++++++++++++++++++++++++++++++++-------------
+ 2 files changed, 170 insertions(+), 77 deletions(-)
+
+Index: linux-stable/include/linux/interrupt.h
+===================================================================
+--- linux-stable.orig/include/linux/interrupt.h
++++ linux-stable/include/linux/interrupt.h
+@@ -505,8 +505,9 @@ extern void __send_remote_softirq(struct
+ to be executed on some cpu at least once after this.
+ * If the tasklet is already scheduled, but its execution is still not
+ started, it will be executed only once.
+- * If this tasklet is already running on another CPU (or schedule is called
+- from tasklet itself), it is rescheduled for later.
++ * If this tasklet is already running on another CPU, it is rescheduled
++ for later.
++ * Schedule must not be called from the tasklet itself (a lockup occurs)
+ * Tasklet is strictly serialized wrt itself, but not
+ wrt another tasklets. If client needs some intertask synchronization,
+ he makes it with spinlocks.
+@@ -531,27 +532,36 @@ struct tasklet_struct name = { NULL, 0,
+ enum
+ {
+ TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
+- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
++ TASKLET_STATE_PENDING /* Tasklet is pending */
+ };
+
+-#ifdef CONFIG_SMP
++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
++
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ static inline int tasklet_trylock(struct tasklet_struct *t)
+ {
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
++static inline int tasklet_tryunlock(struct tasklet_struct *t)
++{
++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
++}
++
+ static inline void tasklet_unlock(struct tasklet_struct *t)
+ {
+ smp_mb__before_clear_bit();
+ clear_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
+-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+-{
+- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+-}
++extern void tasklet_unlock_wait(struct tasklet_struct *t);
++
+ #else
+ #define tasklet_trylock(t) 1
++#define tasklet_tryunlock(t) 1
+ #define tasklet_unlock_wait(t) do { } while (0)
+ #define tasklet_unlock(t) do { } while (0)
+ #endif
+@@ -600,17 +610,8 @@ static inline void tasklet_disable(struc
+ smp_mb();
+ }
+
+-static inline void tasklet_enable(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic_dec();
+- atomic_dec(&t->count);
+-}
+-
+-static inline void tasklet_hi_enable(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic_dec();
+- atomic_dec(&t->count);
+-}
++extern void tasklet_enable(struct tasklet_struct *t);
++extern void tasklet_hi_enable(struct tasklet_struct *t);
+
+ extern void tasklet_kill(struct tasklet_struct *t);
+ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
+Index: linux-stable/kernel/softirq.c
+===================================================================
+--- linux-stable.orig/kernel/softirq.c
++++ linux-stable/kernel/softirq.c
+@@ -21,6 +21,7 @@
+ #include <linux/freezer.h>
+ #include <linux/kthread.h>
+ #include <linux/rcupdate.h>
++#include <linux/delay.h>
+ #include <linux/ftrace.h>
+ #include <linux/smp.h>
+ #include <linux/tick.h>
+@@ -665,15 +666,45 @@ struct tasklet_head
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
+ static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
+
++static void inline
++__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
++{
++ if (tasklet_trylock(t)) {
++again:
++ /* We may have been preempted before tasklet_trylock
++ * and __tasklet_action may have already run.
++ * So double check the sched bit while the tasklet
++ * is locked before adding it to the list.
++ */
++ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
++ t->next = NULL;
++ *head->tail = t;
++ head->tail = &(t->next);
++ raise_softirq_irqoff(nr);
++ tasklet_unlock(t);
++ } else {
++ /* This is subtle. If we hit the corner case above
++ * It is possible that we get preempted right here,
++ * and another task has successfully called
++ * tasklet_schedule(), then this function, and
++ * failed on the trylock. Thus we must be sure
++ * before releasing the tasklet lock, that the
++ * SCHED_BIT is clear. Otherwise the tasklet
++ * may get its SCHED_BIT set, but not added to the
++ * list
++ */
++ if (!tasklet_tryunlock(t))
++ goto again;
++ }
++ }
++}
++
+ void __tasklet_schedule(struct tasklet_struct *t)
+ {
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- raise_softirq_irqoff(TASKLET_SOFTIRQ);
++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_vec), TASKLET_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+
+@@ -684,10 +715,7 @@ void __tasklet_hi_schedule(struct taskle
+ unsigned long flags;
+
+ local_irq_save(flags);
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_common_schedule(t, &__get_cpu_var(tasklet_hi_vec), HI_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+
+@@ -695,50 +723,119 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
+
+ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
+ {
+- BUG_ON(!irqs_disabled());
+-
+- t->next = __this_cpu_read(tasklet_hi_vec.head);
+- __this_cpu_write(tasklet_hi_vec.head, t);
+- __raise_softirq_irqoff(HI_SOFTIRQ);
++ __tasklet_hi_schedule(t);
+ }
+
+ EXPORT_SYMBOL(__tasklet_hi_schedule_first);
+
+-static void tasklet_action(struct softirq_action *a)
++void tasklet_enable(struct tasklet_struct *t)
+ {
+- struct tasklet_struct *list;
++ if (!atomic_dec_and_test(&t->count))
++ return;
++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++ tasklet_schedule(t);
++}
+
+- local_irq_disable();
+- list = __this_cpu_read(tasklet_vec.head);
+- __this_cpu_write(tasklet_vec.head, NULL);
+- __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
+- local_irq_enable();
++EXPORT_SYMBOL(tasklet_enable);
++
++void tasklet_hi_enable(struct tasklet_struct *t)
++{
++ if (!atomic_dec_and_test(&t->count))
++ return;
++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
++ tasklet_hi_schedule(t);
++}
++
++EXPORT_SYMBOL(tasklet_hi_enable);
++
++static void
++__tasklet_action(struct softirq_action *a, struct tasklet_struct *list)
++{
++ int loops = 1000000;
+
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
++ /*
++ * Should always succeed - after a tasklet got on the
++ * list (after getting the SCHED bit set from 0 to 1),
++ * nothing but the tasklet softirq it got queued to can
++ * lock it:
++ */
++ if (!tasklet_trylock(t)) {
++ WARN_ON(1);
++ continue;
+ }
+
+- local_irq_disable();
+ t->next = NULL;
+- *__this_cpu_read(tasklet_vec.tail) = t;
+- __this_cpu_write(tasklet_vec.tail, &(t->next));
+- __raise_softirq_irqoff(TASKLET_SOFTIRQ);
+- local_irq_enable();
++
++ /*
++ * If we cannot handle the tasklet because it's disabled,
++ * mark it as pending. tasklet_enable() will later
++ * re-schedule the tasklet.
++ */
++ if (unlikely(atomic_read(&t->count))) {
++out_disabled:
++ /* implicit unlock: */
++ wmb();
++ t->state = TASKLET_STATEF_PENDING;
++ continue;
++ }
++
++ /*
++ * After this point on the tasklet might be rescheduled
++ * on another CPU, but it can only be added to another
++ * CPU's tasklet list if we unlock the tasklet (which we
++ * dont do yet).
++ */
++ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ WARN_ON(1);
++
++again:
++ t->func(t->data);
++
++ /*
++ * Try to unlock the tasklet. We must use cmpxchg, because
++ * another CPU might have scheduled or disabled the tasklet.
++ * We only allow the STATE_RUN -> 0 transition here.
++ */
++ while (!tasklet_tryunlock(t)) {
++ /*
++ * If it got disabled meanwhile, bail out:
++ */
++ if (atomic_read(&t->count))
++ goto out_disabled;
++ /*
++ * If it got scheduled meanwhile, re-execute
++ * the tasklet function:
++ */
++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
++ goto again;
++ if (!--loops) {
++ printk("hm, tasklet state: %08lx\n", t->state);
++ WARN_ON(1);
++ tasklet_unlock(t);
++ break;
++ }
++ }
+ }
+ }
+
++static void tasklet_action(struct softirq_action *a)
++{
++ struct tasklet_struct *list;
++
++ local_irq_disable();
++ list = __get_cpu_var(tasklet_vec).head;
++ __get_cpu_var(tasklet_vec).head = NULL;
++ __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
++ local_irq_enable();
++
++ __tasklet_action(a, list);
++}
++
+ static void tasklet_hi_action(struct softirq_action *a)
+ {
+ struct tasklet_struct *list;
+@@ -749,29 +846,7 @@ static void tasklet_hi_action(struct sof
+ __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
+ local_irq_enable();
+
+- while (list) {
+- struct tasklet_struct *t = list;
+-
+- list = list->next;
+-
+- if (tasklet_trylock(t)) {
+- if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+- BUG();
+- t->func(t->data);
+- tasklet_unlock(t);
+- continue;
+- }
+- tasklet_unlock(t);
+- }
+-
+- local_irq_disable();
+- t->next = NULL;
+- *__this_cpu_read(tasklet_hi_vec.tail) = t;
+- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
+- __raise_softirq_irqoff(HI_SOFTIRQ);
+- local_irq_enable();
+- }
++ __tasklet_action(a, list);
+ }
+
+
+@@ -794,7 +869,7 @@ void tasklet_kill(struct tasklet_struct
+
+ while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ do {
+- yield();
++ msleep(1);
+ } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+ }
+ tasklet_unlock_wait(t);
+@@ -1000,6 +1075,23 @@ void __init softirq_init(void)
+ open_softirq(HI_SOFTIRQ, tasklet_hi_action);
+ }
+
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
++void tasklet_unlock_wait(struct tasklet_struct *t)
++{
++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
++ /*
++ * Hack for now to avoid this busy-loop:
++ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ msleep(1);
++#else
++ barrier();
++#endif
++ }
++}
++EXPORT_SYMBOL(tasklet_unlock_wait);
++#endif
++
+ static int run_ksoftirqd(void * __bind_cpu)
+ {
+ ksoftirqd_set_sched_params();
diff --git a/tasklist-lock-fix-section-conflict.patch b/tasklist-lock-fix-section-conflict.patch
new file mode 100644
index 0000000..0516c09
--- /dev/null
+++ b/tasklist-lock-fix-section-conflict.patch
@@ -0,0 +1,61 @@
+Subject: rwlocks: Fix section mismatch
+From: John Kacur <jkacur@redhat.com>
+Date: Mon, 19 Sep 2011 11:09:27 +0200 (CEST)
+
+This fixes the following build error for the preempt-rt kernel.
+
+make kernel/fork.o
+ CC kernel/fork.o
+kernel/fork.c:90: error: section of ‘tasklist_lock’ conflicts with previous declaration
+make[2]: *** [kernel/fork.o] Error 1
+make[1]: *** [kernel/fork.o] Error 2
+
+The rt kernel cache aligns the RWLOCK in DEFINE_RWLOCK by default.
+The non-rt kernels explicitly cache align only the tasklist_lock in
+kernel/fork.c.
+That can create a build conflict. This fixes the build problem by making the
+non-rt kernels cache align RWLOCKs by default. The side effect is that
+the other RWLOCKs are also cache aligned for non-rt.
+
+This is a short term solution for rt only.
+The longer term solution would be to push the cache aligned DEFINE_RWLOCK
+to mainline. If there are objections, then we could create a
+DEFINE_RWLOCK_CACHE_ALIGNED or something of that nature.
+
+Comments? Objections?
+
+Signed-off-by: John Kacur <jkacur@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/alpine.LFD.2.00.1109191104010.23118@localhost6.localdomain6
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/rwlock_types.h | 3 ++-
+ kernel/fork.c | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+Index: linux-stable/include/linux/rwlock_types.h
+===================================================================
+--- linux-stable.orig/include/linux/rwlock_types.h
++++ linux-stable/include/linux/rwlock_types.h
+@@ -47,6 +47,7 @@ typedef struct {
+ RW_DEP_MAP_INIT(lockname) }
+ #endif
+
+-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+ #endif /* __LINUX_RWLOCK_TYPES_H */
+Index: linux-stable/kernel/fork.c
+===================================================================
+--- linux-stable.orig/kernel/fork.c
++++ linux-stable/kernel/fork.c
+@@ -93,7 +93,7 @@ int max_threads; /* tunable limit on nr
+
+ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
+
+-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
++DEFINE_RWLOCK(tasklist_lock); /* outer */
+
+ #ifdef CONFIG_PROVE_RCU
+ int lockdep_tasklist_lock_is_held(void)
diff --git a/timekeeping-split-xtime-lock.patch b/timekeeping-split-xtime-lock.patch
new file mode 100644
index 0000000..7fb90db
--- /dev/null
+++ b/timekeeping-split-xtime-lock.patch
@@ -0,0 +1,527 @@
+Subject: timekeeping: Split xtime_lock
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 01 Mar 2012 15:14:06 +0100
+
+xtime_lock is going to be split apart in mainline, so we can shorten
+the seqcount protected regions and avoid updating seqcount in some
+code paths. This is a straightforward split, so we can avoid the
+whole mess with raw seqlocks for RT.
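+
+After the split, writers take the raw spinlock for mutual exclusion and
+bump the seqcount only around the data update, while readers keep the
+usual retry loop (pattern as in the hunks below, fields elided):
+
+	/* writer */
+	raw_spin_lock_irqsave(&tk->lock, flags);
+	write_seqcount_begin(&tk->seq);
+	/* ... update timekeeper fields ... */
+	write_seqcount_end(&tk->seq);
+	raw_spin_unlock_irqrestore(&tk->lock, flags);
+
+	/* reader */
+	do {
+		seq = read_seqcount_begin(&tk->seq);
+		/* ... snapshot fields ... */
+	} while (read_seqcount_retry(&tk->seq, seq));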
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/time/jiffies.c | 4 -
+ kernel/time/tick-common.c | 10 ++-
+ kernel/time/tick-internal.h | 3 -
+ kernel/time/tick-sched.c | 16 +++---
+ kernel/time/timekeeping.c | 116 +++++++++++++++++++++++++-------------------
+ 5 files changed, 88 insertions(+), 61 deletions(-)
+
+Index: linux-stable/kernel/time/jiffies.c
+===================================================================
+--- linux-stable.orig/kernel/time/jiffies.c
++++ linux-stable/kernel/time/jiffies.c
+@@ -74,9 +74,9 @@ u64 get_jiffies_64(void)
+ u64 ret;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ ret = jiffies_64;
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+ return ret;
+ }
+ EXPORT_SYMBOL(get_jiffies_64);
+Index: linux-stable/kernel/time/tick-common.c
+===================================================================
+--- linux-stable.orig/kernel/time/tick-common.c
++++ linux-stable/kernel/time/tick-common.c
+@@ -63,13 +63,15 @@ int tick_is_oneshot_available(void)
+ static void tick_periodic(int cpu)
+ {
+ if (tick_do_timer_cpu == cpu) {
+- write_seqlock(&xtime_lock);
++ raw_spin_lock(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+
+ /* Keep track of the next tick event */
+ tick_next_period = ktime_add(tick_next_period, tick_period);
+
+ do_timer(1);
+- write_sequnlock(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock(&xtime_lock);
+ }
+
+ update_process_times(user_mode(get_irq_regs()));
+@@ -130,9 +132,9 @@ void tick_setup_periodic(struct clock_ev
+ ktime_t next;
+
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ next = tick_next_period;
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
+
+Index: linux-stable/kernel/time/tick-internal.h
+===================================================================
+--- linux-stable.orig/kernel/time/tick-internal.h
++++ linux-stable/kernel/time/tick-internal.h
+@@ -141,4 +141,5 @@ static inline int tick_device_is_functio
+ #endif
+
+ extern void do_timer(unsigned long ticks);
+-extern seqlock_t xtime_lock;
++extern raw_spinlock_t xtime_lock;
++extern seqcount_t xtime_seq;
+Index: linux-stable/kernel/time/tick-sched.c
+===================================================================
+--- linux-stable.orig/kernel/time/tick-sched.c
++++ linux-stable/kernel/time/tick-sched.c
+@@ -56,7 +56,8 @@ static void tick_do_update_jiffies64(kti
+ return;
+
+ /* Reevalute with xtime_lock held */
+- write_seqlock(&xtime_lock);
++ raw_spin_lock(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+
+ delta = ktime_sub(now, last_jiffies_update);
+ if (delta.tv64 >= tick_period.tv64) {
+@@ -79,7 +80,8 @@ static void tick_do_update_jiffies64(kti
+ /* Keep the tick_next_period variable up to date */
+ tick_next_period = ktime_add(last_jiffies_update, tick_period);
+ }
+- write_sequnlock(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock(&xtime_lock);
+ }
+
+ /*
+@@ -89,12 +91,14 @@ static ktime_t tick_init_jiffy_update(vo
+ {
+ ktime_t period;
+
+- write_seqlock(&xtime_lock);
++ raw_spin_lock(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+ /* Did we start the jiffies update yet ? */
+ if (last_jiffies_update.tv64 == 0)
+ last_jiffies_update = tick_next_period;
+ period = last_jiffies_update;
+- write_sequnlock(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock(&xtime_lock);
+ return period;
+ }
+
+@@ -282,11 +286,11 @@ static ktime_t tick_nohz_stop_sched_tick
+
+ /* Read jiffies and the time when jiffies were updated last */
+ do {
+- seq = read_seqbegin(&xtime_lock);
++ seq = read_seqcount_begin(&xtime_seq);
+ last_update = last_jiffies_update;
+ last_jiffies = jiffies;
+ time_delta = timekeeping_max_deferment();
+- } while (read_seqretry(&xtime_lock, seq));
++ } while (read_seqcount_retry(&xtime_seq, seq));
+
+ if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
+ arch_needs_cpu(cpu)) {
+Index: linux-stable/kernel/time/timekeeping.c
+===================================================================
+--- linux-stable.orig/kernel/time/timekeeping.c
++++ linux-stable/kernel/time/timekeeping.c
+@@ -74,7 +74,8 @@ struct timekeeper {
+ /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
+ struct timespec raw_time;
+ /* Seqlock for all timekeeper values */
+- seqlock_t lock;
++ seqcount_t seq;
++ raw_spinlock_t lock;
+ };
+
+ static struct timekeeper timekeeper;
+@@ -83,7 +84,8 @@ static struct timekeeper timekeeper;
+ * This read-write spinlock protects us from races in SMP while
+ * playing with xtime.
+ */
+-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(xtime_lock);
++seqcount_t xtime_seq;
+
+ /* flag for if timekeeping is suspended */
+ int __read_mostly timekeeping_suspended;
+@@ -300,12 +302,12 @@ void getnstimeofday(struct timespec *ts)
+ WARN_ON(timekeeping_suspended);
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+
+ ts->tv_sec = tk->xtime_sec;
+ nsecs = timekeeping_get_ns(tk);
+
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ ts->tv_nsec = 0;
+ timespec_add_ns(ts, nsecs);
+@@ -321,11 +323,11 @@ ktime_t ktime_get(void)
+ WARN_ON(timekeeping_suspended);
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+ secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
+ nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
+
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+ /*
+ * Use ktime_set/ktime_add_ns to create a proper ktime on
+ * 32-bit architectures without CONFIG_KTIME_SCALAR.
+@@ -352,12 +354,12 @@ void ktime_get_ts(struct timespec *ts)
+ WARN_ON(timekeeping_suspended);
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+ ts->tv_sec = tk->xtime_sec;
+ nsec = timekeeping_get_ns(tk);
+ tomono = tk->wall_to_monotonic;
+
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ ts->tv_sec += tomono.tv_sec;
+ ts->tv_nsec = 0;
+@@ -385,7 +387,7 @@ void getnstime_raw_and_real(struct times
+ WARN_ON_ONCE(timekeeping_suspended);
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+
+ *ts_raw = tk->raw_time;
+ ts_real->tv_sec = tk->xtime_sec;
+@@ -394,7 +396,7 @@ void getnstime_raw_and_real(struct times
+ nsecs_raw = timekeeping_get_ns_raw(tk);
+ nsecs_real = timekeeping_get_ns(tk);
+
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ timespec_add_ns(ts_raw, nsecs_raw);
+ timespec_add_ns(ts_real, nsecs_real);
+@@ -434,7 +436,8 @@ int do_settimeofday(const struct timespe
+ if (!timespec_valid_strict(tv))
+ return -EINVAL;
+
+- write_seqlock_irqsave(&tk->lock, flags);
++ raw_spin_lock_irqsave(&tk->lock, flags);
++ write_seqcount_begin(&tk->seq);
+
+ timekeeping_forward_now(tk);
+
+@@ -448,7 +451,8 @@ int do_settimeofday(const struct timespe
+
+ timekeeping_update(tk, true);
+
+- write_sequnlock_irqrestore(&tk->lock, flags);
++ write_seqcount_end(&tk->seq);
++ raw_spin_unlock_irqrestore(&tk->lock, flags);
+
+ /* signal hrtimers about time change */
+ clock_was_set();
+@@ -473,7 +477,8 @@ int timekeeping_inject_offset(struct tim
+ if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+- write_seqlock_irqsave(&tk->lock, flags);
++ raw_spin_lock_irqsave(&tk->lock, flags);
++ write_seqcount_begin(&tk->seq);
+
+ timekeeping_forward_now(tk);
+
+@@ -490,7 +495,8 @@ int timekeeping_inject_offset(struct tim
+ error: /* even if we error out, we forwarded the time, so call update */
+ timekeeping_update(tk, true);
+
+- write_sequnlock_irqrestore(&tk->lock, flags);
++ write_seqcount_end(&tk->seq);
++ raw_spin_unlock_irqrestore(&tk->lock, flags);
+
+ /* signal hrtimers about time change */
+ clock_was_set();
+@@ -512,7 +518,8 @@ static int change_clocksource(void *data
+
+ new = (struct clocksource *) data;
+
+- write_seqlock_irqsave(&tk->lock, flags);
++ raw_spin_lock_irqsave(&tk->lock, flags);
++ write_seqcount_begin(&tk->seq);
+
+ timekeeping_forward_now(tk);
+ if (!new->enable || new->enable(new) == 0) {
+@@ -523,7 +530,8 @@ static int change_clocksource(void *data
+ }
+ timekeeping_update(tk, true);
+
+- write_sequnlock_irqrestore(&tk->lock, flags);
++ write_seqcount_end(&tk->seq);
++ raw_spin_unlock_irqrestore(&tk->lock, flags);
+
+ return 0;
+ }
+@@ -573,11 +581,11 @@ void getrawmonotonic(struct timespec *ts
+ s64 nsecs;
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+ nsecs = timekeeping_get_ns_raw(tk);
+ *ts = tk->raw_time;
+
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ timespec_add_ns(ts, nsecs);
+ }
+@@ -593,11 +601,11 @@ int timekeeping_valid_for_hres(void)
+ int ret;
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+
+ ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
+
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ return ret;
+ }
+@@ -612,11 +620,11 @@ u64 timekeeping_max_deferment(void)
+ u64 ret;
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+
+ ret = tk->clock->max_idle_ns;
+
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ return ret;
+ }
+@@ -677,11 +685,13 @@ void __init timekeeping_init(void)
+ boot.tv_nsec = 0;
+ }
+
+- seqlock_init(&tk->lock);
++ raw_spin_lock_init(&tk->lock);
++ seqcount_init(&tk->seq);
+
+ ntp_init();
+
+- write_seqlock_irqsave(&tk->lock, flags);
++ raw_spin_lock_irqsave(&tk->lock, flags);
++ write_seqcount_begin(&tk->seq);
+ clock = clocksource_default_clock();
+ if (clock->enable)
+ clock->enable(clock);
+@@ -700,7 +710,8 @@ void __init timekeeping_init(void)
+ tmp.tv_nsec = 0;
+ tk_set_sleep_time(tk, tmp);
+
+- write_sequnlock_irqrestore(&tk->lock, flags);
++ write_seqcount_end(&tk->seq);
++ raw_spin_unlock_irqrestore(&tk->lock, flags);
+ }
+
+ /* time in seconds when suspend began */
+@@ -747,7 +758,8 @@ void timekeeping_inject_sleeptime(struct
+ if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
+ return;
+
+- write_seqlock_irqsave(&tk->lock, flags);
++ raw_spin_lock_irqsave(&tk->lock, flags);
++ write_seqcount_begin(&tk->seq);
+
+ timekeeping_forward_now(tk);
+
+@@ -755,7 +767,8 @@ void timekeeping_inject_sleeptime(struct
+
+ timekeeping_update(tk, true);
+
+- write_sequnlock_irqrestore(&tk->lock, flags);
++ write_seqcount_end(&tk->seq);
++ raw_spin_unlock_irqrestore(&tk->lock, flags);
+
+ /* signal hrtimers about time change */
+ clock_was_set();
+@@ -778,7 +791,8 @@ static void timekeeping_resume(void)
+
+ clocksource_resume();
+
+- write_seqlock_irqsave(&tk->lock, flags);
++ raw_spin_lock_irqsave(&tk->lock, flags);
++ write_seqcount_begin(&tk->seq);
+
+ if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
+ ts = timespec_sub(ts, timekeeping_suspend_time);
+@@ -789,7 +803,8 @@ static void timekeeping_resume(void)
+ tk->ntp_error = 0;
+ timekeeping_suspended = 0;
+ timekeeping_update(tk, false);
+- write_sequnlock_irqrestore(&tk->lock, flags);
++ write_seqcount_end(&tk->seq);
++ raw_spin_unlock_irqrestore(&tk->lock, flags);
+
+ touch_softlockup_watchdog();
+
+@@ -808,7 +823,8 @@ static int timekeeping_suspend(void)
+
+ read_persistent_clock(&timekeeping_suspend_time);
+
+- write_seqlock_irqsave(&tk->lock, flags);
++ raw_spin_lock_irqsave(&tk->lock, flags);
++ write_seqcount_begin(&tk->seq);
+ timekeeping_forward_now(tk);
+ timekeeping_suspended = 1;
+
+@@ -831,7 +847,8 @@ static int timekeeping_suspend(void)
+ timekeeping_suspend_time =
+ timespec_add(timekeeping_suspend_time, delta_delta);
+ }
+- write_sequnlock_irqrestore(&tk->lock, flags);
++ write_seqcount_end(&tk->seq);
++ raw_spin_unlock_irqrestore(&tk->lock, flags);
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
+ clocksource_suspend();
+@@ -1141,7 +1158,8 @@ static void update_wall_time(void)
+ unsigned long flags;
+ s64 remainder;
+
+- write_seqlock_irqsave(&tk->lock, flags);
++ raw_spin_lock_irqsave(&tk->lock, flags);
++ write_seqcount_begin(&tk->seq);
+
+ /* Make sure we're fully resumed: */
+ if (unlikely(timekeeping_suspended))
+@@ -1205,8 +1223,8 @@ static void update_wall_time(void)
+ timekeeping_update(tk, false);
+
+ out:
+- write_sequnlock_irqrestore(&tk->lock, flags);
+-
++ write_seqcount_end(&tk->seq);
++ raw_spin_unlock_irqrestore(&tk->lock, flags);
+ }
+
+ /**
+@@ -1253,13 +1271,13 @@ void get_monotonic_boottime(struct times
+ WARN_ON(timekeeping_suspended);
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+ ts->tv_sec = tk->xtime_sec;
+ nsec = timekeeping_get_ns(tk);
+ tomono = tk->wall_to_monotonic;
+ sleep = tk->total_sleep_time;
+
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
+ ts->tv_nsec = 0;
+@@ -1318,10 +1336,10 @@ struct timespec current_kernel_time(void
+ unsigned long seq;
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+
+ now = tk_xtime(tk);
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ return now;
+ }
+@@ -1334,11 +1352,11 @@ struct timespec get_monotonic_coarse(voi
+ unsigned long seq;
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+
+ now = tk_xtime(tk);
+ mono = tk->wall_to_monotonic;
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
+ now.tv_nsec + mono.tv_nsec);
+@@ -1371,11 +1389,11 @@ void get_xtime_and_monotonic_and_sleep_o
+ unsigned long seq;
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+ *xtim = tk_xtime(tk);
+ *wtom = tk->wall_to_monotonic;
+ *sleep = tk->total_sleep_time;
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+ }
+
+ #ifdef CONFIG_HIGH_RES_TIMERS
+@@ -1395,14 +1413,14 @@ ktime_t ktime_get_update_offsets(ktime_t
+ u64 secs, nsecs;
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+
+ secs = tk->xtime_sec;
+ nsecs = timekeeping_get_ns(tk);
+
+ *offs_real = tk->offs_real;
+ *offs_boot = tk->offs_boot;
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+ now = ktime_sub(now, *offs_real);
+@@ -1420,9 +1438,9 @@ ktime_t ktime_get_monotonic_offset(void)
+ struct timespec wtom;
+
+ do {
+- seq = read_seqbegin(&tk->lock);
++ seq = read_seqcount_begin(&tk->seq);
+ wtom = tk->wall_to_monotonic;
+- } while (read_seqretry(&tk->lock, seq));
++ } while (read_seqcount_retry(&tk->seq, seq));
+
+ return timespec_to_ktime(wtom);
+ }
+@@ -1436,7 +1454,9 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_of
+ */
+ void xtime_update(unsigned long ticks)
+ {
+- write_seqlock(&xtime_lock);
++ raw_spin_lock(&xtime_lock);
++ write_seqcount_begin(&xtime_seq);
+ do_timer(ticks);
+- write_sequnlock(&xtime_lock);
++ write_seqcount_end(&xtime_seq);
++ raw_spin_unlock(&xtime_lock);
+ }
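
The hunks above all apply the same transformation: the timekeeper seqlock is split into a raw spinlock that serializes writers (and never becomes a sleeping lock on RT) plus a bare seqcount that readers retry on. A minimal sketch of that pattern follows; demo_state and the demo_* functions are made-up names for illustration only, not part of the patch.

#include <linux/types.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>

/* Illustrative sketch of the raw_spinlock + seqcount split used above. */
struct demo_state {
	raw_spinlock_t	lock;	/* writer exclusion, stays raw on RT */
	seqcount_t	seq;	/* lets lockless readers detect updates */
	u64		value;
};

static struct demo_state ds;

static void demo_init(void)
{
	raw_spin_lock_init(&ds.lock);
	seqcount_init(&ds.seq);
}

static void demo_update(u64 v)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ds.lock, flags);
	write_seqcount_begin(&ds.seq);
	ds.value = v;
	write_seqcount_end(&ds.seq);
	raw_spin_unlock_irqrestore(&ds.lock, flags);
}

static u64 demo_read(void)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqcount_begin(&ds.seq);
		v = ds.value;
	} while (read_seqcount_retry(&ds.seq, seq));

	return v;
}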
diff --git a/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
new file mode 100644
index 0000000..f347403
--- /dev/null
+++ b/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -0,0 +1,77 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 21 Aug 2009 11:56:45 +0200
+Subject: timer: delay waking softirqs from the jiffy tick
+
+People were complaining about broken balancing with the recent -rt
+series.
+
+A look at /proc/sched_debug yielded:
+
+cpu#0, 2393.874 MHz
+ .nr_running : 0
+ .load : 0
+ .cpu_load[0] : 177522
+ .cpu_load[1] : 177522
+ .cpu_load[2] : 177522
+ .cpu_load[3] : 177522
+ .cpu_load[4] : 177522
+cpu#1, 2393.874 MHz
+ .nr_running : 4
+ .load : 4096
+ .cpu_load[0] : 181618
+ .cpu_load[1] : 180850
+ .cpu_load[2] : 180274
+ .cpu_load[3] : 179938
+ .cpu_load[4] : 179758
+
+Which indicated the cpu_load computation was hosed; the 177522 value
+indicates that there is one RT task runnable. Initially I thought the
+old problem of calculating the cpu_load from a softirq had re-surfaced,
+however looking at the code shows it's being done from scheduler_tick().
+
+[ we really should fix this RT/cfs interaction some day... ]
+
+A few trace_printk()s later:
+
+ sirq-timer/1-19 [001] 174.289744: 19: 50:S ==> [001] 0:140:R <idle>
+ <idle>-0 [001] 174.290724: enqueue_task_rt: adding task: 19/sirq-timer/1 with load: 177522
+ <idle>-0 [001] 174.290725: 0:140:R + [001] 19: 50:S sirq-timer/1
+ <idle>-0 [001] 174.290730: scheduler_tick: current load: 177522
+ <idle>-0 [001] 174.290732: scheduler_tick: current: 0/swapper
+ <idle>-0 [001] 174.290736: 0:140:R ==> [001] 19: 50:R sirq-timer/1
+ sirq-timer/1-19 [001] 174.290741: dequeue_task_rt: removing task: 19/sirq-timer/1 with load: 177522
+ sirq-timer/1-19 [001] 174.290743: 19: 50:S ==> [001] 0:140:R <idle>
+
+We see that we always raise the timer softirq before doing the load
+calculation. Avoid this by re-ordering the scheduler_tick() call in
+update_process_times() to occur before we deal with timers.
+
+This lowers the load back to sanity and restores regular load-balancing
+behaviour.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/timer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/kernel/timer.c
+===================================================================
+--- linux-stable.orig/kernel/timer.c
++++ linux-stable/kernel/timer.c
+@@ -1398,13 +1398,13 @@ void update_process_times(int user_tick)
+
+ /* Note: this timer irq context must be accounted for as well. */
+ account_process_tick(p, user_tick);
++ scheduler_tick();
+ run_local_timers();
+ rcu_check_callbacks(cpu, user_tick);
+ #ifdef CONFIG_IRQ_WORK
+ if (in_irq())
+ irq_work_run();
+ #endif
+- scheduler_tick();
+ run_posix_cpu_timers(p);
+ }
+
diff --git a/timer-fd-avoid-live-lock.patch b/timer-fd-avoid-live-lock.patch
new file mode 100644
index 0000000..8f6afe7
--- /dev/null
+++ b/timer-fd-avoid-live-lock.patch
@@ -0,0 +1,29 @@
+Subject: timer-fd: Prevent live lock
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 25 Jan 2012 11:08:40 +0100
+
+If hrtimer_try_to_cancel() requires a retry, then depending on the
+priority setting the retry loop might prevent timer callback completion
+on RT. Prevent that by waiting for completion on RT; there is no change
+for a non-RT kernel.
+
+Reported-by: Sankara Muthukrishnan <sankara.m@gmail.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ fs/timerfd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/fs/timerfd.c
+===================================================================
+--- linux-stable.orig/fs/timerfd.c
++++ linux-stable/fs/timerfd.c
+@@ -313,7 +313,7 @@ SYSCALL_DEFINE4(timerfd_settime, int, uf
+ if (hrtimer_try_to_cancel(&ctx->tmr) >= 0)
+ break;
+ spin_unlock_irq(&ctx->wqh.lock);
+- cpu_relax();
++ hrtimer_wait_for_timer(&ctx->tmr);
+ }
+
+ /*
diff --git a/timer-handle-idle-trylock-in-get-next-timer-irq.patch b/timer-handle-idle-trylock-in-get-next-timer-irq.patch
new file mode 100644
index 0000000..e7cb331
--- /dev/null
+++ b/timer-handle-idle-trylock-in-get-next-timer-irq.patch
@@ -0,0 +1,81 @@
+Subject: timer-handle-idle-trylock-in-get-next-timer-irq.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 22:08:38 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/spinlock_rt.h | 12 +++++++++++-
+ kernel/rtmutex.c | 7 +------
+ kernel/timer.c | 7 ++++---
+ 3 files changed, 16 insertions(+), 10 deletions(-)
+
+Index: linux-stable/include/linux/spinlock_rt.h
+===================================================================
+--- linux-stable.orig/include/linux/spinlock_rt.h
++++ linux-stable/include/linux/spinlock_rt.h
+@@ -53,7 +53,17 @@ extern void __lockfunc __rt_spin_unlock(
+
+ #define spin_lock_irq(lock) spin_lock(lock)
+
+-#define spin_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock))
++
++#define spin_trylock(lock) \
++({ \
++ int __locked; \
++ migrate_disable(); \
++ __locked = spin_do_trylock(lock); \
++ if (!__locked) \
++ migrate_enable(); \
++ __locked; \
++})
+
+ #ifdef CONFIG_LOCKDEP
+ # define spin_lock_nested(lock, subclass) \
+Index: linux-stable/kernel/rtmutex.c
+===================================================================
+--- linux-stable.orig/kernel/rtmutex.c
++++ linux-stable/kernel/rtmutex.c
+@@ -861,15 +861,10 @@ EXPORT_SYMBOL(rt_spin_unlock_wait);
+
+ int __lockfunc rt_spin_trylock(spinlock_t *lock)
+ {
+- int ret;
++ int ret = rt_mutex_trylock(&lock->lock);
+
+- migrate_disable();
+- ret = rt_mutex_trylock(&lock->lock);
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+- else
+- migrate_enable();
+-
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_spin_trylock);
+Index: linux-stable/kernel/timer.c
+===================================================================
+--- linux-stable.orig/kernel/timer.c
++++ linux-stable/kernel/timer.c
+@@ -1391,9 +1391,10 @@ unsigned long get_next_timer_interrupt(u
+ /*
+ * On PREEMPT_RT we cannot sleep here. If the trylock does not
+ * succeed then we return the worst-case 'expires in 1 tick'
+- * value:
++ * value. We use the rt functions here directly to avoid a
++ * migrate_disable() call.
+ */
+- if (!spin_trylock(&base->lock))
++ if (!spin_do_trylock(&base->lock))
+ return now + 1;
+ #else
+ spin_lock(&base->lock);
+@@ -1403,7 +1404,7 @@ unsigned long get_next_timer_interrupt(u
+ base->next_timer = __next_timer_interrupt(base);
+ expires = base->next_timer;
+ }
+- spin_unlock(&base->lock);
++ rt_spin_unlock(&base->lock);
+
+ if (time_before_eq(expires, now))
+ return now;
diff --git a/timers-avoid-the-base-null-otptimization-on-rt.patch b/timers-avoid-the-base-null-otptimization-on-rt.patch
new file mode 100644
index 0000000..c038f1c
--- /dev/null
+++ b/timers-avoid-the-base-null-otptimization-on-rt.patch
@@ -0,0 +1,70 @@
+Subject: timers: Avoid the switch timers base set to NULL trick on RT
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 21 Jul 2011 15:23:39 +0200
+
+On RT that code is preemptible, so we cannot assign NULL to timers
+base as a preempter would spin forever in lock_timer_base().
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/timer.c | 40 ++++++++++++++++++++++++++++++++--------
+ 1 file changed, 32 insertions(+), 8 deletions(-)
+
+Index: linux-stable/kernel/timer.c
+===================================================================
+--- linux-stable.orig/kernel/timer.c
++++ linux-stable/kernel/timer.c
+@@ -737,6 +737,36 @@ static struct tvec_base *lock_timer_base
+ }
+ }
+
++#ifndef CONFIG_PREEMPT_RT_FULL
++static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
++ struct tvec_base *old,
++ struct tvec_base *new)
++{
++ /* See the comment in lock_timer_base() */
++ timer_set_base(timer, NULL);
++ spin_unlock(&old->lock);
++ spin_lock(&new->lock);
++ timer_set_base(timer, new);
++ return new;
++}
++#else
++static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
++ struct tvec_base *old,
++ struct tvec_base *new)
++{
++ /*
++ * We cannot do the above because we might be preempted and
++ * then the preempter would see NULL and loop forever.
++ */
++ if (spin_trylock(&new->lock)) {
++ timer_set_base(timer, new);
++ spin_unlock(&old->lock);
++ return new;
++ }
++ return old;
++}
++#endif
++
+ static inline int
+ __mod_timer(struct timer_list *timer, unsigned long expires,
+ bool pending_only, int pinned)
+@@ -775,14 +805,8 @@ __mod_timer(struct timer_list *timer, un
+ * handler yet has not finished. This also guarantees that
+ * the timer is serialized wrt itself.
+ */
+- if (likely(base->running_timer != timer)) {
+- /* See the comment in lock_timer_base() */
+- timer_set_base(timer, NULL);
+- spin_unlock(&base->lock);
+- base = new_base;
+- spin_lock(&base->lock);
+- timer_set_base(timer, base);
+- }
++ if (likely(base->running_timer != timer))
++ base = switch_timer_base(timer, base, new_base);
+ }
+
+ timer->expires = expires;
diff --git a/timers-mov-printk_tick-to-soft-interrupt.patch b/timers-mov-printk_tick-to-soft-interrupt.patch
new file mode 100644
index 0000000..80eaabe
--- /dev/null
+++ b/timers-mov-printk_tick-to-soft-interrupt.patch
@@ -0,0 +1,31 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 3 Jul 2009 08:44:30 -0500
+Subject: timers: move printk_tick to soft interrupt
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+---
+ kernel/timer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/kernel/timer.c
+===================================================================
+--- linux-stable.orig/kernel/timer.c
++++ linux-stable/kernel/timer.c
+@@ -1400,7 +1400,6 @@ void update_process_times(int user_tick)
+ account_process_tick(p, user_tick);
+ run_local_timers();
+ rcu_check_callbacks(cpu, user_tick);
+- printk_tick();
+ #ifdef CONFIG_IRQ_WORK
+ if (in_irq())
+ irq_work_run();
+@@ -1416,6 +1415,7 @@ static void run_timer_softirq(struct sof
+ {
+ struct tvec_base *base = __this_cpu_read(tvec_bases);
+
++ printk_tick();
+ hrtimer_run_pending();
+
+ if (time_after_eq(jiffies, base->timer_jiffies))
diff --git a/timers-preempt-rt-support.patch b/timers-preempt-rt-support.patch
new file mode 100644
index 0000000..99ff6af
--- /dev/null
+++ b/timers-preempt-rt-support.patch
@@ -0,0 +1,58 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:30:20 -0500
+Subject: timers: preempt-rt support
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/timer.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+Index: linux-stable/kernel/timer.c
+===================================================================
+--- linux-stable.orig/kernel/timer.c
++++ linux-stable/kernel/timer.c
+@@ -1363,7 +1363,17 @@ unsigned long get_next_timer_interrupt(u
+ if (cpu_is_offline(smp_processor_id()))
+ return expires;
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * On PREEMPT_RT we cannot sleep here. If the trylock does not
++ * succeed then we return the worst-case 'expires in 1 tick'
++ * value:
++ */
++ if (!spin_trylock(&base->lock))
++ return now + 1;
++#else
+ spin_lock(&base->lock);
++#endif
+ if (base->active_timers) {
+ if (time_before_eq(base->next_timer, base->timer_jiffies))
+ base->next_timer = __next_timer_interrupt(base);
+@@ -1373,7 +1383,6 @@ unsigned long get_next_timer_interrupt(u
+
+ if (time_before_eq(expires, now))
+ return now;
+-
+ return cmp_next_hrtimer_event(now, expires);
+ }
+ #endif
+@@ -1763,7 +1772,7 @@ static void __cpuinit migrate_timers(int
+
+ BUG_ON(cpu_online(cpu));
+ old_base = per_cpu(tvec_bases, cpu);
+- new_base = get_cpu_var(tvec_bases);
++ new_base = get_local_var(tvec_bases);
+ /*
+ * The caller is globally serialized and nobody else
+ * takes two locks at once, deadlock is not possible.
+@@ -1784,7 +1793,7 @@ static void __cpuinit migrate_timers(int
+
+ spin_unlock(&old_base->lock);
+ spin_unlock_irq(&new_base->lock);
+- put_cpu_var(tvec_bases);
++ put_local_var(tvec_bases);
+ }
+ #endif /* CONFIG_HOTPLUG_CPU */
+
diff --git a/timers-prepare-for-full-preemption.patch b/timers-prepare-for-full-preemption.patch
new file mode 100644
index 0000000..aee04f2
--- /dev/null
+++ b/timers-prepare-for-full-preemption.patch
@@ -0,0 +1,126 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:34 -0500
+Subject: timers: prepare for full preemption
+
+When softirqs can be preempted we need to make sure that cancelling
+the timer from the active thread can not deadlock vs. a running timer
+callback. Add a waitqueue to resolve that.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ include/linux/timer.h | 2 +-
+ kernel/timer.c | 35 ++++++++++++++++++++++++++++++++---
+ 2 files changed, 33 insertions(+), 4 deletions(-)
+
+Index: linux-stable/include/linux/timer.h
+===================================================================
+--- linux-stable.orig/include/linux/timer.h
++++ linux-stable/include/linux/timer.h
+@@ -276,7 +276,7 @@ extern void add_timer(struct timer_list
+
+ extern int try_to_del_timer_sync(struct timer_list *timer);
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ extern int del_timer_sync(struct timer_list *timer);
+ #else
+ # define del_timer_sync(t) del_timer(t)
+Index: linux-stable/kernel/timer.c
+===================================================================
+--- linux-stable.orig/kernel/timer.c
++++ linux-stable/kernel/timer.c
+@@ -75,6 +75,7 @@ struct tvec_root {
+ struct tvec_base {
+ spinlock_t lock;
+ struct timer_list *running_timer;
++ wait_queue_head_t wait_for_running_timer;
+ unsigned long timer_jiffies;
+ unsigned long next_timer;
+ unsigned long active_timers;
+@@ -755,12 +756,15 @@ __mod_timer(struct timer_list *timer, un
+
+ debug_activate(timer, expires);
+
++ preempt_disable_rt();
+ cpu = smp_processor_id();
+
+ #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+ if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
+ cpu = get_nohz_timer_target();
+ #endif
++ preempt_enable_rt();
++
+ new_base = per_cpu(tvec_bases, cpu);
+
+ if (base != new_base) {
+@@ -961,6 +965,29 @@ void add_timer_on(struct timer_list *tim
+ }
+ EXPORT_SYMBOL_GPL(add_timer_on);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++/*
++ * Wait for a running timer
++ */
++static void wait_for_running_timer(struct timer_list *timer)
++{
++ struct tvec_base *base = timer->base;
++
++ if (base->running_timer == timer)
++ wait_event(base->wait_for_running_timer,
++ base->running_timer != timer);
++}
++
++# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
++#else
++static inline void wait_for_running_timer(struct timer_list *timer)
++{
++ cpu_relax();
++}
++
++# define wakeup_timer_waiters(b) do { } while (0)
++#endif
++
+ /**
+ * del_timer - deactive a timer.
+ * @timer: the timer to be deactivated
+@@ -1018,7 +1045,7 @@ int try_to_del_timer_sync(struct timer_l
+ }
+ EXPORT_SYMBOL(try_to_del_timer_sync);
+
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+ /**
+ * del_timer_sync - deactivate a timer and wait for the handler to finish.
+ * @timer: the timer to be deactivated
+@@ -1078,7 +1105,7 @@ int del_timer_sync(struct timer_list *ti
+ int ret = try_to_del_timer_sync(timer);
+ if (ret >= 0)
+ return ret;
+- cpu_relax();
++ wait_for_running_timer(timer);
+ }
+ }
+ EXPORT_SYMBOL(del_timer_sync);
+@@ -1192,10 +1219,11 @@ static inline void __run_timers(struct t
+
+ spin_unlock_irq(&base->lock);
+ call_timer_fn(timer, fn, data);
++ base->running_timer = NULL;
+ spin_lock_irq(&base->lock);
+ }
+ }
+- base->running_timer = NULL;
++ wake_up(&base->wait_for_running_timer);
+ spin_unlock_irq(&base->lock);
+ }
+
+@@ -1696,6 +1724,7 @@ static int __cpuinit init_timers_cpu(int
+ }
+
+ spin_lock_init(&base->lock);
++ init_waitqueue_head(&base->wait_for_running_timer);
+
+ for (j = 0; j < TVN_SIZE; j++) {
+ INIT_LIST_HEAD(base->tv5.vec + j);
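
The wait-queue handshake added above can be reduced to the following sketch; the demo_* names are hypothetical and the base locking around the running pointer is omitted for brevity, only the wait/wake pairing mirrors the patch.

#include <linux/wait.h>

struct demo_base {
	wait_queue_head_t	waiters;
	void			*running;	/* callback currently executing */
};

/* Softirq-thread side: publish the running callback, run it, clear it,
 * then wake any task waiting in a sync cancel. */
static void demo_run_one(struct demo_base *b, void *timer)
{
	b->running = timer;
	/* ... invoke the timer callback ... */
	b->running = NULL;
	wake_up(&b->waiters);
}

/* Cancel side: sleep instead of spinning with cpu_relax(), so a
 * preempted softirq thread can still finish the callback. */
static void demo_wait_for_callback(struct demo_base *b, void *timer)
{
	if (b->running == timer)
		wait_event(b->waiters, b->running != timer);
}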
diff --git a/tracing-account-for-preempt-off-in-preempt_schedule.patch b/tracing-account-for-preempt-off-in-preempt_schedule.patch
new file mode 100644
index 0000000..3250535
--- /dev/null
+++ b/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -0,0 +1,48 @@
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Thu, 29 Sep 2011 12:24:30 -0500
+Subject: tracing: Account for preempt off in preempt_schedule()
+
+The preempt_schedule() uses the preempt_disable_notrace() version
+because it can cause infinite recursion by the function tracer as
+the function tracer uses preempt_enable_notrace() which may call
+back into the preempt_schedule() code as the NEED_RESCHED is still
+set and the PREEMPT_ACTIVE has not been set yet.
+
+See commit: d1f74e20b5b064a130cd0743a256c2d3cfe84010 that made this
+change.
+
+The preemptoff and preemptirqsoff latency tracers require the first
+and last preempt count modifiers to enable tracing. But this skips
+the checks. Since we cannot convert them back to the non-notrace
+version, we can use the idle() hooks for the latency tracers here.
+That is, the start/stop_critical_timings() works well to manually
+start and stop the latency tracer for preempt off timings.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/sched/core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+Index: linux-stable/kernel/sched/core.c
+===================================================================
+--- linux-stable.orig/kernel/sched/core.c
++++ linux-stable/kernel/sched/core.c
+@@ -3544,7 +3544,16 @@ asmlinkage void __sched notrace preempt_
+
+ do {
+ add_preempt_count_notrace(PREEMPT_ACTIVE);
++ /*
++ * The add/subtract must not be traced by the function
++ * tracer. But we still want to account for the
++ * preempt off latency tracer. Since the _notrace versions
++ * of add/subtract skip the accounting for latency tracer
++ * we must force it manually.
++ */
++ start_critical_timings();
+ __schedule();
++ stop_critical_timings();
+ sub_preempt_count_notrace(PREEMPT_ACTIVE);
+
+ /*
diff --git a/tty-use-local-irq-nort.patch b/tty-use-local-irq-nort.patch
new file mode 100644
index 0000000..cecab62
--- /dev/null
+++ b/tty-use-local-irq-nort.patch
@@ -0,0 +1,49 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 17 Aug 2009 19:49:19 +0200
+Subject: tty: Do not disable interrupts in put_ldisc on -rt
+
+Fixes the following on PREEMPT_RT:
+
+BUG: sleeping function called from invalid context at kernel/rtmutex.c:684
+in_atomic(): 0, irqs_disabled(): 1, pid: 9116, name: sshd
+Pid: 9116, comm: sshd Not tainted 2.6.31-rc6-rt2 #6
+Call Trace:
+[<ffffffff81034a4f>] __might_sleep+0xec/0xee
+[<ffffffff812fbc6d>] rt_spin_lock+0x34/0x75
+[<ffffffff81064a83>] atomic_dec_and_spin_lock+0x36/0x54
+[<ffffffff811df7c7>] put_ldisc+0x57/0xa6
+[<ffffffff811dfb87>] tty_ldisc_hangup+0xe7/0x19f
+[<ffffffff811d9224>] do_tty_hangup+0xff/0x319
+[<ffffffff811d9453>] tty_vhangup+0x15/0x17
+[<ffffffff811e1263>] pty_close+0x127/0x12b
+[<ffffffff811dac41>] tty_release_dev+0x1ad/0x4c0
+....
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/tty/tty_ldisc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/drivers/tty/tty_ldisc.c
+===================================================================
+--- linux-stable.orig/drivers/tty/tty_ldisc.c
++++ linux-stable/drivers/tty/tty_ldisc.c
+@@ -52,7 +52,7 @@ static void put_ldisc(struct tty_ldisc *
+ * We really want an "atomic_dec_and_lock_irqsave()",
+ * but we don't have it, so this does it by hand.
+ */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
+ struct tty_ldisc_ops *ldo = ld->ops;
+
+@@ -63,7 +63,7 @@ static void put_ldisc(struct tty_ldisc *
+ kfree(ld);
+ return;
+ }
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ wake_up(&ld->wq_idle);
+ }
+
diff --git a/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
new file mode 100644
index 0000000..442c38d
--- /dev/null
+++ b/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -0,0 +1,67 @@
+Subject: [UPSTREAM] net, RT: Remove preemption disabling in netif_rx()
+From: Priyanka Jain <Priyanka.Jain@freescale.com>
+Date: Thu, 17 May 2012 09:35:11 +0530
+
+1)enqueue_to_backlog() (called from netif_rx) should be
+ bound to a particular CPU. This can be achieved by
+ disabling migration. No need to disable preemption.
+
+2)Fixes crash "BUG: scheduling while atomic: ksoftirqd"
+ in case of RT.
+ If preemption is disabled, enqueue_to_backlog() is called
+ in atomic context. And if the backlog exceeds its count,
+ kfree_skb() is called. But in RT, kfree_skb() might
+ get scheduled out, so it expects a non-atomic context.
+
+3)When CONFIG_PREEMPT_RT_FULL is not defined,
+ migrate_enable() and migrate_disable() map to
+ preempt_enable() and preempt_disable(), so no
+ change in functionality in case of non-RT.
+
+-Replace preempt_enable(), preempt_disable() with
+ migrate_enable(), migrate_disable() respectively
+-Replace get_cpu(), put_cpu() with get_cpu_light(),
+ put_cpu_light() respectively
+
+Signed-off-by: Priyanka Jain <Priyanka.Jain@freescale.com>
+Acked-by: Rajan Srivastava <Rajan.Srivastava@freescale.com>
+Cc: <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/1337227511-2271-1-git-send-email-Priyanka.Jain@freescale.com
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ Testing: Tested successfully on p4080ds(8-core SMP system)
+
+ net/core/dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+Index: linux-stable/net/core/dev.c
+===================================================================
+--- linux-stable.orig/net/core/dev.c
++++ linux-stable/net/core/dev.c
+@@ -2967,7 +2967,7 @@ int netif_rx(struct sk_buff *skb)
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+ int cpu;
+
+- preempt_disable();
++ migrate_disable();
+ rcu_read_lock();
+
+ cpu = get_rps_cpu(skb->dev, skb, &rflow);
+@@ -2977,13 +2977,13 @@ int netif_rx(struct sk_buff *skb)
+ ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+
+ rcu_read_unlock();
+- preempt_enable();
++ migrate_enable();
+ } else
+ #endif
+ {
+ unsigned int qtail;
+- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
+- put_cpu();
++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
++ put_cpu_light();
+ }
+ return ret;
+ }
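
The substitution described in the changelog can be sketched as below; demo_enqueue() and demo_backlog_add() are hypothetical stand-ins, and migrate_disable()/migrate_enable() are the RT-series primitives which, as noted above, map to preempt_disable()/preempt_enable() on non-RT kernels.

#include <linux/skbuff.h>
#include <linux/smp.h>

static int demo_backlog_add(int cpu, struct sk_buff *skb);	/* hypothetical helper */

static int demo_enqueue(struct sk_buff *skb)
{
	int cpu, ret;

	migrate_disable();		/* pin to this CPU but stay preemptible */
	cpu = smp_processor_id();	/* stable while migration is disabled */
	ret = demo_backlog_add(cpu, skb);
	migrate_enable();

	return ret;
}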
diff --git a/usb-fix-mouse-problem-copying-large-data.patch b/usb-fix-mouse-problem-copying-large-data.patch
new file mode 100644
index 0000000..16dc2c2
--- /dev/null
+++ b/usb-fix-mouse-problem-copying-large-data.patch
@@ -0,0 +1,38 @@
+From: Wu Zhangjin <wuzj@lemote.com>
+Date: Mon, 4 Jan 2010 11:33:02 +0800
+Subject: USB: Fix the mouse problem when copying large amounts of data
+
+When copying large amounts of data between the USB storage devices and
+the hard disk, the USB mouse will not work; this patch fixes it.
+
+[NOTE: This problem has been found on the Loongson family machines; not
+sure whether it is reproducible on other platforms]
+
+Signed-off-by: Hu Hongbing <huhb@lemote.com>
+Signed-off-by: Wu Zhangjin <wuzhangjin@gmail.com>
+
+---
+ drivers/usb/host/ohci-hcd.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+Index: linux-stable/drivers/usb/host/ohci-hcd.c
+===================================================================
+--- linux-stable.orig/drivers/usb/host/ohci-hcd.c
++++ linux-stable/drivers/usb/host/ohci-hcd.c
+@@ -829,9 +829,13 @@ static irqreturn_t ohci_irq (struct usb_
+ }
+
+ if (ints & OHCI_INTR_WDH) {
+- spin_lock (&ohci->lock);
+- dl_done_list (ohci);
+- spin_unlock (&ohci->lock);
++ if (ohci->hcca->done_head == 0) {
++ ints &= ~OHCI_INTR_WDH;
++ } else {
++ spin_lock (&ohci->lock);
++ dl_done_list (ohci);
++ spin_unlock (&ohci->lock);
++ }
+ }
+
+ if (quirk_zfmicro(ohci) && (ints & OHCI_INTR_SF)) {
diff --git a/usb-hcd-use-local-irq-nort.patch b/usb-hcd-use-local-irq-nort.patch
new file mode 100644
index 0000000..b3b2323
--- /dev/null
+++ b/usb-hcd-use-local-irq-nort.patch
@@ -0,0 +1,36 @@
+From: Steven Rostedt <srostedt@redhat.com>
+Date: Fri, 3 Jul 2009 08:44:26 -0500
+Subject: usb: Use local_irq_*_nort() variants
+
+[ tglx: Now that irqf_disabled is dead we should kill that ]
+
+Signed-off-by: Steven Rostedt <srostedt@redhat.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ drivers/usb/core/hcd.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/drivers/usb/core/hcd.c
+===================================================================
+--- linux-stable.orig/drivers/usb/core/hcd.c
++++ linux-stable/drivers/usb/core/hcd.c
+@@ -2158,7 +2158,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
+ * when the first handler doesn't use it. So let's just
+ * assume it's never used.
+ */
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+
+ if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
+ rc = IRQ_NONE;
+@@ -2167,7 +2167,7 @@ irqreturn_t usb_hcd_irq (int irq, void *
+ else
+ rc = IRQ_HANDLED;
+
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(usb_hcd_irq);
diff --git a/user-use-local-irq-nort.patch b/user-use-local-irq-nort.patch
new file mode 100644
index 0000000..4ab6f28
--- /dev/null
+++ b/user-use-local-irq-nort.patch
@@ -0,0 +1,31 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 21 Jul 2009 23:06:05 +0200
+Subject: core: Do not disable interrupts on RT in kernel/users.c
+
+Use the local_irq_*_nort variants to reduce latencies in RT. The code
+is serialized by the locks. No need to disable interrupts.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ kernel/user.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/kernel/user.c
+===================================================================
+--- linux-stable.orig/kernel/user.c
++++ linux-stable/kernel/user.c
+@@ -147,11 +147,11 @@ void free_uid(struct user_struct *up)
+ if (!up)
+ return;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+ free_user(up, flags);
+ else
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ }
+
+ struct user_struct *alloc_uid(kuid_t uid)
diff --git a/workqueue-use-get-cpu-light.patch b/workqueue-use-get-cpu-light.patch
new file mode 100644
index 0000000..88cad8f
--- /dev/null
+++ b/workqueue-use-get-cpu-light.patch
@@ -0,0 +1,24 @@
+Subject: workqueue-use-get-cpu-light.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 17 Jul 2011 21:42:26 +0200
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/workqueue.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/kernel/workqueue.c
+===================================================================
+--- linux-stable.orig/kernel/workqueue.c
++++ linux-stable/kernel/workqueue.c
+@@ -1067,8 +1067,8 @@ int queue_work(struct workqueue_struct *
+ {
+ int ret;
+
+- ret = queue_work_on(get_cpu(), wq, work);
+- put_cpu();
++ ret = queue_work_on(get_cpu_light(), wq, work);
++ put_cpu_light();
+
+ return ret;
+ }
diff --git a/x86-crypto-reduce-preempt-disabled-regions.patch b/x86-crypto-reduce-preempt-disabled-regions.patch
new file mode 100644
index 0000000..fe0b339
--- /dev/null
+++ b/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -0,0 +1,114 @@
+Subject: x86: crypto: Reduce preempt disabled regions
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 14 Nov 2011 18:19:27 +0100
+
+Restrict the preempt disabled regions to the actual floating point
+operations and enable preemption for the administrative actions.
+
+This is necessary on RT to avoid kfree and other operations being
+called with preemption disabled.
+
+Reported-and-tested-by: Carsten Emde <cbe@osadl.org>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+Index: linux-stable/arch/x86/crypto/aesni-intel_glue.c
+===================================================================
+--- linux-stable.orig/arch/x86/crypto/aesni-intel_glue.c
++++ linux-stable/arch/x86/crypto/aesni-intel_glue.c
+@@ -245,14 +245,14 @@ static int ecb_encrypt(struct blkcipher_
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+- nbytes & AES_BLOCK_MASK);
++ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -269,14 +269,14 @@ static int ecb_decrypt(struct blkcipher_
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -293,14 +293,14 @@ static int cbc_encrypt(struct blkcipher_
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -317,14 +317,14 @@ static int cbc_decrypt(struct blkcipher_
+ err = blkcipher_walk_virt(desc, &walk);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes)) {
++ kernel_fpu_begin();
+ aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
+@@ -357,18 +357,20 @@ static int ctr_crypt(struct blkcipher_de
+ err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+- kernel_fpu_begin();
+ while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
++ kernel_fpu_begin();
+ aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes & AES_BLOCK_MASK, walk.iv);
++ kernel_fpu_end();
+ nbytes &= AES_BLOCK_SIZE - 1;
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+ if (walk.nbytes) {
++ kernel_fpu_begin();
+ ctr_crypt_final(ctx, &walk);
++ kernel_fpu_end();
+ err = blkcipher_walk_done(desc, &walk, 0);
+ }
+- kernel_fpu_end();
+
+ return err;
+ }
diff --git a/x86-disable-debug-stack.patch b/x86-disable-debug-stack.patch
new file mode 100644
index 0000000..b3096ea
--- /dev/null
+++ b/x86-disable-debug-stack.patch
@@ -0,0 +1,108 @@
+From: Andi Kleen <ak@suse.de>
+Date: Fri, 3 Jul 2009 08:44:10 -0500
+Subject: x86: Disable IST stacks for debug/int 3/stack fault for PREEMPT_RT
+
+Normally the x86-64 trap handlers for debug/int 3/stack fault run
+on a special interrupt stack to make them more robust
+when dealing with kernel code.
+
+The PREEMPT_RT kernel can sleep in locks even while allocating
+GFP_ATOMIC memory. When one of these trap handlers needs to send
+real time signals for ptrace it allocates memory and could then
+try to to schedule. But it is not allowed to schedule on a
+IST stack. This can cause warnings and hangs.
+
+This patch disables the IST stacks for these handlers for PREEMPT_RT
+kernel. Instead let them run on the normal process stack.
+
+The kernel only really needs the ISTs here to make kernel debuggers more
+robust in case someone sets a break point somewhere where the stack is
+invalid. But there are no kernel debuggers in the standard kernel
+that do this.
+
+It also means kprobes cannot be set in situations with invalid stack;
+but that sounds like a reasonable restriction.
+
+The stack fault change could minimally impact oops quality, but not very
+much because stack faults are fairly rare.
+
+A better solution would be to use similar logic as the NMI "paranoid"
+path: check if signal is for user space, if yes go back to entry.S, switch stack,
+call sync_regs, then do the signal sending etc.
+
+But this patch is much simpler and should work too with minimal impact.
+
+Signed-off-by: Andi Kleen <ak@suse.de>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/include/asm/page_64_types.h | 21 +++++++++++++++------
+ arch/x86/kernel/cpu/common.c | 2 ++
+ arch/x86/kernel/dumpstack_64.c | 4 ++++
+ 3 files changed, 21 insertions(+), 6 deletions(-)
+
+Index: linux-stable/arch/x86/include/asm/page_64_types.h
+===================================================================
+--- linux-stable.orig/arch/x86/include/asm/page_64_types.h
++++ linux-stable/arch/x86/include/asm/page_64_types.h
+@@ -14,12 +14,21 @@
+ #define IRQ_STACK_ORDER 2
+ #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+
+-#define STACKFAULT_STACK 1
+-#define DOUBLEFAULT_STACK 2
+-#define NMI_STACK 3
+-#define DEBUG_STACK 4
+-#define MCE_STACK 5
+-#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
++#ifdef CONFIG_PREEMPT_RT_FULL
++# define STACKFAULT_STACK 0
++# define DOUBLEFAULT_STACK 1
++# define NMI_STACK 2
++# define DEBUG_STACK 0
++# define MCE_STACK 3
++# define N_EXCEPTION_STACKS 3 /* hw limit: 7 */
++#else
++# define STACKFAULT_STACK 1
++# define DOUBLEFAULT_STACK 2
++# define NMI_STACK 3
++# define DEBUG_STACK 4
++# define MCE_STACK 5
++# define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
++#endif
+
+ #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
+ #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
+Index: linux-stable/arch/x86/kernel/cpu/common.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/cpu/common.c
++++ linux-stable/arch/x86/kernel/cpu/common.c
+@@ -1089,7 +1089,9 @@ DEFINE_PER_CPU(struct task_struct *, fpu
+ */
+ static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
++#if DEBUG_STACK > 0
+ [DEBUG_STACK - 1] = DEBUG_STKSZ
++#endif
+ };
+
+ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+Index: linux-stable/arch/x86/kernel/dumpstack_64.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/dumpstack_64.c
++++ linux-stable/arch/x86/kernel/dumpstack_64.c
+@@ -21,10 +21,14 @@
+ (N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)
+
+ static char x86_stack_ids[][8] = {
++#if DEBUG_STACK > 0
+ [ DEBUG_STACK-1 ] = "#DB",
++#endif
+ [ NMI_STACK-1 ] = "NMI",
+ [ DOUBLEFAULT_STACK-1 ] = "#DF",
++#if STACKFAULT_STACK > 0
+ [ STACKFAULT_STACK-1 ] = "#SS",
++#endif
+ [ MCE_STACK-1 ] = "#MC",
+ #if DEBUG_STKSZ > EXCEPTION_STKSZ
+ [ N_EXCEPTION_STACKS ...
diff --git a/x86-highmem-warn.patch b/x86-highmem-warn.patch
new file mode 100644
index 0000000..64dbe51
--- /dev/null
+++ b/x86-highmem-warn.patch
@@ -0,0 +1,27 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:25 -0500
+Subject: x86: highmem: Replace BUG_ON by WARN_ON
+
+The machine might survive that problem and be at least in a state
+which allows us to get more information about the problem.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/x86/mm/highmem_32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-stable/arch/x86/mm/highmem_32.c
+===================================================================
+--- linux-stable.orig/arch/x86/mm/highmem_32.c
++++ linux-stable/arch/x86/mm/highmem_32.c
+@@ -43,7 +43,7 @@ void *kmap_atomic_prot(struct page *page
+ type = kmap_atomic_idx_push();
+ idx = type + KM_TYPE_NR*smp_processor_id();
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- BUG_ON(!pte_none(*(kmap_pte-idx)));
++ WARN_ON(!pte_none(*(kmap_pte-idx)));
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
+ arch_flush_lazy_mmu_mode();
+
diff --git a/x86-hpet-disable-msi-on-lenovo-w510.patch b/x86-hpet-disable-msi-on-lenovo-w510.patch
new file mode 100644
index 0000000..1027a1d
--- /dev/null
+++ b/x86-hpet-disable-msi-on-lenovo-w510.patch
@@ -0,0 +1,66 @@
+Subject: x86: hpet: Disable MSI on Lenovo W510
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 30 Sep 2011 20:03:37 +0200
+
+MSI based per cpu timers lose interrupts when intel_idle() is enabled
+- independent of the c-state. With idle=poll the problem cannot be
+observed. We have no idea yet whether this is a W510-specific issue
+or a general chipset oddity. Blacklist the known problem machine.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kernel/hpet.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
+
+Index: linux-stable/arch/x86/kernel/hpet.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/hpet.c
++++ linux-stable/arch/x86/kernel/hpet.c
+@@ -8,6 +8,7 @@
+ #include <linux/slab.h>
+ #include <linux/hpet.h>
+ #include <linux/init.h>
++#include <linux/dmi.h>
+ #include <linux/cpu.h>
+ #include <linux/pm.h>
+ #include <linux/io.h>
+@@ -573,6 +574,30 @@ static void init_one_hpet_msi_clockevent
+ #define RESERVE_TIMERS 0
+ #endif
+
++static int __init dmi_disable_hpet_msi(const struct dmi_system_id *d)
++{
++ hpet_msi_disable = 1;
++ return 0;
++}
++
++static struct dmi_system_id __initdata dmi_hpet_table[] = {
++ /*
++ * MSI based per cpu timers lose interrupts when intel_idle()
++ * is enabled - independent of the c-state. With idle=poll the
++ * problem cannot be observed. We have no idea yet, whether
++ * this is a W510 specific issue or a general chipset oddity.
++ */
++ {
++ .callback = dmi_disable_hpet_msi,
++ .ident = "Lenovo W510",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
++ },
++ },
++ {}
++};
++
+ static void hpet_msi_capability_lookup(unsigned int start_timer)
+ {
+ unsigned int id;
+@@ -580,6 +605,8 @@ static void hpet_msi_capability_lookup(u
+ unsigned int num_timers_used = 0;
+ int i;
+
++ dmi_check_system(dmi_hpet_table);
++
+ if (hpet_msi_disable)
+ return;
+
diff --git a/x86-io-apic-migra-no-unmask.patch b/x86-io-apic-migra-no-unmask.patch
new file mode 100644
index 0000000..e3a516f
--- /dev/null
+++ b/x86-io-apic-migra-no-unmask.patch
@@ -0,0 +1,28 @@
+From: Ingo Molnar <mingo@elte.hu>
+Date: Fri, 3 Jul 2009 08:29:27 -0500
+Subject: x86: Do not unmask io_apic when interrupt is in progress
+
+With threaded interrupts we might see an interrupt in progress on
+migration. Do not unmask it when this is the case.
+
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/x86/kernel/apic/io_apic.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+Index: linux-stable/arch/x86/kernel/apic/io_apic.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/apic/io_apic.c
++++ linux-stable/arch/x86/kernel/apic/io_apic.c
+@@ -2437,7 +2437,8 @@ static bool io_apic_level_ack_pending(st
+ static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+ {
+ /* If we are moving the irq we need to mask it */
+- if (unlikely(irqd_is_setaffinity_pending(data))) {
++ if (unlikely(irqd_is_setaffinity_pending(data) &&
++ !irqd_irq_inprogress(data))) {
+ mask_ioapic(cfg);
+ return true;
+ }
diff --git a/x86-kprobes-remove-bogus-preempt-enable.patch b/x86-kprobes-remove-bogus-preempt-enable.patch
new file mode 100644
index 0000000..9f0ae36
--- /dev/null
+++ b/x86-kprobes-remove-bogus-preempt-enable.patch
@@ -0,0 +1,29 @@
+Subject: x86: kprobes: Remove bogus preempt_enable
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 17 Mar 2011 11:02:15 +0100
+
+The CONFIG_PREEMPT=n section of setup_singlestep() contains:
+
+ preempt_enable_no_resched();
+
+That's bogus as it is asymmetric - no preempt_disable() - and it just
+never blew up because preempt_enable_no_resched() is a NOP when
+CONFIG_PREEMPT=n. Remove it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kernel/kprobes.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+Index: linux-stable/arch/x86/kernel/kprobes.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/kprobes.c
++++ linux-stable/arch/x86/kernel/kprobes.c
+@@ -486,7 +486,6 @@ setup_singlestep(struct kprobe *p, struc
+ * stepping.
+ */
+ regs->ip = (unsigned long)p->ainsn.insn;
+- preempt_enable_no_resched();
+ return;
+ }
+ #endif
diff --git a/x86-kvm-require-const-tsc-for-rt.patch b/x86-kvm-require-const-tsc-for-rt.patch
new file mode 100644
index 0000000..556c442
--- /dev/null
+++ b/x86-kvm-require-const-tsc-for-rt.patch
@@ -0,0 +1,27 @@
+Subject: x86-kvm-require-const-tsc-for-rt.patch
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 06 Nov 2011 12:26:18 +0100
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kvm/x86.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+Index: linux-stable/arch/x86/kvm/x86.c
+===================================================================
+--- linux-stable.orig/arch/x86/kvm/x86.c
++++ linux-stable/arch/x86/kvm/x86.c
+@@ -4911,6 +4911,13 @@ int kvm_arch_init(void *opaque)
+ goto out;
+ }
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
++ return -EOPNOTSUPP;
++ }
++#endif
++
+ r = kvm_mmu_module_init();
+ if (r)
+ goto out;
diff --git a/x86-mce-timer-hrtimer.patch b/x86-mce-timer-hrtimer.patch
new file mode 100644
index 0000000..7541a6f
--- /dev/null
+++ b/x86-mce-timer-hrtimer.patch
@@ -0,0 +1,134 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 13 Dec 2010 16:33:39 +0100
+Subject: x86: Convert mce timer to hrtimer
+
+mce_timer is started in atomic contexts of cpu bringup. This results
+in might_sleep() warnings on RT. Convert mce_timer to a hrtimer to
+avoid this.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/x86/kernel/cpu/mcheck/mce.c | 36 ++++++++++++++++--------------------
+ 1 file changed, 16 insertions(+), 20 deletions(-)
+
+Index: linux-stable/arch/x86/kernel/cpu/mcheck/mce.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/cpu/mcheck/mce.c
++++ linux-stable/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -41,6 +41,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/irq_work.h>
+ #include <linux/export.h>
++#include <linux/jiffies.h>
+
+ #include <asm/processor.h>
+ #include <asm/mce.h>
+@@ -1264,15 +1265,12 @@ void mce_log_therm_throt_event(__u64 sta
+ static unsigned long check_interval = 5 * 60; /* 5 minutes */
+
+ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
+-static DEFINE_PER_CPU(struct timer_list, mce_timer);
++static DEFINE_PER_CPU(struct hrtimer, mce_timer);
+
+-static void mce_timer_fn(unsigned long data)
++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
+ {
+- struct timer_list *t = &__get_cpu_var(mce_timer);
+ unsigned long iv;
+
+- WARN_ON(smp_processor_id() != data);
+-
+ if (mce_available(__this_cpu_ptr(&cpu_info))) {
+ machine_check_poll(MCP_TIMESTAMP,
+ &__get_cpu_var(mce_poll_banks));
+@@ -1289,17 +1287,18 @@ static void mce_timer_fn(unsigned long d
+ iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
+ __this_cpu_write(mce_next_interval, iv);
+
+- t->expires = jiffies + iv;
+- add_timer_on(t, smp_processor_id());
++ hrtimer_forward(timer, timer->base->get_time(),
++ ns_to_ktime(jiffies_to_usecs(iv)));
++ return HRTIMER_RESTART;
+ }
+
+-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
++/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
+ static void mce_timer_delete_all(void)
+ {
+ int cpu;
+
+ for_each_online_cpu(cpu)
+- del_timer_sync(&per_cpu(mce_timer, cpu));
++ hrtimer_cancel(&per_cpu(mce_timer, cpu));
+ }
+
+ static void mce_do_trigger(struct work_struct *work)
+@@ -1596,10 +1595,11 @@ static void __mcheck_cpu_init_vendor(str
+
+ static void __mcheck_cpu_init_timer(void)
+ {
+- struct timer_list *t = &__get_cpu_var(mce_timer);
++ struct hrtimer *t = &__get_cpu_var(mce_timer);
+ unsigned long iv = check_interval * HZ;
+
+- setup_timer(t, mce_timer_fn, smp_processor_id());
++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ t->function = mce_timer_fn;
+
+ if (mce_ignore_ce)
+ return;
+@@ -1607,8 +1607,8 @@ static void __mcheck_cpu_init_timer(void
+ __this_cpu_write(mce_next_interval, iv);
+ if (!iv)
+ return;
+- t->expires = round_jiffies(jiffies + iv);
+- add_timer_on(t, smp_processor_id());
++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000),
++ 0, HRTIMER_MODE_REL_PINNED);
+ }
+
+ /* Handle unconfigured int18 (should never happen) */
+@@ -2259,6 +2259,8 @@ static void __cpuinit mce_disable_cpu(vo
+ if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ return;
+
++ hrtimer_cancel(&__get_cpu_var(mce_timer));
++
+ if (!(action & CPU_TASKS_FROZEN))
+ cmci_clear();
+ for (i = 0; i < banks; i++) {
+@@ -2285,6 +2287,7 @@ static void __cpuinit mce_reenable_cpu(v
+ if (b->init)
+ wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
+ }
++ __mcheck_cpu_init_timer();
+ }
+
+ /* Get notified when a cpu comes on/off. Be hotplug friendly. */
+@@ -2292,7 +2295,6 @@ static int __cpuinit
+ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ {
+ unsigned int cpu = (unsigned long)hcpu;
+- struct timer_list *t = &per_cpu(mce_timer, cpu);
+
+ switch (action) {
+ case CPU_ONLINE:
+@@ -2309,16 +2311,10 @@ mce_cpu_callback(struct notifier_block *
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+- del_timer_sync(t);
+ smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+ break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+- if (!mce_ignore_ce && check_interval) {
+- t->expires = round_jiffies(jiffies +
+- per_cpu(mce_next_interval, cpu));
+- add_timer_on(t, cpu);
+- }
+ smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+ break;
+ case CPU_POST_DEAD:
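
The timer_list to hrtimer conversion above follows the usual self-rearming pattern; a minimal sketch with hypothetical names (the 5 second period is arbitrary, while the real patch derives the interval from check_interval).

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/time.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	/* ... periodic work, must not sleep ... */
	hrtimer_forward(t, t->base->get_time(),
			ns_to_ktime(5ULL * NSEC_PER_SEC));
	return HRTIMER_RESTART;		/* re-arm instead of add_timer_on() */
}

static void demo_timer_start(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ns_to_ktime(5ULL * NSEC_PER_SEC),
		      HRTIMER_MODE_REL);
}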
diff --git a/x86-perf-uncore-deal-with-kfree.patch b/x86-perf-uncore-deal-with-kfree.patch
new file mode 100644
index 0000000..8519984
--- /dev/null
+++ b/x86-perf-uncore-deal-with-kfree.patch
@@ -0,0 +1,76 @@
+Subject: x86: perf: Deal with kfree from atomic contexts
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 04 Oct 2012 13:32:46 +0100
+
+The x86 perf code allocates memory upfront because it might need
+it. The detection that it is not needed happens in atomic context and
+calls kfree from there. RT can't do that. Use kfree_rcu instead.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kernel/cpu/perf_event.h | 1 +
+ arch/x86/kernel/cpu/perf_event_intel.c | 2 +-
+ arch/x86/kernel/cpu/perf_event_intel_uncore.c | 5 +++--
+ arch/x86/kernel/cpu/perf_event_intel_uncore.h | 1 +
+ 4 files changed, 6 insertions(+), 3 deletions(-)
+
+Index: linux-stable/arch/x86/kernel/cpu/perf_event.h
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/cpu/perf_event.h
++++ linux-stable/arch/x86/kernel/cpu/perf_event.h
+@@ -108,6 +108,7 @@ struct intel_shared_regs {
+ struct er_account regs[EXTRA_REG_MAX];
+ int refcnt; /* per-core: #HT threads */
+ unsigned core_id; /* per-core: core id */
++ struct rcu_head rcu;
+ };
+
+ #define MAX_LBR_ENTRIES 16
+Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/cpu/perf_event_intel.c
++++ linux-stable/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -1707,7 +1707,7 @@ static void intel_pmu_cpu_dying(int cpu)
+ pc = cpuc->shared_regs;
+ if (pc) {
+ if (pc->core_id == -1 || --pc->refcnt == 0)
+- kfree(pc);
++ kfree_rcu(pc, rcu);
+ cpuc->shared_regs = NULL;
+ }
+
+Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/cpu/perf_event_intel_uncore.c
++++ linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+@@ -2603,7 +2603,7 @@ static void __cpuinit uncore_cpu_dying(i
+ box = *per_cpu_ptr(pmu->box, cpu);
+ *per_cpu_ptr(pmu->box, cpu) = NULL;
+ if (box && atomic_dec_and_test(&box->refcnt))
+- kfree(box);
++ kfree_rcu(box, rcu);
+ }
+ }
+ }
+@@ -2633,7 +2633,8 @@ static int __cpuinit uncore_cpu_starting
+ if (exist && exist->phys_id == phys_id) {
+ atomic_inc(&exist->refcnt);
+ *per_cpu_ptr(pmu->box, cpu) = exist;
+- kfree(box);
++ if (box)
++ kfree_rcu(box, rcu);
+ box = NULL;
+ break;
+ }
+Index: linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+===================================================================
+--- linux-stable.orig/arch/x86/kernel/cpu/perf_event_intel_uncore.h
++++ linux-stable/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+@@ -419,6 +419,7 @@ struct intel_uncore_box {
+ struct hrtimer hrtimer;
+ struct list_head list;
+ struct intel_uncore_extra_reg shared_regs[0];
++ struct rcu_head rcu;
+ };
+
+ #define UNCORE_BOX_FLAG_INITIATED 0
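
Deferring the free through RCU, as done above, only requires embedding a struct rcu_head in the object; a minimal sketch with a hypothetical struct.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_box {
	int		id;
	struct rcu_head	rcu;	/* gives kfree_rcu() a place to chain the callback */
};

static void demo_put(struct demo_box *box)
{
	/* Safe from atomic context: the actual kfree() happens later in
	 * an RCU callback, after a grace period. */
	if (box)
		kfree_rcu(box, rcu);
}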
diff --git a/x86-stackprot-no-random-on-rt.patch b/x86-stackprot-no-random-on-rt.patch
new file mode 100644
index 0000000..8a2b17e
--- /dev/null
+++ b/x86-stackprot-no-random-on-rt.patch
@@ -0,0 +1,49 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 16 Dec 2010 14:25:18 +0100
+Subject: x86: stackprotector: Avoid random pool on rt
+
+CPU bringup calls into the random pool to initialize the stack
+canary. During boot that works nicely even on RT as the might sleep
+checks are disabled. During CPU hotplug the might sleep checks
+trigger. Making the locks in random raw is a major PITA, so avoiding
+the call on RT is the only sensible solution. This is basically the same
+randomness which we get during boot, where the random pool has no
+entropy and we rely on the TSC randomness.
+
+Reported-by: Carsten Emde <carsten.emde@osadl.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/x86/include/asm/stackprotector.h | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+Index: linux-stable/arch/x86/include/asm/stackprotector.h
+===================================================================
+--- linux-stable.orig/arch/x86/include/asm/stackprotector.h
++++ linux-stable/arch/x86/include/asm/stackprotector.h
+@@ -57,7 +57,7 @@
+ */
+ static __always_inline void boot_init_stack_canary(void)
+ {
+- u64 canary;
++ u64 uninitialized_var(canary);
+ u64 tsc;
+
+ #ifdef CONFIG_X86_64
+@@ -68,8 +68,16 @@ static __always_inline void boot_init_st
+ * of randomness. The TSC only matters for very early init,
+ * there it already has some randomness on most systems. Later
+ * on during the bootup the random pool has true entropy too.
++ *
++ * For preempt-rt we need to weaken the randomness a bit, as
++ * we can't call into the random generator from atomic context
++ * due to locking constraints. We just leave canary
++ * uninitialized and use the TSC based randomness on top of
++ * it.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ get_random_bytes(&canary, sizeof(canary));
++#endif
+ tsc = __native_read_tsc();
+ canary += tsc + (tsc << 32UL);
+
diff --git a/x86-use-gen-rwsem-spinlocks-rt.patch b/x86-use-gen-rwsem-spinlocks-rt.patch
new file mode 100644
index 0000000..d7dd32b
--- /dev/null
+++ b/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -0,0 +1,30 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 26 Jul 2009 02:21:32 +0200
+Subject: x86: Use generic rwsem_spinlocks on -rt
+
+Simplifies the separation of anon_rw_semaphores and rw_semaphores for
+-rt.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+---
+ arch/x86/Kconfig | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-stable/arch/x86/Kconfig
+===================================================================
+--- linux-stable.orig/arch/x86/Kconfig
++++ linux-stable/arch/x86/Kconfig
+@@ -153,10 +153,10 @@ config ARCH_MAY_HAVE_PC_FDC
+ def_bool ISA_DMA_API
+
+ config RWSEM_GENERIC_SPINLOCK
+- def_bool !X86_XADD
++ def_bool !X86_XADD || PREEMPT_RT_FULL
+
+ config RWSEM_XCHGADD_ALGORITHM
+- def_bool X86_XADD
++ def_bool X86_XADD && !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
+
+ config GENERIC_CALIBRATE_DELAY
+ def_bool y