author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2015-12-23 23:25:26 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2015-12-23 23:52:17 +0100
commit     7bbd599b8bb8ca9e9b7cf0fdf77da61fc1eb7d11 (patch)
tree       a218f64a3fd415012d135ec0c1202211d66600fa
parent     1ccda4237a810cc0ef5c801ed1fd512543c70785 (diff)
download   4.9-rt-patches-7bbd599b8bb8ca9e9b7cf0fdf77da61fc1eb7d11.tar.gz
[ANNOUNCE] 4.4-rc6-rt1
Please don't continue reading before Christmas Eve (or morning, depending on your schedule). If you don't celebrate Christmas, well, go ahead.

Dear RT folks!

I'm pleased to announce the v4.4-rc6-rt1 patch set. I tested it on my AMD A10, 64bit. Nothing exploded so far, the filesystem is still there. I haven't tested it on anything else. Before someone asks: this does not mean it does *not* work on ARM; I simply did not try it. If you are brave then download it, install it and have fun. If something breaks, please report it. If your machine starts blinking like a Christmas tree while using the patch then *please* send a photo.

Changes since v4.1.15-rt17:

- rebase to v4.4-rc6

Known issues (inherited from v4.1-RT):

- bcache stays disabled
- CPU hotplug is not better than before
- The netlink_release() OOPS, reported by Clark, is still on the list, but unsolved due to lack of information
- Christoph Mathys reported a stall in cgroup locking code while using Linux containers.

You can get this release via the git tree at:

  git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4-rc6-rt1

The RT patch against 4.4-rc6 can be found here:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4-rc6-rt1.patch.xz

The split quilt queue is available at:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4-rc6-rt1.tar.xz

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/0001-arm64-Mark-PMU-interrupt-IRQF_NO_THREAD.patch | 26
-rw-r--r--  patches/0001-gpio-omap-Allow-building-as-a-loadable-module.patch | 130
-rw-r--r--  patches/0001-sched-Implement-lockless-wake-queues.patch | 166
-rw-r--r--  patches/0001-uaccess-count-pagefault_disable-levels-in-pagefault_.patch | 119
-rw-r--r--  patches/0002-arm64-Allow-forced-irq-threading.patch | 26
-rw-r--r--  patches/0002-futex-Implement-lockless-wakeups.patch | 181
-rw-r--r--  patches/0002-gpio-omap-fix-omap_gpio_free-to-not-clean-up-irq-con.patch | 45
-rw-r--r--  patches/0002-mm-uaccess-trigger-might_sleep-in-might_fault-with-d.patch | 100
-rw-r--r--  patches/0003-gpio-omap-fix-error-handling-in-omap_gpio_irq_type.patch | 66
-rw-r--r--  patches/0003-uaccess-clarify-that-uaccess-may-only-sleep-if-pagef.patch | 641
-rw-r--r--  patches/0004-gpio-omap-rework-omap_x_irq_shutdown-to-touch-only-i.patch | 62
-rw-r--r--  patches/0004-ipc-mqueue-Implement-lockless-pipelined-wakeups.patch | 183
-rw-r--r--  patches/0004-mm-explicitly-disable-enable-preemption-in-kmap_atom.patch | 367
-rw-r--r--  patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch | 2
-rw-r--r--  patches/0005-gpio-omap-rework-omap_gpio_request-to-touch-only-gpi.patch | 44
-rw-r--r--  patches/0005-mips-kmap_coherent-relies-on-disabled-preemption.patch | 40
-rw-r--r--  patches/0006-gpio-omap-rework-omap_gpio_irq_startup-to-handle-cur.patch | 52
-rw-r--r--  patches/0006-mm-use-pagefault_disable-to-check-for-disabled-pagef.patch | 646
-rw-r--r--  patches/0007-drm-i915-use-pagefault_disabled-to-check-for-disable.patch | 32
-rw-r--r--  patches/0007-gpio-omap-add-missed-spin_unlock_irqrestore-in-omap_.patch | 37
-rw-r--r--  patches/0008-futex-UP-futex_atomic_op_inuser-relies-on-disabled-p.patch | 45
-rw-r--r--  patches/0008-gpio-omap-prevent-module-from-being-unloaded-while-i.patch | 32
-rw-r--r--  patches/0009-ARM-OMAP2-Drop-the-concept-of-certain-power-domains-.patch | 14
-rw-r--r--  patches/0009-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on-dis.patch | 36
-rw-r--r--  patches/0010-arm-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on.patch | 36
-rw-r--r--  patches/0010-gpio-omap-use-raw-locks-for-locking.patch | 338
-rw-r--r--  patches/0011-arm-futex-UP-futex_atomic_op_inuser-relies-on-disabl.patch | 47
-rw-r--r--  patches/0011-gpio-omap-Fix-missing-raw-locks-conversion.patch | 29
-rw-r--r--  patches/0012-futex-clarify-that-preemption-doesn-t-have-to-be-dis.patch | 85
-rw-r--r--  patches/0012-gpio-omap-remove-wrong-irq_domain_remove-usage-in-pr.patch | 27
-rw-r--r--  patches/0013-gpio-omap-switch-to-use-platform_get_irq.patch | 42
-rw-r--r--  patches/0013-mips-properly-lock-access-to-the-fpu.patch | 33
-rw-r--r--  patches/0014-gpio-omap-fix-omap2_set_gpio_debounce.patch | 95
-rw-r--r--  patches/0014-uaccess-decouple-preemption-from-the-pagefault-logic.patch | 60
-rw-r--r--  patches/0015-gpio-omap-protect-regs-access-in-omap_gpio_irq_handl.patch | 63
-rw-r--r--  patches/0016-gpio-omap-fix-clk_prepare-unprepare-usage.patch | 120
-rw-r--r--  patches/0017-gpio-omap-Fix-gpiochip_add-handling-for-deferred-pro.patch | 44
-rw-r--r--  patches/0018-gpio-omap-Fix-GPIO-numbering-for-deferred-probe.patch | 43
-rw-r--r--  patches/0019-gpio-omap-fix-static-checker-warning.patch | 47
-rw-r--r--  patches/0020-gpio-omap-move-pm-runtime-in-irq_chip.irq_bus_lock-s.patch | 133
-rw-r--r--  patches/0021-gpio-omap-convert-to-use-generic-irq-handler.patch | 184
-rw-r--r--  patches/ARM-cmpxchg-define-__HAVE_ARCH_CMPXCHG-for-armv6-and.patch | 39
-rw-r--r--  patches/ASoC-Intel-sst-use-instead-of-at-the-of-a-C-statemen.patch | 26
-rw-r--r--  patches/HACK-printk-drop-the-logbuf_lock-more-often.patch | 10
-rw-r--r--  patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch | 81
-rw-r--r--  patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch | 117
-rw-r--r--  patches/Revert-x86-Do-not-disable-preemption-in-int3-on-32bi.patch | 90
-rw-r--r--  patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch | 8
-rw-r--r--  patches/arch-arm64-Add-lazy-preempt-support.patch | 18
-rw-r--r--  patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch | 141
-rw-r--r--  patches/arm-convert-boot-lock-to-raw.patch | 140
-rw-r--r--  patches/arm-enable-highmem-for-rt.patch | 15
-rw-r--r--  patches/arm-preempt-lazy-support.patch | 32
-rw-r--r--  patches/arm64-convert-patch_lock-to-raw-lock.patch | 79
-rw-r--r--  patches/arm64-replace-read_lock-to-rcu-lock-in-call_break_hook.patch | 98
-rw-r--r--  patches/arm64-xen--Make-XEN-depend-on-non-rt.patch | 2
-rw-r--r--  patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch | 2
-rw-r--r--  patches/block-blk-mq-use-swait.patch | 60
-rw-r--r--  patches/block-mq-don-t-complete-requests-via-IPI.patch | 14
-rw-r--r--  patches/block-mq-drop-per-ctx-cpu_lock.patch | 28
-rw-r--r--  patches/block-mq-drop-preempt-disable.patch | 8
-rw-r--r--  patches/block-mq-use-cpu_light.patch | 28
-rw-r--r--  patches/block-shorten-interrupt-disabled-regions.patch | 10
-rw-r--r--  patches/bpf-convert-hashtab-lock-to-raw-lock.patch | 114
-rw-r--r--  patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch | 2
-rw-r--r--  patches/cgroups-use-simple-wait-in-css_release.patch | 26
-rw-r--r--  patches/clocksource-tclib-allow-higher-clockrates.patch | 79
-rw-r--r--  patches/completion-use-simple-wait-queues.patch | 4
-rw-r--r--  patches/cond-resched-lock-rt-tweak.patch | 8
-rw-r--r--  patches/cond-resched-softirq-rt.patch | 6
-rw-r--r--  patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch | 2
-rw-r--r--  patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch | 30
-rw-r--r--  patches/cpu-rt-rework-cpu-down.patch | 92
-rw-r--r--  patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch | 16
-rw-r--r--  patches/cpu_down_move_migrate_enable_back.patch | 4
-rw-r--r--  patches/cpufreq-Remove-cpufreq_rwsem.patch | 195
-rw-r--r--  patches/cpumask-disable-offstack-on-rt.patch | 4
-rw-r--r--  patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch | 14
-rw-r--r--  patches/dm-make-rt-aware.patch | 2
-rw-r--r--  patches/drivers-net-fix-livelock-issues.patch | 2
-rw-r--r--  patches/drivers-random-reduce-preempt-disabled-region.patch | 4
-rw-r--r--  patches/drivers-tty-fix-omap-lock-crap.patch | 4
-rw-r--r--  patches/drivers-tty-pl011-irq-disable-madness.patch | 6
-rw-r--r--  patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch | 12
-rw-r--r--  patches/fix-rt-int3-x86_32-3.2-rt.patch | 101
-rw-r--r--  patches/fs-aio-simple-simple-work.patch | 8
-rw-r--r--  patches/fs-block-rt-support.patch | 2
-rw-r--r--  patches/fs-jbd-pull-plug-when-waiting-for-space.patch | 29
-rw-r--r--  patches/fs-jbd-replace-bh_state-lock.patch | 26
-rw-r--r--  patches/fs-replace-bh_uptodate_lock-for-rt.patch | 10
-rw-r--r--  patches/ftrace-migrate-disable-tracing.patch | 16
-rw-r--r--  patches/futex-avoid-double-wake-up-in-PI-futex-wait-wake-on-.patch | 223
-rw-r--r--  patches/futex-requeue-pi-fix.patch | 4
-rw-r--r--  patches/genirq--Handle-interrupts-with-primary-and-threaded-handler-gracefully | 333
-rw-r--r--  patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch | 10
-rw-r--r--  patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch | 2
-rw-r--r--  patches/hotplug-light-get-online-cpus.patch | 16
-rw-r--r--  patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch | 2
-rw-r--r--  patches/hotplug-use-migrate-disable.patch | 4
-rw-r--r--  patches/hrtimer-enfore-64byte-alignment.patch | 28
-rw-r--r--  patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch | 371
-rw-r--r--  patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch | 37
-rw-r--r--  patches/hrtimers-prepare-full-preemption.patch | 53
-rw-r--r--  patches/hwlatdetect.patch | 2
-rw-r--r--  patches/i2c-omap-drop-the-lock-hard-irq-context.patch | 12
-rw-r--r--  patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch | 4
-rw-r--r--  patches/idr-use-local-lock-for-protection.patch | 43
-rw-r--r--  patches/infiniband-mellanox-ib-use-nort-irq.patch | 4
-rw-r--r--  patches/inpt-gameport-use-local-irq-nort.patch | 24
-rw-r--r--  patches/introduce_migrate_disable_cpu_light.patch | 104
-rw-r--r--  patches/ipc-make-rt-aware.patch | 67
-rw-r--r--  patches/ipc-msg-Implement-lockless-pipelined-wakeups.patch | 228
-rw-r--r--  patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch | 28
-rw-r--r--  patches/irqwork-Move-irq-safe-work-to-irq-context.patch | 8
-rw-r--r--  patches/irqwork-push_most_work_into_softirq_context.patch | 12
-rw-r--r--  patches/jump-label-rt.patch | 8
-rw-r--r--  patches/kconfig-disable-a-few-options-rt.patch | 4
-rw-r--r--  patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch | 16
-rw-r--r--  patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch | 8
-rw-r--r--  patches/kgb-serial-hackaround.patch | 12
-rw-r--r--  patches/latency-hist.patch | 58
-rw-r--r--  patches/lglocks-rt.patch | 65
-rw-r--r--  patches/list_bl.h-make-list-head-locking-RT-safe.patch | 2
-rw-r--r--  patches/localversion.patch | 4
-rw-r--r--  patches/lockdep-no-softirq-accounting-on-rt.patch | 4
-rw-r--r--  patches/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch | 4
-rw-r--r--  patches/md-raid5-percpu-handling-rt-aware.patch | 8
-rw-r--r--  patches/mips-disable-highmem-on-rt.patch | 2
-rw-r--r--  patches/mm-bounce-local-irq-save-nort.patch | 2
-rw-r--r--  patches/mm-convert-swap-to-percpu-locked.patch | 16
-rw-r--r--  patches/mm-disable-sloub-rt.patch | 4
-rw-r--r--  patches/mm-enable-slub.patch | 153
-rw-r--r--  patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch | 4
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch | 36
-rw-r--r--  patches/mm-page-alloc-use-local-lock-on-target-cpu.patch | 2
-rw-r--r--  patches/mm-page_alloc-reduce-lock-sections-further.patch | 35
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 47
-rw-r--r--  patches/mm-protect-activate-switch-mm.patch | 2
-rw-r--r--  patches/mm-rmap-retry-lock-check-in-anon_vma_free.patch_vma_free.patch | 52
-rw-r--r--  patches/mm-rt-kmap-atomic-scheduling.patch | 8
-rw-r--r--  patches/mm-scatterlist-dont-disable-irqs-on-RT.patch | 8
-rw-r--r--  patches/mm-slub-move-slab-initialization-into-irq-enabled-region.patch | 162
-rw-r--r--  patches/mm-vmalloc-use-get-cpu-light.patch | 12
-rw-r--r--  patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch | 4
-rw-r--r--  patches/move_sched_delayed_work_to_helper.patch | 4
-rw-r--r--  patches/net-another-local-irq-disable-alloc-atomic-headache.patch | 17
-rw-r--r--  patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch | 2
-rw-r--r--  patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch | 12
-rw-r--r--  patches/net-gianfar-do-not-disable-interrupts.patch | 76
-rw-r--r--  patches/net-make-devnet_rename_seq-a-mutex.patch | 12
-rw-r--r--  patches/net-prevent-abba-deadlock.patch | 2
-rw-r--r--  patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch | 2
-rw-r--r--  patches/net-tx-action-avoid-livelock-on-rt.patch | 4
-rw-r--r--  patches/net-use-cpu-chill.patch | 6
-rw-r--r--  patches/net-wireless-warn-nort.patch | 2
-rw-r--r--  patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt | 2
-rw-r--r--  patches/oleg-signal-rt-fix.patch | 43
-rw-r--r--  patches/panic-disable-random-on-rt.patch | 2
-rw-r--r--  patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch | 33
-rw-r--r--  patches/perf-make-swevent-hrtimer-irqsafe.patch | 2
-rw-r--r--  patches/peter_zijlstra-frob-rcu.patch | 4
-rw-r--r--  patches/peterz-srcu-crypto-chain.patch | 6
-rw-r--r--  patches/ping-sysrq.patch | 8
-rw-r--r--  patches/posix-timers-no-broadcast.patch | 4
-rw-r--r--  patches/posix-timers-thread-posix-cpu-timers-on-rt.patch | 34
-rw-r--r--  patches/power-disable-highmem-on-rt.patch | 2
-rw-r--r--  patches/powerpc-preempt-lazy-support.patch | 16
-rw-r--r--  patches/preempt-lazy-support.patch | 94
-rw-r--r--  patches/preempt-nort-rt-variants.patch | 8
-rw-r--r--  patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch | 2
-rw-r--r--  patches/printk-kill.patch | 42
-rw-r--r--  patches/printk-rt-aware.patch | 32
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 29
-rw-r--r--  patches/radix-tree-rt-aware.patch | 11
-rw-r--r--  patches/random-make-it-work-on-rt.patch | 28
-rw-r--r--  patches/rcu-Eliminate-softirq-processing-from-rcutree.patch | 77
-rw-r--r--  patches/rcu-disable-rcu-fast-no-hz-on-rt.patch | 6
-rw-r--r--  patches/rcu-make-RCU_BOOST-default-on-RT.patch | 4
-rw-r--r--  patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch | 66
-rw-r--r--  patches/rcu-more-swait-conversions.patch | 106
-rw-r--r--  patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch | 2
-rw-r--r--  patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch | 2
-rw-r--r--  patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch | 2
-rw-r--r--  patches/relay-fix-timer-madness.patch | 6
-rw-r--r--  patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch | 4
-rw-r--r--  patches/rt-add-rt-locks.patch | 316
-rw-r--r--  patches/rt-introduce-cpu-chill.patch | 2
-rw-r--r--  patches/rt-serial-warn-fix.patch | 10
-rw-r--r--  patches/rtmutex--Handle-non-enqueued-waiters-gracefully | 2
-rw-r--r--  patches/rtmutex-Use-chainwalking-control-enum.patch | 2
-rw-r--r--  patches/rtmutex-add-a-first-shot-of-ww_mutex.patch | 32
-rw-r--r--  patches/rtmutex-futex-prepare-rt.patch | 30
-rw-r--r--  patches/rtmutex-lock-killable.patch | 2
-rw-r--r--  patches/rtmutex-trylock-is-okay-on-RT.patch | 27
-rw-r--r--  patches/rtmutex_dont_include_rcu.patch | 75
-rw-r--r--  patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch | 2
-rw-r--r--  patches/sched-delay-put-task.patch | 8
-rw-r--r--  patches/sched-disable-rt-group-sched-on-rt.patch | 2
-rw-r--r--  patches/sched-disable-ttwu-queue.patch | 2
-rw-r--r--  patches/sched-introduce-the-27trace_sched_waking-27-tracepoint.patch | 172
-rw-r--r--  patches/sched-limit-nr-migrate.patch | 2
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch | 15
-rw-r--r--  patches/sched-mmdrop-delayed.patch | 16
-rw-r--r--  patches/sched-reset-task-s-lockless-wake-queues-on-fork.patch | 34
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 14
-rw-r--r--  patches/sched-ttwu-ensure-success-return-is-correct.patch | 2
-rw-r--r--  patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch | 2
-rw-r--r--  patches/scsi-fcoe-rt-aware.patch | 10
-rw-r--r--  patches/seqlock-prevent-rt-starvation.patch | 20
-rw-r--r--  patches/series | 63
-rw-r--r--  patches/signal-fix-up-rcu-wreckage.patch | 4
-rw-r--r--  patches/signal-revert-ptrace-preempt-magic.patch | 2
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 43
-rw-r--r--  patches/skbufhead-raw-lock.patch | 16
-rw-r--r--  patches/slub-disable-SLUB_CPU_PARTIAL.patch | 2
-rw-r--r--  patches/slub-enable-irqs-for-no-wait.patch | 21
-rw-r--r--  patches/softirq-disable-softirq-stacks-for-rt.patch | 32
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch | 22
-rw-r--r--  patches/softirq-split-locks.patch | 59
-rw-r--r--  patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch | 6
-rw-r--r--  patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch | 39
-rw-r--r--  patches/stop-machine-raw-lock.patch | 88
-rw-r--r--  patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch | 2
-rw-r--r--  patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch | 6
-rw-r--r--  patches/suspend-prevernt-might-sleep-splats.patch | 10
-rw-r--r--  patches/sysfs-realtime-entry.patch | 2
-rw-r--r--  patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch | 6
-rw-r--r--  patches/timekeeping-split-jiffies-lock.patch | 18
-rw-r--r--  patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch | 2
-rw-r--r--  patches/timers-avoid-the-base-null-otptimization-on-rt.patch | 53
-rw-r--r--  patches/timers-preempt-rt-support.patch | 18
-rw-r--r--  patches/timers-prepare-for-full-preemption.patch | 61
-rw-r--r--  patches/tracing-account-for-preempt-off-in-preempt_schedule.patch | 6
-rw-r--r--  patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch | 4
-rw-r--r--  patches/usb-use-_nort-in-giveback.patch | 2
-rw-r--r--  patches/vtime-split-lock-and-seqcount.patch | 28
-rw-r--r--  patches/wait-simple-implementation.patch | 2
-rw-r--r--  patches/work-queue-work-around-irqsafe-timer-optimization.patch | 2
-rw-r--r--  patches/work-simple-Simple-work-queue-implemenation.patch | 2
-rw-r--r--  patches/workqueue-distangle-from-rq-lock.patch | 40
-rw-r--r--  patches/workqueue-prevent-deadlock-stall.patch | 18
-rw-r--r--  patches/workqueue-use-locallock.patch | 26
-rw-r--r--  patches/workqueue-use-rcu.patch | 98
-rw-r--r--  patches/x86-UV-raw_spinlock-conversion.patch | 16
-rw-r--r--  patches/x86-crypto-reduce-preempt-disabled-regions.patch | 10
-rw-r--r--  patches/x86-io-apic-migra-no-unmask.patch | 7
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch | 2
-rw-r--r--  patches/x86-mce-timer-hrtimer.patch | 24
-rw-r--r--  patches/x86-mce-use-swait-queue-for-mce-wakeups.patch | 6
-rw-r--r--  patches/x86-preempt-lazy.patch | 186
-rw-r--r--  patches/x86-stackprot-no-random-on-rt.patch | 13
-rw-r--r--  patches/x86-use-gen-rwsem-spinlocks-rt.patch | 2
-rw-r--r--  patches/xfs--clean-up-inode-lockdep-annotations | 281
253 files changed, 2767 insertions, 8941 deletions
diff --git a/patches/0001-arm64-Mark-PMU-interrupt-IRQF_NO_THREAD.patch b/patches/0001-arm64-Mark-PMU-interrupt-IRQF_NO_THREAD.patch
deleted file mode 100644
index 1abc7c5369877..0000000000000
--- a/patches/0001-arm64-Mark-PMU-interrupt-IRQF_NO_THREAD.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Anders Roxell <anders.roxell@linaro.org>
-Date: Mon, 27 Apr 2015 22:53:08 +0200
-Subject: arm64: Mark PMU interrupt IRQF_NO_THREAD
-
-Mark the PMU interrupts as non-threadable, as is the case with
-arch/arm: d9c3365 ARM: 7813/1: Mark pmu interupt IRQF_NO_THREAD
-
-[ upstream commit: 96045ed486b0 ]
-
-Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
----
- arch/arm64/kernel/perf_event.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/arch/arm64/kernel/perf_event.c
-+++ b/arch/arm64/kernel/perf_event.c
-@@ -488,7 +488,7 @@ armpmu_reserve_hardware(struct arm_pmu *
- }
-
- err = request_irq(irq, armpmu->handle_irq,
-- IRQF_NOBALANCING,
-+ IRQF_NOBALANCING | IRQF_NO_THREAD,
- "arm-pmu", armpmu);
- if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
diff --git a/patches/0001-gpio-omap-Allow-building-as-a-loadable-module.patch b/patches/0001-gpio-omap-Allow-building-as-a-loadable-module.patch
deleted file mode 100644
index 63fae94bf1c2a..0000000000000
--- a/patches/0001-gpio-omap-Allow-building-as-a-loadable-module.patch
+++ /dev/null
@@ -1,130 +0,0 @@
-From ed28ad9cd730334b1df1de8268c2fef20766cb59 Mon Sep 17 00:00:00 2001
-From: Tony Lindgren <tony@atomide.com>
-Date: Mon, 13 Jul 2015 17:04:15 +0300
-Subject: [PATCH 01/21] gpio: omap: Allow building as a loadable module
-
-commit cac089f9026e9ddb3481daf08f0fc4e5949fa1af upstream
-
-We currently get all kinds of errors building the omap gpio driver
-as a module starting with:
-
-undefined reference to `omap2_gpio_resume_after_idle'
-undefined reference to `omap2_gpio_prepare_for_idle'
-...
-
-Let's fix the issue by adding inline functions to the header.
-Note that we can now also remove the two unused functions for
-omap_set_gpio_debounce and omap_set_gpio_debounce_time.
-
-Then doing rmmod on the module produces further warnings
-because of missing exit related functions. Let's add those.
-
-And finally, we can make the Kconfig entry just a tristate
-option that's selected for omaps.
-
-Cc: Javier Martinez Canillas <javier@dowhile0.org>
-Cc: Kevin Hilman <khilman@deeprootsystems.com>
-Cc: Nishanth Menon <nm@ti.com>
-Signed-off-by: Tony Lindgren <tony@atomide.com>
-Reviewed-by: Grygorii Strashko <grygorii.strashko@linaro.org>
-Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
-Reviewed-by: Felipe Balbi <balbi@ti.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/Kconfig | 2 +-
- drivers/gpio/gpio-omap.c | 24 ++++++++++++++++++++++++
- include/linux/platform_data/gpio-omap.h | 12 ++++++++++--
- 3 files changed, 35 insertions(+), 3 deletions(-)
-
---- a/drivers/gpio/Kconfig
-+++ b/drivers/gpio/Kconfig
-@@ -308,7 +308,7 @@ config GPIO_OCTEON
- family of SOCs.
-
- config GPIO_OMAP
-- bool "TI OMAP GPIO support" if COMPILE_TEST && !ARCH_OMAP2PLUS
-+ tristate "TI OMAP GPIO support" if ARCH_OMAP2PLUS || COMPILE_TEST
- default y if ARCH_OMAP
- depends on ARM
- select GENERIC_IRQ_CHIP
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -1233,6 +1233,17 @@ static int omap_gpio_probe(struct platfo
- return 0;
- }
-
-+static int omap_gpio_remove(struct platform_device *pdev)
-+{
-+ struct gpio_bank *bank = platform_get_drvdata(pdev);
-+
-+ list_del(&bank->node);
-+ gpiochip_remove(&bank->chip);
-+ pm_runtime_disable(bank->dev);
-+
-+ return 0;
-+}
-+
- #ifdef CONFIG_ARCH_OMAP2PLUS
-
- #if defined(CONFIG_PM)
-@@ -1418,6 +1429,7 @@ static int omap_gpio_runtime_resume(stru
- }
- #endif /* CONFIG_PM */
-
-+#if IS_BUILTIN(CONFIG_GPIO_OMAP)
- void omap2_gpio_prepare_for_idle(int pwr_mode)
- {
- struct gpio_bank *bank;
-@@ -1443,6 +1455,7 @@ void omap2_gpio_resume_after_idle(void)
- pm_runtime_get_sync(bank->dev);
- }
- }
-+#endif
-
- #if defined(CONFIG_PM)
- static void omap_gpio_init_context(struct gpio_bank *p)
-@@ -1598,6 +1611,7 @@ MODULE_DEVICE_TABLE(of, omap_gpio_match)
-
- static struct platform_driver omap_gpio_driver = {
- .probe = omap_gpio_probe,
-+ .remove = omap_gpio_remove,
- .driver = {
- .name = "omap_gpio",
- .pm = &gpio_pm_ops,
-@@ -1615,3 +1629,13 @@ static int __init omap_gpio_drv_reg(void
- return platform_driver_register(&omap_gpio_driver);
- }
- postcore_initcall(omap_gpio_drv_reg);
-+
-+static void __exit omap_gpio_exit(void)
-+{
-+ platform_driver_unregister(&omap_gpio_driver);
-+}
-+module_exit(omap_gpio_exit);
-+
-+MODULE_DESCRIPTION("omap gpio driver");
-+MODULE_ALIAS("platform:gpio-omap");
-+MODULE_LICENSE("GPL v2");
---- a/include/linux/platform_data/gpio-omap.h
-+++ b/include/linux/platform_data/gpio-omap.h
-@@ -208,9 +208,17 @@ struct omap_gpio_platform_data {
- int (*get_context_loss_count)(struct device *dev);
- };
-
-+#if IS_BUILTIN(CONFIG_GPIO_OMAP)
- extern void omap2_gpio_prepare_for_idle(int off_mode);
- extern void omap2_gpio_resume_after_idle(void);
--extern void omap_set_gpio_debounce(int gpio, int enable);
--extern void omap_set_gpio_debounce_time(int gpio, int enable);
-+#else
-+static inline void omap2_gpio_prepare_for_idle(int off_mode)
-+{
-+}
-+
-+static inline void omap2_gpio_resume_after_idle(void)
-+{
-+}
-+#endif
-
- #endif
diff --git a/patches/0001-sched-Implement-lockless-wake-queues.patch b/patches/0001-sched-Implement-lockless-wake-queues.patch
deleted file mode 100644
index 23931132a606e..0000000000000
--- a/patches/0001-sched-Implement-lockless-wake-queues.patch
+++ /dev/null
@@ -1,166 +0,0 @@
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Fri, 1 May 2015 08:27:50 -0700
-Subject: sched: Implement lockless wake-queues
-
-This is useful for locking primitives that can effect multiple
-wakeups per operation and want to avoid lock internal lock contention
-by delaying the wakeups until we've released the lock internal locks.
-
-Alternatively it can be used to avoid issuing multiple wakeups, and
-thus save a few cycles, in packet processing. Queue all target tasks
-and wakeup once you've processed all packets. That way you avoid
-waking the target task multiple times if there were multiple packets
-for the same task.
-
-Properties of a wake_q are:
-- Lockless, as queue head must reside on the stack.
-- Being a queue, maintains wakeup order passed by the callers. This can
- be important for otherwise, in scenarios where highly contended locks
- could affect any reliance on lock fairness.
-- A queued task cannot be added again until it is woken up.
-
-This patch adds the needed infrastructure into the scheduler code
-and uses the new wake_list to delay the futex wakeups until
-after we've released the hash bucket locks.
-
-[upstream commit 7675104990ed255b9315a82ae827ff312a2a88a2]
-
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-[tweaks, adjustments, comments, etc.]
-Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Acked-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Chris Mason <clm@fb.com>
-Cc: Davidlohr Bueso <dave@stgolabs.net>
-Cc: George Spelvin <linux@horizon.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Manfred Spraul <manfred@colorfullife.com>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Link: http://lkml.kernel.org/r/1430494072-30283-2-git-send-email-dave@stgolabs.net
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/sched.h | 46 ++++++++++++++++++++++++++++++++++++++++++++++
- kernel/sched/core.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 92 insertions(+)
-
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -900,6 +900,50 @@ enum cpu_idle_type {
- #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
-
- /*
-+ * Wake-queues are lists of tasks with a pending wakeup, whose
-+ * callers have already marked the task as woken internally,
-+ * and can thus carry on. A common use case is being able to
-+ * do the wakeups once the corresponding user lock as been
-+ * released.
-+ *
-+ * We hold reference to each task in the list across the wakeup,
-+ * thus guaranteeing that the memory is still valid by the time
-+ * the actual wakeups are performed in wake_up_q().
-+ *
-+ * One per task suffices, because there's never a need for a task to be
-+ * in two wake queues simultaneously; it is forbidden to abandon a task
-+ * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
-+ * already in a wake queue, the wakeup will happen soon and the second
-+ * waker can just skip it.
-+ *
-+ * The WAKE_Q macro declares and initializes the list head.
-+ * wake_up_q() does NOT reinitialize the list; it's expected to be
-+ * called near the end of a function, where the fact that the queue is
-+ * not used again will be easy to see by inspection.
-+ *
-+ * Note that this can cause spurious wakeups. schedule() callers
-+ * must ensure the call is done inside a loop, confirming that the
-+ * wakeup condition has in fact occurred.
-+ */
-+struct wake_q_node {
-+ struct wake_q_node *next;
-+};
-+
-+struct wake_q_head {
-+ struct wake_q_node *first;
-+ struct wake_q_node **lastp;
-+};
-+
-+#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
-+
-+#define WAKE_Q(name) \
-+ struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
-+
-+extern void wake_q_add(struct wake_q_head *head,
-+ struct task_struct *task);
-+extern void wake_up_q(struct wake_q_head *head);
-+
-+/*
- * sched-domains (multiprocessor balancing) declarations:
- */
- #ifdef CONFIG_SMP
-@@ -1511,6 +1555,8 @@ struct task_struct {
- /* Protection of the PI data structures: */
- raw_spinlock_t pi_lock;
-
-+ struct wake_q_node wake_q;
-+
- #ifdef CONFIG_RT_MUTEXES
- /* PI waiters blocked on a rt_mutex held by this task */
- struct rb_root pi_waiters;
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -541,6 +541,52 @@ static bool set_nr_if_polling(struct tas
- #endif
- #endif
-
-+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
-+{
-+ struct wake_q_node *node = &task->wake_q;
-+
-+ /*
-+ * Atomically grab the task, if ->wake_q is !nil already it means
-+ * its already queued (either by us or someone else) and will get the
-+ * wakeup due to that.
-+ *
-+ * This cmpxchg() implies a full barrier, which pairs with the write
-+ * barrier implied by the wakeup in wake_up_list().
-+ */
-+ if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
-+ return;
-+
-+ get_task_struct(task);
-+
-+ /*
-+ * The head is context local, there can be no concurrency.
-+ */
-+ *head->lastp = node;
-+ head->lastp = &node->next;
-+}
-+
-+void wake_up_q(struct wake_q_head *head)
-+{
-+ struct wake_q_node *node = head->first;
-+
-+ while (node != WAKE_Q_TAIL) {
-+ struct task_struct *task;
-+
-+ task = container_of(node, struct task_struct, wake_q);
-+ BUG_ON(!task);
-+ /* task can safely be re-inserted now */
-+ node = node->next;
-+ task->wake_q.next = NULL;
-+
-+ /*
-+ * wake_up_process() implies a wmb() to pair with the queueing
-+ * in wake_q_add() so as not to miss wakeups.
-+ */
-+ wake_up_process(task);
-+ put_task_struct(task);
-+ }
-+}
-+
- /*
- * resched_curr - mark rq's current task 'to be rescheduled now'.
- *
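The wake-queue infrastructure removed from the queue above was merged upstream (commit 7675104990ed, quoted in the patch header). A minimal sketch of the calling pattern it enables, built only from the WAKE_Q()/wake_q_add()/wake_up_q() interfaces shown in the hunks above; the waiter structure and lock are hypothetical:

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    /* Hypothetical waiter bookkeeping; stands in for e.g. a futex hash bucket. */
    struct demo_waiter {
            struct task_struct *task;
            struct list_head node;
    };

    static DEFINE_SPINLOCK(demo_lock);

    static void demo_wake_all(struct list_head *waiters)
    {
            struct demo_waiter *w;
            WAKE_Q(wake_q);         /* queue head must live on the stack */

            spin_lock(&demo_lock);
            list_for_each_entry(w, waiters, node)
                    wake_q_add(&wake_q, w->task);   /* takes a task ref, no wakeup yet */
            spin_unlock(&demo_lock);

            /* The actual wakeups are issued only after the contended lock is dropped. */
            wake_up_q(&wake_q);
    }

Deferring wake_up_q() until after the unlock is what removes the collision between the waker still holding the lock and a freshly woken task that immediately contends for it; the futex conversion below applies exactly this pattern to hb->lock.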
diff --git a/patches/0001-uaccess-count-pagefault_disable-levels-in-pagefault_.patch b/patches/0001-uaccess-count-pagefault_disable-levels-in-pagefault_.patch
deleted file mode 100644
index 784d0ab15c761..0000000000000
--- a/patches/0001-uaccess-count-pagefault_disable-levels-in-pagefault_.patch
+++ /dev/null
@@ -1,119 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:06 +0200
-Subject: sched/preempt, mm/fault: Count pagefault_disable() levels in pagefault_disabled
-
-Until now, pagefault_disable()/pagefault_enabled() used the preempt
-count to track whether in an environment with pagefaults disabled (can
-be queried via in_atomic()).
-
-This patch introduces a separate counter in task_struct to count the
-level of pagefault_disable() calls. We'll keep manipulating the preempt
-count to retain compatibility to existing pagefault handlers.
-
-It is now possible to verify whether in a pagefault_disable() envionment
-by calling pagefault_disabled(). In contrast to in_atomic() it will not
-be influenced by preempt_enable()/preempt_disable().
-
-This patch is based on a patch from Ingo Molnar.
-
-[upstream commit 8bcbde5480f9777f8b74d71493722c663e22c21b]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- include/linux/sched.h | 1 +
- include/linux/uaccess.h | 36 +++++++++++++++++++++++++++++-------
- kernel/fork.c | 3 +++
- 3 files changed, 33 insertions(+), 7 deletions(-)
-
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -1724,6 +1724,7 @@ struct task_struct {
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
- #endif
-+ int pagefault_disabled;
- };
-
- /* Future-safe accessor for struct task_struct's cpus_allowed. */
---- a/include/linux/uaccess.h
-+++ b/include/linux/uaccess.h
-@@ -2,20 +2,36 @@
- #define __LINUX_UACCESS_H__
-
- #include <linux/preempt.h>
-+#include <linux/sched.h>
- #include <asm/uaccess.h>
-
-+static __always_inline void pagefault_disabled_inc(void)
-+{
-+ current->pagefault_disabled++;
-+}
-+
-+static __always_inline void pagefault_disabled_dec(void)
-+{
-+ current->pagefault_disabled--;
-+ WARN_ON(current->pagefault_disabled < 0);
-+}
-+
- /*
-- * These routines enable/disable the pagefault handler in that
-- * it will not take any locks and go straight to the fixup table.
-+ * These routines enable/disable the pagefault handler. If disabled, it will
-+ * not take any locks and go straight to the fixup table.
-+ *
-+ * We increase the preempt and the pagefault count, to be able to distinguish
-+ * whether we run in simple atomic context or in a real pagefault_disable()
-+ * context.
-+ *
-+ * For now, after pagefault_disabled() has been called, we run in atomic
-+ * context. User access methods will not sleep.
- *
-- * They have great resemblance to the preempt_disable/enable calls
-- * and in fact they are identical; this is because currently there is
-- * no other way to make the pagefault handlers do this. So we do
-- * disable preemption but we don't necessarily care about that.
- */
- static inline void pagefault_disable(void)
- {
- preempt_count_inc();
-+ pagefault_disabled_inc();
- /*
- * make sure to have issued the store before a pagefault
- * can hit.
-@@ -25,18 +41,24 @@ static inline void pagefault_disable(voi
-
- static inline void pagefault_enable(void)
- {
--#ifndef CONFIG_PREEMPT
- /*
- * make sure to issue those last loads/stores before enabling
- * the pagefault handler again.
- */
- barrier();
-+ pagefault_disabled_dec();
-+#ifndef CONFIG_PREEMPT
- preempt_count_dec();
- #else
- preempt_enable();
- #endif
- }
-
-+/*
-+ * Is the pagefault handler disabled? If so, user access methods will not sleep.
-+ */
-+#define pagefault_disabled() (current->pagefault_disabled != 0)
-+
- #ifndef ARCH_HAS_NOCACHE_UACCESS
-
- static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
---- a/kernel/fork.c
-+++ b/kernel/fork.c
-@@ -1396,6 +1396,9 @@ static struct task_struct *copy_process(
- p->hardirq_context = 0;
- p->softirq_context = 0;
- #endif
-+
-+ p->pagefault_disabled = 0;
-+
- #ifdef CONFIG_LOCKDEP
- p->lockdep_depth = 0; /* no locks held yet */
- p->curr_chain_key = 0;
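For orientation, a minimal sketch of the access pattern this new counter supports, using only pagefault_disable()/pagefault_enable() from the hunks above together with the long-standing __copy_from_user_inatomic(); the helper name is made up:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical helper: fetch a user-space word from atomic context. */
    static int demo_peek_user(const u32 __user *uaddr, u32 *val)
    {
            int ret;

            pagefault_disable();    /* bumps preempt count and ->pagefault_disabled */
            ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
            pagefault_enable();

            /*
             * With faults disabled the copy never sleeps; an unresolvable
             * fault is reported through the return value instead.
             */
            return ret ? -EFAULT : 0;
    }

Code can now ask pagefault_disabled() to tell a real pagefault_disable() section apart from one that merely has preemption off, which in_atomic() cannot do on !CONFIG_PREEMPT_COUNT kernels.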
diff --git a/patches/0002-arm64-Allow-forced-irq-threading.patch b/patches/0002-arm64-Allow-forced-irq-threading.patch
deleted file mode 100644
index 5b450f04b794b..0000000000000
--- a/patches/0002-arm64-Allow-forced-irq-threading.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Anders Roxell <anders.roxell@linaro.org>
-Date: Mon, 27 Apr 2015 22:53:09 +0200
-Subject: arm64: Allow forced irq threading
-
-Now its safe to allow forced interrupt threading for arm64,
-all timer interrupts and the perf interrupt are marked NO_THREAD, as is
-the case with arch/arm: da0ec6f ARM: 7814/2: Allow forced irq threading
-
-[ upstream commit: e8557d1f0c4d ]
-
-Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
----
- arch/arm64/Kconfig | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/arch/arm64/Kconfig
-+++ b/arch/arm64/Kconfig
-@@ -71,6 +71,7 @@ config ARM64
- select HAVE_RCU_TABLE_FREE
- select HAVE_SYSCALL_TRACEPOINTS
- select IRQ_DOMAIN
-+ select IRQ_FORCED_THREADING
- select MODULES_USE_ELF_RELA
- select NO_BOOTMEM
- select OF
diff --git a/patches/0002-futex-Implement-lockless-wakeups.patch b/patches/0002-futex-Implement-lockless-wakeups.patch
deleted file mode 100644
index 9aaa32d24e70f..0000000000000
--- a/patches/0002-futex-Implement-lockless-wakeups.patch
+++ /dev/null
@@ -1,181 +0,0 @@
-From: Davidlohr Bueso <dave@stgolabs.net>
-Date: Fri, 1 May 2015 08:27:51 -0700
-Subject: futex: Implement lockless wakeups
-
-Given the overall futex architecture, any chance of reducing
-hb->lock contention is welcome. In this particular case, using
-wake-queues to enable lockless wakeups addresses very much real
-world performance concerns, even cases of soft-lockups in cases
-of large amounts of blocked tasks (which is not hard to find in
-large boxes, using but just a handful of futex).
-
-At the lowest level, this patch can reduce latency of a single thread
-attempting to acquire hb->lock in highly contended scenarios by a
-up to 2x. At lower counts of nr_wake there are no regressions,
-confirming, of course, that the wake_q handling overhead is practically
-non existent. For instance, while a fair amount of variation,
-the extended pef-bench wakeup benchmark shows for a 20 core machine
-the following avg per-thread time to wakeup its share of tasks:
-
- nr_thr ms-before ms-after
- 16 0.0590 0.0215
- 32 0.0396 0.0220
- 48 0.0417 0.0182
- 64 0.0536 0.0236
- 80 0.0414 0.0097
- 96 0.0672 0.0152
-
-Naturally, this can cause spurious wakeups. However there is no core code
-that cannot handle them afaict, and furthermore tglx does have the point
-that other events can already trigger them anyway.
-
-[upstream commit 1d0dcb3ad9d336e6d6ee020a750a7f8d907e28de]
-
-Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Acked-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Andrew Morton <akpm@linux-foundation.org>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Chris Mason <clm@fb.com>
-Cc: Davidlohr Bueso <dave@stgolabs.net>
-Cc: George Spelvin <linux@horizon.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Manfred Spraul <manfred@colorfullife.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Link: http://lkml.kernel.org/r/1430494072-30283-3-git-send-email-dave@stgolabs.net
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/futex.c | 33 +++++++++++++++++----------------
- 1 file changed, 17 insertions(+), 16 deletions(-)
-
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -1090,9 +1090,11 @@ static void __unqueue_futex(struct futex
-
- /*
- * The hash bucket lock must be held when this is called.
-- * Afterwards, the futex_q must not be accessed.
-+ * Afterwards, the futex_q must not be accessed. Callers
-+ * must ensure to later call wake_up_q() for the actual
-+ * wakeups to occur.
- */
--static void wake_futex(struct futex_q *q)
-+static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
- {
- struct task_struct *p = q->task;
-
-@@ -1100,14 +1102,10 @@ static void wake_futex(struct futex_q *q
- return;
-
- /*
-- * We set q->lock_ptr = NULL _before_ we wake up the task. If
-- * a non-futex wake up happens on another CPU then the task
-- * might exit and p would dereference a non-existing task
-- * struct. Prevent this by holding a reference on p across the
-- * wake up.
-+ * Queue the task for later wakeup for after we've released
-+ * the hb->lock. wake_q_add() grabs reference to p.
- */
-- get_task_struct(p);
--
-+ wake_q_add(wake_q, p);
- __unqueue_futex(q);
- /*
- * The waiting task can free the futex_q as soon as
-@@ -1117,9 +1115,6 @@ static void wake_futex(struct futex_q *q
- */
- smp_wmb();
- q->lock_ptr = NULL;
--
-- wake_up_state(p, TASK_NORMAL);
-- put_task_struct(p);
- }
-
- static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
-@@ -1217,6 +1212,7 @@ futex_wake(u32 __user *uaddr, unsigned i
- struct futex_q *this, *next;
- union futex_key key = FUTEX_KEY_INIT;
- int ret;
-+ WAKE_Q(wake_q);
-
- if (!bitset)
- return -EINVAL;
-@@ -1244,13 +1240,14 @@ futex_wake(u32 __user *uaddr, unsigned i
- if (!(this->bitset & bitset))
- continue;
-
-- wake_futex(this);
-+ mark_wake_futex(&wake_q, this);
- if (++ret >= nr_wake)
- break;
- }
- }
-
- spin_unlock(&hb->lock);
-+ wake_up_q(&wake_q);
- out_put_key:
- put_futex_key(&key);
- out:
-@@ -1269,6 +1266,7 @@ futex_wake_op(u32 __user *uaddr1, unsign
- struct futex_hash_bucket *hb1, *hb2;
- struct futex_q *this, *next;
- int ret, op_ret;
-+ WAKE_Q(wake_q);
-
- retry:
- ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
-@@ -1320,7 +1318,7 @@ futex_wake_op(u32 __user *uaddr1, unsign
- ret = -EINVAL;
- goto out_unlock;
- }
-- wake_futex(this);
-+ mark_wake_futex(&wake_q, this);
- if (++ret >= nr_wake)
- break;
- }
-@@ -1334,7 +1332,7 @@ futex_wake_op(u32 __user *uaddr1, unsign
- ret = -EINVAL;
- goto out_unlock;
- }
-- wake_futex(this);
-+ mark_wake_futex(&wake_q, this);
- if (++op_ret >= nr_wake2)
- break;
- }
-@@ -1344,6 +1342,7 @@ futex_wake_op(u32 __user *uaddr1, unsign
-
- out_unlock:
- double_unlock_hb(hb1, hb2);
-+ wake_up_q(&wake_q);
- out_put_keys:
- put_futex_key(&key2);
- out_put_key1:
-@@ -1503,6 +1502,7 @@ static int futex_requeue(u32 __user *uad
- struct futex_pi_state *pi_state = NULL;
- struct futex_hash_bucket *hb1, *hb2;
- struct futex_q *this, *next;
-+ WAKE_Q(wake_q);
-
- if (requeue_pi) {
- /*
-@@ -1679,7 +1679,7 @@ static int futex_requeue(u32 __user *uad
- * woken by futex_unlock_pi().
- */
- if (++task_count <= nr_wake && !requeue_pi) {
-- wake_futex(this);
-+ mark_wake_futex(&wake_q, this);
- continue;
- }
-
-@@ -1719,6 +1719,7 @@ static int futex_requeue(u32 __user *uad
- out_unlock:
- free_pi_state(pi_state);
- double_unlock_hb(hb1, hb2);
-+ wake_up_q(&wake_q);
- hb_waiters_dec(hb2);
-
- /*
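Both this commit message and the wake-queue patch above note that the deferred wakeups can be spurious; a sketch of the standard wait-side loop that makes that harmless (demo_cond is a hypothetical condition):

    #include <linux/sched.h>

    static bool demo_cond;          /* hypothetical wakeup condition */

    static void demo_wait(void)
    {
            set_current_state(TASK_INTERRUPTIBLE);
            while (!READ_ONCE(demo_cond)) {
                    schedule();     /* may return on a spurious wake_up_q() wakeup */
                    set_current_state(TASK_INTERRUPTIBLE);
            }
            __set_current_state(TASK_RUNNING);
    }

Because the condition is re-checked after every schedule(), an extra wakeup costs only one loop iteration, which is why the futex code can hand wakeups off to wake_up_q() without further synchronization.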
diff --git a/patches/0002-gpio-omap-fix-omap_gpio_free-to-not-clean-up-irq-con.patch b/patches/0002-gpio-omap-fix-omap_gpio_free-to-not-clean-up-irq-con.patch
deleted file mode 100644
index ba63133bd910f..0000000000000
--- a/patches/0002-gpio-omap-fix-omap_gpio_free-to-not-clean-up-irq-con.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From 463bb14e6fc957a7d5ec9434afed29430debbbfb Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@linaro.org>
-Date: Mon, 13 Jul 2015 17:08:08 +0300
-Subject: [PATCH 02/21] gpio: omap: fix omap_gpio_free to not clean up irq
- configuration
-
-commit 5f982c70a7c3382d3532ac6d13fdea48ab38b934 upstream
-
-This patch fixes following issue:
-- GPIOn is used as IRQ by some dev, for example PCF8575.INT -> gpio6.11
-- PCFx driver knows nothing about type of IRQ line (GPIO or not)
- so it doesn't request gpio and just do request_irq()
-- If gpio6.11 will be exported through the sysfs and then un-xeported
-then IRQs from PCFx will not be received any more, because
-IRQ configuration for gpio6.11 will be cleaned up unconditionally
-in omap_gpio_free.
-
-Fix this by removing all GPIO IRQ specific code from omap_gpio_free()
-and also do GPIO clean up (change direction to 'in' and disable debounce)
-only if corresponding GPIO is not used as IRQ too.
-GPIO IRQ will be properly cleaned up by GPIO irqchip code.
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@linaro.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -690,8 +690,11 @@ static void omap_gpio_free(struct gpio_c
-
- spin_lock_irqsave(&bank->lock, flags);
- bank->mod_usage &= ~(BIT(offset));
-+ if (!LINE_USED(bank->irq_usage, offset)) {
-+ omap_set_gpio_direction(bank, offset, 1);
-+ omap_clear_gpio_debounce(bank, offset);
-+ }
- omap_disable_gpio_module(bank, offset);
-- omap_reset_gpio(bank, offset);
- spin_unlock_irqrestore(&bank->lock, flags);
-
- /*
diff --git a/patches/0002-mm-uaccess-trigger-might_sleep-in-might_fault-with-d.patch b/patches/0002-mm-uaccess-trigger-might_sleep-in-might_fault-with-d.patch
deleted file mode 100644
index a26899c7f55bd..0000000000000
--- a/patches/0002-mm-uaccess-trigger-might_sleep-in-might_fault-with-d.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:07 +0200
-Subject: mm, uaccess: trigger might_sleep() in might_fault() with disabled pagefaults
-
-Commit 662bbcb2747c ("mm, sched: Allow uaccess in atomic with
-pagefault_disable()") removed might_sleep() checks for all user access
-code (that uses might_fault()).
-
-The reason was to disable wrong "sleep in atomic" warnings in the
-following scenario:
- pagefault_disable()
- rc = copy_to_user(...)
- pagefault_enable()
-
-Which is valid, as pagefault_disable() increments the preempt counter
-and therefore disables the pagefault handler. copy_to_user() will not
-sleep and return an error code if a page is not available.
-
-However, as all might_sleep() checks are removed,
-CONFIG_DEBUG_ATOMIC_SLEEP would no longer detect the following scenario:
- spin_lock(&lock);
- rc = copy_to_user(...)
- spin_unlock(&lock)
-
-If the kernel is compiled with preemption turned on, preempt_disable()
-will make in_atomic() detect disabled preemption. The fault handler would
-correctly never sleep on user access.
-However, with preemption turned off, preempt_disable() is usually a NOP
-(with !CONFIG_PREEMPT_COUNT), therefore in_atomic() will not be able to
-detect disabled preemption nor disabled pagefaults. The fault handler
-could sleep.
-We really want to enable CONFIG_DEBUG_ATOMIC_SLEEP checks for user access
-functions again, otherwise we can end up with horrible deadlocks.
-
-Root of all evil is that pagefault_disable() acts almost as
-preempt_disable(), depending on preemption being turned on/off.
-
-As we now have pagefault_disabled(), we can use it to distinguish
-whether user acces functions might sleep.
-
-Convert might_fault() into a makro that calls __might_fault(), to
-allow proper file + line messages in case of a might_sleep() warning.
-
-[upstream commit 9ec23531fd48031d1b6ca5366f5f967d17a8bc28]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- include/linux/kernel.h | 3 ++-
- mm/memory.c | 18 ++++++------------
- 2 files changed, 8 insertions(+), 13 deletions(-)
-
---- a/include/linux/kernel.h
-+++ b/include/linux/kernel.h
-@@ -244,7 +244,8 @@ static inline u32 reciprocal_scale(u32 v
-
- #if defined(CONFIG_MMU) && \
- (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
--void might_fault(void);
-+#define might_fault() __might_fault(__FILE__, __LINE__)
-+void __might_fault(const char *file, int line);
- #else
- static inline void might_fault(void) { }
- #endif
---- a/mm/memory.c
-+++ b/mm/memory.c
-@@ -3743,7 +3743,7 @@ void print_vma_addr(char *prefix, unsign
- }
-
- #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
--void might_fault(void)
-+void __might_fault(const char *file, int line)
- {
- /*
- * Some code (nfs/sunrpc) uses socket ops on kernel memory while
-@@ -3753,21 +3753,15 @@ void might_fault(void)
- */
- if (segment_eq(get_fs(), KERNEL_DS))
- return;
--
-- /*
-- * it would be nicer only to annotate paths which are not under
-- * pagefault_disable, however that requires a larger audit and
-- * providing helpers like get_user_atomic.
-- */
-- if (in_atomic())
-+ if (pagefault_disabled())
- return;
--
-- __might_sleep(__FILE__, __LINE__, 0);
--
-+ __might_sleep(file, line, 0);
-+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
- if (current->mm)
- might_lock_read(&current->mm->mmap_sem);
-+#endif
- }
--EXPORT_SYMBOL(might_fault);
-+EXPORT_SYMBOL(__might_fault);
- #endif
-
- #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
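To make the scenario from the commit message concrete, here is a deliberately broken sketch of the bug class the restored check catches; the function is hypothetical, and copy_to_user() reaches the new __might_fault() via the might_fault() macro:

    #include <linux/errno.h>
    #include <linux/spinlock.h>
    #include <linux/uaccess.h>

    static DEFINE_SPINLOCK(demo_lock);

    /* Deliberately broken: user access that may fault while a spinlock is held. */
    static int demo_broken_copy(void __user *ubuf, const void *kbuf, size_t len)
    {
            unsigned long left;

            spin_lock(&demo_lock);
            left = copy_to_user(ubuf, kbuf, len);   /* might_fault() -> might_sleep() splat */
            spin_unlock(&demo_lock);

            return left ? -EFAULT : 0;
    }

Previously might_fault() bailed out through in_atomic() as soon as the spinlock had raised the preempt count, silencing exactly this case; checking pagefault_disabled() instead lets __might_sleep() warn here while keeping legitimate pagefault_disable() + copy_to_user() sections quiet.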
diff --git a/patches/0003-gpio-omap-fix-error-handling-in-omap_gpio_irq_type.patch b/patches/0003-gpio-omap-fix-error-handling-in-omap_gpio_irq_type.patch
deleted file mode 100644
index b10843843d774..0000000000000
--- a/patches/0003-gpio-omap-fix-error-handling-in-omap_gpio_irq_type.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From 290b539f02c79ede61502447d77579f15f6a4984 Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@linaro.org>
-Date: Mon, 13 Jul 2015 17:08:09 +0300
-Subject: [PATCH 03/21] gpio: omap: fix error handling in omap_gpio_irq_type
-
-commit 1562e4618ded89b07d145d6985f469fe8be04830 upstream
-
-The GPIO bank will be kept powered in case if input parameters
-are invalid or error occurred in omap_gpio_irq_type.
-
-Hence, fix it by ensuring that GPIO bank will be unpowered
-in case of errors and add additional check of value returned
-from omap_set_gpio_triggering().
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@linaro.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 16 ++++++++++++----
- 1 file changed, 12 insertions(+), 4 deletions(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -488,9 +488,6 @@ static int omap_gpio_irq_type(struct irq
- unsigned long flags;
- unsigned offset = d->hwirq;
-
-- if (!BANK_USED(bank))
-- pm_runtime_get_sync(bank->dev);
--
- if (type & ~IRQ_TYPE_SENSE_MASK)
- return -EINVAL;
-
-@@ -498,12 +495,18 @@ static int omap_gpio_irq_type(struct irq
- (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
- return -EINVAL;
-
-+ if (!BANK_USED(bank))
-+ pm_runtime_get_sync(bank->dev);
-+
- spin_lock_irqsave(&bank->lock, flags);
- retval = omap_set_gpio_triggering(bank, offset, type);
-+ if (retval)
-+ goto error;
- omap_gpio_init_irq(bank, offset);
- if (!omap_gpio_is_input(bank, offset)) {
- spin_unlock_irqrestore(&bank->lock, flags);
-- return -EINVAL;
-+ retval = -EINVAL;
-+ goto error;
- }
- spin_unlock_irqrestore(&bank->lock, flags);
-
-@@ -512,6 +515,11 @@ static int omap_gpio_irq_type(struct irq
- else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
- __irq_set_handler_locked(d->irq, handle_edge_irq);
-
-+ return 0;
-+
-+error:
-+ if (!BANK_USED(bank))
-+ pm_runtime_put(bank->dev);
- return retval;
- }
-
diff --git a/patches/0003-uaccess-clarify-that-uaccess-may-only-sleep-if-pagef.patch b/patches/0003-uaccess-clarify-that-uaccess-may-only-sleep-if-pagef.patch
deleted file mode 100644
index 81c503208b7f3..0000000000000
--- a/patches/0003-uaccess-clarify-that-uaccess-may-only-sleep-if-pagef.patch
+++ /dev/null
@@ -1,641 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:08 +0200
-Subject: [PATCH] sched/preempt, futex: Update comments to clarify that preemption doesn't have to be disabled
-
-In general, non-atomic variants of user access functions must not sleep
-if pagefaults are disabled.
-
-Let's update all relevant comments in uaccess code. This also reflects
-the might_sleep() checks in might_fault().
-
-[upstream commit 2f09b227eeed4b3a072fe818c82a4c773b778cde]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- arch/avr32/include/asm/uaccess.h | 12 ++++++---
- arch/hexagon/include/asm/uaccess.h | 3 +-
- arch/m32r/include/asm/uaccess.h | 30 +++++++++++++++-------
- arch/microblaze/include/asm/uaccess.h | 6 +++-
- arch/mips/include/asm/uaccess.h | 45 ++++++++++++++++++++++------------
- arch/s390/include/asm/uaccess.h | 15 +++++++----
- arch/score/include/asm/uaccess.h | 15 +++++++----
- arch/tile/include/asm/uaccess.h | 18 +++++++++----
- arch/x86/include/asm/uaccess.h | 15 +++++++----
- arch/x86/include/asm/uaccess_32.h | 6 +++-
- arch/x86/lib/usercopy_32.c | 6 +++-
- lib/strnlen_user.c | 6 +++-
- 12 files changed, 118 insertions(+), 59 deletions(-)
-
---- a/arch/avr32/include/asm/uaccess.h
-+++ b/arch/avr32/include/asm/uaccess.h
-@@ -97,7 +97,8 @@ static inline __kernel_size_t __copy_fro
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -116,7 +117,8 @@ static inline __kernel_size_t __copy_fro
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -136,7 +138,8 @@ static inline __kernel_size_t __copy_fro
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -158,7 +161,8 @@ static inline __kernel_size_t __copy_fro
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
---- a/arch/hexagon/include/asm/uaccess.h
-+++ b/arch/hexagon/include/asm/uaccess.h
-@@ -36,7 +36,8 @@
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
---- a/arch/m32r/include/asm/uaccess.h
-+++ b/arch/m32r/include/asm/uaccess.h
-@@ -91,7 +91,8 @@ static inline void set_fs(mm_segment_t s
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -155,7 +156,8 @@ extern int fixup_exception(struct pt_reg
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -175,7 +177,8 @@ extern int fixup_exception(struct pt_reg
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -194,7 +197,8 @@ extern int fixup_exception(struct pt_reg
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -274,7 +278,8 @@ do { \
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -568,7 +573,8 @@ unsigned long __generic_copy_from_user(v
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -588,7 +594,8 @@ unsigned long __generic_copy_from_user(v
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space.
- *
-@@ -606,7 +613,8 @@ unsigned long __generic_copy_from_user(v
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -626,7 +634,8 @@ unsigned long __generic_copy_from_user(v
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space.
- *
-@@ -677,7 +686,8 @@ unsigned long clear_user(void __user *me
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
---- a/arch/microblaze/include/asm/uaccess.h
-+++ b/arch/microblaze/include/asm/uaccess.h
-@@ -178,7 +178,8 @@ extern long __user_bad(void);
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -290,7 +291,8 @@ extern long __user_bad(void);
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
---- a/arch/mips/include/asm/uaccess.h
-+++ b/arch/mips/include/asm/uaccess.h
-@@ -103,7 +103,8 @@ extern u64 __ua_limit;
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -138,7 +139,8 @@ extern u64 __ua_limit;
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -157,7 +159,8 @@ extern u64 __ua_limit;
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -177,7 +180,8 @@ extern u64 __ua_limit;
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -199,7 +203,8 @@ extern u64 __ua_limit;
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -498,7 +503,8 @@ extern void __put_user_unknown(void);
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -517,7 +523,8 @@ extern void __put_user_unknown(void);
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -537,7 +544,8 @@ extern void __put_user_unknown(void);
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -559,7 +567,8 @@ extern void __put_user_unknown(void);
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -815,7 +824,8 @@ extern size_t __copy_user(void *__to, co
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -888,7 +898,8 @@ extern size_t __copy_user_inatomic(void
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space.
- *
-@@ -1075,7 +1086,8 @@ extern size_t __copy_in_user_eva(void *_
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -1107,7 +1119,8 @@ extern size_t __copy_in_user_eva(void *_
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space.
- *
-@@ -1329,7 +1342,8 @@ strncpy_from_user(char *__to, const char
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
-@@ -1398,7 +1412,8 @@ static inline long __strnlen_user(const
- * strnlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
---- a/arch/s390/include/asm/uaccess.h
-+++ b/arch/s390/include/asm/uaccess.h
-@@ -98,7 +98,8 @@ static inline unsigned long extable_fixu
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -118,7 +119,8 @@ unsigned long __must_check __copy_from_u
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -264,7 +266,8 @@ int __get_user_bad(void) __attribute__((
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space.
- *
-@@ -290,7 +293,8 @@ void copy_from_user_overflow(void)
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space.
- *
-@@ -348,7 +352,8 @@ static inline unsigned long strnlen_user
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
---- a/arch/score/include/asm/uaccess.h
-+++ b/arch/score/include/asm/uaccess.h
-@@ -36,7 +36,8 @@
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -61,7 +62,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -79,7 +81,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -98,7 +101,8 @@
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -119,7 +123,8 @@
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
---- a/arch/tile/include/asm/uaccess.h
-+++ b/arch/tile/include/asm/uaccess.h
-@@ -78,7 +78,8 @@ int __range_ok(unsigned long addr, unsig
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -192,7 +193,8 @@ extern int __get_user_bad(void)
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -274,7 +276,8 @@ extern int __put_user_bad(void)
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -330,7 +333,8 @@ extern int __put_user_bad(void)
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -366,7 +370,8 @@ copy_to_user(void __user *to, const void
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -437,7 +442,8 @@ static inline unsigned long __must_check
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to user space. Caller must check
- * the specified blocks with access_ok() before calling this function.
---- a/arch/x86/include/asm/uaccess.h
-+++ b/arch/x86/include/asm/uaccess.h
-@@ -74,7 +74,8 @@ static inline bool __chk_range_not_ok(un
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
-@@ -145,7 +146,8 @@ extern int __get_user_bad(void);
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -240,7 +242,8 @@ extern void __put_user_8(void);
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
-@@ -455,7 +458,8 @@ struct __large_struct { unsigned long bu
- * @x: Variable to store result.
- * @ptr: Source address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
-@@ -479,7 +483,8 @@ struct __large_struct { unsigned long bu
- * @x: Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
---- a/arch/x86/include/asm/uaccess_32.h
-+++ b/arch/x86/include/asm/uaccess_32.h
-@@ -70,7 +70,8 @@ static __always_inline unsigned long __m
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space. Caller must check
- * the specified block with access_ok() before calling this function.
-@@ -117,7 +118,8 @@ static __always_inline unsigned long
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space. Caller must check
- * the specified block with access_ok() before calling this function.
---- a/arch/x86/lib/usercopy_32.c
-+++ b/arch/x86/lib/usercopy_32.c
-@@ -647,7 +647,8 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocach
- * @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from kernel space to user space.
- *
-@@ -668,7 +669,8 @@ EXPORT_SYMBOL(_copy_to_user);
- * @from: Source address, in user space.
- * @n: Number of bytes to copy.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Copy data from user space to kernel space.
- *
---- a/lib/strnlen_user.c
-+++ b/lib/strnlen_user.c
-@@ -85,7 +85,8 @@ static inline long do_strnlen_user(const
- * @str: The string to measure.
- * @count: Maximum count (including NUL character)
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
-@@ -121,7 +122,8 @@ EXPORT_SYMBOL(strnlen_user);
- * strlen_user: - Get the size of a user string INCLUDING final NUL.
- * @str: The string to measure.
- *
-- * Context: User context only. This function may sleep.
-+ * Context: User context only. This function may sleep if pagefaults are
-+ * enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
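
The wording change above is not cosmetic: the very same uaccess helpers either sleep (faulting the page in) or fail fast, depending on whether the caller disabled pagefaults. A minimal sketch of the two calling conventions (hypothetical caller code, not part of the patch):

#include <linux/uaccess.h>

/* Pagefaults enabled: copy_from_user() may sleep to fault the page
 * in, so this must only be called from sleepable process context. */
static int read_user_sleepable(void *dst, const void __user *src, size_t len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}

/* Pagefaults disabled: the fault handler bails out early and the
 * copy reports the bytes left over instead of sleeping. */
static int read_user_atomic(void *dst, const void __user *src, size_t len)
{
	int ret = 0;

	pagefault_disable();
	if (__copy_from_user_inatomic(dst, src, len))
		ret = -EFAULT;
	pagefault_enable();
	return ret;
}
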
diff --git a/patches/0004-gpio-omap-rework-omap_x_irq_shutdown-to-touch-only-i.patch b/patches/0004-gpio-omap-rework-omap_x_irq_shutdown-to-touch-only-i.patch
deleted file mode 100644
index d148a40423ea3..0000000000000
--- a/patches/0004-gpio-omap-rework-omap_x_irq_shutdown-to-touch-only-i.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-From 05c248c1091e714b6c2b4c8885ebfc3000352303 Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@linaro.org>
-Date: Mon, 13 Jul 2015 17:08:10 +0300
-Subject: [PATCH 04/21] gpio: omap: rework omap_x_irq_shutdown to touch only
- irqs specific registers
-
-commit 6e96c1b5e54889cd11ce29723a5c38ba284c1d91 upstream
-
-The GPIO Chip and GPIO IRQ Chip functionality are essentially orthogonal,
-so GPIO IRQ Chip implementation shouldn't touch GPIO specific
-registers and vice versa.
-
-Hence, rework omap_gpio_irq_shutdown and try to touch only IRQ-specific
-registers:
-- don't configure GPIO as input (it should actually already be configured
- as input).
-- don't clear debounce configuration if GPIO is still used as GPIO.
- We need to take into account here commit c9c55d921115
- ("gpio/omap: fix off-mode bug: clear debounce settings on free/reset").
-
-Also remove omap_reset_gpio() function as it is not used any more.
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@linaro.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 15 +++++----------
- 1 file changed, 5 insertions(+), 10 deletions(-)
-
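
Sketched below is the rule the changelog states, with a hypothetical bank structure and my_*() register helpers standing in for the omap driver internals: the irqchip shutdown path tears down only IRQ-side state, and touches shared state (debounce) only once the line is no longer used as a plain GPIO.

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/spinlock.h>

struct my_bank {			/* hypothetical, mirrors gpio_bank */
	spinlock_t lock;
	u32 irq_usage;			/* lines used as interrupts */
	u32 mod_usage;			/* lines used as plain GPIOs */
};

/* Hypothetical register accessors; the IRQ-side vs. GPIO-side split
 * is the point of the sketch. */
void my_irq_enable(struct my_bank *b, unsigned offset, bool on);
void my_irq_clear_status(struct my_bank *b, unsigned offset);
void my_irq_set_trigger(struct my_bank *b, unsigned offset, unsigned type);
void my_clear_debounce(struct my_bank *b, unsigned offset);

static void my_gpio_irq_shutdown(struct irq_data *d)
{
	struct my_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~BIT(offset);
	my_irq_enable(bank, offset, false);	/* IRQ-side state only */
	my_irq_clear_status(bank, offset);
	my_irq_set_trigger(bank, offset, IRQ_TYPE_NONE);
	if (!(bank->mod_usage & BIT(offset)))	/* GPIO side done with it? */
		my_clear_debounce(bank, offset);
	spin_unlock_irqrestore(&bank->lock, flags);
}

Note that the sketch, like the patch, never resets the direction: the line is already an input while requested as an IRQ, and re-configuring it here would belong to the GPIO side.
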
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -646,15 +646,6 @@ static int omap_set_gpio_wakeup(struct g
- return 0;
- }
-
--static void omap_reset_gpio(struct gpio_bank *bank, unsigned offset)
--{
-- omap_set_gpio_direction(bank, offset, 1);
-- omap_set_gpio_irqenable(bank, offset, 0);
-- omap_clear_gpio_irqstatus(bank, offset);
-- omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-- omap_clear_gpio_debounce(bank, offset);
--}
--
- /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
- static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
- {
-@@ -821,8 +812,12 @@ static void omap_gpio_irq_shutdown(struc
-
- spin_lock_irqsave(&bank->lock, flags);
- bank->irq_usage &= ~(BIT(offset));
-+ omap_set_gpio_irqenable(bank, offset, 0);
-+ omap_clear_gpio_irqstatus(bank, offset);
-+ omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-+ if (!LINE_USED(bank->mod_usage, offset))
-+ omap_clear_gpio_debounce(bank, offset);
- omap_disable_gpio_module(bank, offset);
-- omap_reset_gpio(bank, offset);
- spin_unlock_irqrestore(&bank->lock, flags);
-
- /*
diff --git a/patches/0004-ipc-mqueue-Implement-lockless-pipelined-wakeups.patch b/patches/0004-ipc-mqueue-Implement-lockless-pipelined-wakeups.patch
deleted file mode 100644
index 92931b77a541b..0000000000000
--- a/patches/0004-ipc-mqueue-Implement-lockless-pipelined-wakeups.patch
+++ /dev/null
@@ -1,183 +0,0 @@
-From: Davidlohr Bueso <dave@stgolabs.net>
-Date: Mon, 4 May 2015 07:02:46 -0700
-Subject: ipc/mqueue: Implement lockless pipelined wakeups
-
-This patch moves the wake_up_process() invocation so it is not done under
-the info->lock by making use of a lockless wake_q. With this change, the
-waiter is woken up once it is STATE_READY and it does not need to loop
-on SMP if it is still in STATE_PENDING. In the timeout case we still need
-to grab the info->lock to verify the state.
-
-This change should also avoid the introduction of preempt_disable() in -rt,
-which would otherwise be needed so that the busy loop polling for the
-STATE_PENDING -> STATE_READY change cannot live-lock when the waiter has a
-higher priority than the waker.
-
-Additionally, this patch micro-optimizes wq_sleep by using the cheaper
-cousin of set_current_state(TASK_INTERRUPTIBLE), as we will block no
-matter what, thus getting rid of the implied memory barrier.
-
-[upstream commit fa6004ad4528153b699a4d5ce5ea6b33acce74cc]
-
-Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Acked-by: George Spelvin <linux@horizon.com>
-Acked-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Andrew Morton <akpm@linux-foundation.org>
-Cc: Borislav Petkov <bp@alien8.de>
-Cc: Chris Mason <clm@fb.com>
-Cc: H. Peter Anvin <hpa@zytor.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Manfred Spraul <manfred@colorfullife.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: dave@stgolabs.net
-Link: http://lkml.kernel.org/r/1430748166.1940.17.camel@stgolabs.net
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- ipc/mqueue.c | 54 +++++++++++++++++++++++++++++++++---------------------
- 1 file changed, 33 insertions(+), 21 deletions(-)
-
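
The core of the technique is the on-stack wake_q: wakeups are queued while holding the lock and issued after it is dropped, and the STATE_READY store is the last write the waiter can observe. A condensed sketch (the my_* types are hypothetical stand-ins; the wake_q calls are the real API used by the hunks below):

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define STATE_READY	1		/* as in the reworked mqueue.c */

struct my_waiter {			/* stand-in for ext_wait_queue */
	struct list_head list;
	struct task_struct *task;
	void *msg;
	int state;
};

struct my_queue {			/* stand-in for mqueue_inode_info */
	spinlock_t lock;
};

static void hand_over(struct my_queue *q, struct my_waiter *w, void *msg)
{
	WAKE_Q(wake_q);			/* on-stack lockless wake queue */

	spin_lock(&q->lock);
	w->msg = msg;
	list_del(&w->list);
	wake_q_add(&wake_q, w->task);	/* takes a task reference */
	w->state = STATE_READY;		/* last store: waiter may proceed */
	spin_unlock(&q->lock);

	wake_up_q(&wake_q);		/* actual wakeup, without the lock */
}
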
---- a/ipc/mqueue.c
-+++ b/ipc/mqueue.c
-@@ -47,8 +47,7 @@
- #define RECV 1
-
- #define STATE_NONE 0
--#define STATE_PENDING 1
--#define STATE_READY 2
-+#define STATE_READY 1
-
- struct posix_msg_tree_node {
- struct rb_node rb_node;
-@@ -568,15 +567,12 @@ static int wq_sleep(struct mqueue_inode_
- wq_add(info, sr, ewp);
-
- for (;;) {
-- set_current_state(TASK_INTERRUPTIBLE);
-+ __set_current_state(TASK_INTERRUPTIBLE);
-
- spin_unlock(&info->lock);
- time = schedule_hrtimeout_range_clock(timeout, 0,
- HRTIMER_MODE_ABS, CLOCK_REALTIME);
-
-- while (ewp->state == STATE_PENDING)
-- cpu_relax();
--
- if (ewp->state == STATE_READY) {
- retval = 0;
- goto out;
-@@ -904,11 +900,15 @@ SYSCALL_DEFINE1(mq_unlink, const char __
- * list of waiting receivers. A sender checks that list before adding the new
- * message into the message array. If there is a waiting receiver, then it
- * bypasses the message array and directly hands the message over to the
-- * receiver.
-- * The receiver accepts the message and returns without grabbing the queue
-- * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
-- * are necessary. The same algorithm is used for sysv semaphores, see
-- * ipc/sem.c for more details.
-+ * receiver. The receiver accepts the message and returns without grabbing the
-+ * queue spinlock:
-+ *
-+ * - Set pointer to message.
-+ * - Queue the receiver task for later wakeup (without the info->lock).
-+ * - Update its state to STATE_READY. Now the receiver can continue.
-+ * - Wake up the process after the lock is dropped. Should the process wake up
-+ * before this wakeup (due to a timeout or a signal) it will either see
-+ * STATE_READY and continue or acquire the lock to check the state again.
- *
- * The same algorithm is used for senders.
- */
-@@ -916,21 +916,29 @@ SYSCALL_DEFINE1(mq_unlink, const char __
- /* pipelined_send() - send a message directly to the task waiting in
- * sys_mq_timedreceive() (without inserting message into a queue).
- */
--static inline void pipelined_send(struct mqueue_inode_info *info,
-+static inline void pipelined_send(struct wake_q_head *wake_q,
-+ struct mqueue_inode_info *info,
- struct msg_msg *message,
- struct ext_wait_queue *receiver)
- {
- receiver->msg = message;
- list_del(&receiver->list);
-- receiver->state = STATE_PENDING;
-- wake_up_process(receiver->task);
-- smp_wmb();
-+ wake_q_add(wake_q, receiver->task);
-+ /*
-+ * Rely on the implicit cmpxchg barrier from wake_q_add such
-+ * that we can ensure that updating receiver->state is the last
-+ * write operation: As once set, the receiver can continue,
-+ * and if we don't have the reference count from the wake_q,
-+ * yet, at that point we can later have a use-after-free
-+ * condition and bogus wakeup.
-+ */
- receiver->state = STATE_READY;
- }
-
- /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
- * gets its message and put to the queue (we have one free place for sure). */
--static inline void pipelined_receive(struct mqueue_inode_info *info)
-+static inline void pipelined_receive(struct wake_q_head *wake_q,
-+ struct mqueue_inode_info *info)
- {
- struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
-
-@@ -941,10 +949,9 @@ static inline void pipelined_receive(str
- }
- if (msg_insert(sender->msg, info))
- return;
-+
- list_del(&sender->list);
-- sender->state = STATE_PENDING;
-- wake_up_process(sender->task);
-- smp_wmb();
-+ wake_q_add(wake_q, sender->task);
- sender->state = STATE_READY;
- }
-
-@@ -962,6 +969,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqd
- struct timespec ts;
- struct posix_msg_tree_node *new_leaf = NULL;
- int ret = 0;
-+ WAKE_Q(wake_q);
-
- if (u_abs_timeout) {
- int res = prepare_timeout(u_abs_timeout, &expires, &ts);
-@@ -1045,7 +1053,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqd
- } else {
- receiver = wq_get_first_waiter(info, RECV);
- if (receiver) {
-- pipelined_send(info, msg_ptr, receiver);
-+ pipelined_send(&wake_q, info, msg_ptr, receiver);
- } else {
- /* adds message to the queue */
- ret = msg_insert(msg_ptr, info);
-@@ -1058,6 +1066,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqd
- }
- out_unlock:
- spin_unlock(&info->lock);
-+ wake_up_q(&wake_q);
- out_free:
- if (ret)
- free_msg(msg_ptr);
-@@ -1144,14 +1153,17 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t,
- msg_ptr = wait.msg;
- }
- } else {
-+ WAKE_Q(wake_q);
-+
- msg_ptr = msg_get(info);
-
- inode->i_atime = inode->i_mtime = inode->i_ctime =
- CURRENT_TIME;
-
- /* There is now free space in queue. */
-- pipelined_receive(info);
-+ pipelined_receive(&wake_q, info);
- spin_unlock(&info->lock);
-+ wake_up_q(&wake_q);
- ret = 0;
- }
- if (ret == 0) {
diff --git a/patches/0004-mm-explicitly-disable-enable-preemption-in-kmap_atom.patch b/patches/0004-mm-explicitly-disable-enable-preemption-in-kmap_atom.patch
deleted file mode 100644
index c74a65d9272f0..0000000000000
--- a/patches/0004-mm-explicitly-disable-enable-preemption-in-kmap_atom.patch
+++ /dev/null
@@ -1,367 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:09 +0200
-Subject: sched/preempt, mm/kmap: Explicitly disable/enable preemption in kmap_atomic_*
-
-The existing code relies on pagefault_disable() implicitly disabling
-preemption, so that no schedule will happen between kmap_atomic() and
-kunmap_atomic().
-
-Let's make this explicit, to prepare for pagefault_disable() not
-touching preemption anymore.
-
-[upstream commit 2cb7c9cb426660b5ed58b643d9e7dd5d50ba901f]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- arch/arm/mm/highmem.c | 3 +++
- arch/frv/mm/highmem.c | 2 ++
- arch/metag/mm/highmem.c | 4 +++-
- arch/microblaze/mm/highmem.c | 4 +++-
- arch/mips/mm/highmem.c | 5 ++++-
- arch/mn10300/include/asm/highmem.h | 3 +++
- arch/parisc/include/asm/cacheflush.h | 2 ++
- arch/powerpc/mm/highmem.c | 4 +++-
- arch/sparc/mm/highmem.c | 4 +++-
- arch/tile/mm/highmem.c | 3 ++-
- arch/x86/mm/highmem_32.c | 3 ++-
- arch/x86/mm/iomap_32.c | 2 ++
- arch/xtensa/mm/highmem.c | 2 ++
- include/linux/highmem.h | 2 ++
- include/linux/io-mapping.h | 2 ++
- 15 files changed, 38 insertions(+), 7 deletions(-)
-
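
Every hunk below follows the same mechanical pattern: the map path gains a preempt_disable() in front of pagefault_disable(), and the unmap path issues the enables in strict reverse order. As a generic sketch (the my_* names and fixmap helpers are hypothetical, not any one architecture's code):

#include <linux/highmem.h>
#include <linux/uaccess.h>

void *my_fixmap_map(struct page *page);	/* hypothetical arch hooks */
void my_fixmap_unmap(void *kvaddr);

void *my_kmap_atomic(struct page *page)
{
	preempt_disable();	/* explicit now, no longer implied by... */
	pagefault_disable();	/* ...pagefault_disable() */
	if (!PageHighMem(page))
		return page_address(page);
	return my_fixmap_map(page);
}

void my_kunmap_atomic(void *kvaddr)
{
	my_fixmap_unmap(kvaddr);
	pagefault_enable();
	preempt_enable();	/* strict reverse of the map path */
}
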
---- a/arch/arm/mm/highmem.c
-+++ b/arch/arm/mm/highmem.c
-@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
- void *kmap;
- int type;
-
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
- kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
- }
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
-@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
- int idx, type;
- struct page *page = pfn_to_page(pfn);
-
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
---- a/arch/frv/mm/highmem.c
-+++ b/arch/frv/mm/highmem.c
-@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
- unsigned long paddr;
- int type;
-
-+ preempt_disable();
- pagefault_disable();
- type = kmap_atomic_idx_push();
- paddr = page_to_phys(page);
-@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
- }
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
---- a/arch/metag/mm/highmem.c
-+++ b/arch/metag/mm/highmem.c
-@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
- unsigned long vaddr;
- int type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
- }
-
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
-@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
- unsigned long vaddr;
- int type;
-
-+ preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
---- a/arch/microblaze/mm/highmem.c
-+++ b/arch/microblaze/mm/highmem.c
-@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page
- unsigned long vaddr;
- int idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
- #endif
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
---- a/arch/mips/mm/highmem.c
-+++ b/arch/mips/mm/highmem.c
-@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
- unsigned long vaddr;
- int idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)
-
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
- #endif
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
-@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
- unsigned long vaddr;
- int idx, type;
-
-+ preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
---- a/arch/mn10300/include/asm/highmem.h
-+++ b/arch/mn10300/include/asm/highmem.h
-@@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct p
- unsigned long vaddr;
- int idx, type;
-
-+ preempt_disable();
- pagefault_disable();
- if (page < highmem_start_page)
- return page_address(page);
-@@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsig
-
- if (vaddr < FIXADDR_START) { /* FIXME */
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsig
-
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- #endif /* __KERNEL__ */
-
---- a/arch/parisc/include/asm/cacheflush.h
-+++ b/arch/parisc/include/asm/cacheflush.h
-@@ -142,6 +142,7 @@ static inline void kunmap(struct page *p
-
- static inline void *kmap_atomic(struct page *page)
- {
-+ preempt_disable();
- pagefault_disable();
- return page_address(page);
- }
-@@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void
- {
- flush_kernel_dcache_page_addr(addr);
- pagefault_enable();
-+ preempt_enable();
- }
-
- #define kmap_atomic_prot(page, prot) kmap_atomic(page)
---- a/arch/powerpc/mm/highmem.c
-+++ b/arch/powerpc/mm/highmem.c
-@@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page
- unsigned long vaddr;
- int idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr)
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr)
-
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
---- a/arch/sparc/mm/highmem.c
-+++ b/arch/sparc/mm/highmem.c
-@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page)
- unsigned long vaddr;
- long idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr)
-
- if (vaddr < FIXADDR_START) { // FIXME
- pagefault_enable();
-+ preempt_enable();
- return;
- }
-
-@@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr)
-
- kmap_atomic_idx_pop();
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
---- a/arch/tile/mm/highmem.c
-+++ b/arch/tile/mm/highmem.c
-@@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page
- int idx, type;
- pte_t *pte;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
-
- /* Avoid icache flushes by disallowing atomic executable mappings. */
-@@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
- }
-
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
---- a/arch/x86/mm/highmem_32.c
-+++ b/arch/x86/mm/highmem_32.c
-@@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page
- unsigned long vaddr;
- int idx, type;
-
-- /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-+ preempt_disable();
- pagefault_disable();
-
- if (!PageHighMem(page))
-@@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr)
- #endif
-
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
---- a/arch/x86/mm/iomap_32.c
-+++ b/arch/x86/mm/iomap_32.c
-@@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long
- unsigned long vaddr;
- int idx, type;
-
-+ preempt_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
-@@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr)
- }
-
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL_GPL(iounmap_atomic);
---- a/arch/xtensa/mm/highmem.c
-+++ b/arch/xtensa/mm/highmem.c
-@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
- enum fixed_addresses idx;
- unsigned long vaddr;
-
-+ preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr)
- }
-
- pagefault_enable();
-+ preempt_enable();
- }
- EXPORT_SYMBOL(__kunmap_atomic);
-
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -65,6 +65,7 @@ static inline void kunmap(struct page *p
-
- static inline void *kmap_atomic(struct page *page)
- {
-+ preempt_disable();
- pagefault_disable();
- return page_address(page);
- }
-@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct p
- static inline void __kunmap_atomic(void *addr)
- {
- pagefault_enable();
-+ preempt_enable();
- }
-
- #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
---- a/include/linux/io-mapping.h
-+++ b/include/linux/io-mapping.h
-@@ -141,6 +141,7 @@ static inline void __iomem *
- io_mapping_map_atomic_wc(struct io_mapping *mapping,
- unsigned long offset)
- {
-+ preempt_disable();
- pagefault_disable();
- return ((char __force __iomem *) mapping) + offset;
- }
-@@ -149,6 +150,7 @@ static inline void
- io_mapping_unmap_atomic(void __iomem *vaddr)
- {
- pagefault_enable();
-+ preempt_enable();
- }
-
- /* Non-atomic map/unmap */
diff --git a/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
index caabe01f3fd38..6bec5562dd270 100644
--- a/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ b/patches/0005-futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -738,7 +738,9 @@ void exit_pi_state_list(struct task_stru
+@@ -815,7 +815,9 @@ void exit_pi_state_list(struct task_stru
* task still owns the PI-state:
*/
if (head->next != next) {
diff --git a/patches/0005-gpio-omap-rework-omap_gpio_request-to-touch-only-gpi.patch b/patches/0005-gpio-omap-rework-omap_gpio_request-to-touch-only-gpi.patch
deleted file mode 100644
index 263e701928bdb..0000000000000
--- a/patches/0005-gpio-omap-rework-omap_gpio_request-to-touch-only-gpi.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From 17782f9dede8027dc6a26270b2cfec389fe39374 Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@linaro.org>
-Date: Mon, 13 Jul 2015 17:08:11 +0300
-Subject: [PATCH 05/21] gpio: omap: rework omap_gpio_request to touch only gpio
- specific registers
-
-commit c3518172129a60a1f3071e61a8a4ffc50c7b2a68 upstream
-
-The GPIO Chip and GPIO IRQ Chip functionality are essentially orthogonal,
-so GPIO Chip implementation shouldn't touch GPIO IRQ specific registers
-and vice versa.
-
-Hence, rework omap_gpio_request:
-- don't reset GPIO IRQ triggering type to IRQ_TYPE_NONE, because
- GPIO irqchip should be responsible for that;
-- call omap_enable_gpio_module directly, as all needed checks are already
- present inside it.
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@linaro.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 9 +--------
- 1 file changed, 1 insertion(+), 8 deletions(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -668,14 +668,7 @@ static int omap_gpio_request(struct gpio
- pm_runtime_get_sync(bank->dev);
-
- spin_lock_irqsave(&bank->lock, flags);
-- /* Set trigger to none. You need to enable the desired trigger with
-- * request_irq() or set_irq_type(). Only do this if the IRQ line has
-- * not already been requested.
-- */
-- if (!LINE_USED(bank->irq_usage, offset)) {
-- omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-- omap_enable_gpio_module(bank, offset);
-- }
-+ omap_enable_gpio_module(bank, offset);
- bank->mod_usage |= BIT(offset);
- spin_unlock_irqrestore(&bank->lock, flags);
-
diff --git a/patches/0005-mips-kmap_coherent-relies-on-disabled-preemption.patch b/patches/0005-mips-kmap_coherent-relies-on-disabled-preemption.patch
deleted file mode 100644
index 694fad25ad801..0000000000000
--- a/patches/0005-mips-kmap_coherent-relies-on-disabled-preemption.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:10 +0200
-Subject: sched/preempt, mm/kmap, MIPS: Disable preemption in kmap_coherent() explicitly
-
-k(un)map_coherent relies on pagefault_disable() to also disable
-preemption.
-
-Let's make this explicit, to prepare for pagefault_disable() not
-touching preemption anymore.
-
-This patch is based on a patch by Yang Shi on the -rt tree:
-"k{un}map_coherent are just called when cpu_has_dc_aliases == 1 with VIPT
-cache. However, actually, the most modern MIPS processors have PIPT dcache
-without dcache alias issue. In such case, k{un}map_atomic will be called
-with preempt enabled."
-
-[upstream commit ce01948eb85da733558fa77c2a554144a57ab0fb]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- arch/mips/mm/init.c | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/arch/mips/mm/init.c
-+++ b/arch/mips/mm/init.c
-@@ -90,6 +90,7 @@ static void *__kmap_pgprot(struct page *
-
- BUG_ON(Page_dcache_dirty(page));
-
-+ preempt_disable();
- pagefault_disable();
- idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
- idx += in_interrupt() ? FIX_N_COLOURS : 0;
-@@ -152,6 +153,7 @@ void kunmap_coherent(void)
- write_c0_entryhi(old_ctx);
- local_irq_restore(flags);
- pagefault_enable();
-+ preempt_enable();
- }
-
- void copy_user_highpage(struct page *to, struct page *from,
diff --git a/patches/0006-gpio-omap-rework-omap_gpio_irq_startup-to-handle-cur.patch b/patches/0006-gpio-omap-rework-omap_gpio_irq_startup-to-handle-cur.patch
deleted file mode 100644
index 5180d8781157d..0000000000000
--- a/patches/0006-gpio-omap-rework-omap_gpio_irq_startup-to-handle-cur.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From 37652e77696d14c5f8b0130441651418e263822d Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@linaro.org>
-Date: Mon, 13 Jul 2015 17:08:12 +0300
-Subject: [PATCH 06/21] gpio: omap: rework omap_gpio_irq_startup to handle
- current pin state properly
-
-commit 121dcb760426ca67ee90a8b2db6a75eee010f8e3 upstream
-
-omap_gpio_irq_startup() can be called at a time when:
-- the corresponding GPIO has already been requested, in which case it must
-already be configured as input. If not, return -EINVAL and do not try to
-reconfigure it, as that could be unsafe.
-- the corresponding GPIO is free: reconfigure it as input.
-
-In addition, call omap_enable_gpio_module directly as all needed
-checks are already present inside it.
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@linaro.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 14 +++++++++++++-
- 1 file changed, 13 insertions(+), 1 deletion(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -790,11 +790,23 @@ static unsigned int omap_gpio_irq_startu
- pm_runtime_get_sync(bank->dev);
-
- spin_lock_irqsave(&bank->lock, flags);
-- omap_gpio_init_irq(bank, offset);
-+
-+ if (!LINE_USED(bank->mod_usage, offset))
-+ omap_set_gpio_direction(bank, offset, 1);
-+ else if (!omap_gpio_is_input(bank, offset))
-+ goto err;
-+ omap_enable_gpio_module(bank, offset);
-+ bank->irq_usage |= BIT(offset);
-+
- spin_unlock_irqrestore(&bank->lock, flags);
- omap_gpio_unmask_irq(d);
-
- return 0;
-+err:
-+ spin_unlock_irqrestore(&bank->lock, flags);
-+ if (!BANK_USED(bank))
-+ pm_runtime_put(bank->dev);
-+ return -EINVAL;
- }
-
- static void omap_gpio_irq_shutdown(struct irq_data *d)
diff --git a/patches/0006-mm-use-pagefault_disable-to-check-for-disabled-pagef.patch b/patches/0006-mm-use-pagefault_disable-to-check-for-disabled-pagef.patch
deleted file mode 100644
index 65643180719e1..0000000000000
--- a/patches/0006-mm-use-pagefault_disable-to-check-for-disabled-pagef.patch
+++ /dev/null
@@ -1,646 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:11 +0200
-Subject: mm/fault, arch: Use pagefault_disable() to check for disabled pagefaults in the handler
-
-Introduce faulthandler_disabled() and use it to check for irq context and
-disabled pagefaults (via pagefault_disable()) in the pagefault handlers.
-
-Please note that we keep the in_atomic() checks in place, to detect
-whether we are in irq context (in which case preemption is always
-properly disabled).
-
-In contrast, preempt_disable() should never be used to disable pagefaults.
-With !CONFIG_PREEMPT_COUNT, preempt_disable() doesn't modify the preempt
-counter, and therefore the result of in_atomic() differs.
-We validate that condition by using might_fault() checks when calling
-might_sleep().
-
-Therefore, add a comment to faulthandler_disabled(), describing why this
-is needed.
-
-faulthandler_disabled() and pagefault_disable() are defined in
-linux/uaccess.h, so let's properly add that include to all relevant files.
-
-This patch is based on a patch from Thomas Gleixner.
-
-[upstream commit 70ffdb9393a7264a069265edded729078dcf0425]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- arch/alpha/mm/fault.c | 5 ++---
- arch/arc/mm/fault.c | 2 +-
- arch/arm/mm/fault.c | 2 +-
- arch/arm64/mm/fault.c | 2 +-
- arch/avr32/mm/fault.c | 4 ++--
- arch/cris/mm/fault.c | 6 +++---
- arch/frv/mm/fault.c | 4 ++--
- arch/ia64/mm/fault.c | 4 ++--
- arch/m32r/mm/fault.c | 8 ++++----
- arch/m68k/mm/fault.c | 4 ++--
- arch/metag/mm/fault.c | 2 +-
- arch/microblaze/mm/fault.c | 8 ++++----
- arch/mips/mm/fault.c | 4 ++--
- arch/mn10300/mm/fault.c | 4 ++--
- arch/nios2/mm/fault.c | 2 +-
- arch/parisc/kernel/traps.c | 4 ++--
- arch/parisc/mm/fault.c | 4 ++--
- arch/powerpc/mm/fault.c | 9 +++++----
- arch/s390/mm/fault.c | 2 +-
- arch/score/mm/fault.c | 3 ++-
- arch/sh/mm/fault.c | 5 +++--
- arch/sparc/mm/fault_32.c | 4 ++--
- arch/sparc/mm/fault_64.c | 4 ++--
- arch/sparc/mm/init_64.c | 2 +-
- arch/tile/mm/fault.c | 4 ++--
- arch/um/kernel/trap.c | 4 ++--
- arch/unicore32/mm/fault.c | 2 +-
- arch/x86/mm/fault.c | 5 +++--
- arch/xtensa/mm/fault.c | 4 ++--
- include/linux/uaccess.h | 12 ++++++++++++
- 30 files changed, 72 insertions(+), 57 deletions(-)
-
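
The helper itself lands in include/linux/uaccess.h (visible in the diffstat; that hunk falls outside the lines quoted here). Its upstream definition (commit 70ffdb9393a7) reduces to one test, and every handler below converts to it the same way - sketched here for reference:

/* From include/linux/uaccess.h in the upstream commit: faults must
 * not be taken when pagefaults were explicitly disabled, or when we
 * run in irq/atomic context (where preemption is properly counted). */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

/* The recurring per-arch conversion inside do_page_fault(): */
	if (faulthandler_disabled() || !mm)
		goto no_context;	/* resolve via exception fixup */
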
---- a/arch/alpha/mm/fault.c
-+++ b/arch/alpha/mm/fault.c
-@@ -23,8 +23,7 @@
- #include <linux/smp.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
--
--#include <asm/uaccess.h>
-+#include <linux/uaccess.h>
-
- extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
-
-@@ -107,7 +106,7 @@ do_page_fault(unsigned long address, uns
-
- /* If we're in an interrupt context, or have no user context,
- we must not take the fault. */
-- if (!mm || in_atomic())
-+ if (!mm || faulthandler_disabled())
- goto no_context;
-
- #ifdef CONFIG_ALPHA_LARGE_VMALLOC
---- a/arch/arc/mm/fault.c
-+++ b/arch/arc/mm/fault.c
-@@ -86,7 +86,7 @@ void do_page_fault(unsigned long address
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
---- a/arch/arm/mm/fault.c
-+++ b/arch/arm/mm/fault.c
-@@ -276,7 +276,7 @@ do_page_fault(unsigned long addr, unsign
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
---- a/arch/arm64/mm/fault.c
-+++ b/arch/arm64/mm/fault.c
-@@ -211,7 +211,7 @@ static int __kprobes do_page_fault(unsig
- * If we're in an interrupt or have no user context, we must not take
- * the fault.
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
---- a/arch/avr32/mm/fault.c
-+++ b/arch/avr32/mm/fault.c
-@@ -14,11 +14,11 @@
- #include <linux/pagemap.h>
- #include <linux/kdebug.h>
- #include <linux/kprobes.h>
-+#include <linux/uaccess.h>
-
- #include <asm/mmu_context.h>
- #include <asm/sysreg.h>
- #include <asm/tlb.h>
--#include <asm/uaccess.h>
-
- #ifdef CONFIG_KPROBES
- static inline int notify_page_fault(struct pt_regs *regs, int trap)
-@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned l
- * If we're in an interrupt or have no user context, we must
- * not take the fault...
- */
-- if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
-+ if (faulthandler_disabled() || !mm || regs->sr & SYSREG_BIT(GM))
- goto no_context;
-
- local_irq_enable();
---- a/arch/cris/mm/fault.c
-+++ b/arch/cris/mm/fault.c
-@@ -8,7 +8,7 @@
- #include <linux/interrupt.h>
- #include <linux/module.h>
- #include <linux/wait.h>
--#include <asm/uaccess.h>
-+#include <linux/uaccess.h>
- #include <arch/system.h>
-
- extern int find_fixup_code(struct pt_regs *);
-@@ -109,11 +109,11 @@ do_page_fault(unsigned long address, str
- info.si_code = SEGV_MAPERR;
-
- /*
-- * If we're in an interrupt or "atomic" operation or have no
-+ * If we're in an interrupt, have pagefaults disabled or have no
- * user context, we must not take the fault.
- */
-
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
---- a/arch/frv/mm/fault.c
-+++ b/arch/frv/mm/fault.c
-@@ -19,9 +19,9 @@
- #include <linux/kernel.h>
- #include <linux/ptrace.h>
- #include <linux/hardirq.h>
-+#include <linux/uaccess.h>
-
- #include <asm/pgtable.h>
--#include <asm/uaccess.h>
- #include <asm/gdb-stub.h>
-
- /*****************************************************************************/
-@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datamm
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(__frame))
---- a/arch/ia64/mm/fault.c
-+++ b/arch/ia64/mm/fault.c
-@@ -11,10 +11,10 @@
- #include <linux/kprobes.h>
- #include <linux/kdebug.h>
- #include <linux/prefetch.h>
-+#include <linux/uaccess.h>
-
- #include <asm/pgtable.h>
- #include <asm/processor.h>
--#include <asm/uaccess.h>
-
- extern int die(char *, struct pt_regs *, long);
-
-@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long addres
- /*
- * If we're in an interrupt or have no user context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- #ifdef CONFIG_VIRTUAL_MEM_MAP
---- a/arch/m32r/mm/fault.c
-+++ b/arch/m32r/mm/fault.c
-@@ -24,9 +24,9 @@
- #include <linux/vt_kern.h> /* For unblank_screen() */
- #include <linux/highmem.h>
- #include <linux/module.h>
-+#include <linux/uaccess.h>
-
- #include <asm/m32r.h>
--#include <asm/uaccess.h>
- #include <asm/hardirq.h>
- #include <asm/mmu_context.h>
- #include <asm/tlbflush.h>
-@@ -111,10 +111,10 @@ asmlinkage void do_page_fault(struct pt_
- mm = tsk->mm;
-
- /*
-- * If we're in an interrupt or have no user context or are running in an
-- * atomic region then we must not take the fault..
-+ * If we're in an interrupt or have no user context or have pagefaults
-+ * disabled then we must not take the fault.
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto bad_area_nosemaphore;
-
- if (error_code & ACE_USERMODE)
---- a/arch/m68k/mm/fault.c
-+++ b/arch/m68k/mm/fault.c
-@@ -10,10 +10,10 @@
- #include <linux/ptrace.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
-+#include <linux/uaccess.h>
-
- #include <asm/setup.h>
- #include <asm/traps.h>
--#include <asm/uaccess.h>
- #include <asm/pgalloc.h>
-
- extern void die_if_kernel(char *, struct pt_regs *, long);
-@@ -81,7 +81,7 @@ int do_page_fault(struct pt_regs *regs,
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
---- a/arch/metag/mm/fault.c
-+++ b/arch/metag/mm/fault.c
-@@ -105,7 +105,7 @@ int do_page_fault(struct pt_regs *regs,
-
- mm = tsk->mm;
-
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
---- a/arch/microblaze/mm/fault.c
-+++ b/arch/microblaze/mm/fault.c
-@@ -107,14 +107,14 @@ void do_page_fault(struct pt_regs *regs,
- if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
- is_write = 0;
-
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(faulthandler_disabled() || !mm)) {
- if (kernel_mode(regs))
- goto bad_area_nosemaphore;
-
-- /* in_atomic() in user mode is really bad,
-+ /* faulthandler_disabled() in user mode is really bad,
- as is current->mm == NULL. */
-- pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
-- mm);
-+ pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",
-+ mm);
- pr_emerg("r15 = %lx MSR = %lx\n",
- regs->r15, regs->msr);
- die("Weird page fault", regs, SIGSEGV);
---- a/arch/mips/mm/fault.c
-+++ b/arch/mips/mm/fault.c
-@@ -21,10 +21,10 @@
- #include <linux/module.h>
- #include <linux/kprobes.h>
- #include <linux/perf_event.h>
-+#include <linux/uaccess.h>
-
- #include <asm/branch.h>
- #include <asm/mmu_context.h>
--#include <asm/uaccess.h>
- #include <asm/ptrace.h>
- #include <asm/highmem.h> /* For VMALLOC_END */
- #include <linux/kdebug.h>
-@@ -94,7 +94,7 @@ static void __kprobes __do_page_fault(st
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto bad_area_nosemaphore;
-
- if (user_mode(regs))
---- a/arch/mn10300/mm/fault.c
-+++ b/arch/mn10300/mm/fault.c
-@@ -23,8 +23,8 @@
- #include <linux/interrupt.h>
- #include <linux/init.h>
- #include <linux/vt_kern.h> /* For unblank_screen() */
-+#include <linux/uaccess.h>
-
--#include <asm/uaccess.h>
- #include <asm/pgalloc.h>
- #include <asm/hardirq.h>
- #include <asm/cpu-regs.h>
-@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
---- a/arch/nios2/mm/fault.c
-+++ b/arch/nios2/mm/fault.c
-@@ -77,7 +77,7 @@ asmlinkage void do_page_fault(struct pt_
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto bad_area_nosemaphore;
-
- if (user_mode(regs))
---- a/arch/parisc/kernel/traps.c
-+++ b/arch/parisc/kernel/traps.c
-@@ -26,9 +26,9 @@
- #include <linux/console.h>
- #include <linux/bug.h>
- #include <linux/ratelimit.h>
-+#include <linux/uaccess.h>
-
- #include <asm/assembly.h>
--#include <asm/uaccess.h>
- #include <asm/io.h>
- #include <asm/irq.h>
- #include <asm/traps.h>
-@@ -796,7 +796,7 @@ void notrace handle_interruption(int cod
- * unless pagefault_disable() was called before.
- */
-
-- if (fault_space == 0 && !in_atomic())
-+ if (fault_space == 0 && !faulthandler_disabled())
- {
- pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
- parisc_terminate("Kernel Fault", regs, code, fault_address);
---- a/arch/parisc/mm/fault.c
-+++ b/arch/parisc/mm/fault.c
-@@ -15,8 +15,8 @@
- #include <linux/sched.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
-+#include <linux/uaccess.h>
-
--#include <asm/uaccess.h>
- #include <asm/traps.h>
-
- /* Various important other fields */
-@@ -207,7 +207,7 @@ void do_page_fault(struct pt_regs *regs,
- int fault;
- unsigned int flags;
-
-- if (in_atomic())
-+ if (pagefault_disabled())
- goto no_context;
-
- tsk = current;
---- a/arch/powerpc/mm/fault.c
-+++ b/arch/powerpc/mm/fault.c
-@@ -33,13 +33,13 @@
- #include <linux/ratelimit.h>
- #include <linux/context_tracking.h>
- #include <linux/hugetlb.h>
-+#include <linux/uaccess.h>
-
- #include <asm/firmware.h>
- #include <asm/page.h>
- #include <asm/pgtable.h>
- #include <asm/mmu.h>
- #include <asm/mmu_context.h>
--#include <asm/uaccess.h>
- #include <asm/tlbflush.h>
- #include <asm/siginfo.h>
- #include <asm/debug.h>
-@@ -272,15 +272,16 @@ int __kprobes do_page_fault(struct pt_re
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
-
-- if (in_atomic() || mm == NULL) {
-+ if (faulthandler_disabled() || mm == NULL) {
- if (!user_mode(regs)) {
- rc = SIGSEGV;
- goto bail;
- }
-- /* in_atomic() in user mode is really bad,
-+ /* faulthandler_disabled() in user mode is really bad,
- as is current->mm == NULL. */
- printk(KERN_EMERG "Page fault in user mode with "
-- "in_atomic() = %d mm = %p\n", in_atomic(), mm);
-+ "faulthandler_disabled() = %d mm = %p\n",
-+ faulthandler_disabled(), mm);
- printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
- regs->nip, regs->msr);
- die("Weird page fault", regs, SIGSEGV);
---- a/arch/s390/mm/fault.c
-+++ b/arch/s390/mm/fault.c
-@@ -399,7 +399,7 @@ static inline int do_exception(struct pt
- * user context.
- */
- fault = VM_FAULT_BADCONTEXT;
-- if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
-+ if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
- goto out;
-
- address = trans_exc_code & __FAIL_ADDR_MASK;
---- a/arch/score/mm/fault.c
-+++ b/arch/score/mm/fault.c
-@@ -34,6 +34,7 @@
- #include <linux/string.h>
- #include <linux/types.h>
- #include <linux/ptrace.h>
-+#include <linux/uaccess.h>
-
- /*
- * This routine handles page faults. It determines the address,
-@@ -73,7 +74,7 @@ asmlinkage void do_page_fault(struct pt_
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (pagefault_disabled() || !mm)
- goto bad_area_nosemaphore;
-
- if (user_mode(regs))
---- a/arch/sh/mm/fault.c
-+++ b/arch/sh/mm/fault.c
-@@ -17,6 +17,7 @@
- #include <linux/kprobes.h>
- #include <linux/perf_event.h>
- #include <linux/kdebug.h>
-+#include <linux/uaccess.h>
- #include <asm/io_trapped.h>
- #include <asm/mmu_context.h>
- #include <asm/tlbflush.h>
-@@ -438,9 +439,9 @@ asmlinkage void __kprobes do_page_fault(
-
- /*
- * If we're in an interrupt, have no user context or are running
-- * in an atomic region then we must not take the fault:
-+ * with pagefaults disabled then we must not take the fault:
- */
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(faulthandler_disabled() || !mm)) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
---- a/arch/sparc/mm/fault_32.c
-+++ b/arch/sparc/mm/fault_32.c
-@@ -21,6 +21,7 @@
- #include <linux/perf_event.h>
- #include <linux/interrupt.h>
- #include <linux/kdebug.h>
-+#include <linux/uaccess.h>
-
- #include <asm/page.h>
- #include <asm/pgtable.h>
-@@ -29,7 +30,6 @@
- #include <asm/setup.h>
- #include <asm/smp.h>
- #include <asm/traps.h>
--#include <asm/uaccess.h>
-
- #include "mm_32.h"
-
-@@ -196,7 +196,7 @@ asmlinkage void do_sparc_fault(struct pt
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (pagefault_disabled() || !mm)
- goto no_context;
-
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
---- a/arch/sparc/mm/fault_64.c
-+++ b/arch/sparc/mm/fault_64.c
-@@ -22,12 +22,12 @@
- #include <linux/kdebug.h>
- #include <linux/percpu.h>
- #include <linux/context_tracking.h>
-+#include <linux/uaccess.h>
-
- #include <asm/page.h>
- #include <asm/pgtable.h>
- #include <asm/openprom.h>
- #include <asm/oplib.h>
--#include <asm/uaccess.h>
- #include <asm/asi.h>
- #include <asm/lsu.h>
- #include <asm/sections.h>
-@@ -330,7 +330,7 @@ asmlinkage void __kprobes do_sparc64_fau
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto intr_or_no_mm;
-
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
---- a/arch/sparc/mm/init_64.c
-+++ b/arch/sparc/mm/init_64.c
-@@ -2738,7 +2738,7 @@ void hugetlb_setup(struct pt_regs *regs)
- struct mm_struct *mm = current->mm;
- struct tsb_config *tp;
-
-- if (in_atomic() || !mm) {
-+ if (faulthandler_disabled() || !mm) {
- const struct exception_table_entry *entry;
-
- entry = search_exception_tables(regs->tpc);
---- a/arch/tile/mm/fault.c
-+++ b/arch/tile/mm/fault.c
-@@ -354,9 +354,9 @@ static int handle_page_fault(struct pt_r
-
- /*
- * If we're in an interrupt, have no user context or are running in an
-- * atomic region then we must not take the fault.
-+ * region with pagefaults disabled then we must not take the fault.
- */
-- if (in_atomic() || !mm) {
-+ if (pagefault_disabled() || !mm) {
- vma = NULL; /* happy compiler */
- goto bad_area_nosemaphore;
- }
---- a/arch/um/kernel/trap.c
-+++ b/arch/um/kernel/trap.c
-@@ -35,10 +35,10 @@ int handle_page_fault(unsigned long addr
- *code_out = SEGV_MAPERR;
-
- /*
-- * If the fault was during atomic operation, don't take the fault, just
-+ * If the fault was with pagefaults disabled, don't take the fault, just
- * fail.
- */
-- if (in_atomic())
-+ if (faulthandler_disabled())
- goto out_nosemaphore;
-
- if (is_user)
---- a/arch/unicore32/mm/fault.c
-+++ b/arch/unicore32/mm/fault.c
-@@ -218,7 +218,7 @@ static int do_pf(unsigned long addr, uns
- * If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm)
-+ if (faulthandler_disabled() || !mm)
- goto no_context;
-
- if (user_mode(regs))
---- a/arch/x86/mm/fault.c
-+++ b/arch/x86/mm/fault.c
-@@ -13,6 +13,7 @@
- #include <linux/hugetlb.h> /* hstate_index_to_shift */
- #include <linux/prefetch.h> /* prefetchw */
- #include <linux/context_tracking.h> /* exception_enter(), ... */
-+#include <linux/uaccess.h> /* faulthandler_disabled() */
-
- #include <asm/traps.h> /* dotraplinkage, ... */
- #include <asm/pgalloc.h> /* pgd_*(), ... */
-@@ -1126,9 +1127,9 @@ static noinline void
-
- /*
- * If we're in an interrupt, have no user context or are running
-- * in an atomic region then we must not take the fault:
-+ * in a region with pagefaults disabled then we must not take the fault
- */
-- if (unlikely(in_atomic() || !mm)) {
-+ if (unlikely(faulthandler_disabled() || !mm)) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
---- a/arch/xtensa/mm/fault.c
-+++ b/arch/xtensa/mm/fault.c
-@@ -15,10 +15,10 @@
- #include <linux/mm.h>
- #include <linux/module.h>
- #include <linux/hardirq.h>
-+#include <linux/uaccess.h>
- #include <asm/mmu_context.h>
- #include <asm/cacheflush.h>
- #include <asm/hardirq.h>
--#include <asm/uaccess.h>
- #include <asm/pgalloc.h>
-
- DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
-@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
- /* If we're in an interrupt or have no user
- * context, we must not take the fault..
- */
-- if (in_atomic() || !mm) {
-+ if (faulthandler_disabled() || !mm) {
- bad_page_fault(regs, address, SIGSEGV);
- return;
- }
---- a/include/linux/uaccess.h
-+++ b/include/linux/uaccess.h
-@@ -59,6 +59,18 @@ static inline void pagefault_enable(void
- */
- #define pagefault_disabled() (current->pagefault_disabled != 0)
-
-+/*
-+ * The pagefault handler is in general disabled by pagefault_disable() or
-+ * when in irq context (via in_atomic()).
-+ *
-+ * This function should only be used by the fault handlers. Other users should
-+ * stick to pagefault_disabled().
-+ * Please NEVER use preempt_disable() to disable the fault handler. With
-+ * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
-+ * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
-+ */
-+#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
-+
- #ifndef ARCH_HAS_NOCACHE_UACCESS
-
- static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
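The faulthandler_disabled() definition quoted above is the crux of the series: pagefault_disabled() reflects only explicit pagefault_disable() calls, while faulthandler_disabled() also folds in in_atomic(). A minimal user-space C sketch of these counter semantics (the helper names mirror the kernel's, but the counters are plain globals standing in for per-task state):

    #include <stdio.h>
    #include <stdbool.h>

    /* Stand-ins for per-task state; in the kernel these live in
     * task_struct and the preempt counter respectively. */
    static int pagefault_disable_count; /* models current->pagefault_disabled */
    static int hardirq_depth;           /* models the in_atomic() component */

    static void pagefault_disable(void) { pagefault_disable_count++; }
    static void pagefault_enable(void)  { pagefault_disable_count--; }

    static bool pagefault_disabled(void)    { return pagefault_disable_count != 0; }
    static bool in_atomic(void)             { return hardirq_depth != 0; }
    static bool faulthandler_disabled(void) { return pagefault_disabled() || in_atomic(); }

    int main(void)
    {
        /* After the decoupling, pagefault_disable() no longer implies
         * in_atomic(); only faulthandler_disabled() sees both states. */
        pagefault_disable();
        printf("pf_disabled=%d in_atomic=%d faulthandler_disabled=%d\n",
               pagefault_disabled(), in_atomic(), faulthandler_disabled());
        pagefault_enable();

        hardirq_depth++; /* pretend we entered hard-irq context */
        printf("pf_disabled=%d in_atomic=%d faulthandler_disabled=%d\n",
               pagefault_disabled(), in_atomic(), faulthandler_disabled());
        hardirq_depth--;
        return 0;
    }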
diff --git a/patches/0007-drm-i915-use-pagefault_disabled-to-check-for-disable.patch b/patches/0007-drm-i915-use-pagefault_disabled-to-check-for-disable.patch
deleted file mode 100644
index fd0ac3bc496a5..0000000000000
--- a/patches/0007-drm-i915-use-pagefault_disabled-to-check-for-disable.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:12 +0200
-Subject: mm/fault, drm/i915: Use pagefault_disabled() to check for disabled pagefaults
-
-Now that the pagefault disabled counter is in place, we can replace
-the in_atomic() check with a pagefault_disabled() check.
-
-[upstream commit 32d8206725bcf6e3ce7832ac39e61a6ecfd558db]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -32,6 +32,7 @@
- #include "i915_trace.h"
- #include "intel_drv.h"
- #include <linux/dma_remapping.h>
-+#include <linux/uaccess.h>
-
- #define __EXEC_OBJECT_HAS_PIN (1<<31)
- #define __EXEC_OBJECT_HAS_FENCE (1<<30)
-@@ -465,7 +466,7 @@ i915_gem_execbuffer_relocate_entry(struc
- }
-
- /* We can't wait for rendering with pagefaults disabled */
-- if (obj->active && in_atomic())
-+ if (obj->active && pagefault_disabled())
- return -EFAULT;
-
- if (use_cpu_reloc(obj))
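The pattern the i915 hunk establishes, sketched as a runnable user-space model: a path that would have to sleep must bail out with -EFAULT when reached with pagefaults disabled, leaving the caller to retry from a sleepable context. obj->active and the relocation itself are reduced to stand-ins here:

    #include <stdio.h>

    static int pf_disable_count;
    static void pagefault_disable(void)  { pf_disable_count++; }
    static void pagefault_enable(void)   { pf_disable_count--; }
    static int  pagefault_disabled(void) { return pf_disable_count != 0; }

    /* Shape of the check above: waiting for rendering may sleep, so it
     * is refused outright when pagefaults are disabled. */
    static int relocate_entry(int obj_active)
    {
        if (obj_active && pagefault_disabled())
            return -14; /* -EFAULT: retried later from sleepable context */
        /* ... would wait for rendering and apply the relocation ... */
        return 0;
    }

    int main(void)
    {
        pagefault_disable();
        printf("atomic path:    %d\n", relocate_entry(1)); /* -14 */
        pagefault_enable();
        printf("sleepable path: %d\n", relocate_entry(1)); /* 0 */
        return 0;
    }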
diff --git a/patches/0007-gpio-omap-add-missed-spin_unlock_irqrestore-in-omap_.patch b/patches/0007-gpio-omap-add-missed-spin_unlock_irqrestore-in-omap_.patch
deleted file mode 100644
index e014a2f918578..0000000000000
--- a/patches/0007-gpio-omap-add-missed-spin_unlock_irqrestore-in-omap_.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From e63638dc9dbb27e4d18dd1101cbe2c153253e343 Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@linaro.org>
-Date: Mon, 13 Jul 2015 17:08:13 +0300
-Subject: [PATCH 07/21] gpio: omap: add missed spin_unlock_irqrestore in
- omap_gpio_irq_type
-
-Add missed spin_unlock_irqrestore in omap_gpio_irq_type when
-omap_set_gpio_triggering() fails.
-
-It fixes static checker warning:
-
- drivers/gpio/gpio-omap.c:523 omap_gpio_irq_type()
- warn: inconsistent returns 'spin_lock:&bank->lock'.
-
-This fixes commit:
-1562e4618ded ('gpio: omap: fix error handling in omap_gpio_irq_type')
-
-Reported-by: Javier Martinez Canillas <javier@dowhile0.org>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@linaro.org>
----
- drivers/gpio/gpio-omap.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -500,8 +500,10 @@ static int omap_gpio_irq_type(struct irq
-
- spin_lock_irqsave(&bank->lock, flags);
- retval = omap_set_gpio_triggering(bank, offset, type);
-- if (retval)
-+ if (retval) {
-+ spin_unlock_irqrestore(&bank->lock, flags);
- goto error;
-+ }
- omap_gpio_init_irq(bank, offset);
- if (!omap_gpio_is_input(bank, offset)) {
- spin_unlock_irqrestore(&bank->lock, flags);
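The bug class here is an exit path that leaves the lock held. A self-contained sketch of the fixed control flow, with a pthread spinlock standing in for bank->lock and a trivial stub for omap_set_gpio_triggering() (build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t lock; /* stand-in for bank->lock */

    static int set_triggering(int type) { return type < 0 ? -22 : 0; } /* -EINVAL */

    /* Mirrors the fixed omap_gpio_irq_type() flow: every path that
     * leaves the function, including the error goto, drops the lock. */
    static int irq_type(int type)
    {
        int retval;

        pthread_spin_lock(&lock);
        retval = set_triggering(type);
        if (retval) {
            pthread_spin_unlock(&lock); /* the unlock the fix adds */
            goto error;
        }
        pthread_spin_unlock(&lock);
        return 0;
    error:
        /* common error handling runs without the lock held */
        return retval;
    }

    int main(void)
    {
        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        printf("ok=%d err=%d\n", irq_type(1), irq_type(-1));
        pthread_spin_destroy(&lock);
        return 0;
    }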
diff --git a/patches/0008-futex-UP-futex_atomic_op_inuser-relies-on-disabled-p.patch b/patches/0008-futex-UP-futex_atomic_op_inuser-relies-on-disabled-p.patch
deleted file mode 100644
index 1701eac2505f8..0000000000000
--- a/patches/0008-futex-UP-futex_atomic_op_inuser-relies-on-disabled-p.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:13 +0200
-Subject: sched/preempt, futex: Disable preemption in UP futex_atomic_op_inuser() explicitly
-
-Let's explicitly disable/enable preemption in the !CONFIG_SMP version
-of futex_atomic_op_inuser, to prepare for pagefault_disable() not
-touching preemption anymore.
-
-Otherwise we might break mutual exclusion when relying on a get_user()/
-put_user() implementation.
-
-[upstream commit f3dae07e442a8131a5485b6a38db2aa22a7a48cf]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- include/asm-generic/futex.h | 5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
-
---- a/include/asm-generic/futex.h
-+++ b/include/asm-generic/futex.h
-@@ -8,8 +8,7 @@
- #ifndef CONFIG_SMP
- /*
- * The following implementation only for uniprocessor machines.
-- * For UP, it's relies on the fact that pagefault_disable() also disables
-- * preemption to ensure mutual exclusion.
-+ * It relies on preempt_disable() ensuring mutual exclusion.
- *
- */
-
-@@ -38,6 +37,7 @@ futex_atomic_op_inuser(int encoded_op, u
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
-+ preempt_disable();
- pagefault_disable();
-
- ret = -EFAULT;
-@@ -72,6 +72,7 @@ futex_atomic_op_inuser(int encoded_op, u
-
- out_pagefault_enable:
- pagefault_enable();
-+ preempt_enable();
-
- if (ret == 0) {
- switch (cmp) {
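What the series is preparing for: once pagefault_disable() stops touching the preempt count, the UP futex path has to pin preemption itself. A toy model of the resulting nesting discipline, with plain counters standing in for the kernel's preempt and pagefault state:

    #include <assert.h>
    #include <stdio.h>

    static int preempt_count_;   /* models the preempt counter on UP */
    static int pagefault_count_; /* models current->pagefault_disabled */

    static void preempt_disable(void)   { preempt_count_++; }
    static void preempt_enable(void)    { preempt_count_--; }
    static void pagefault_disable(void) { pagefault_count_++; }
    static void pagefault_enable(void)  { pagefault_count_--; }

    /* Shape of the fixed UP futex_atomic_op_inuser(): preemption is
     * disabled explicitly, in LIFO order around pagefault_disable(). */
    static int futex_op_model(void)
    {
        preempt_disable();
        pagefault_disable();
        assert(preempt_count_ && pagefault_count_); /* exclusion holds */
        /* ... the atomic user access would happen here ... */
        pagefault_enable();
        preempt_enable();
        return 0;
    }

    int main(void)
    {
        futex_op_model();
        printf("balanced: preempt=%d pagefault=%d\n",
               preempt_count_, pagefault_count_);
        return 0;
    }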
diff --git a/patches/0008-gpio-omap-prevent-module-from-being-unloaded-while-i.patch b/patches/0008-gpio-omap-prevent-module-from-being-unloaded-while-i.patch
deleted file mode 100644
index e20698e51eb1e..0000000000000
--- a/patches/0008-gpio-omap-prevent-module-from-being-unloaded-while-i.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 1e7405ae2fa7c7fefb2bc8dbb8365f99755c74ad Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@ti.com>
-Date: Mon, 13 Jul 2015 17:08:14 +0300
-Subject: [PATCH 08/21] gpio: omap: prevent module from being unloaded while in
- use
-
-The OMAP GPIO driver is allowed to be built as a loadable module, but it
-doesn't set the owner field in the GPIO chip structure. As a result, the
-module_get/put() API does not work and it's possible to unload the
-OMAP driver while it is in use:
-
- omap_gpio 48051000.gpio: REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED
-
-Hence, add missing configuration.
-
-Cc: Tony Lindgren <tony@atomide.com>
-Fixes: cac089f9026e ('gpio: omap: Allow building as a loadable module')
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -1187,6 +1187,7 @@ static int omap_gpio_probe(struct platfo
- bank->irq = res->start;
- bank->dev = dev;
- bank->chip.dev = dev;
-+ bank->chip.owner = THIS_MODULE;
- bank->dbck_flag = pdata->dbck_flag;
- bank->stride = pdata->bank_stride;
- bank->width = pdata->bank_width;
diff --git a/patches/0009-ARM-OMAP2-Drop-the-concept-of-certain-power-domains-.patch b/patches/0009-ARM-OMAP2-Drop-the-concept-of-certain-power-domains-.patch
index 77e1ccb57a472..bb5602e570643 100644
--- a/patches/0009-ARM-OMAP2-Drop-the-concept-of-certain-power-domains-.patch
+++ b/patches/0009-ARM-OMAP2-Drop-the-concept-of-certain-power-domains-.patch
@@ -85,7 +85,7 @@ Signed-off-by: Keerthy <j-keerthy@ti.com>
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
-@@ -67,7 +67,7 @@ struct gpio_bank {
+@@ -69,7 +69,7 @@ struct gpio_bank {
struct device *dev;
bool is_mpuio;
bool dbck_flag;
@@ -94,7 +94,7 @@ Signed-off-by: Keerthy <j-keerthy@ti.com>
bool context_valid;
int stride;
u32 width;
-@@ -1197,15 +1197,9 @@ static int omap_gpio_probe(struct platfo
+@@ -1208,15 +1208,9 @@ static int omap_gpio_probe(struct platfo
#ifdef CONFIG_OF_GPIO
bank->chip.of_node = of_node_get(node);
#endif
@@ -113,7 +113,7 @@ Signed-off-by: Keerthy <j-keerthy@ti.com>
}
if (bank->regs->set_dataout && bank->regs->clr_dataout)
-@@ -1346,7 +1340,7 @@ static int omap_gpio_runtime_resume(stru
+@@ -1373,7 +1367,7 @@ static int omap_gpio_runtime_resume(stru
* been initialised and so initialise it now. Also initialise
* the context loss count.
*/
@@ -122,7 +122,7 @@ Signed-off-by: Keerthy <j-keerthy@ti.com>
omap_gpio_init_context(bank);
if (bank->get_context_loss_count)
-@@ -1367,17 +1361,15 @@ static int omap_gpio_runtime_resume(stru
+@@ -1394,17 +1388,15 @@ static int omap_gpio_runtime_resume(stru
writel_relaxed(bank->context.risingdetect,
bank->base + bank->regs->risingdetect);
@@ -139,7 +139,7 @@ Signed-off-by: Keerthy <j-keerthy@ti.com>
- if (c != bank->context_loss_count) {
- omap_gpio_restore_context(bank);
- } else {
-- spin_unlock_irqrestore(&bank->lock, flags);
+- raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
+ spin_unlock_irqrestore(&bank->lock, flags);
@@ -147,7 +147,7 @@ Signed-off-by: Keerthy <j-keerthy@ti.com>
}
}
-@@ -1449,7 +1441,7 @@ void omap2_gpio_prepare_for_idle(int pwr
+@@ -1476,7 +1468,7 @@ void omap2_gpio_prepare_for_idle(int pwr
struct gpio_bank *bank;
list_for_each_entry(bank, &omap_gpio_list, node) {
@@ -156,7 +156,7 @@ Signed-off-by: Keerthy <j-keerthy@ti.com>
continue;
bank->power_mode = pwr_mode;
-@@ -1463,7 +1455,7 @@ void omap2_gpio_resume_after_idle(void)
+@@ -1490,7 +1482,7 @@ void omap2_gpio_resume_after_idle(void)
struct gpio_bank *bank;
list_for_each_entry(bank, &omap_gpio_list, node) {
diff --git a/patches/0009-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on-dis.patch b/patches/0009-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on-dis.patch
deleted file mode 100644
index ed74ddae816aa..0000000000000
--- a/patches/0009-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on-dis.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:14 +0200
-Subject: sched/preempt, futex: Disable preemption in UP futex_atomic_cmpxchg_inatomic() explicitly
-
-Let's explicitly disable/enable preemption in the !CONFIG_SMP version
-of futex_atomic_cmpxchg_inatomic, to prepare for pagefault_disable() not
-touching preemption anymore. This is needed for this function to be
-callable from both atomic and non-atomic context.
-
-Otherwise we might break mutual exclusion when relying on a get_user()/
-put_user() implementation.
-
-[upstream commit f3dae07e442a8131a5485b6a38db2aa22a7a48cf]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- include/asm-generic/futex.h | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/include/asm-generic/futex.h
-+++ b/include/asm-generic/futex.h
-@@ -107,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
- {
- u32 val;
-
-+ preempt_disable();
- if (unlikely(get_user(val, uaddr) != 0))
- return -EFAULT;
-
-@@ -114,6 +115,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
- return -EFAULT;
-
- *uval = val;
-+ preempt_enable();
-
- return 0;
- }
diff --git a/patches/0010-arm-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on.patch b/patches/0010-arm-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on.patch
deleted file mode 100644
index 48aebb2d4152e..0000000000000
--- a/patches/0010-arm-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:15 +0200
-Subject: sched/preempt, arm/futex: Disable preemption in UP futex_atomic_cmpxchg_inatomic() explicitly
-
-The !CONFIG_SMP implementation of futex_atomic_cmpxchg_inatomic()
-requires preemption to be disabled to guarantee mutual exclusion.
-Let's make this explicit.
-
-This patch is based on a patch by Sebastian Andrzej Siewior on the
--rt branch.
-
-[upstream commit 39919b01ae4c1949736b40b79e27178d0c0bc406]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- arch/arm/include/asm/futex.h | 3 +++
- 1 file changed, 3 insertions(+)
-
---- a/arch/arm/include/asm/futex.h
-+++ b/arch/arm/include/asm/futex.h
-@@ -93,6 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
-+ preempt_disable();
- __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
- "1: " TUSER(ldr) " %1, [%4]\n"
- " teq %1, %2\n"
-@@ -104,6 +105,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
- : "cc", "memory");
-
- *uval = val;
-+ preempt_enable();
-+
- return ret;
- }
-
diff --git a/patches/0010-gpio-omap-use-raw-locks-for-locking.patch b/patches/0010-gpio-omap-use-raw-locks-for-locking.patch
deleted file mode 100644
index 85a082b1de33a..0000000000000
--- a/patches/0010-gpio-omap-use-raw-locks-for-locking.patch
+++ /dev/null
@@ -1,338 +0,0 @@
-From 8b568939eb4098e3cf10ec0b49f3b082a332667d Mon Sep 17 00:00:00 2001
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 31 Aug 2015 18:52:19 +0300
-Subject: [PATCH 10/21] gpio: omap: use raw locks for locking
-
-This patch converts gpio_bank.lock from a spin_lock into a
-raw_spin_lock. The call path to access this lock is always under a
-raw_spin_lock, for instance:
-- __setup_irq() holds &desc->lock with irq off
- + __irq_set_trigger()
- + omap_gpio_irq_type()
-
-- handle_level_irq() (runs with irqs off therefore raw locks)
- + mask_ack_irq()
- + omap_gpio_mask_irq()
-
-This fixes the obvious backtrace on -RT. However, the locking vs. context
-problem is not fully fixed by this, and it is not limited to -RT:
-- omap_gpio_irq_type() is called with IRQ off and has a conditional
-  call to pm_runtime_get_sync() which may sleep. It may or may not
-  happen, but pm_runtime_get_sync() should not be called with irqs off.
-
-- omap_gpio_debounce() is holding the lock with IRQs off.
- + omap2_set_gpio_debounce()
- + clk_prepare_enable()
- + clk_prepare() this one might sleep.
- The number of users of gpiod_set_debounce() / gpio_set_debounce()
-  looks low, but this is still not good.
-
-Acked-by: Javier Martinez Canillas <javier@dowhile0.org>
-Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- drivers/gpio/gpio-omap.c | 80 +++++++++++++++++++++++------------------------
- 1 file changed, 40 insertions(+), 40 deletions(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -57,7 +57,7 @@ struct gpio_bank {
- u32 saved_datain;
- u32 level_mask;
- u32 toggle_mask;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- struct gpio_chip chip;
- struct clk *dbck;
- u32 mod_usage;
-@@ -498,7 +498,7 @@ static int omap_gpio_irq_type(struct irq
- if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->dev);
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- retval = omap_set_gpio_triggering(bank, offset, type);
- if (retval) {
- spin_unlock_irqrestore(&bank->lock, flags);
-@@ -506,11 +506,11 @@ static int omap_gpio_irq_type(struct irq
- }
- omap_gpio_init_irq(bank, offset);
- if (!omap_gpio_is_input(bank, offset)) {
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- retval = -EINVAL;
- goto error;
- }
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- __irq_set_handler_locked(d->irq, handle_level_irq);
-@@ -636,14 +636,14 @@ static int omap_set_gpio_wakeup(struct g
- return -EINVAL;
- }
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- if (enable)
- bank->context.wake_en |= gpio_bit;
- else
- bank->context.wake_en &= ~gpio_bit;
-
- writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -669,10 +669,10 @@ static int omap_gpio_request(struct gpio
- if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->dev);
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- omap_enable_gpio_module(bank, offset);
- bank->mod_usage |= BIT(offset);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -682,14 +682,14 @@ static void omap_gpio_free(struct gpio_c
- struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- bank->mod_usage &= ~(BIT(offset));
- if (!LINE_USED(bank->irq_usage, offset)) {
- omap_set_gpio_direction(bank, offset, 1);
- omap_clear_gpio_debounce(bank, offset);
- }
- omap_disable_gpio_module(bank, offset);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- /*
- * If this is the last gpio to be freed in the bank,
-@@ -791,7 +791,7 @@ static unsigned int omap_gpio_irq_startu
- if (!BANK_USED(bank))
- pm_runtime_get_sync(bank->dev);
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
-
- if (!LINE_USED(bank->mod_usage, offset))
- omap_set_gpio_direction(bank, offset, 1);
-@@ -800,12 +800,12 @@ static unsigned int omap_gpio_irq_startu
- omap_enable_gpio_module(bank, offset);
- bank->irq_usage |= BIT(offset);
-
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- omap_gpio_unmask_irq(d);
-
- return 0;
- err:
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- if (!BANK_USED(bank))
- pm_runtime_put(bank->dev);
- return -EINVAL;
-@@ -817,7 +817,7 @@ static void omap_gpio_irq_shutdown(struc
- unsigned long flags;
- unsigned offset = d->hwirq;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- bank->irq_usage &= ~(BIT(offset));
- omap_set_gpio_irqenable(bank, offset, 0);
- omap_clear_gpio_irqstatus(bank, offset);
-@@ -825,7 +825,7 @@ static void omap_gpio_irq_shutdown(struc
- if (!LINE_USED(bank->mod_usage, offset))
- omap_clear_gpio_debounce(bank, offset);
- omap_disable_gpio_module(bank, offset);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- /*
- * If this is the last IRQ to be freed in the bank,
-@@ -849,10 +849,10 @@ static void omap_gpio_mask_irq(struct ir
- unsigned offset = d->hwirq;
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- omap_set_gpio_irqenable(bank, offset, 0);
- omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- }
-
- static void omap_gpio_unmask_irq(struct irq_data *d)
-@@ -862,7 +862,7 @@ static void omap_gpio_unmask_irq(struct
- u32 trigger = irqd_get_trigger_type(d);
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- if (trigger)
- omap_set_gpio_triggering(bank, offset, trigger);
-
-@@ -874,7 +874,7 @@ static void omap_gpio_unmask_irq(struct
- }
-
- omap_set_gpio_irqenable(bank, offset, 1);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- }
-
- /*---------------------------------------------------------------------*/
-@@ -887,9 +887,9 @@ static int omap_mpuio_suspend_noirq(stru
- OMAP_MPUIO_GPIO_MASKIT / bank->stride;
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -902,9 +902,9 @@ static int omap_mpuio_resume_noirq(struc
- OMAP_MPUIO_GPIO_MASKIT / bank->stride;
- unsigned long flags;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- writel_relaxed(bank->context.wake_en, mask_reg);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -950,9 +950,9 @@ static int omap_gpio_get_direction(struc
-
- bank = container_of(chip, struct gpio_bank, chip);
- reg = bank->base + bank->regs->direction;
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- dir = !!(readl_relaxed(reg) & BIT(offset));
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return dir;
- }
-
-@@ -962,9 +962,9 @@ static int omap_gpio_input(struct gpio_c
- unsigned long flags;
-
- bank = container_of(chip, struct gpio_bank, chip);
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- omap_set_gpio_direction(bank, offset, 1);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
-
-@@ -986,10 +986,10 @@ static int omap_gpio_output(struct gpio_
- unsigned long flags;
-
- bank = container_of(chip, struct gpio_bank, chip);
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- bank->set_dataout(bank, offset, value);
- omap_set_gpio_direction(bank, offset, 0);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
-
-@@ -1001,9 +1001,9 @@ static int omap_gpio_debounce(struct gpi
-
- bank = container_of(chip, struct gpio_bank, chip);
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- omap2_set_gpio_debounce(bank, offset, debounce);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -1014,9 +1014,9 @@ static void omap_gpio_set(struct gpio_ch
- unsigned long flags;
-
- bank = container_of(chip, struct gpio_bank, chip);
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
- bank->set_dataout(bank, offset, value);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- }
-
- /*---------------------------------------------------------------------*/
-@@ -1207,7 +1207,7 @@ static int omap_gpio_probe(struct platfo
- else
- bank->set_dataout = omap_set_gpio_dataout_mask;
-
-- spin_lock_init(&bank->lock);
-+ raw_spin_lock_init(&bank->lock);
-
- /* Static mapping, never released */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-@@ -1265,7 +1265,7 @@ static int omap_gpio_runtime_suspend(str
- unsigned long flags;
- u32 wake_low, wake_hi;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
-
- /*
- * Only edges can generate a wakeup event to the PRCM.
-@@ -1318,7 +1318,7 @@ static int omap_gpio_runtime_suspend(str
- bank->get_context_loss_count(bank->dev);
-
- omap_gpio_dbck_disable(bank);
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
-@@ -1333,7 +1333,7 @@ static int omap_gpio_runtime_resume(stru
- unsigned long flags;
- int c;
-
-- spin_lock_irqsave(&bank->lock, flags);
-+ raw_spin_lock_irqsave(&bank->lock, flags);
-
- /*
- * On the first resume during the probe, the context has not
-@@ -1368,13 +1368,13 @@ static int omap_gpio_runtime_resume(stru
- if (c != bank->context_loss_count) {
- omap_gpio_restore_context(bank);
- } else {
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
- }
-
- if (!bank->workaround_enabled) {
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
- }
-
-@@ -1429,7 +1429,7 @@ static int omap_gpio_runtime_resume(stru
- }
-
- bank->workaround_enabled = false;
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
-
- return 0;
- }
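The call chains in the changelog are why the conversion is mandatory on -RT, where a plain spinlock_t may sleep and therefore must not be taken while a raw lock is already held. A toy model of that nesting rule (no real lockdep, just an assertion over a held-raw-locks counter):

    #include <assert.h>
    #include <stdio.h>

    /* On PREEMPT_RT a spinlock_t can sleep, so it must not be acquired
     * while a raw_spinlock_t is held (e.g. under desc->lock in an irq
     * handler). The enum and rule below are a sketch, not real lockdep. */
    enum lock_class { RAW_SPINLOCK, SLEEPING_SPINLOCK };

    static int raw_held; /* raw locks currently held by this "CPU" */

    static void lock(enum lock_class c)
    {
        if (c == SLEEPING_SPINLOCK)
            assert(raw_held == 0); /* would be sleep-in-atomic on -RT */
        else
            raw_held++;
    }

    static void unlock(enum lock_class c)
    {
        if (c == RAW_SPINLOCK)
            raw_held--;
    }

    int main(void)
    {
        /* Chain from the changelog: handle_level_irq() takes the raw
         * desc->lock, then omap_gpio_mask_irq() takes bank->lock, so
         * bank->lock has to be raw as well: */
        lock(RAW_SPINLOCK);   /* desc->lock */
        lock(RAW_SPINLOCK);   /* bank->lock after the conversion */
        unlock(RAW_SPINLOCK);
        unlock(RAW_SPINLOCK);
        printf("nesting ok\n");
        return 0;
    }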
diff --git a/patches/0011-arm-futex-UP-futex_atomic_op_inuser-relies-on-disabl.patch b/patches/0011-arm-futex-UP-futex_atomic_op_inuser-relies-on-disabl.patch
deleted file mode 100644
index e837433a78789..0000000000000
--- a/patches/0011-arm-futex-UP-futex_atomic_op_inuser-relies-on-disabl.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:16 +0200
-Subject: sched/preempt, arm/futex: Disable preemption in UP futex_atomic_op_inuser() explicitly
-
-The !CONFIG_SMP implementation of futex_atomic_op_inuser() seems to rely
-on disabled preemption to guarantee mutual exclusion.
-
-From commit e589ed23dd27:
- "For UP it's enough to disable preemption to ensure mutual exclusion..."
-From the code itself:
- "!SMP, we can work around lack of atomic ops by disabling preemption"
-
-Let's make this explicit, to prepare for pagefault_disable() not
-touching preemption anymore.
-
-[upstream commit 388b0e0adbc98a1b12a077dc92851a3ce016db42]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- arch/arm/include/asm/futex.h | 10 ++++++++--
- 1 file changed, 8 insertions(+), 2 deletions(-)
-
---- a/arch/arm/include/asm/futex.h
-+++ b/arch/arm/include/asm/futex.h
-@@ -127,7 +127,10 @@ futex_atomic_op_inuser (int encoded_op,
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
-- pagefault_disable(); /* implies preempt_disable() */
-+#ifndef CONFIG_SMP
-+ preempt_disable();
-+#endif
-+ pagefault_disable();
-
- switch (op) {
- case FUTEX_OP_SET:
-@@ -149,7 +152,10 @@ futex_atomic_op_inuser (int encoded_op,
- ret = -ENOSYS;
- }
-
-- pagefault_enable(); /* subsumes preempt_enable() */
-+ pagefault_enable();
-+#ifndef CONFIG_SMP
-+ preempt_enable();
-+#endif
-
- if (!ret) {
- switch (cmp) {
diff --git a/patches/0011-gpio-omap-Fix-missing-raw-locks-conversion.patch b/patches/0011-gpio-omap-Fix-missing-raw-locks-conversion.patch
deleted file mode 100644
index 738ab126a6977..0000000000000
--- a/patches/0011-gpio-omap-Fix-missing-raw-locks-conversion.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From 27934f278560ff4a800439cd01a102a65e622801 Mon Sep 17 00:00:00 2001
-From: Axel Lin <axel.lin@ingics.com>
-Date: Mon, 31 Aug 2015 18:52:20 +0300
-Subject: [PATCH 11/21] gpio: omap: Fix missing raw locks conversion
-
-Fix the following build warning:
- CC drivers/gpio/gpio-omap.o
-drivers/gpio/gpio-omap.c: In function 'omap_gpio_irq_type':
-drivers/gpio/gpio-omap.c:504:3: warning: passing argument 1 of 'spin_unlock_irqrestore' from incompatible pointer type [enabled by default]
-include/linux/spinlock.h:360:29: note: expected 'struct spinlock_t *' but argument is of type 'struct raw_spinlock_t *'
-
-Fixes: commit 4dbada2be460 ("gpio: omap: use raw locks for locking")
-Signed-off-by: Axel Lin <axel.lin@ingics.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- drivers/gpio/gpio-omap.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -501,7 +501,7 @@ static int omap_gpio_irq_type(struct irq
- raw_spin_lock_irqsave(&bank->lock, flags);
- retval = omap_set_gpio_triggering(bank, offset, type);
- if (retval) {
-- spin_unlock_irqrestore(&bank->lock, flags);
-+ raw_spin_unlock_irqrestore(&bank->lock, flags);
- goto error;
- }
- omap_gpio_init_irq(bank, offset);
diff --git a/patches/0012-futex-clarify-that-preemption-doesn-t-have-to-be-dis.patch b/patches/0012-futex-clarify-that-preemption-doesn-t-have-to-be-dis.patch
deleted file mode 100644
index 42dde8e91c0a0..0000000000000
--- a/patches/0012-futex-clarify-that-preemption-doesn-t-have-to-be-dis.patch
+++ /dev/null
@@ -1,85 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:17 +0200
-Subject: sched/preempt, futex: Update comments to clarify that preemption doesn't have to be disabled
-
-As arm64 and arc have no special implementations for !CONFIG_SMP, mutual
-exclusion doesn't seem to rely on preemption.
-
-Let's make it clear in the comments that preemption doesn't have to be
-disabled when accessing user space in the futex code, so we can remove
-preempt_disable() from pagefault_disable().
-
-[upstream commit 2f09b227eeed4b3a072fe818c82a4c773b778cde]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- arch/arc/include/asm/futex.h | 10 +++++-----
- arch/arm64/include/asm/futex.h | 4 ++--
- 2 files changed, 7 insertions(+), 7 deletions(-)
-
---- a/arch/arc/include/asm/futex.h
-+++ b/arch/arc/include/asm/futex.h
-@@ -53,7 +53,7 @@ static inline int futex_atomic_op_inuser
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
-- pagefault_disable(); /* implies preempt_disable() */
-+ pagefault_disable();
-
- switch (op) {
- case FUTEX_OP_SET:
-@@ -75,7 +75,7 @@ static inline int futex_atomic_op_inuser
- ret = -ENOSYS;
- }
-
-- pagefault_enable(); /* subsumes preempt_enable() */
-+ pagefault_enable();
-
- if (!ret) {
- switch (cmp) {
-@@ -104,7 +104,7 @@ static inline int futex_atomic_op_inuser
- return ret;
- }
-
--/* Compare-xchg with preemption disabled.
-+/* Compare-xchg with pagefaults disabled.
- * Notes:
- * -Best-Effort: Exchg happens only if compare succeeds.
- * If compare fails, returns; leaving retry/looping to upper layers
-@@ -121,7 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
-- pagefault_disable(); /* implies preempt_disable() */
-+ pagefault_disable();
-
- /* TBD : can use llock/scond */
- __asm__ __volatile__(
-@@ -142,7 +142,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval,
- : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
- : "cc", "memory");
-
-- pagefault_enable(); /* subsumes preempt_enable() */
-+ pagefault_enable();
-
- *uval = val;
- return val;
---- a/arch/arm64/include/asm/futex.h
-+++ b/arch/arm64/include/asm/futex.h
-@@ -58,7 +58,7 @@ futex_atomic_op_inuser (int encoded_op,
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
-- pagefault_disable(); /* implies preempt_disable() */
-+ pagefault_disable();
-
- switch (op) {
- case FUTEX_OP_SET:
-@@ -85,7 +85,7 @@ futex_atomic_op_inuser (int encoded_op,
- ret = -ENOSYS;
- }
-
-- pagefault_enable(); /* subsumes preempt_enable() */
-+ pagefault_enable();
-
- if (!ret) {
- switch (cmp) {
diff --git a/patches/0012-gpio-omap-remove-wrong-irq_domain_remove-usage-in-pr.patch b/patches/0012-gpio-omap-remove-wrong-irq_domain_remove-usage-in-pr.patch
deleted file mode 100644
index 0635c1fdd8188..0000000000000
--- a/patches/0012-gpio-omap-remove-wrong-irq_domain_remove-usage-in-pr.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From b461fa3a9d07736eb2f8405899e8b85ee962b9c8 Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@ti.com>
-Date: Mon, 31 Aug 2015 18:52:21 +0300
-Subject: [PATCH 12/21] gpio: omap: remove wrong irq_domain_remove usage in
- probe
-
-The bank->chip.irqdomain is uninitialized at the moment when
-irq_domain_remove() is called, so remove this call.
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
-Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- drivers/gpio/gpio-omap.c | 1 -
- 1 file changed, 1 deletion(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -1213,7 +1213,6 @@ static int omap_gpio_probe(struct platfo
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bank->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(bank->base)) {
-- irq_domain_remove(bank->chip.irqdomain);
- return PTR_ERR(bank->base);
- }
-
diff --git a/patches/0013-gpio-omap-switch-to-use-platform_get_irq.patch b/patches/0013-gpio-omap-switch-to-use-platform_get_irq.patch
deleted file mode 100644
index 81d9cf32dd05e..0000000000000
--- a/patches/0013-gpio-omap-switch-to-use-platform_get_irq.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From 65677de608ceb2c9d0f72d4fc0375d18c68b021a Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@ti.com>
-Date: Mon, 31 Aug 2015 18:52:22 +0300
-Subject: [PATCH 13/21] gpio: omap: switch to use platform_get_irq
-
-Switch the OMAP GPIO driver to use platform_get_irq(), because
-it is no longer recommended to use platform_get_resource(pdev, IORESOURCE_IRQ, ..)
-for requesting IRQ resources, as they may not be ready yet
-in the case of DT boot.
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
-Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- drivers/gpio/gpio-omap.c | 13 ++++++++-----
- 1 file changed, 8 insertions(+), 5 deletions(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -1178,13 +1178,16 @@ static int omap_gpio_probe(struct platfo
- irqc->irq_set_wake = omap_gpio_wake_enable,
- irqc->name = dev_name(&pdev->dev);
-
-- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-- if (unlikely(!res)) {
-- dev_err(dev, "Invalid IRQ resource\n");
-- return -ENODEV;
-+ bank->irq = platform_get_irq(pdev, 0);
-+ if (bank->irq <= 0) {
-+ if (!bank->irq)
-+ bank->irq = -ENXIO;
-+ if (bank->irq != -EPROBE_DEFER)
-+ dev_err(dev,
-+ "can't get irq resource ret=%d\n", bank->irq);
-+ return bank->irq;
- }
-
-- bank->irq = res->start;
- bank->dev = dev;
- bank->chip.dev = dev;
- bank->chip.owner = THIS_MODULE;
diff --git a/patches/0013-mips-properly-lock-access-to-the-fpu.patch b/patches/0013-mips-properly-lock-access-to-the-fpu.patch
deleted file mode 100644
index 5acfb89ffb0dd..0000000000000
--- a/patches/0013-mips-properly-lock-access-to-the-fpu.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:19 +0200
-Subject: sched/preempt, MIPS: Properly lock access to the FPU
-
-Let's always disable preemption and pagefaults when locking the fpu,
-so we can be sure that the owner won't change in between.
-
-This is a preparation for pagefault_disable() not touching preemption
-anymore.
-
-[upstream commit 76deabd1867d6d2895152f31fdec819e3505738b]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- arch/mips/kernel/signal-common.h | 9 ++-------
- 1 file changed, 2 insertions(+), 7 deletions(-)
-
---- a/arch/mips/kernel/signal-common.h
-+++ b/arch/mips/kernel/signal-common.h
-@@ -28,12 +28,7 @@ extern void __user *get_sigframe(struct
- extern int fpcsr_pending(unsigned int __user *fpcsr);
-
- /* Make sure we will not lose FPU ownership */
--#ifdef CONFIG_PREEMPT
--#define lock_fpu_owner() preempt_disable()
--#define unlock_fpu_owner() preempt_enable()
--#else
--#define lock_fpu_owner() pagefault_disable()
--#define unlock_fpu_owner() pagefault_enable()
--#endif
-+#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
-+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
-
- #endif /* __SIGNAL_COMMON_H */
diff --git a/patches/0014-gpio-omap-fix-omap2_set_gpio_debounce.patch b/patches/0014-gpio-omap-fix-omap2_set_gpio_debounce.patch
deleted file mode 100644
index c21c73f5463b0..0000000000000
--- a/patches/0014-gpio-omap-fix-omap2_set_gpio_debounce.patch
+++ /dev/null
@@ -1,95 +0,0 @@
-From e02e1a208b803111ba52cc321a419378e9ed63bd Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@ti.com>
-Date: Mon, 31 Aug 2015 18:52:23 +0300
-Subject: [PATCH 14/21] gpio: omap: fix omap2_set_gpio_debounce
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-According to TRMs:
-
-Required input line stable =
- (the value of the GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) × 31,
-where the value of the GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME bit field
-is from 0 to 255.
-
-But now omap2_set_gpio_debounce() will calculate debounce time and
-behave incorrectly in the following cases:
-1) requested debounce time is !0 and <32
- calculated DEBOUNCETIME = 0x1 == 62 us;
- expected value of DEBOUNCETIME = 0x0 == 31us
-2) requested debounce time is 0
- calculated DEBOUNCETIME = 0x1 == 62 us;
- expected: disable debounce and DEBOUNCETIME = 0x0
-3) requested debounce time is >32 and <63
- calculated DEBOUNCETIME = 0x0 and debounce will be disabled;
- expected: enable debounce and DEBOUNCETIME = 0x1 == 62 us
-
-Hence, rework omap2_set_gpio_debounce() to fix the above cases:
-1) introduce a local variable "enable" and use it to identify
-when debounce needs to be enabled or disabled. Disable debounce
-if the requested debounce time is 0.
-2) use the formula below for the debounce time calculation:
- debounce = (DIV_ROUND_UP(debounce, 31) - 1) & 0xFF;
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
-Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- drivers/gpio/gpio-omap.c | 19 ++++++++++---------
- 1 file changed, 10 insertions(+), 9 deletions(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -29,6 +29,7 @@
- #include <linux/platform_data/gpio-omap.h>
-
- #define OFF_MODE 1
-+#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
-
- static LIST_HEAD(omap_gpio_list);
-
-@@ -204,8 +205,9 @@ static inline void omap_gpio_dbck_disabl
- * @offset: the gpio number on this @bank
- * @debounce: debounce time to use
- *
-- * OMAP's debounce time is in 31us steps so we need
-- * to convert and round up to the closest unit.
-+ * OMAP's debounce time is in 31us steps
-+ * <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
-+ * so we need to convert and round up to the closest unit.
- */
- static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
- unsigned debounce)
-@@ -213,16 +215,15 @@ static void omap2_set_gpio_debounce(stru
- void __iomem *reg;
- u32 val;
- u32 l;
-+ bool enable = !!debounce;
-
- if (!bank->dbck_flag)
- return;
-
-- if (debounce < 32)
-- debounce = 0x01;
-- else if (debounce > 7936)
-- debounce = 0xff;
-- else
-- debounce = (debounce / 0x1f) - 1;
-+ if (enable) {
-+ debounce = DIV_ROUND_UP(debounce, 31) - 1;
-+ debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
-+ }
-
- l = BIT(offset);
-
-@@ -233,7 +234,7 @@ static void omap2_set_gpio_debounce(stru
- reg = bank->base + bank->regs->debounce_en;
- val = readl_relaxed(reg);
-
-- if (debounce)
-+ if (enable)
- val |= l;
- else
- val &= ~l;
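The arithmetic in the changelog, checked as a runnable example: debounce_reg() implements the fixed conversion, and the three printed cases are exactly the three broken cases listed above.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

    /* Fixed conversion: returns the DEBOUNCETIME field, or -1 when
     * debouncing is disabled. The resulting input stable time is
     * (DEBOUNCETIME + 1) * 31 us. */
    static int debounce_reg(unsigned int debounce_us)
    {
        if (!debounce_us)
            return -1; /* enable == false: debounce switched off */
        return (DIV_ROUND_UP(debounce_us, 31) - 1) &
               OMAP4_GPIO_DEBOUNCINGTIME_MASK;
    }

    int main(void)
    {
        printf("30us -> %d (31us; old code gave 0x1 == 62us)\n",
               debounce_reg(30));
        printf("0us  -> %d (disabled; old code gave 0x1 == 62us)\n",
               debounce_reg(0));
        printf("40us -> %d (62us; old code disabled debounce)\n",
               debounce_reg(40));
        return 0;
    }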
diff --git a/patches/0014-uaccess-decouple-preemption-from-the-pagefault-logic.patch b/patches/0014-uaccess-decouple-preemption-from-the-pagefault-logic.patch
deleted file mode 100644
index e9b148bf5d968..0000000000000
--- a/patches/0014-uaccess-decouple-preemption-from-the-pagefault-logic.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-From: David Hildenbrand <dahi@linux.vnet.ibm.com>
-Date: Mon, 11 May 2015 17:52:20 +0200
-Subject: sched/preempt, mm/fault: Decouple preemption from the page fault logic
-
-As the fault handlers now all rely on the pagefault_disabled() checks
-and implicit preempt_disable() calls by pagefault_disable() have been
-made explicit, we can completely rely on the pagefault_disableD counter.
-
-So let's no longer touch the preempt count when disabling/enabling
-pagefaults. After a call to pagefault_disable(), pagefault_disabled()
-will return true, but in_atomic() won't.
-
-[upstream commit 8222dbe21e79338de92d5e1956cd1e3994cc9f93]
-Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
----
- include/linux/uaccess.h | 16 ++--------------
- 1 file changed, 2 insertions(+), 14 deletions(-)
-
---- a/include/linux/uaccess.h
-+++ b/include/linux/uaccess.h
-@@ -1,7 +1,6 @@
- #ifndef __LINUX_UACCESS_H__
- #define __LINUX_UACCESS_H__
-
--#include <linux/preempt.h>
- #include <linux/sched.h>
- #include <asm/uaccess.h>
-
-@@ -20,17 +19,11 @@ static __always_inline void pagefault_di
- * These routines enable/disable the pagefault handler. If disabled, it will
- * not take any locks and go straight to the fixup table.
- *
-- * We increase the preempt and the pagefault count, to be able to distinguish
-- * whether we run in simple atomic context or in a real pagefault_disable()
-- * context.
-- *
-- * For now, after pagefault_disabled() has been called, we run in atomic
-- * context. User access methods will not sleep.
-- *
-+ * User access methods will not sleep when called from a pagefault_disabled()
-+ * environment.
- */
- static inline void pagefault_disable(void)
- {
-- preempt_count_inc();
- pagefault_disabled_inc();
- /*
- * make sure to have issued the store before a pagefault
-@@ -47,11 +40,6 @@ static inline void pagefault_enable(void
- */
- barrier();
- pagefault_disabled_dec();
--#ifndef CONFIG_PREEMPT
-- preempt_count_dec();
--#else
-- preempt_enable();
--#endif
- }
-
- /*
diff --git a/patches/0015-gpio-omap-protect-regs-access-in-omap_gpio_irq_handl.patch b/patches/0015-gpio-omap-protect-regs-access-in-omap_gpio_irq_handl.patch
deleted file mode 100644
index 010e99a753065..0000000000000
--- a/patches/0015-gpio-omap-protect-regs-access-in-omap_gpio_irq_handl.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-From ec29016aefaf6649096224bc9031a303cb21df47 Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@ti.com>
-Date: Mon, 31 Aug 2015 18:52:24 +0300
-Subject: [PATCH 15/21] gpio: omap: protect regs access in
- omap_gpio_irq_handler
-
-The access to HW registers has to be be protected in
-omap_gpio_irq_handler(), as it may race with code executed on
-another CPUs.
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
-Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- drivers/gpio/gpio-omap.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -718,6 +718,7 @@ static void omap_gpio_irq_handler(unsign
- int unmasked = 0;
- struct irq_chip *irqchip = irq_desc_get_chip(desc);
- struct gpio_chip *chip = irq_get_handler_data(irq);
-+ unsigned long lock_flags;
-
- chained_irq_enter(irqchip, desc);
-
-@@ -732,6 +733,8 @@ static void omap_gpio_irq_handler(unsign
- u32 isr_saved, level_mask = 0;
- u32 enabled;
-
-+ raw_spin_lock_irqsave(&bank->lock, lock_flags);
-+
- enabled = omap_get_gpio_irqbank_mask(bank);
- isr_saved = isr = readl_relaxed(isr_reg) & enabled;
-
-@@ -745,6 +748,8 @@ static void omap_gpio_irq_handler(unsign
- omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
- omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask);
-
-+ raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
-+
- /* if there is only edge sensitive GPIO pin interrupts
- configured, we could unmask GPIO bank interrupt immediately */
- if (!level_mask && !unmasked) {
-@@ -759,6 +764,7 @@ static void omap_gpio_irq_handler(unsign
- bit = __ffs(isr);
- isr &= ~(BIT(bit));
-
-+ raw_spin_lock_irqsave(&bank->lock, lock_flags);
- /*
- * Some chips can't respond to both rising and falling
- * at the same time. If this irq was requested with
-@@ -769,6 +775,8 @@ static void omap_gpio_irq_handler(unsign
- if (bank->toggle_mask & (BIT(bit)))
- omap_toggle_gpio_edge_triggering(bank, bit);
-
-+ raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
-+
- generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
- bit));
- }
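The race being closed is a read-modify-write of shared bank registers from the interrupt handler. A self-contained pthread model of why the RMW needs bank->lock: two threads toggling different bits of one word an even number of times must leave it at zero, which only the lock guarantees (build with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t lock; /* stand-in for bank->lock */
    static unsigned int regs;       /* stand-in for a bank register */

    static void *toggle_bits(void *arg)
    {
        unsigned int bit = *(unsigned int *)arg;
        for (int i = 0; i < 100000; i++) {
            pthread_spin_lock(&lock);
            regs ^= bit; /* the RMW, now atomic w.r.t. the lock */
            pthread_spin_unlock(&lock);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        unsigned int bit_a = 1, bit_b = 2;

        pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
        pthread_create(&a, NULL, toggle_bits, &bit_a);
        pthread_create(&b, NULL, toggle_bits, &bit_b);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("regs = %#x (0 expected; lost updates without the lock)\n",
               regs);
        return 0;
    }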
diff --git a/patches/0016-gpio-omap-fix-clk_prepare-unprepare-usage.patch b/patches/0016-gpio-omap-fix-clk_prepare-unprepare-usage.patch
deleted file mode 100644
index b7f81f2bb5327..0000000000000
--- a/patches/0016-gpio-omap-fix-clk_prepare-unprepare-usage.patch
+++ /dev/null
@@ -1,120 +0,0 @@
-From 72f110f4546f9fff10bdeeb13f075c6263abe8ff Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@ti.com>
-Date: Mon, 31 Aug 2015 18:52:25 +0300
-Subject: [PATCH 16/21] gpio: omap: fix clk_prepare/unprepare usage
-
-As per CCF documentation (clk.txt) the clk_prepare/unprepare APIs
-are not allowed in atomic context. But now the OMAP GPIO driver
-uses them while applying debounce settings and as part
-of PM runtime irqsafe operations:
-
-- omap_gpio_debounce() is holding the lock with IRQs off.
- + omap2_set_gpio_debounce()
- + clk_prepare_enable()
- + clk_prepare() this one might sleep.
-
-- pm_runtime_get_sync() is holding the lock with IRQs off
- + omap_gpio_runtime_suspend()
- + raw_spin_lock_irqsave()
- + omap_gpio_dbck_disable()
- + clk_disable_unprepare()
-
-Hence, fix it by moving the dbclk prepare/unprepare into the OMAP GPIO
-omap_gpio_probe/omap_gpio_remove paths. Also, while here, ensure that
-debounce functionality is disabled if clk_get() failed,
-because otherwise the kernel will crash in omap2_set_gpio_debounce().
-
-Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
----
- drivers/gpio/gpio-omap.c | 27 ++++++++++++++++++---------
- 1 file changed, 18 insertions(+), 9 deletions(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -176,7 +176,7 @@ static inline void omap_gpio_rmw(void __
- static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
- {
- if (bank->dbck_enable_mask && !bank->dbck_enabled) {
-- clk_prepare_enable(bank->dbck);
-+ clk_enable(bank->dbck);
- bank->dbck_enabled = true;
-
- writel_relaxed(bank->dbck_enable_mask,
-@@ -194,7 +194,7 @@ static inline void omap_gpio_dbck_disabl
- */
- writel_relaxed(0, bank->base + bank->regs->debounce_en);
-
-- clk_disable_unprepare(bank->dbck);
-+ clk_disable(bank->dbck);
- bank->dbck_enabled = false;
- }
- }
-@@ -227,7 +227,7 @@ static void omap2_set_gpio_debounce(stru
-
- l = BIT(offset);
-
-- clk_prepare_enable(bank->dbck);
-+ clk_enable(bank->dbck);
- reg = bank->base + bank->regs->debounce;
- writel_relaxed(debounce, reg);
-
-@@ -241,7 +241,7 @@ static void omap2_set_gpio_debounce(stru
- bank->dbck_enable_mask = val;
-
- writel_relaxed(val, reg);
-- clk_disable_unprepare(bank->dbck);
-+ clk_disable(bank->dbck);
- /*
- * Enable debounce clock per module.
- * This call is mandatory because in omap_gpio_request() when
-@@ -286,7 +286,7 @@ static void omap_clear_gpio_debounce(str
- bank->context.debounce = 0;
- writel_relaxed(bank->context.debounce, bank->base +
- bank->regs->debounce);
-- clk_disable_unprepare(bank->dbck);
-+ clk_disable(bank->dbck);
- bank->dbck_enabled = false;
- }
- }
-@@ -1070,10 +1070,6 @@ static void omap_gpio_mod_init(struct gp
- /* Initialize interface clk ungated, module enabled */
- if (bank->regs->ctrl)
- writel_relaxed(0, base + bank->regs->ctrl);
--
-- bank->dbck = clk_get(bank->dev, "dbclk");
-- if (IS_ERR(bank->dbck))
-- dev_err(bank->dev, "Could not get gpio dbck\n");
- }
-
- static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
-@@ -1228,6 +1224,17 @@ static int omap_gpio_probe(struct platfo
- return PTR_ERR(bank->base);
- }
-
-+ if (bank->dbck_flag) {
-+ bank->dbck = devm_clk_get(bank->dev, "dbclk");
-+ if (IS_ERR(bank->dbck)) {
-+ dev_err(bank->dev,
-+ "Could not get gpio dbck. Disable debounce\n");
-+ bank->dbck_flag = false;
-+ } else {
-+ clk_prepare(bank->dbck);
-+ }
-+ }
-+
- platform_set_drvdata(pdev, bank);
-
- pm_runtime_enable(bank->dev);
-@@ -1259,6 +1266,8 @@ static int omap_gpio_remove(struct platf
- list_del(&bank->node);
- gpiochip_remove(&bank->chip);
- pm_runtime_disable(bank->dev);
-+ if (bank->dbck_flag)
-+ clk_unprepare(bank->dbck);
-
- return 0;
- }
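The rule the fix enforces, as a small user-space model: clk_prepare()/clk_unprepare() may sleep and so belong in probe()/remove(), while clk_enable()/clk_disable() are atomic-safe and may run under the raw lock. The function names mirror the CCF API, but the bodies are stand-ins:

    #include <stdio.h>
    #include <stdbool.h>

    static bool prepared, enabled;

    static void clk_prepare(void)   { /* may sleep */ prepared = true;  }
    static void clk_unprepare(void) { /* may sleep */ prepared = false; }
    static void clk_enable(void)    { if (prepared) enabled = true;     }
    static void clk_disable(void)   { enabled = false;                  }

    static void probe(void)       { clk_prepare(); }   /* sleepable context */
    static void atomic_path(void) { clk_enable(); clk_disable(); } /* lock held */
    static void remove_(void)     { clk_unprepare(); } /* sleepable context */

    int main(void)
    {
        probe();
        atomic_path(); /* safe: only enable/disable run under the lock */
        remove_();
        printf("prepared=%d enabled=%d\n", prepared, enabled);
        return 0;
    }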
diff --git a/patches/0017-gpio-omap-Fix-gpiochip_add-handling-for-deferred-pro.patch b/patches/0017-gpio-omap-Fix-gpiochip_add-handling-for-deferred-pro.patch
deleted file mode 100644
index b83f8ce541eab..0000000000000
--- a/patches/0017-gpio-omap-Fix-gpiochip_add-handling-for-deferred-pro.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From f8ed3ff1efe8be27483c48d06974f8026dd673dd Mon Sep 17 00:00:00 2001
-From: Tony Lindgren <tony@atomide.com>
-Date: Mon, 28 Sep 2015 18:36:12 -0500
-Subject: [PATCH 17/21] gpio: omap: Fix gpiochip_add() handling for deferred
- probe
-
-commit 5e606abef57a89b3ca25f5d97a953c6cdad7cbac upstream.
-
-Currently gpio-omap breaks if gpiochip_add() returns -EPROBE_DEFER:
-
-[ 0.570000] gpiochip_add: GPIOs 0..31 (gpio) failed to register
-[ 0.570000] omap_gpio 48310000.gpio: Could not register gpio chip -517
-...
-[ 3.670000] omap_gpio 48310000.gpio: Unbalanced pm_runtime_enable!
-
-Let's fix the issue by adding the missing pm_runtime_put() on error.
-
-Cc: Grygorii Strashko <grygorii.strashko@ti.com>
-Cc: Javier Martinez Canillas <javier@dowhile0.org>
-Cc: Kevin Hilman <khilman@deeprootsystems.com>
-Cc: Santosh Shilimkar <ssantosh@kernel.org>
-Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
-Signed-off-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -1247,8 +1247,11 @@ static int omap_gpio_probe(struct platfo
- omap_gpio_mod_init(bank);
-
- ret = omap_gpio_chip_init(bank, irqc);
-- if (ret)
-+ if (ret) {
-+ pm_runtime_put_sync(bank->dev);
-+ pm_runtime_disable(bank->dev);
- return ret;
-+ }
-
- omap_gpio_show_rev(bank);
-
diff --git a/patches/0018-gpio-omap-Fix-GPIO-numbering-for-deferred-probe.patch b/patches/0018-gpio-omap-Fix-GPIO-numbering-for-deferred-probe.patch
deleted file mode 100644
index ec56c6279c851..0000000000000
--- a/patches/0018-gpio-omap-Fix-GPIO-numbering-for-deferred-probe.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 1cda618ade93f4439e17e8159182b035e9c189c9 Mon Sep 17 00:00:00 2001
-From: Tony Lindgren <tony@atomide.com>
-Date: Mon, 28 Sep 2015 18:36:13 -0500
-Subject: [PATCH 18/21] gpio: omap: Fix GPIO numbering for deferred probe
-
-commit 46d4f7c25e1bb59b1663878b843a7ec06eaf5806 upstream.
-
-If gpio-omap probe fails with -EPROBE_DEFER, the GPIO numbering
-keeps increasing. Only increase the gpio count if gpiochip_add()
-was successful, as otherwise the numbers will increase for each
-probe attempt.
-
-Cc: Javier Martinez Canillas <javier@dowhile0.org>
-Cc: Kevin Hilman <khilman@deeprootsystems.com>
-Cc: Santosh Shilimkar <ssantosh@kernel.org>
-Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
-Signed-off-by: Tony Lindgren <tony@atomide.com>
-Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -1098,7 +1098,6 @@ static int omap_gpio_chip_init(struct gp
- } else {
- bank->chip.label = "gpio";
- bank->chip.base = gpio;
-- gpio += bank->width;
- }
- bank->chip.ngpio = bank->width;
-
-@@ -1108,6 +1107,9 @@ static int omap_gpio_chip_init(struct gp
- return ret;
- }
-
-+ if (!bank->is_mpuio)
-+ gpio += bank->width;
-+
- #ifdef CONFIG_ARCH_OMAP1
- /*
- * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
diff --git a/patches/0019-gpio-omap-fix-static-checker-warning.patch b/patches/0019-gpio-omap-fix-static-checker-warning.patch
deleted file mode 100644
index 9d6993921f809..0000000000000
--- a/patches/0019-gpio-omap-fix-static-checker-warning.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From 58ac52221b71c6a7e067ebe2f13e595cb99e17b6 Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@ti.com>
-Date: Mon, 28 Sep 2015 18:36:14 -0500
-Subject: [PATCH 19/21] gpio: omap: fix static checker warning
-
-This patch fixes the static checker warning below by changing the
-type of the irq field in struct gpio_bank from u16 to int.
-
-drivers/gpio/gpio-omap.c:1191 omap_gpio_probe()
- warn: assigning (-6) to unsigned variable 'bank->irq'
-
-drivers/gpio/gpio-omap.c
- 1188 bank->irq = platform_get_irq(pdev, 0);
- 1189 if (bank->irq <= 0) {
-
-bank->irq is u16.
-
- 1190 if (!bank->irq)
- 1191 bank->irq = -ENXIO;
-
-Does not work.
-
- 1192 if (bank->irq != -EPROBE_DEFER)
-
-Does not work.
-
- 1193 dev_err(dev,
- 1194 "can't get irq resource ret=%d\n", bank->irq);
- 1195 return bank->irq;
- 1196 }
-
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -51,7 +51,7 @@ struct gpio_regs {
- struct gpio_bank {
- struct list_head node;
- void __iomem *base;
-- u16 irq;
-+ int irq;
- u32 non_wakeup_gpios;
- u32 enabled_non_wakeup_gpios;
- struct gpio_regs context;
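
Why the u16 checks can never fire is easy to demonstrate outside the kernel;
a standalone sketch in plain userspace C (not the driver code):

	#include <stdio.h>

	int main(void)
	{
		unsigned short irq = -6;	/* bank->irq = -ENXIO with a u16 field */

		printf("%u\n", irq);		/* prints 65530, the truncated value */
		printf("%d\n", irq == -6);	/* prints 0: the error check fails */
		return 0;
	}
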
diff --git a/patches/0020-gpio-omap-move-pm-runtime-in-irq_chip.irq_bus_lock-s.patch b/patches/0020-gpio-omap-move-pm-runtime-in-irq_chip.irq_bus_lock-s.patch
deleted file mode 100644
index b11be591c1f0e..0000000000000
--- a/patches/0020-gpio-omap-move-pm-runtime-in-irq_chip.irq_bus_lock-s.patch
+++ /dev/null
@@ -1,133 +0,0 @@
-From c82f51eada9730a1b1c07c75a036a1a893964e49 Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@ti.com>
-Date: Mon, 28 Sep 2015 18:36:15 -0500
-Subject: [PATCH 20/21] gpio: omap: move pm runtime in
- irq_chip.irq_bus_lock/sync_unlock
-
-The PM runtime API can't be used in atomic context on -RT even if
-it's configured as irqsafe. As a result, the error report below can
-be seen when the PM runtime API is called from the IRQ chip's callbacks
-irq_startup/irq_shutdown/irq_set_type, because they are
-protected by a raw spinlock:
-
-BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:917
-in_atomic(): 1, irqs_disabled(): 128, pid: 96, name: insmod
-3 locks held by insmod/96:
- #0: (&dev->mutex){......}, at: [<c04752c8>] __driver_attach+0x54/0xa0
- #1: (&dev->mutex){......}, at: [<c04752d4>] __driver_attach+0x60/0xa0
- #2: (class){......}, at: [<c00a408c>] __irq_get_desc_lock+0x60/0xa4
-irq event stamp: 1834
-hardirqs last enabled at (1833): [<c06ab2a4>] _raw_spin_unlock_irqrestore+0x88/0x90
-hardirqs last disabled at (1834): [<c06ab068>] _raw_spin_lock_irqsave+0x2c/0x64
-softirqs last enabled at (0): [<c003d220>] copy_process.part.52+0x410/0x19d8
-softirqs last disabled at (0): [< (null)>] (null)
-Preemption disabled at:[< (null)>] (null)
-
-CPU: 1 PID: 96 Comm: insmod Tainted: G W O 4.1.3-rt3-00618-g57e2387-dirty #184
-Hardware name: Generic DRA74X (Flattened Device Tree)
-[<c00190f4>] (unwind_backtrace) from [<c0014734>] (show_stack+0x20/0x24)
-[<c0014734>] (show_stack) from [<c06a62ec>] (dump_stack+0x88/0xdc)
-[<c06a62ec>] (dump_stack) from [<c006ca44>] (___might_sleep+0x198/0x2a8)
-[<c006ca44>] (___might_sleep) from [<c06ab6d4>] (rt_spin_lock+0x30/0x70)
-[<c06ab6d4>] (rt_spin_lock) from [<c04815ac>] (__pm_runtime_resume+0x68/0xa4)
-[<c04815ac>] (__pm_runtime_resume) from [<c04123f4>] (omap_gpio_irq_type+0x188/0x1d8)
-[<c04123f4>] (omap_gpio_irq_type) from [<c00a64e4>] (__irq_set_trigger+0x68/0x130)
-[<c00a64e4>] (__irq_set_trigger) from [<c00a7bc4>] (irq_set_irq_type+0x44/0x6c)
-[<c00a7bc4>] (irq_set_irq_type) from [<c00abbf8>] (irq_create_of_mapping+0x120/0x174)
-[<c00abbf8>] (irq_create_of_mapping) from [<c0577b74>] (of_irq_get+0x48/0x58)
-[<c0577b74>] (of_irq_get) from [<c0540a14>] (i2c_device_probe+0x54/0x15c)
-[<c0540a14>] (i2c_device_probe) from [<c04750dc>] (driver_probe_device+0x184/0x2c8)
-[<c04750dc>] (driver_probe_device) from [<c0475310>] (__driver_attach+0x9c/0xa0)
-[<c0475310>] (__driver_attach) from [<c0473238>] (bus_for_each_dev+0x7c/0xb0)
-[<c0473238>] (bus_for_each_dev) from [<c0474af4>] (driver_attach+0x28/0x30)
-[<c0474af4>] (driver_attach) from [<c0474760>] (bus_add_driver+0x154/0x200)
-[<c0474760>] (bus_add_driver) from [<c0476348>] (driver_register+0x88/0x108)
-[<c0476348>] (driver_register) from [<c0541600>] (i2c_register_driver+0x3c/0x90)
-[<c0541600>] (i2c_register_driver) from [<bf003018>] (pcf857x_init+0x18/0x24 [gpio_pcf857x])
-[<bf003018>] (pcf857x_init [gpio_pcf857x]) from [<c000998c>] (do_one_initcall+0x128/0x1e8)
-[<c000998c>] (do_one_initcall) from [<c06a4220>] (do_init_module+0x6c/0x1bc)
-[<c06a4220>] (do_init_module) from [<c00dd0c8>] (load_module+0x18e8/0x21c4)
-[<c00dd0c8>] (load_module) from [<c00ddaa0>] (SyS_init_module+0xfc/0x158)
-[<c00ddaa0>] (SyS_init_module) from [<c000ff40>] (ret_fast_syscall+0x0/0x54)
-
-The IRQ chip interface defines only two callbacks which are executed in
-non-atomic context - irq_bus_lock/irq_bus_sync_unlock - so let's move the
-PM runtime calls there.
-
-Cc: <linux-rt-users@vger.kernel.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Tested-by: Austin Schuh <austin@peloton-tech.com>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 25 +++++++++++++++----------
- 1 file changed, 15 insertions(+), 10 deletions(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -496,9 +496,6 @@ static int omap_gpio_irq_type(struct irq
- (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
- return -EINVAL;
-
-- if (!BANK_USED(bank))
-- pm_runtime_get_sync(bank->dev);
--
- raw_spin_lock_irqsave(&bank->lock, flags);
- retval = omap_set_gpio_triggering(bank, offset, type);
- if (retval) {
-@@ -521,8 +518,6 @@ static int omap_gpio_irq_type(struct irq
- return 0;
-
- error:
-- if (!BANK_USED(bank))
-- pm_runtime_put(bank->dev);
- return retval;
- }
-
-@@ -797,9 +792,6 @@ static unsigned int omap_gpio_irq_startu
- unsigned long flags;
- unsigned offset = d->hwirq;
-
-- if (!BANK_USED(bank))
-- pm_runtime_get_sync(bank->dev);
--
- raw_spin_lock_irqsave(&bank->lock, flags);
-
- if (!LINE_USED(bank->mod_usage, offset))
-@@ -815,8 +807,6 @@ static unsigned int omap_gpio_irq_startu
- return 0;
- err:
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-- if (!BANK_USED(bank))
-- pm_runtime_put(bank->dev);
- return -EINVAL;
- }
-
-@@ -835,6 +825,19 @@ static void omap_gpio_irq_shutdown(struc
- omap_clear_gpio_debounce(bank, offset);
- omap_disable_gpio_module(bank, offset);
- raw_spin_unlock_irqrestore(&bank->lock, flags);
-+}
-+
-+static void omap_gpio_irq_bus_lock(struct irq_data *data)
-+{
-+ struct gpio_bank *bank = omap_irq_data_get_bank(data);
-+
-+ if (!BANK_USED(bank))
-+ pm_runtime_get_sync(bank->dev);
-+}
-+
-+static void gpio_irq_bus_sync_unlock(struct irq_data *data)
-+{
-+ struct gpio_bank *bank = omap_irq_data_get_bank(data);
-
- /*
- * If this is the last IRQ to be freed in the bank,
-@@ -1183,6 +1186,8 @@ static int omap_gpio_probe(struct platfo
- irqc->irq_unmask = omap_gpio_unmask_irq,
- irqc->irq_set_type = omap_gpio_irq_type,
- irqc->irq_set_wake = omap_gpio_wake_enable,
-+ irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
-+ irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
- irqc->name = dev_name(&pdev->dev);
-
- bank->irq = platform_get_irq(pdev, 0);
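
The pattern above is worth stating on its own: irq_bus_lock and
irq_bus_sync_unlock are the only irq_chip callbacks invoked in sleepable
context, so sleeping PM runtime calls belong there. A minimal sketch under
that assumption (my_irq_bus_lock and struct my_bank are illustrative names,
not the driver's code):

	static void my_irq_bus_lock(struct irq_data *d)
	{
		struct my_bank *bank = irq_data_get_irq_chip_data(d);

		pm_runtime_get_sync(bank->dev);	/* may sleep: preemptible here */
	}

	static void my_irq_bus_sync_unlock(struct irq_data *d)
	{
		struct my_bank *bank = irq_data_get_irq_chip_data(d);

		pm_runtime_put(bank->dev);	/* may sleep as well */
	}
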
diff --git a/patches/0021-gpio-omap-convert-to-use-generic-irq-handler.patch b/patches/0021-gpio-omap-convert-to-use-generic-irq-handler.patch
deleted file mode 100644
index c30e6fbc6440e..0000000000000
--- a/patches/0021-gpio-omap-convert-to-use-generic-irq-handler.patch
+++ /dev/null
@@ -1,184 +0,0 @@
-From a53b71a3646765698f8280c1889412575ea1d7f5 Mon Sep 17 00:00:00 2001
-From: Grygorii Strashko <grygorii.strashko@ti.com>
-Date: Mon, 28 Sep 2015 18:36:16 -0500
-Subject: [PATCH 21/21] gpio: omap: convert to use generic irq handler
-
-This patch converts the TI OMAP GPIO driver to use a generic irq handler
-instead of a chained IRQ handler. This way the OMAP GPIO driver is
-compatible with the RT kernel, where the handler will be force-threaded,
-while on a non-RT kernel it will still be executed in HW IRQ context.
-As part of this change the IRQ wakeup configuration is applied to the
-GPIO bank IRQ, as it will now be under control of the IRQ PM core during
-suspend.
-
-There are also additional benefits:
- - on an RT kernel there will be no more complaints about PM runtime usage
- in atomic context ("BUG: sleeping function called from invalid context");
- - GPIO bank IRQs will appear in /proc/interrupts and their usage statistics
- will be visible;
- - GPIO bank IRQs can be configured through the IRQ procfs interface and,
- as a result, can be part of the IRQ balancing process if needed;
- - GPIO bank IRQs will be under control of the IRQ PM core during
- suspend to RAM.
-
-Disadvantages:
- - additional runtime overhead, as the call chain up to
- omap_gpio_irq_handler() is longer now;
- - the need to use wa_lock in omap_gpio_irq_handler() to work around the
- warning in handle_irq_event_percpu():
- WARNING: CPU: 1 PID: 35 at kernel/irq/handle.c:149 handle_irq_event_percpu+0x51c/0x638()
-
-This patch doesn't fully follow the recommendations provided by Sebastian
-Andrzej Siewior [1], because it's required to go through and check all
-GPIO IRQ pin states as fast as possible and pass control to handle_level_irq
-or handle_edge_irq. handle_level_irq or handle_edge_irq will perform the
-actions specific to the IRQ trigger type and wake up the corresponding
-registered threaded IRQ handler (at least it's expected to be threaded).
-IRQs can be lost if handle_nested_irq() is used, because the execution
-time of some pin-specific GPIO IRQ handlers can be significant and may
-require accessing external devices (I2C).
-
-The idea of such a rework was also discussed in [2].
-
-[1] http://www.spinics.net/lists/linux-omap/msg120665.html
-[2] http://www.spinics.net/lists/linux-omap/msg119516.html
-
-Cc: <linux-rt-users@vger.kernel.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Tested-by: Austin Schuh <austin@peloton-tech.com>
-Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
----
- drivers/gpio/gpio-omap.c | 55 +++++++++++++++++++++++------------------------
- 1 file changed, 27 insertions(+), 28 deletions(-)
-
---- a/drivers/gpio/gpio-omap.c
-+++ b/drivers/gpio/gpio-omap.c
-@@ -59,6 +59,7 @@ struct gpio_bank {
- u32 level_mask;
- u32 toggle_mask;
- raw_spinlock_t lock;
-+ raw_spinlock_t wa_lock;
- struct gpio_chip chip;
- struct clk *dbck;
- u32 mod_usage;
-@@ -649,8 +650,13 @@ static int omap_gpio_wake_enable(struct
- {
- struct gpio_bank *bank = omap_irq_data_get_bank(d);
- unsigned offset = d->hwirq;
-+ int ret;
-+
-+ ret = omap_set_gpio_wakeup(bank, offset, enable);
-+ if (!ret)
-+ ret = irq_set_irq_wake(bank->irq, enable);
-
-- return omap_set_gpio_wakeup(bank, offset, enable);
-+ return ret;
- }
-
- static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
-@@ -704,26 +710,21 @@ static void omap_gpio_free(struct gpio_c
- * line's interrupt handler has been run, we may miss some nested
- * interrupts.
- */
--static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
-+static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
- {
- void __iomem *isr_reg = NULL;
- u32 isr;
- unsigned int bit;
-- struct gpio_bank *bank;
-- int unmasked = 0;
-- struct irq_chip *irqchip = irq_desc_get_chip(desc);
-- struct gpio_chip *chip = irq_get_handler_data(irq);
-+ struct gpio_bank *bank = gpiobank;
-+ unsigned long wa_lock_flags;
- unsigned long lock_flags;
-
-- chained_irq_enter(irqchip, desc);
--
-- bank = container_of(chip, struct gpio_bank, chip);
- isr_reg = bank->base + bank->regs->irqstatus;
-- pm_runtime_get_sync(bank->dev);
--
- if (WARN_ON(!isr_reg))
- goto exit;
-
-+ pm_runtime_get_sync(bank->dev);
-+
- while (1) {
- u32 isr_saved, level_mask = 0;
- u32 enabled;
-@@ -745,13 +746,6 @@ static void omap_gpio_irq_handler(unsign
-
- raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
-
-- /* if there is only edge sensitive GPIO pin interrupts
-- configured, we could unmask GPIO bank interrupt immediately */
-- if (!level_mask && !unmasked) {
-- unmasked = 1;
-- chained_irq_exit(irqchip, desc);
-- }
--
- if (!isr)
- break;
-
-@@ -772,18 +766,18 @@ static void omap_gpio_irq_handler(unsign
-
- raw_spin_unlock_irqrestore(&bank->lock, lock_flags);
-
-+ raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);
-+
- generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
- bit));
-+
-+ raw_spin_unlock_irqrestore(&bank->wa_lock,
-+ wa_lock_flags);
- }
- }
-- /* if bank has any level sensitive GPIO pin interrupt
-- configured, we must unmask the bank interrupt only after
-- handler(s) are executed in order to avoid spurious bank
-- interrupt */
- exit:
-- if (!unmasked)
-- chained_irq_exit(irqchip, desc);
- pm_runtime_put(bank->dev);
-+ return IRQ_HANDLED;
- }
-
- static unsigned int omap_gpio_irq_startup(struct irq_data *d)
-@@ -1135,7 +1129,7 @@ static int omap_gpio_chip_init(struct gp
- }
-
- ret = gpiochip_irqchip_add(&bank->chip, irqc,
-- irq_base, omap_gpio_irq_handler,
-+ irq_base, handle_bad_irq,
- IRQ_TYPE_NONE);
-
- if (ret) {
-@@ -1144,10 +1138,14 @@ static int omap_gpio_chip_init(struct gp
- return -ENODEV;
- }
-
-- gpiochip_set_chained_irqchip(&bank->chip, irqc,
-- bank->irq, omap_gpio_irq_handler);
-+ gpiochip_set_chained_irqchip(&bank->chip, irqc, bank->irq, NULL);
-
-- return 0;
-+ ret = devm_request_irq(bank->dev, bank->irq, omap_gpio_irq_handler,
-+ 0, dev_name(bank->dev), bank);
-+ if (ret)
-+ gpiochip_remove(&bank->chip);
-+
-+ return ret;
- }
-
- static const struct of_device_id omap_gpio_match[];
-@@ -1223,6 +1221,7 @@ static int omap_gpio_probe(struct platfo
- bank->set_dataout = omap_set_gpio_dataout_mask;
-
- raw_spin_lock_init(&bank->lock);
-+ raw_spin_lock_init(&bank->wa_lock);
-
- /* Static mapping, never released */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
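
In short, the conversion replaces a chained flow handler (which can never be
threaded) with a demultiplexing handler registered through request_irq(), so
forced IRQ threading on -RT applies to it. A condensed sketch of the
resulting dispatch loop, simplified from the hunks above (the single-pass
loop and shortened names are illustrative):

	static irqreturn_t bank_irq_handler(int irq, void *data)
	{
		struct gpio_bank *bank = data;
		u32 pending = readl_relaxed(bank->base + bank->regs->irqstatus);

		while (pending) {
			unsigned int bit = __ffs(pending);

			/* per-pin handle_level_irq/handle_edge_irq runs here */
			generic_handle_irq(irq_find_mapping(bank->chip.irqdomain,
							    bit));
			pending &= ~BIT(bit);
		}
		return IRQ_HANDLED;
	}
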
diff --git a/patches/ARM-cmpxchg-define-__HAVE_ARCH_CMPXCHG-for-armv6-and.patch b/patches/ARM-cmpxchg-define-__HAVE_ARCH_CMPXCHG-for-armv6-and.patch
deleted file mode 100644
index 8c3abb11a6189..0000000000000
--- a/patches/ARM-cmpxchg-define-__HAVE_ARCH_CMPXCHG-for-armv6-and.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From: Yong Zhang <yong.zhang at windriver.com>
-Date: Thu, 29 Jan 2015 12:56:18 -0600
-Subject: ARM: cmpxchg: define __HAVE_ARCH_CMPXCHG for armv6 and later
-
-Both pi_stress and sigwaittest from rt-tests show a performance gain with
-__HAVE_ARCH_CMPXCHG. Test results on coretile_express_a9x4:
-
-pi_stress -p 99 --duration=300 (on linux-3.4-rc5; bigger is better)
- vanilla: Total inversion performed: 5493381
- patched: Total inversion performed: 5621746
-
-sigwaittest -p 99 -l 100000 (on linux-3.4-rc5-rt6; less is better)
- 3.4-rc5-rt6: Min 24, Cur 27, Avg 30, Max 98
- patched: Min 19, Cur 21, Avg 23, Max 96
-
-Signed-off-by: Yong Zhang <yong.zhang0 at gmail.com>
-Cc: Russell King <rmk+kernel at arm.linux.org.uk>
-Cc: Nicolas Pitre <nico at linaro.org>
-Cc: Will Deacon <will.deacon at arm.com>
-Cc: Catalin Marinas <catalin.marinas at arm.com>
-Cc: Thomas Gleixner <tglx at linutronix.de>
-Cc: linux-arm-kernel at lists.infradead.org
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/include/asm/cmpxchg.h | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/arch/arm/include/asm/cmpxchg.h
-+++ b/arch/arm/include/asm/cmpxchg.h
-@@ -129,6 +129,8 @@ static inline unsigned long __xchg(unsig
-
- #else /* min ARCH >= ARMv6 */
-
-+#define __HAVE_ARCH_CMPXCHG 1
-+
- extern void __bad_cmpxchg(volatile void *ptr, int size);
-
- /*
diff --git a/patches/ASoC-Intel-sst-use-instead-of-at-the-of-a-C-statemen.patch b/patches/ASoC-Intel-sst-use-instead-of-at-the-of-a-C-statemen.patch
deleted file mode 100644
index 53e65cde29271..0000000000000
--- a/patches/ASoC-Intel-sst-use-instead-of-at-the-of-a-C-statemen.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 11 Jun 2015 14:17:06 +0200
-Subject: ASoC: Intel: sst: use ; instead of , at the end of a C statement
-
-This was spotted by Fernando Lopez-Lezcano <nando@ccrma.Stanford.EDU>
-while he tried to compile a -RT kernel with this driver enabled.
-"make C=2" would also warn about this. This is based on his patch.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- sound/soc/intel/atom/sst/sst.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/sound/soc/intel/atom/sst/sst.c
-+++ b/sound/soc/intel/atom/sst/sst.c
-@@ -368,8 +368,8 @@ static inline void sst_restore_shim64(st
- * initialize by FW or driver when firmware is loaded
- */
- spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags);
-- sst_shim_write64(shim, SST_IMRX, shim_regs->imrx),
-- sst_shim_write64(shim, SST_CSR, shim_regs->csr),
-+ sst_shim_write64(shim, SST_IMRX, shim_regs->imrx);
-+ sst_shim_write64(shim, SST_CSR, shim_regs->csr);
- spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags);
- }
-
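
The bug class is easy to reproduce in plain C: the comma operator silently
fuses the two register writes into one statement, so the code still compiles
and behaves the same - which is exactly why it slips past review. A
standalone illustration with userspace stand-ins (write_reg and the register
names are mock-ups, not the driver code):

	#include <stdio.h>

	static void write_reg(const char *name, int val)
	{
		printf("%s <- %d\n", name, val);
	}

	int main(void)
	{
		write_reg("SST_IMRX", 1),	/* comma: the statement continues */
		write_reg("SST_CSR", 2);	/* ...and only ends here */
		return 0;
	}
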
diff --git a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
index 029dfa695fc38..2e005d172b831 100644
--- a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1163,6 +1163,7 @@ static int syslog_print_all(char __user
+@@ -1203,6 +1203,7 @@ static int syslog_print_all(char __user
{
char *text;
int len = 0;
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
-@@ -1174,7 +1175,14 @@ static int syslog_print_all(char __user
+@@ -1214,7 +1215,14 @@ static int syslog_print_all(char __user
u64 seq;
u32 idx;
enum log_flags prev;
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (clear_seq < log_first_seq) {
/* messages are gone, move to first available one */
clear_seq = log_first_seq;
-@@ -1195,6 +1203,14 @@ static int syslog_print_all(char __user
+@@ -1235,6 +1243,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* move first record forward until length fits into the buffer */
-@@ -1208,6 +1224,14 @@ static int syslog_print_all(char __user
+@@ -1248,6 +1264,14 @@ static int syslog_print_all(char __user
prev = msg->flags;
idx = log_next(idx);
seq++;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* last message fitting into this dump */
-@@ -1248,6 +1272,7 @@ static int syslog_print_all(char __user
+@@ -1288,6 +1312,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
diff --git a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
index 7440ee2bf3547..0439c87b572aa 100644
--- a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
@@ -5,82 +5,17 @@ Subject: KVM: lapic: mark LAPIC timer handler as irqsafe
Since the lapic timer handler only wakes up a simple waitqueue,
it can be executed from hardirq context.
-Also handle the case where hrtimer_start_expires fails due to -ETIME,
-by injecting the interrupt to the guest immediately.
-
Reduces average cyclictest latency by 3us.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/x86/kvm/lapic.c | 40 +++++++++++++++++++++++++++++++++++++---
- 1 file changed, 37 insertions(+), 3 deletions(-)
+ arch/x86/kvm/lapic.c | 1 +
+ 1 file changed, 1 insertion(+)
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -1169,8 +1169,36 @@ void wait_lapic_expire(struct kvm_vcpu *
- __delay(tsc_deadline - guest_tsc);
- }
-
-+static enum hrtimer_restart apic_timer_fn(struct hrtimer *data);
-+
-+static void __apic_timer_expired(struct hrtimer *data)
-+{
-+ int ret, i = 0;
-+ enum hrtimer_restart r;
-+ struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
-+
-+ r = apic_timer_fn(data);
-+
-+ if (r == HRTIMER_RESTART) {
-+ do {
-+ ret = hrtimer_start_expires(data, HRTIMER_MODE_ABS);
-+ if (ret == -ETIME)
-+ hrtimer_add_expires_ns(&ktimer->timer,
-+ ktimer->period);
-+ i++;
-+ } while (ret == -ETIME && i < 10);
-+
-+ if (ret == -ETIME) {
-+ printk_once(KERN_ERR "%s: failed to reprogram timer\n",
-+ __func__);
-+ WARN_ON_ONCE(1);
-+ }
-+ }
-+}
-+
- static void start_apic_timer(struct kvm_lapic *apic)
- {
-+ int ret;
- ktime_t now;
-
- atomic_set(&apic->lapic_timer.pending, 0);
-@@ -1201,9 +1229,11 @@ static void start_apic_timer(struct kvm_
- }
- }
-
-- hrtimer_start(&apic->lapic_timer.timer,
-+ ret = hrtimer_start(&apic->lapic_timer.timer,
- ktime_add_ns(now, apic->lapic_timer.period),
- HRTIMER_MODE_ABS);
-+ if (ret == -ETIME)
-+ __apic_timer_expired(&apic->lapic_timer.timer);
-
- apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
- PRIx64 ", "
-@@ -1235,8 +1265,10 @@ static void start_apic_timer(struct kvm_
- do_div(ns, this_tsc_khz);
- expire = ktime_add_ns(now, ns);
- expire = ktime_sub_ns(expire, lapic_timer_advance_ns);
-- hrtimer_start(&apic->lapic_timer.timer,
-+ ret = hrtimer_start(&apic->lapic_timer.timer,
- expire, HRTIMER_MODE_ABS);
-+ if (ret == -ETIME)
-+ __apic_timer_expired(&apic->lapic_timer.timer);
- } else
- apic_timer_expired(apic);
-
-@@ -1709,6 +1741,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -1801,6 +1801,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
apic->lapic_timer.timer.function = apic_timer_fn;
@@ -88,13 +23,3 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* APIC is created enabled. This will prevent kvm_lapic_set_base from
-@@ -1836,7 +1869,8 @@ void __kvm_migrate_apic_timer(struct kvm
-
- timer = &vcpu->arch.apic->lapic_timer.timer;
- if (hrtimer_cancel(timer))
-- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
-+ if (hrtimer_start_expires(timer, HRTIMER_MODE_ABS) == -ETIME)
-+ __apic_timer_expired(timer);
- }
-
- /*
diff --git a/patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch b/patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch
index 16160641401fa..94fddc6f575c6 100644
--- a/patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch
+++ b/patches/KVM-use-simple-waitqueue-for-vcpu-wq.patch
@@ -32,12 +32,12 @@ This patch reduces the average latency in my tests from 14us to 11us.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/arm/kvm/arm.c | 4 ++--
+ arch/arm/kvm/arm.c | 8 ++++----
arch/arm/kvm/psci.c | 4 ++--
arch/powerpc/include/asm/kvm_host.h | 4 ++--
arch/powerpc/kvm/book3s_hv.c | 23 +++++++++++------------
arch/s390/include/asm/kvm_host.h | 2 +-
- arch/s390/kvm/interrupt.c | 8 ++++----
+ arch/s390/kvm/interrupt.c | 4 ++--
arch/x86/kvm/lapic.c | 6 +++---
include/linux/kvm_host.h | 4 ++--
virt/kvm/async_pf.c | 4 ++--
@@ -46,21 +46,32 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
-@@ -474,9 +474,9 @@ bool kvm_arch_intc_initialized(struct kv
+@@ -498,18 +498,18 @@ static void kvm_arm_resume_guest(struct
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
++ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+ vcpu->arch.pause = false;
+- wake_up_interruptible(wq);
++ swait_wake_interruptible(wq);
+ }
+ }
- static void vcpu_pause(struct kvm_vcpu *vcpu)
+ static void vcpu_sleep(struct kvm_vcpu *vcpu)
{
- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
-+ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
++ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
-- wait_event_interruptible(*wq, !vcpu->arch.pause);
-+ swait_event_interruptible(*wq, !vcpu->arch.pause);
+- wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
++ swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+ (!vcpu->arch.pause)));
}
- static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
-@@ -68,7 +68,7 @@ static unsigned long kvm_psci_vcpu_on(st
+@@ -70,7 +70,7 @@ static unsigned long kvm_psci_vcpu_on(st
{
struct kvm *kvm = source_vcpu->kvm;
struct kvm_vcpu *vcpu = NULL;
@@ -69,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned long cpu_id;
unsigned long context_id;
phys_addr_t target_pc;
-@@ -117,7 +117,7 @@ static unsigned long kvm_psci_vcpu_on(st
+@@ -119,7 +119,7 @@ static unsigned long kvm_psci_vcpu_on(st
smp_mb(); /* Make sure the above is visible */
wq = kvm_arch_vcpu_wq(vcpu);
@@ -80,16 +91,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
-@@ -280,7 +280,7 @@ struct kvmppc_vcore {
- u8 in_guest;
+@@ -286,7 +286,7 @@ struct kvmppc_vcore {
struct list_head runnable_threads;
+ struct list_head preempt_list;
spinlock_t lock;
- wait_queue_head_t wq;
+ struct swait_head wq;
spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
u64 stolen_tb;
u64 preempt_tb;
-@@ -613,7 +613,7 @@ struct kvm_vcpu_arch {
+@@ -626,7 +626,7 @@ struct kvm_vcpu_arch {
u8 prodded;
u32 last_inst;
@@ -100,10 +111,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int trap;
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
-@@ -115,11 +115,11 @@ static bool kvmppc_ipi_thread(int cpu)
+@@ -114,11 +114,11 @@ static bool kvmppc_ipi_thread(int cpu)
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
- int cpu = vcpu->cpu;
+ int cpu;
- wait_queue_head_t *wqp;
+ struct swait_head *wqp;
@@ -115,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
++vcpu->stat.halt_wakeup;
}
-@@ -686,8 +686,8 @@ int kvmppc_pseries_do_hcall(struct kvm_v
+@@ -701,8 +701,8 @@ int kvmppc_pseries_do_hcall(struct kvm_v
tvcpu->arch.prodded = 1;
smp_mb();
if (vcpu->arch.ceded) {
@@ -126,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
vcpu->stat.halt_wakeup++;
}
}
-@@ -1426,7 +1426,7 @@ static struct kvmppc_vcore *kvmppc_vcore
+@@ -1441,7 +1441,7 @@ static struct kvmppc_vcore *kvmppc_vcore
INIT_LIST_HEAD(&vcore->runnable_threads);
spin_lock_init(&vcore->lock);
spin_lock_init(&vcore->stoltb_lock);
@@ -135,7 +146,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
vcore->first_vcpuid = core * threads_per_subcore;
-@@ -2073,10 +2073,9 @@ static void kvmppc_vcore_blocked(struct
+@@ -2513,10 +2513,9 @@ static void kvmppc_vcore_blocked(struct
{
struct kvm_vcpu *vcpu;
int do_sleep = 1;
@@ -148,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Check one last time for pending exceptions and ceded state after
-@@ -2090,7 +2089,7 @@ static void kvmppc_vcore_blocked(struct
+@@ -2530,7 +2529,7 @@ static void kvmppc_vcore_blocked(struct
}
if (!do_sleep) {
@@ -157,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
}
-@@ -2098,7 +2097,7 @@ static void kvmppc_vcore_blocked(struct
+@@ -2538,7 +2537,7 @@ static void kvmppc_vcore_blocked(struct
trace_kvmppc_vcore_blocked(vc, 0);
spin_unlock(&vc->lock);
schedule();
@@ -166,8 +177,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
trace_kvmppc_vcore_blocked(vc, 1);
-@@ -2142,7 +2141,7 @@ static int kvmppc_run_vcpu(struct kvm_ru
- kvmppc_start_thread(vcpu);
+@@ -2594,7 +2593,7 @@ static int kvmppc_run_vcpu(struct kvm_ru
+ kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
- wake_up(&vc->wq);
@@ -177,7 +188,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
-@@ -419,7 +419,7 @@ struct kvm_s390_irq_payload {
+@@ -427,7 +427,7 @@ struct kvm_s390_irq_payload {
struct kvm_s390_local_interrupt {
spinlock_t lock;
struct kvm_s390_float_interrupt *float_int;
@@ -188,7 +199,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct kvm_s390_irq_payload irq;
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
-@@ -875,13 +875,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu
+@@ -868,13 +868,13 @@ int kvm_s390_handle_wait(struct kvm_vcpu
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
@@ -204,27 +215,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
vcpu->stat.halt_wakeup++;
}
}
-@@ -987,7 +987,7 @@ int kvm_s390_inject_program_int(struct k
- spin_lock(&li->lock);
- irq.u.pgm.code = code;
- __inject_prog(vcpu, &irq);
-- BUG_ON(waitqueue_active(li->wq));
-+ BUG_ON(swaitqueue_active(li->wq));
- spin_unlock(&li->lock);
- return 0;
- }
-@@ -1006,7 +1006,7 @@ int kvm_s390_inject_prog_irq(struct kvm_
- spin_lock(&li->lock);
- irq.u.pgm = *pgm_info;
- rc = __inject_prog(vcpu, &irq);
-- BUG_ON(waitqueue_active(li->wq));
-+ BUG_ON(swaitqueue_active(li->wq));
- spin_unlock(&li->lock);
- return rc;
- }
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -1106,7 +1106,7 @@ static void apic_update_lvtt(struct kvm_
+@@ -1195,7 +1195,7 @@ static void apic_update_lvtt(struct kvm_
static void apic_timer_expired(struct kvm_lapic *apic)
{
struct kvm_vcpu *vcpu = apic->vcpu;
@@ -233,7 +226,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct kvm_timer *ktimer = &apic->lapic_timer;
if (atomic_read(&apic->lapic_timer.pending))
-@@ -1115,8 +1115,8 @@ static void apic_timer_expired(struct kv
+@@ -1204,8 +1204,8 @@ static void apic_timer_expired(struct kv
atomic_inc(&apic->lapic_timer.pending);
kvm_set_pending_timer(vcpu);
@@ -246,16 +239,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ktimer->expired_tscdeadline = ktimer->tscdeadline;
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
-@@ -230,7 +230,7 @@ struct kvm_vcpu {
-
+@@ -243,7 +243,7 @@ struct kvm_vcpu {
int fpu_active;
int guest_fpu_loaded, guest_xcr0_loaded;
+ unsigned char fpu_counter;
- wait_queue_head_t wq;
+ struct swait_head wq;
struct pid *pid;
int sigset_active;
sigset_t sigset;
-@@ -701,7 +701,7 @@ static inline bool kvm_arch_has_noncoher
+@@ -794,7 +794,7 @@ static inline bool kvm_arch_has_assigned
}
#endif
@@ -266,10 +259,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return vcpu->arch.wqp;
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
-@@ -94,8 +94,8 @@ static void async_pf_execute(struct work
-
- trace_kvm_async_pf_completed(addr, gva);
-
+@@ -98,8 +98,8 @@ static void async_pf_execute(struct work
+ * This memory barrier pairs with prepare_to_wait's set_current_state()
+ */
+ smp_mb();
- if (waitqueue_active(&vcpu->wq))
- wake_up_interruptible(&vcpu->wq);
+ if (swaitqueue_active(&vcpu->wq))
@@ -279,26 +272,26 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_put_kvm(vcpu->kvm);
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
-@@ -218,7 +218,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu,
- vcpu->kvm = kvm;
+@@ -227,7 +227,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu,
vcpu->vcpu_id = id;
vcpu->pid = NULL;
+ vcpu->halt_poll_ns = 0;
- init_waitqueue_head(&vcpu->wq);
+ init_swait_head(&vcpu->wq);
kvm_async_pf_vcpu_init(vcpu);
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
-@@ -1779,7 +1779,7 @@ static int kvm_vcpu_check_block(struct k
+ vcpu->pre_pcpu = -1;
+@@ -1999,7 +1999,7 @@ static int kvm_vcpu_check_block(struct k
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
ktime_t start, cur;
- DEFINE_WAIT(wait);
+ DEFINE_SWAITER(wait);
bool waited = false;
+ u64 block_ns;
- start = cur = ktime_get();
-@@ -1800,7 +1800,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
- }
+@@ -2024,7 +2024,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
+ kvm_arch_vcpu_blocking(vcpu);
for (;;) {
- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
@@ -306,7 +299,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (kvm_vcpu_check_block(vcpu) < 0)
break;
-@@ -1809,7 +1809,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
+@@ -2033,7 +2033,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcp
schedule();
}
@@ -314,8 +307,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ swait_finish(&vcpu->wq, &wait);
cur = ktime_get();
- out:
-@@ -1825,11 +1825,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu
+ kvm_arch_vcpu_unblocking(vcpu);
+@@ -2065,11 +2065,11 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu
{
int me;
int cpu = vcpu->cpu;
@@ -330,7 +323,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
++vcpu->stat.halt_wakeup;
}
-@@ -1930,7 +1930,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *m
+@@ -2170,7 +2170,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *m
continue;
if (vcpu == me)
continue;
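
Taken together, the hunks follow one conversion pattern throughout. A hedged
summary of it, using the swait API names as this series defines them (sketch
only, not compilable stand-alone):

	/* sleeper side, e.g. kvm_vcpu_block(): */
	DEFINE_SWAITER(wait);

	for (;;) {
		swait_prepare(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
		if (kvm_vcpu_check_block(vcpu) < 0)
			break;
		schedule();
	}
	swait_finish(&vcpu->wq, &wait);

	/* waker side, e.g. kvm_vcpu_kick(): */
	if (swaitqueue_active(&vcpu->wq)) {
		swait_wake_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}
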
diff --git a/patches/Revert-x86-Do-not-disable-preemption-in-int3-on-32bi.patch b/patches/Revert-x86-Do-not-disable-preemption-in-int3-on-32bi.patch
deleted file mode 100644
index a6cbd45a0c0ee..0000000000000
--- a/patches/Revert-x86-Do-not-disable-preemption-in-int3-on-32bi.patch
+++ /dev/null
@@ -1,90 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 22 Dec 2015 12:17:39 +0100
-Subject: Revert "x86: Do not disable preemption in int3 on 32bit"
-
-Since commit 959274753857 ("x86, traps: Track entry into and exit from
-IST context") we always disable preemption, which means the original
-patch is no longer useful.
-The commit mentioned is part of v4.0-rc1+.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/traps.c | 28 +++++++---------------------
- 1 file changed, 7 insertions(+), 21 deletions(-)
-
---- a/arch/x86/kernel/traps.c
-+++ b/arch/x86/kernel/traps.c
-@@ -88,21 +88,9 @@ static inline void conditional_sti(struc
- local_irq_enable();
- }
-
--static inline void conditional_sti_ist(struct pt_regs *regs)
-+static inline void preempt_conditional_sti(struct pt_regs *regs)
- {
--#ifdef CONFIG_X86_64
-- /*
-- * X86_64 uses a per CPU stack on the IST for certain traps
-- * like int3. The task can not be preempted when using one
-- * of these stacks, thus preemption must be disabled, otherwise
-- * the stack can be corrupted if the task is scheduled out,
-- * and another task comes in and uses this stack.
-- *
-- * On x86_32 the task keeps its own stack and it is OK if the
-- * task schedules out.
-- */
- preempt_count_inc();
--#endif
- if (regs->flags & X86_EFLAGS_IF)
- local_irq_enable();
- }
-@@ -113,13 +101,11 @@ static inline void conditional_cli(struc
- local_irq_disable();
- }
-
--static inline void conditional_cli_ist(struct pt_regs *regs)
-+static inline void preempt_conditional_cli(struct pt_regs *regs)
- {
- if (regs->flags & X86_EFLAGS_IF)
- local_irq_disable();
--#ifdef CONFIG_X86_64
- preempt_count_dec();
--#endif
- }
-
- enum ctx_state ist_enter(struct pt_regs *regs)
-@@ -550,9 +536,9 @@ dotraplinkage void notrace do_int3(struc
- * as we may switch to the interrupt stack.
- */
- debug_stack_usage_inc();
-- conditional_sti_ist(regs);
-+ preempt_conditional_sti(regs);
- do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
-- conditional_cli_ist(regs);
-+ preempt_conditional_cli(regs);
- debug_stack_usage_dec();
- exit:
- ist_exit(regs, prev_state);
-@@ -682,12 +668,12 @@ dotraplinkage void do_debug(struct pt_re
- debug_stack_usage_inc();
-
- /* It's safe to allow irq's after DR6 has been saved */
-- conditional_sti_ist(regs);
-+ preempt_conditional_sti(regs);
-
- if (v8086_mode(regs)) {
- handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
- X86_TRAP_DB);
-- conditional_cli_ist(regs);
-+ preempt_conditional_cli(regs);
- debug_stack_usage_dec();
- goto exit;
- }
-@@ -707,7 +693,7 @@ dotraplinkage void do_debug(struct pt_re
- si_code = get_si_code(tsk->thread.debugreg6);
- if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
- send_sigtrap(tsk, regs, error_code, si_code);
-- conditional_cli_ist(regs);
-+ preempt_conditional_cli(regs);
- debug_stack_usage_dec();
-
- exit:
diff --git a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index 67dbcae408a4d..7ce92eec47923 100644
--- a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
-@@ -112,7 +112,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pen
+@@ -116,7 +116,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pen
* interrupt level
*/
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ACPI_FAILURE (status)) {
return_ACPI_STATUS (status);
}
-@@ -141,7 +141,7 @@ void acpi_ut_mutex_terminate(void)
+@@ -156,7 +156,7 @@ void acpi_ut_mutex_terminate(void)
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock);
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Delete the reader/writer lock */
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
-@@ -123,6 +123,7 @@
+@@ -127,6 +127,7 @@
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define acpi_cpu_flags unsigned long
/* Use native linux version of acpi_os_allocate_zeroed */
-@@ -141,6 +142,20 @@
+@@ -145,6 +146,20 @@
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index de7aa8b52aad8..0b4207ff9c66c 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -19,17 +19,17 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -69,6 +69,7 @@ config ARM64
+@@ -76,6 +76,7 @@ config ARM64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE
+ select HAVE_PREEMPT_LAZY
select HAVE_SYSCALL_TRACEPOINTS
+ select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
- select IRQ_FORCED_THREADING
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
-@@ -47,6 +47,7 @@ struct thread_info {
+@@ -49,6 +49,7 @@ struct thread_info {
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
int preempt_count; /* 0 => preemptable, <0 => bug */
@@ -37,7 +37,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
int cpu; /* cpu */
};
-@@ -101,6 +102,7 @@ static inline struct thread_info *curren
+@@ -103,6 +104,7 @@ static inline struct thread_info *curren
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
@@ -45,7 +45,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-@@ -117,6 +119,7 @@ static inline struct thread_info *curren
+@@ -118,6 +120,7 @@ static inline struct thread_info *curren
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
@@ -65,7 +65,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
-@@ -367,11 +367,16 @@ ENDPROC(el1_sync)
+@@ -363,11 +363,16 @@ ENDPROC(el1_sync)
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
@@ -85,7 +85,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
-@@ -385,6 +390,7 @@ ENDPROC(el1_irq)
+@@ -381,6 +386,7 @@ ENDPROC(el1_irq)
1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
@@ -93,8 +93,8 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
ret x24
#endif
-@@ -622,6 +628,7 @@ ENDPROC(cpu_switch_to)
- str x0, [sp, #S_X0] // returned x0
+@@ -625,6 +631,7 @@ ENDPROC(cpu_switch_to)
+ */
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
+ tbnz x1, #TIF_NEED_RESCHED_LAZY, work_resched
diff --git a/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch b/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
index 7cf425a86288c..b5a3fbc3dfbba 100644
--- a/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
+++ b/patches/arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
@@ -13,44 +13,117 @@ commit 8fe82a55 ("ARM: at91: sparse irq support") which is included since v3.6.
Patch based on what Sami Pietikäinen <Sami.Pietikainen@wapice.com> suggested].
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/clocksource/timer-atmel-pit.c | 4 ++++
- drivers/clocksource/timer-atmel-st.c | 1 +
- 2 files changed, 5 insertions(+)
+ drivers/clocksource/timer-atmel-pit.c | 15 ++++++++-------
+ drivers/clocksource/timer-atmel-st.c | 28 ++++++++++++++++++++--------
+ 2 files changed, 28 insertions(+), 15 deletions(-)
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
-@@ -90,6 +90,7 @@ static cycle_t read_pit_clk(struct clock
- return elapsed;
+@@ -96,6 +96,7 @@ static int pit_clkevt_shutdown(struct cl
+
+ /* disable irq, leaving the clocksource active */
+ pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
++ free_irq(data->irq, data);
+ return 0;
}
-+static struct irqaction at91sam926x_pit_irq;
- /*
- * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16)
- */
-@@ -100,6 +101,8 @@ pit_clkevt_mode(enum clock_event_mode mo
-
- switch (mode) {
- case CLOCK_EVT_MODE_PERIODIC:
-+ /* Set up irq handler */
-+ setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
- /* update clocksource counter */
- data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
- pit_write(data->base, AT91_PIT_MR,
-@@ -113,6 +116,7 @@ pit_clkevt_mode(enum clock_event_mode mo
- /* disable irq, leaving the clocksource active */
- pit_write(data->base, AT91_PIT_MR,
- (data->cycle - 1) | AT91_PIT_PITEN);
-+ remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
+@@ -105,6 +106,13 @@ static int pit_clkevt_shutdown(struct cl
+ static int pit_clkevt_set_periodic(struct clock_event_device *dev)
+ {
+ struct pit_data *data = clkevt_to_pit_data(dev);
++ int ret;
++
++ ret = request_irq(data->irq, at91sam926x_pit_interrupt,
++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
++ "at91_tick", data);
++ if (ret)
++ panic(pr_fmt("Unable to setup IRQ\n"));
+
+ /* update clocksource counter */
+ data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
+@@ -206,13 +214,6 @@ static void __init at91sam926x_pit_commo
+ data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ clocksource_register_hz(&data->clksrc, pit_rate);
+
+- /* Set up irq handler */
+- ret = request_irq(data->irq, at91sam926x_pit_interrupt,
+- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
+- "at91_tick", data);
+- if (ret)
+- panic(pr_fmt("Unable to setup IRQ\n"));
+-
+ /* Set up and register clockevents */
+ data->clkevt.name = "pit";
+ data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC;
--- a/drivers/clocksource/timer-atmel-st.c
+++ b/drivers/clocksource/timer-atmel-st.c
-@@ -131,6 +131,7 @@ clkevt32k_mode(enum clock_event_mode mod
- break;
- case CLOCK_EVT_MODE_SHUTDOWN:
- case CLOCK_EVT_MODE_UNUSED:
-+ remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
- case CLOCK_EVT_MODE_RESUME:
- irqmask = 0;
- break;
+@@ -115,18 +115,29 @@ static void clkdev32k_disable_and_flush_
+ last_crtr = read_CRTR();
+ }
+
++static int atmel_st_irq;
++
+ static int clkevt32k_shutdown(struct clock_event_device *evt)
+ {
+ clkdev32k_disable_and_flush_irq();
+ irqmask = 0;
+ regmap_write(regmap_st, AT91_ST_IER, irqmask);
++ free_irq(atmel_st_irq, regmap_st);
+ return 0;
+ }
+
+ static int clkevt32k_set_oneshot(struct clock_event_device *dev)
+ {
++ int ret;
++
+ clkdev32k_disable_and_flush_irq();
+
++ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
++ "at91_tick", regmap_st);
++ if (ret)
++ panic(pr_fmt("Unable to setup IRQ\n"));
++
+ /*
+ * ALM for oneshot irqs, set by next_event()
+ * before 32 seconds have passed.
+@@ -139,8 +150,16 @@ static int clkevt32k_set_oneshot(struct
+
+ static int clkevt32k_set_periodic(struct clock_event_device *dev)
+ {
++ int ret;
++
+ clkdev32k_disable_and_flush_irq();
+
++ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt,
++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
++ "at91_tick", regmap_st);
++ if (ret)
++ panic(pr_fmt("Unable to setup IRQ\n"));
++
+ /* PIT for periodic irqs; fixed rate of 1/HZ */
+ irqmask = AT91_ST_PITS;
+ regmap_write(regmap_st, AT91_ST_PIMR, timer_latch);
+@@ -198,7 +217,7 @@ static void __init atmel_st_timer_init(s
+ {
+ struct clk *sclk;
+ unsigned int sclk_rate, val;
+- int irq, ret;
++ int ret;
+
+ regmap_st = syscon_node_to_regmap(node);
+ if (IS_ERR(regmap_st))
+@@ -214,13 +233,6 @@ static void __init atmel_st_timer_init(s
+ if (!irq)
+ panic(pr_fmt("Unable to get IRQ from DT\n"));
+
+- /* Make IRQs happen for the system timer */
+- ret = request_irq(irq, at91rm9200_timer_interrupt,
+- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
+- "at91_tick", regmap_st);
+- if (ret)
+- panic(pr_fmt("Unable to setup IRQ\n"));
+-
+ sclk = of_clk_get(node, 0);
+ if (IS_ERR(sclk))
+ panic(pr_fmt("Unable to get slow clock\n"));
diff --git a/patches/arm-convert-boot-lock-to-raw.patch b/patches/arm-convert-boot-lock-to-raw.patch
index aa79108db45fe..06a82869f033e 100644
--- a/patches/arm-convert-boot-lock-to-raw.patch
+++ b/patches/arm-convert-boot-lock-to-raw.patch
@@ -20,19 +20,18 @@ Link: http://lkml.kernel.org/r/4E77B952.3010606@am.sony.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/mach-exynos/platsmp.c | 12 ++++++------
- arch/arm/mach-hisi/platmcpm.c | 26 +++++++++++++-------------
+ arch/arm/mach-hisi/platmcpm.c | 22 +++++++++++-----------
arch/arm/mach-omap2/omap-smp.c | 10 +++++-----
arch/arm/mach-prima2/platsmp.c | 10 +++++-----
arch/arm/mach-qcom/platsmp.c | 10 +++++-----
arch/arm/mach-spear/platsmp.c | 10 +++++-----
arch/arm/mach-sti/platsmp.c | 10 +++++-----
- arch/arm/mach-ux500/platsmp.c | 10 +++++-----
arch/arm/plat-versatile/platsmp.c | 10 +++++-----
- 9 files changed, 54 insertions(+), 54 deletions(-)
+ 8 files changed, 47 insertions(+), 47 deletions(-)
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
-@@ -231,7 +231,7 @@ static void __iomem *scu_base_addr(void)
+@@ -230,7 +230,7 @@ static void __iomem *scu_base_addr(void)
return (void __iomem *)(S5P_VA_SCU);
}
@@ -41,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void exynos_secondary_init(unsigned int cpu)
{
-@@ -244,8 +244,8 @@ static void exynos_secondary_init(unsign
+@@ -243,8 +243,8 @@ static void exynos_secondary_init(unsign
/*
* Synchronise with the boot thread.
*/
@@ -51,8 +50,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ raw_spin_unlock(&boot_lock);
}
- static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -259,7 +259,7 @@ static int exynos_boot_secondary(unsigne
+ int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
+@@ -308,7 +308,7 @@ static int exynos_boot_secondary(unsigne
* Set synchronisation state between this boot processor
* and the secondary one
*/
@@ -61,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The secondary processor is waiting to be released from
-@@ -286,7 +286,7 @@ static int exynos_boot_secondary(unsigne
+@@ -335,7 +335,7 @@ static int exynos_boot_secondary(unsigne
if (timeout == 0) {
printk(KERN_ERR "cpu1 power enable failed");
@@ -70,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return -ETIMEDOUT;
}
}
-@@ -342,7 +342,7 @@ static int exynos_boot_secondary(unsigne
+@@ -381,7 +381,7 @@ static int exynos_boot_secondary(unsigne
* calibrations, then wait for it to finish
*/
fail:
@@ -81,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/arch/arm/mach-hisi/platmcpm.c
+++ b/arch/arm/mach-hisi/platmcpm.c
-@@ -57,7 +57,7 @@
+@@ -61,7 +61,7 @@
static void __iomem *sysctrl, *fabric;
static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
@@ -90,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static u32 fabric_phys_addr;
/*
* [0]: bootwrapper physical address
-@@ -104,7 +104,7 @@ static int hip04_mcpm_power_up(unsigned
+@@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned
if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
return -EINVAL;
@@ -99,8 +98,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (hip04_cpu_table[cluster][cpu])
goto out;
-@@ -133,7 +133,7 @@ static int hip04_mcpm_power_up(unsigned
- udelay(20);
+@@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned
+
out:
hip04_cpu_table[cluster][cpu]++;
- spin_unlock_irq(&boot_lock);
@@ -108,43 +107,39 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -149,7 +149,7 @@ static void hip04_mcpm_power_down(void)
-
- __mcpm_cpu_going_down(cpu, cluster);
+@@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
- spin_lock(&boot_lock);
+ raw_spin_lock(&boot_lock);
- BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
hip04_cpu_table[cluster][cpu]--;
if (hip04_cpu_table[cluster][cpu] == 1) {
-@@ -162,7 +162,7 @@ static void hip04_mcpm_power_down(void)
-
- last_man = hip04_cluster_is_down(cluster);
- if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- /* Since it's Cortex A15, disable L2 prefetching. */
- asm volatile(
- "mcr p15, 1, %0, c15, c0, 3 \n\t"
-@@ -173,7 +173,7 @@ static void hip04_mcpm_power_down(void)
- hip04_set_snoop_filter(cluster, 0);
- __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
- } else {
+ /* A power_up request went ahead of us. */
- spin_unlock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
- v7_exit_coherency_flush(louis);
+ return;
+ } else if (hip04_cpu_table[cluster][cpu] > 1) {
+ pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu);
+@@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l
}
-@@ -192,7 +192,7 @@ static int hip04_mcpm_wait_for_powerdown
+ last_man = hip04_cluster_is_down(cluster);
+- spin_unlock(&boot_lock);
++ raw_spin_unlock(&boot_lock);
+ if (last_man) {
+ /* Since it's Cortex A15, disable L2 prefetching. */
+ asm volatile(
+@@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l
cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
count = TIMEOUT_MSEC / POLL_MSEC;
- spin_lock_irq(&boot_lock);
+ raw_spin_lock_irq(&boot_lock);
for (tries = 0; tries < count; tries++) {
- if (hip04_cpu_table[cluster][cpu]) {
- ret = -EBUSY;
-@@ -202,10 +202,10 @@ static int hip04_mcpm_wait_for_powerdown
+ if (hip04_cpu_table[cluster][cpu])
+ goto err;
+@@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l
data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
if (data & CORE_WFI_STATUS(cpu))
break;
@@ -157,32 +152,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (tries >= count)
goto err;
-@@ -220,10 +220,10 @@ static int hip04_mcpm_wait_for_powerdown
- }
- if (tries >= count)
+@@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l
goto err;
+ if (hip04_cluster_is_down(cluster))
+ hip04_set_snoop_filter(cluster, 0);
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
- return 0;
+ return 1;
err:
- spin_unlock_irq(&boot_lock);
+ raw_spin_unlock_irq(&boot_lock);
- return ret;
- }
-
-@@ -235,10 +235,10 @@ static void hip04_mcpm_powered_up(void)
- cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
- cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
- if (!hip04_cpu_table[cluster][cpu])
- hip04_cpu_table[cluster][cpu] = 1;
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
+ return 0;
}
-
- static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
+ #endif
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -43,7 +43,7 @@
@@ -273,7 +255,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+static DEFINE_RAW_SPINLOCK(boot_lock);
#ifdef CONFIG_HOTPLUG_CPU
- static void __ref qcom_cpu_die(unsigned int cpu)
+ static void qcom_cpu_die(unsigned int cpu)
@@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned
/*
* Synchronise with the boot thread.
@@ -345,7 +327,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
-@@ -34,7 +34,7 @@ static void write_pen_release(int val)
+@@ -35,7 +35,7 @@ static void write_pen_release(int val)
sync_cache_w(&pen_release);
}
@@ -354,7 +336,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void sti_secondary_init(unsigned int cpu)
{
-@@ -49,8 +49,8 @@ static void sti_secondary_init(unsigned
+@@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned
/*
* Synchronise with the boot thread.
*/
@@ -365,47 +347,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -61,7 +61,7 @@ static int sti_boot_secondary(unsigned i
- * set synchronisation state between this boot processor
- * and the secondary one
- */
-- spin_lock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-
- /*
- * The secondary processor is waiting to be released from
-@@ -92,7 +92,7 @@ static int sti_boot_secondary(unsigned i
- * now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
-- spin_unlock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
-
- return pen_release != -1 ? -ENOSYS : 0;
- }
---- a/arch/arm/mach-ux500/platsmp.c
-+++ b/arch/arm/mach-ux500/platsmp.c
-@@ -51,7 +51,7 @@ static void __iomem *scu_base_addr(void)
- return NULL;
- }
-
--static DEFINE_SPINLOCK(boot_lock);
-+static DEFINE_RAW_SPINLOCK(boot_lock);
-
- static void ux500_secondary_init(unsigned int cpu)
- {
-@@ -64,8 +64,8 @@ static void ux500_secondary_init(unsigne
- /*
- * Synchronise with the boot thread.
- */
-- spin_lock(&boot_lock);
-- spin_unlock(&boot_lock);
-+ raw_spin_lock(&boot_lock);
-+ raw_spin_unlock(&boot_lock);
- }
-
- static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -76,7 +76,7 @@ static int ux500_boot_secondary(unsigned
+@@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned i
* set synchronisation state between this boot processor
* and the secondary one
*/
@@ -414,7 +356,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The secondary processor is waiting to be released from
-@@ -97,7 +97,7 @@ static int ux500_boot_secondary(unsigned
+@@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned i
* now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
diff --git a/patches/arm-enable-highmem-for-rt.patch b/patches/arm-enable-highmem-for-rt.patch
index fe8d45a62817d..baafd26b86e76 100644
--- a/patches/arm-enable-highmem-for-rt.patch
+++ b/patches/arm-enable-highmem-for-rt.patch
@@ -7,9 +7,9 @@ fixup highmem for ARM.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/arm/include/asm/switch_to.h | 8 ++++++
- arch/arm/mm/highmem.c | 46 ++++++++++++++++++++++++++++++++++-----
+ arch/arm/mm/highmem.c | 45 ++++++++++++++++++++++++++++++++++-----
include/linux/highmem.h | 1
- 3 files changed, 50 insertions(+), 5 deletions(-)
+ 3 files changed, 49 insertions(+), 5 deletions(-)
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -27,10 +27,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
* during a TLB maintenance operation, so execute an inner-shareable dsb
-@@ -22,6 +29,7 @@ extern struct task_struct *__switch_to(s
-
+@@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(s
#define switch_to(prev,next,last) \
do { \
+ __complete_pending_tlbi(); \
+ switch_kmaps(prev, next); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);
-@@ -143,7 +151,10 @@ void *kmap_atomic_pfn(unsigned long pfn)
+@@ -143,7 +151,34 @@ void *kmap_atomic_pfn(unsigned long pfn)
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
@@ -107,11 +107,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return (void *)vaddr;
}
-@@ -157,3 +168,28 @@ struct page *kmap_atomic_to_page(const v
-
- return pte_page(get_fixmap_pte(vaddr));
- }
-+
+#if defined CONFIG_PREEMPT_RT_FULL
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
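
The switch_kmaps() hook added to switch_to() above is needed because this patch stops kmap_atomic() from disabling preemption on RT: a task may now be switched out while it holds atomic kmaps, so its fixmap slots have to be re-established when it runs again. A conceptual sketch, assuming the per-task kmap_idx/kmap_pte[] bookkeeping the full patch adds to task_struct and the set_fixmap_pte() helper local to arch/arm/mm/highmem.c:

#if defined CONFIG_PREEMPT_RT_FULL
void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
{
	int i;

	/*
	 * Re-install the incoming task's atomic kmaps; the fixmap
	 * slots are per CPU, so recompute them for this CPU.
	 */
	for (i = 0; i < next_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		set_fixmap_pte(idx, next_p->kmap_pte[i]);
	}
}
#endif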
diff --git a/patches/arm-preempt-lazy-support.patch b/patches/arm-preempt-lazy-support.patch
index e3d8ca6958ded..dd96770ef9a8f 100644
--- a/patches/arm-preempt-lazy-support.patch
+++ b/patches/arm-preempt-lazy-support.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -66,6 +66,7 @@ config ARM
+@@ -68,6 +68,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
-@@ -50,6 +50,7 @@ struct cpu_context_save {
+@@ -49,6 +49,7 @@ struct cpu_context_save {
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0 => preemptable, <0 => bug */
@@ -33,15 +33,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mm_segment_t addr_limit; /* address limit */
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
-@@ -147,6 +148,7 @@ extern int vfp_restore_user_hwstate(stru
- #define TIF_SIGPENDING 0
- #define TIF_NEED_RESCHED 1
- #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
-+#define TIF_NEED_RESCHED_LAZY 3
- #define TIF_UPROBE 7
- #define TIF_SYSCALL_TRACE 8
- #define TIF_SYSCALL_AUDIT 9
-@@ -160,6 +162,7 @@ extern int vfp_restore_user_hwstate(stru
+@@ -143,6 +144,7 @@ extern int vfp_restore_user_hwstate(stru
+ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */
+ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+ #define TIF_SECCOMP 7 /* seccomp syscall filtering active */
++#define TIF_NEED_RESCHED_LAZY 8
+
+ #define TIF_NOHZ 12 /* in adaptive nohz mode */
+ #define TIF_USING_IWMMXT 17
+@@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(stru
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
-@@ -208,11 +208,18 @@ ENDPROC(__dabt_svc)
+@@ -215,11 +215,18 @@ ENDPROC(__dabt_svc)
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
svc_exit r5, irq = 1 @ return from exception
-@@ -227,6 +234,8 @@ ENDPROC(__irq_svc)
+@@ -234,6 +241,8 @@ ENDPROC(__irq_svc)
1: bl preempt_schedule_irq @ irq en/disable is done inside
ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
tst r0, #_TIF_NEED_RESCHED
@@ -93,9 +93,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
-@@ -568,7 +568,8 @@ asmlinkage int
- do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
- {
+@@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, un
+ */
+ trace_hardirqs_off();
do {
- if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+ if (likely(thread_flags & (_TIF_NEED_RESCHED |
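
The new TIF_NEED_RESCHED_LAZY bit splits preemption requests in two: RT tasks keep setting TIF_NEED_RESCHED and preempt immediately, while wakeups of SCHED_OTHER tasks only set the lazy bit, which is honoured at the next regular preemption point. The decision the entry code above implements, written out as a C sketch (preempt_lazy_count() is the lazy counterpart of preempt_count() added elsewhere in this series; the function name here is made up):

/* called on IRQ exit, with preempt_count() already known to be 0 */
static void example_irq_exit_preempt(void)
{
	if (test_thread_flag(TIF_NEED_RESCHED)) {
		preempt_schedule_irq();	/* hard request: preempt now */
		return;
	}
	if (test_thread_flag(TIF_NEED_RESCHED_LAZY) &&
	    !preempt_lazy_count())
		preempt_schedule_irq();	/* lazy SCHED_OTHER reschedule */
}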
diff --git a/patches/arm64-convert-patch_lock-to-raw-lock.patch b/patches/arm64-convert-patch_lock-to-raw-lock.patch
deleted file mode 100644
index 5353009407a9d..0000000000000
--- a/patches/arm64-convert-patch_lock-to-raw-lock.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-Subject: arm64: Convert patch_lock to raw lock
-From: Yang Shi <yang.shi@linaro.org>
-Date: Tue, 6 Oct 2015 14:12:31 -0700
-
-When running the kprobe test on an arm64 rt kernel, the following warning is reported:
-
-root@qemu7:~# modprobe kprobe_example
-BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:917
-in_atomic(): 0, irqs_disabled(): 128, pid: 484, name: modprobe
-CPU: 0 PID: 484 Comm: modprobe Not tainted 4.1.6-rt5 #2
-Hardware name: linux,dummy-virt (DT)
-Call trace:
-[<ffffffc0000891b8>] dump_backtrace+0x0/0x128
-[<ffffffc000089300>] show_stack+0x20/0x30
-[<ffffffc00061dae8>] dump_stack+0x1c/0x28
-[<ffffffc0000bbad0>] ___might_sleep+0x120/0x198
-[<ffffffc0006223e8>] rt_spin_lock+0x28/0x40
-[<ffffffc000622b30>] __aarch64_insn_write+0x28/0x78
-[<ffffffc000622e48>] aarch64_insn_patch_text_nosync+0x18/0x48
-[<ffffffc000622ee8>] aarch64_insn_patch_text_cb+0x70/0xa0
-[<ffffffc000622f40>] aarch64_insn_patch_text_sync+0x28/0x48
-[<ffffffc0006236e0>] arch_arm_kprobe+0x38/0x48
-[<ffffffc00010e6f4>] arm_kprobe+0x34/0x50
-[<ffffffc000110374>] register_kprobe+0x4cc/0x5b8
-[<ffffffbffc002038>] kprobe_init+0x38/0x7c [kprobe_example]
-[<ffffffc000084240>] do_one_initcall+0x90/0x1b0
-[<ffffffc00061c498>] do_init_module+0x6c/0x1cc
-[<ffffffc0000fd0c0>] load_module+0x17f8/0x1db0
-[<ffffffc0000fd8cc>] SyS_finit_module+0xb4/0xc8
-
-Convert patch_lock to raw lock to avoid this issue.
-
-Although the problem was found on an rt kernel, the fix should be applicable to
-the mainline kernel too.
-
-Signed-off-by: Yang Shi <yang.shi@linaro.org>
-Acked-by: Steven Rostedt <rostedt@goodmis.org>
-Cc: linux-arm-kernel@lists.infradead.org
-Cc: linaro-kernel@lists.linaro.org
-Cc: catalin.marinas@arm.com
-Cc: will.deacon@arm.com
-Link: http://lkml.kernel.org/r/1444165951-26065-1-git-send-email-yang.shi@linaro.org
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
-v1 -> v2:
- Add Steven's Acked-by
- Fix subject (remove unnecessary "make")
- Fix a typo error in commit log
-
- arch/arm64/kernel/insn.c | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
---- a/arch/arm64/kernel/insn.c
-+++ b/arch/arm64/kernel/insn.c
-@@ -77,7 +77,7 @@ bool __kprobes aarch64_insn_is_nop(u32 i
- }
- }
-
--static DEFINE_SPINLOCK(patch_lock);
-+static DEFINE_RAW_SPINLOCK(patch_lock);
-
- static void __kprobes *patch_map(void *addr, int fixmap)
- {
-@@ -124,13 +124,13 @@ static int __kprobes __aarch64_insn_writ
- unsigned long flags = 0;
- int ret;
-
-- spin_lock_irqsave(&patch_lock, flags);
-+ raw_spin_lock_irqsave(&patch_lock, flags);
- waddr = patch_map(addr, FIX_TEXT_POKE0);
-
- ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
-
- patch_unmap(FIX_TEXT_POKE0);
-- spin_unlock_irqrestore(&patch_lock, flags);
-+ raw_spin_unlock_irqrestore(&patch_lock, flags);
-
- return ret;
- }
diff --git a/patches/arm64-replace-read_lock-to-rcu-lock-in-call_break_hook.patch b/patches/arm64-replace-read_lock-to-rcu-lock-in-call_break_hook.patch
deleted file mode 100644
index 690c0032a64b8..0000000000000
--- a/patches/arm64-replace-read_lock-to-rcu-lock-in-call_break_hook.patch
+++ /dev/null
@@ -1,98 +0,0 @@
-Subject: arm64: Replace read_lock to rcu lock in call_break_hook
-From: Yang Shi <yang.shi@linaro.org>
-Date: Mon, 5 Oct 2015 14:32:51 -0700
-
-Upstream commit: 62c6c61adbc623cdacf74b8f29c278e539060c48
-
-BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:917
-in_atomic(): 0, irqs_disabled(): 128, pid: 342, name: perf
-1 lock held by perf/342:
- #0: (break_hook_lock){+.+...}, at: [<ffffffc0000851ac>] call_break_hook+0x34/0xd0
-irq event stamp: 62224
-hardirqs last enabled at (62223): [<ffffffc00010b7bc>] __call_rcu.constprop.59+0x104/0x270
-hardirqs last disabled at (62224): [<ffffffc0000fbe20>] vprintk_emit+0x68/0x640
-softirqs last enabled at (0): [<ffffffc000097928>] copy_process.part.8+0x428/0x17f8
-softirqs last disabled at (0): [< (null)>] (null)
-CPU: 0 PID: 342 Comm: perf Not tainted 4.1.6-rt5 #4
-Hardware name: linux,dummy-virt (DT)
-Call trace:
-[<ffffffc000089968>] dump_backtrace+0x0/0x128
-[<ffffffc000089ab0>] show_stack+0x20/0x30
-[<ffffffc0007030d0>] dump_stack+0x7c/0xa0
-[<ffffffc0000c878c>] ___might_sleep+0x174/0x260
-[<ffffffc000708ac8>] __rt_spin_lock+0x28/0x40
-[<ffffffc000708db0>] rt_read_lock+0x60/0x80
-[<ffffffc0000851a8>] call_break_hook+0x30/0xd0
-[<ffffffc000085a70>] brk_handler+0x30/0x98
-[<ffffffc000082248>] do_debug_exception+0x50/0xb8
-Exception stack(0xffffffc00514fe30 to 0xffffffc00514ff50)
-fe20: 00000000 00000000 c1594680 0000007f
-fe40: ffffffff ffffffff 92063940 0000007f 0550dcd8 ffffffc0 00000000 00000000
-fe60: 0514fe70 ffffffc0 000be1f8 ffffffc0 0514feb0 ffffffc0 0008948c ffffffc0
-fe80: 00000004 00000000 0514fed0 ffffffc0 ffffffff ffffffff 9282a948 0000007f
-fea0: 00000000 00000000 9282b708 0000007f c1592820 0000007f 00083914 ffffffc0
-fec0: 00000000 00000000 00000010 00000000 00000064 00000000 00000001 00000000
-fee0: 005101e0 00000000 c1594680 0000007f c1594740 0000007f ffffffd8 ffffff80
-ff00: 00000000 00000000 00000000 00000000 c1594770 0000007f c1594770 0000007f
-ff20: 00665e10 00000000 7f7f7f7f 7f7f7f7f 01010101 01010101 00000000 00000000
-ff40: 928e4cc0 0000007f 91ff11e8 0000007f
-
-call_break_hook() is called in atomic context (hard irqs disabled), so replace
-the sleepable lock with an RCU lock, convert the relevant list operations to
-their RCU versions, and call synchronize_rcu() in unregister_break_hook().
-
-Also, replace the write lock with a spinlock in {un}register_break_hook().
-
-Signed-off-by: Yang Shi <yang.shi@linaro.org>
-Signed-off-by: Will Deacon <will.deacon@arm.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/arm64/kernel/debug-monitors.c | 21 +++++++++++----------
- 1 file changed, 11 insertions(+), 10 deletions(-)
-
---- a/arch/arm64/kernel/debug-monitors.c
-+++ b/arch/arm64/kernel/debug-monitors.c
-@@ -271,20 +271,21 @@ static int single_step_handler(unsigned
- * Use reader/writer locks instead of plain spinlock.
- */
- static LIST_HEAD(break_hook);
--static DEFINE_RWLOCK(break_hook_lock);
-+static DEFINE_SPINLOCK(break_hook_lock);
-
- void register_break_hook(struct break_hook *hook)
- {
-- write_lock(&break_hook_lock);
-- list_add(&hook->node, &break_hook);
-- write_unlock(&break_hook_lock);
-+ spin_lock(&break_hook_lock);
-+ list_add_rcu(&hook->node, &break_hook);
-+ spin_unlock(&break_hook_lock);
- }
-
- void unregister_break_hook(struct break_hook *hook)
- {
-- write_lock(&break_hook_lock);
-- list_del(&hook->node);
-- write_unlock(&break_hook_lock);
-+ spin_lock(&break_hook_lock);
-+ list_del_rcu(&hook->node);
-+ spin_unlock(&break_hook_lock);
-+ synchronize_rcu();
- }
-
- static int call_break_hook(struct pt_regs *regs, unsigned int esr)
-@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_reg
- struct break_hook *hook;
- int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
-
-- read_lock(&break_hook_lock);
-- list_for_each_entry(hook, &break_hook, node)
-+ rcu_read_lock();
-+ list_for_each_entry_rcu(hook, &break_hook, node)
- if ((esr & hook->esr_mask) == hook->esr_val)
- fn = hook->fn;
-- read_unlock(&break_hook_lock);
-+ rcu_read_unlock();
-
- return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
- }
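
The conversion above is the standard rwlock-to-RCU recipe for a read-mostly list whose readers may run in atomic context: readers become lock-free under rcu_read_lock(), writers serialise among themselves with a plain spinlock and use the _rcu list primitives, and unregistration waits out all readers with synchronize_rcu(). A generic sketch of the recipe (struct hook and its fields are made up for illustration):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct hook {
	struct list_head node;
	unsigned int esr_val;
	int (*fn)(unsigned int esr);
};

static LIST_HEAD(hooks);
static DEFINE_SPINLOCK(hooks_lock);	/* serialises writers only */

void register_hook(struct hook *h)
{
	spin_lock(&hooks_lock);
	list_add_rcu(&h->node, &hooks);
	spin_unlock(&hooks_lock);
}

void unregister_hook(struct hook *h)
{
	spin_lock(&hooks_lock);
	list_del_rcu(&h->node);
	spin_unlock(&hooks_lock);
	synchronize_rcu();		/* readers are gone after this */
}

int call_hooks(unsigned int esr)	/* may run in atomic context */
{
	struct hook *h;
	int ret = -1;			/* -1: no hook matched */

	rcu_read_lock();
	list_for_each_entry_rcu(h, &hooks, node)
		if (h->esr_val == esr)
			ret = h->fn(esr);
	rcu_read_unlock();
	return ret;
}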
diff --git a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
index f84454a8a67ee..f78656dafe92f 100644
--- a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -600,7 +600,7 @@ config XEN_DOM0
+@@ -562,7 +562,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64"
diff --git a/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch b/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
index ff11c7c9677b4..f22ada1312563 100644
--- a/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
+++ b/patches/blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1612,7 +1612,7 @@ static int blk_mq_hctx_notify(void *data
+@@ -1640,7 +1640,7 @@ static int blk_mq_hctx_notify(void *data
{
struct blk_mq_hw_ctx *hctx = data;
diff --git a/patches/block-blk-mq-use-swait.patch b/patches/block-blk-mq-use-swait.patch
index 0792dc7d08449..d062d02a211db 100644
--- a/patches/block-blk-mq-use-swait.patch
+++ b/patches/block-blk-mq-use-swait.patch
@@ -38,61 +38,61 @@ Subject: block: blk-mq: Use swait
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- block/blk-core.c | 2 +-
- block/blk-mq.c | 10 +++++-----
+ block/blk-core.c | 6 +++---
+ block/blk-mq.c | 6 +++---
include/linux/blkdev.h | 2 +-
3 files changed, 7 insertions(+), 7 deletions(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -664,7 +664,7 @@ struct request_queue *blk_alloc_queue_no
- q->bypass_depth = 1;
- __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
-
-- init_waitqueue_head(&q->mq_freeze_wq);
-+ init_swait_head(&q->mq_freeze_wq);
-
- if (blkcg_init_queue(q))
- goto fail_bdi;
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -88,7 +88,7 @@ static int blk_mq_queue_enter(struct req
- if (!(gfp & __GFP_WAIT))
+@@ -644,7 +644,7 @@ int blk_queue_enter(struct request_queue
+ if (!gfpflags_allow_blocking(gfp))
return -EBUSY;
- ret = wait_event_interruptible(q->mq_freeze_wq,
+ ret = swait_event_interruptible(q->mq_freeze_wq,
- !q->mq_freeze_depth || blk_queue_dying(q));
+ !atomic_read(&q->mq_freeze_depth) ||
+ blk_queue_dying(q));
if (blk_queue_dying(q))
- return -ENODEV;
-@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release
+@@ -664,7 +664,7 @@ static void blk_queue_usage_counter_rele
struct request_queue *q =
- container_of(ref, struct request_queue, mq_usage_counter);
+ container_of(ref, struct request_queue, q_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ swait_wake_all(&q->mq_freeze_wq);
}
- void blk_mq_freeze_queue_start(struct request_queue *q)
-@@ -127,7 +127,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st
+ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+@@ -726,7 +726,7 @@ struct request_queue *blk_alloc_queue_no
+ q->bypass_depth = 1;
+ __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
+
+- init_waitqueue_head(&q->mq_freeze_wq);
++ init_swait_head(&q->mq_freeze_wq);
+
+ /*
+ * Init percpu_ref in atomic mode so that it's faster to shutdown.
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
-- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
-+ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
++ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
/*
-@@ -151,7 +151,7 @@ void blk_mq_unfreeze_queue(struct reques
- spin_unlock_irq(q->queue_lock);
- if (wake) {
- percpu_ref_reinit(&q->mq_usage_counter);
+@@ -130,7 +130,7 @@ void blk_mq_unfreeze_queue(struct reques
+ WARN_ON_ONCE(freeze_depth < 0);
+ if (!freeze_depth) {
+ percpu_ref_reinit(&q->q_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ swait_wake_all(&q->mq_freeze_wq);
}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
-@@ -170,7 +170,7 @@ void blk_mq_wake_waiters(struct request_
+@@ -149,7 +149,7 @@ void blk_mq_wake_waiters(struct request_
* dying, we need to ensure that processes currently waiting on
* the queue are notified as well.
*/
@@ -103,12 +103,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -483,7 +483,7 @@ struct request_queue {
+@@ -456,7 +456,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
- wait_queue_head_t mq_freeze_wq;
+ struct swait_head mq_freeze_wq;
- struct percpu_ref mq_usage_counter;
+ struct percpu_ref q_usage_counter;
struct list_head all_q_node;
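
The swait conversion matters on RT because a regular waitqueue takes a spinlock_t (a sleeping lock on RT) in the wake-up path, while the simple waitqueues added earlier in this queue use a raw lock internally, so wake-ups may be issued from truly atomic contexts such as the percpu_ref release above. A minimal usage sketch of the simple-waitqueue API as used here (linux/wait-simple.h comes from this patch series, not mainline):

#include <linux/wait-simple.h>	/* added by this patch series */
#include <linux/atomic.h>

static struct swait_head freeze_wq;
static atomic_t freeze_depth = ATOMIC_INIT(0);

static void example_init(void)
{
	init_swait_head(&freeze_wq);
}

static void example_wait_unfrozen(void)
{
	swait_event(freeze_wq, atomic_read(&freeze_depth) == 0);
}

/* safe from atomic context: the swait head uses a raw lock internally */
static void example_unfreeze(void)
{
	if (atomic_dec_and_test(&freeze_depth))
		swait_wake_all(&freeze_wq);
}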
diff --git a/patches/block-mq-don-t-complete-requests-via-IPI.patch b/patches/block-mq-don-t-complete-requests-via-IPI.patch
index 7f9f91ff18b70..160cf27b79b30 100644
--- a/patches/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/patches/block-mq-don-t-complete-requests-via-IPI.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -100,6 +100,9 @@ void blk_rq_init(struct request_queue *q
+@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list);
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->__sector = (sector_t) -1;
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -217,6 +217,9 @@ static void blk_mq_rq_ctx_init(struct re
+@@ -196,6 +196,9 @@ static void blk_mq_rq_ctx_init(struct re
rq->resid_len = 0;
rq->sense = NULL;
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
-@@ -346,6 +349,17 @@ void blk_mq_end_request(struct request *
+@@ -325,6 +328,17 @@ void blk_mq_end_request(struct request *
}
EXPORT_SYMBOL(blk_mq_end_request);
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
-@@ -353,6 +367,8 @@ static void __blk_mq_complete_request_re
+@@ -332,6 +346,8 @@ static void __blk_mq_complete_request_re
rq->q->softirq_done_fn(rq);
}
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void blk_mq_ipi_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
-@@ -369,10 +385,14 @@ static void blk_mq_ipi_complete_request(
+@@ -348,10 +364,14 @@ static void blk_mq_ipi_complete_request(
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
-@@ -202,6 +202,7 @@ static inline u16 blk_mq_unique_tag_to_t
+@@ -212,6 +212,7 @@ static inline u16 blk_mq_unique_tag_to_t
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void blk_mq_start_request(struct request *rq);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -101,6 +101,7 @@ struct request {
+@@ -89,6 +89,7 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
diff --git a/patches/block-mq-drop-per-ctx-cpu_lock.patch b/patches/block-mq-drop-per-ctx-cpu_lock.patch
index 66cea706d8f54..53e9ccbe96644 100644
--- a/patches/block-mq-drop-per-ctx-cpu_lock.patch
+++ b/patches/block-mq-drop-per-ctx-cpu_lock.patch
@@ -63,17 +63,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1386,9 +1386,7 @@ static void blk_sq_make_request(struct r
- if (list_empty(&plug->mq_list))
- trace_block_plug(q);
- else if (request_count >= BLK_MAX_REQUEST_COUNT) {
-- spin_unlock(&data.ctx->cpu_lock);
- blk_flush_plug_list(plug, false);
-- spin_lock(&data.ctx->cpu_lock);
- trace_block_plug(q);
- }
- list_add_tail(&rq->queuelist, &plug->mq_list);
-@@ -1581,7 +1579,6 @@ static int blk_mq_hctx_cpu_offline(struc
+@@ -1405,9 +1405,7 @@ static blk_qc_t blk_sq_make_request(stru
+ blk_mq_put_ctx(data.ctx);
+
+ if (request_count >= BLK_MAX_REQUEST_COUNT) {
+- spin_unlock(&data.ctx->cpu_lock);
+ blk_flush_plug_list(plug, false);
+- spin_lock(&data.ctx->cpu_lock);
+ trace_block_plug(q);
+ }
+
+@@ -1609,7 +1607,6 @@ static int blk_mq_hctx_cpu_offline(struc
blk_mq_hctx_clear_pending(hctx, ctx);
}
spin_unlock(&ctx->lock);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (list_empty(&tmp))
return NOTIFY_OK;
-@@ -1775,7 +1772,6 @@ static void blk_mq_init_cpu_queues(struc
+@@ -1803,7 +1800,6 @@ static void blk_mq_init_cpu_queues(struc
memset(__ctx, 0, sizeof(*__ctx));
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int cpu;
unsigned int index_hw;
-@@ -80,7 +79,6 @@ static inline struct blk_mq_ctx *__blk_m
+@@ -78,7 +77,6 @@ static inline struct blk_mq_ctx *__blk_m
struct blk_mq_ctx *ctx;
ctx = per_cpu_ptr(q->queue_ctx, cpu);
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ctx;
}
-@@ -95,14 +93,8 @@ static inline struct blk_mq_ctx *blk_mq_
+@@ -93,14 +91,8 @@ static inline struct blk_mq_ctx *blk_mq_
return __blk_mq_get_ctx(q, get_cpu_light());
}
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index 3f711e18a5556..354aeb8c78de8 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -364,7 +364,7 @@ static void blk_mq_ipi_complete_request(
+@@ -343,7 +343,7 @@ static void blk_mq_ipi_complete_request(
return;
}
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
-@@ -376,7 +376,7 @@ static void blk_mq_ipi_complete_request(
+@@ -355,7 +355,7 @@ static void blk_mq_ipi_complete_request(
} else {
rq->q->softirq_done_fn(rq);
}
@@ -30,8 +30,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ put_cpu_light();
}
- void __blk_mq_complete_request(struct request *rq)
-@@ -905,14 +905,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
+ static void __blk_mq_complete_request(struct request *rq)
+@@ -862,14 +862,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
return;
if (!async) {
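
get_cpu_light()/put_cpu_light() used above are the RT replacements for get_cpu()/put_cpu(): instead of disabling preemption they only disable migration, so the section stays preemptible but keeps running on a stable CPU. Roughly the helpers this patch assumes, as a sketch (they are defined elsewhere in the series):

#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#else
# define get_cpu_light()	get_cpu()	/* preempt_disable() based */
# define put_cpu_light()	put_cpu()
#endif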
diff --git a/patches/block-mq-use-cpu_light.patch b/patches/block-mq-use-cpu_light.patch
index ac403d37865ad..1d62870fcba64 100644
--- a/patches/block-mq-use-cpu_light.patch
+++ b/patches/block-mq-use-cpu_light.patch
@@ -17,17 +17,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1366,7 +1366,9 @@ static void blk_sq_make_request(struct r
- if (list_empty(&plug->mq_list))
- trace_block_plug(q);
- else if (request_count >= BLK_MAX_REQUEST_COUNT) {
-+ spin_unlock(&data.ctx->cpu_lock);
- blk_flush_plug_list(plug, false);
-+ spin_lock(&data.ctx->cpu_lock);
- trace_block_plug(q);
- }
- list_add_tail(&rq->queuelist, &plug->mq_list);
-@@ -1559,6 +1561,7 @@ static int blk_mq_hctx_cpu_offline(struc
+@@ -1385,7 +1385,9 @@ static blk_qc_t blk_sq_make_request(stru
+ blk_mq_put_ctx(data.ctx);
+
+ if (request_count >= BLK_MAX_REQUEST_COUNT) {
++ spin_unlock(&data.ctx->cpu_lock);
+ blk_flush_plug_list(plug, false);
++ spin_lock(&data.ctx->cpu_lock);
+ trace_block_plug(q);
+ }
+
+@@ -1587,6 +1589,7 @@ static int blk_mq_hctx_cpu_offline(struc
blk_mq_hctx_clear_pending(hctx, ctx);
}
spin_unlock(&ctx->lock);
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (list_empty(&tmp))
return NOTIFY_OK;
-@@ -1752,6 +1755,7 @@ static void blk_mq_init_cpu_queues(struc
+@@ -1780,6 +1783,7 @@ static void blk_mq_init_cpu_queues(struc
memset(__ctx, 0, sizeof(*__ctx));
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int cpu;
unsigned int index_hw;
-@@ -76,7 +77,11 @@ struct blk_align_bitmap {
+@@ -74,7 +75,11 @@ struct blk_align_bitmap {
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
{
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -87,12 +92,18 @@ static inline struct blk_mq_ctx *__blk_m
+@@ -85,12 +90,18 @@ static inline struct blk_mq_ctx *__blk_m
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
diff --git a/patches/block-shorten-interrupt-disabled-regions.patch b/patches/block-shorten-interrupt-disabled-regions.patch
index c849c258d15c2..ef9a3567eece8 100644
--- a/patches/block-shorten-interrupt-disabled-regions.patch
+++ b/patches/block-shorten-interrupt-disabled-regions.patch
@@ -47,7 +47,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -3077,7 +3077,7 @@ static void queue_unplugged(struct reque
+@@ -3182,7 +3182,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -56,7 +56,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3125,7 +3125,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3230,7 +3230,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -64,7 +64,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3145,11 +3144,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3250,11 +3249,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
@@ -76,7 +76,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3162,7 +3156,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3267,7 +3261,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -85,7 +85,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
/*
-@@ -3189,8 +3183,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3294,8 +3288,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
diff --git a/patches/bpf-convert-hashtab-lock-to-raw-lock.patch b/patches/bpf-convert-hashtab-lock-to-raw-lock.patch
deleted file mode 100644
index 719ffe4f5504c..0000000000000
--- a/patches/bpf-convert-hashtab-lock-to-raw-lock.patch
+++ /dev/null
@@ -1,114 +0,0 @@
-Subject: bpf: Convert hashtab lock to raw lock
-From: Yang Shi <yang.shi@linaro.org>
-Date: Fri, 30 Oct 2015 15:16:26 -0700
-
-When running the bpf samples on an rt kernel, the following warning is reported:
-
-BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:917
-in_atomic(): 1, irqs_disabled(): 128, pid: 477, name: ping
-Preemption disabled at:[<ffff80000017db58>] kprobe_perf_func+0x30/0x228
-
-CPU: 3 PID: 477 Comm: ping Not tainted 4.1.10-rt8 #4
-Hardware name: Freescale Layerscape 2085a RDB Board (DT)
-Call trace:
-[<ffff80000008a5b0>] dump_backtrace+0x0/0x128
-[<ffff80000008a6f8>] show_stack+0x20/0x30
-[<ffff8000007da90c>] dump_stack+0x7c/0xa0
-[<ffff8000000e4830>] ___might_sleep+0x188/0x1a0
-[<ffff8000007e2200>] rt_spin_lock+0x28/0x40
-[<ffff80000018bf9c>] htab_map_update_elem+0x124/0x320
-[<ffff80000018c718>] bpf_map_update_elem+0x40/0x58
-[<ffff800000187658>] __bpf_prog_run+0xd48/0x1640
-[<ffff80000017ca6c>] trace_call_bpf+0x8c/0x100
-[<ffff80000017db58>] kprobe_perf_func+0x30/0x228
-[<ffff80000017dd84>] kprobe_dispatcher+0x34/0x58
-[<ffff8000007e399c>] kprobe_handler+0x114/0x250
-[<ffff8000007e3bf4>] kprobe_breakpoint_handler+0x1c/0x30
-[<ffff800000085b80>] brk_handler+0x88/0x98
-[<ffff8000000822f0>] do_debug_exception+0x50/0xb8
-Exception stack(0xffff808349687460 to 0xffff808349687580)
-7460: 4ca2b600 ffff8083 4a3a7000 ffff8083 49687620 ffff8083 0069c5f8 ffff8000
-7480: 00000001 00000000 007e0628 ffff8000 496874b0 ffff8083 007e1de8 ffff8000
-74a0: 496874d0 ffff8083 0008e04c ffff8000 00000001 00000000 4ca2b600 ffff8083
-74c0: 00ba2e80 ffff8000 49687528 ffff8083 49687510 ffff8083 000e5c70 ffff8000
-74e0: 00c22348 ffff8000 00000000 ffff8083 49687510 ffff8083 000e5c74 ffff8000
-7500: 4ca2b600 ffff8083 49401800 ffff8083 00000001 00000000 00000000 00000000
-7520: 496874d0 ffff8083 00000000 00000000 00000000 00000000 00000000 00000000
-7540: 2f2e2d2c 33323130 00000000 00000000 4c944500 ffff8083 00000000 00000000
-7560: 00000000 00000000 008751e0 ffff8000 00000001 00000000 124e2d1d 00107b77
-
-Convert the hashtab lock to a raw lock to avoid this warning.
-
-Signed-off-by: Yang Shi <yang.shi@linaro.org>
-Cc: linaro-kernel@lists.linaro.org
-Cc: ast@kernel.org
-Cc: rostedt@goodmis.org
-Link: http://lkml.kernel.org/r/1446243386-26582-1-git-send-email-yang.shi@linaro.org
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
-This patch is applicable to the mainline kernel too.
-
- kernel/bpf/hashtab.c | 14 +++++++-------
- 1 file changed, 7 insertions(+), 7 deletions(-)
-
---- a/kernel/bpf/hashtab.c
-+++ b/kernel/bpf/hashtab.c
-@@ -17,7 +17,7 @@
- struct bpf_htab {
- struct bpf_map map;
- struct hlist_head *buckets;
-- spinlock_t lock;
-+ raw_spinlock_t lock;
- u32 count; /* number of elements in this hashtable */
- u32 n_buckets; /* number of hash buckets */
- u32 elem_size; /* size of each element in bytes */
-@@ -82,7 +82,7 @@ static struct bpf_map *htab_map_alloc(un
- for (i = 0; i < htab->n_buckets; i++)
- INIT_HLIST_HEAD(&htab->buckets[i]);
-
-- spin_lock_init(&htab->lock);
-+ raw_spin_lock_init(&htab->lock);
- htab->count = 0;
-
- htab->elem_size = sizeof(struct htab_elem) +
-@@ -230,7 +230,7 @@ static int htab_map_update_elem(struct b
- l_new->hash = htab_map_hash(l_new->key, key_size);
-
- /* bpf_map_update_elem() can be called in_irq() */
-- spin_lock_irqsave(&htab->lock, flags);
-+ raw_spin_lock_irqsave(&htab->lock, flags);
-
- head = select_bucket(htab, l_new->hash);
-
-@@ -266,11 +266,11 @@ static int htab_map_update_elem(struct b
- } else {
- htab->count++;
- }
-- spin_unlock_irqrestore(&htab->lock, flags);
-+ raw_spin_unlock_irqrestore(&htab->lock, flags);
-
- return 0;
- err:
-- spin_unlock_irqrestore(&htab->lock, flags);
-+ raw_spin_unlock_irqrestore(&htab->lock, flags);
- kfree(l_new);
- return ret;
- }
-@@ -291,7 +291,7 @@ static int htab_map_delete_elem(struct b
-
- hash = htab_map_hash(key, key_size);
-
-- spin_lock_irqsave(&htab->lock, flags);
-+ raw_spin_lock_irqsave(&htab->lock, flags);
-
- head = select_bucket(htab, hash);
-
-@@ -304,7 +304,7 @@ static int htab_map_delete_elem(struct b
- ret = 0;
- }
-
-- spin_unlock_irqrestore(&htab->lock, flags);
-+ raw_spin_unlock_irqrestore(&htab->lock, flags);
- return ret;
- }
-
diff --git a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index eb6fa60662827..38cce01941faf 100644
--- a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2127,14 +2127,17 @@ static void drain_local_stock(struct wor
+@@ -1913,14 +1913,17 @@ static void drain_local_stock(struct wor
*/
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index 2906dbdc8845c..cdadf87874b32 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -28,31 +28,31 @@ To avoid:
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/cgroup.h | 2 ++
- kernel/cgroup.c | 9 +++++----
+ include/linux/cgroup-defs.h | 2 ++
+ kernel/cgroup.c | 9 +++++----
2 files changed, 7 insertions(+), 4 deletions(-)
---- a/include/linux/cgroup.h
-+++ b/include/linux/cgroup.h
-@@ -22,6 +22,7 @@
- #include <linux/seq_file.h>
- #include <linux/kernfs.h>
- #include <linux/wait.h>
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -16,6 +16,7 @@
+ #include <linux/percpu-refcount.h>
+ #include <linux/percpu-rwsem.h>
+ #include <linux/workqueue.h>
+#include <linux/work-simple.h>
#ifdef CONFIG_CGROUPS
-@@ -91,6 +92,7 @@ struct cgroup_subsys_state {
+@@ -136,6 +137,7 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
+ struct swork_event destroy_swork;
};
- /* bits in struct cgroup_subsys_state flags field */
+ /*
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
-@@ -4422,10 +4422,10 @@ static void css_free_rcu_fn(struct rcu_h
+@@ -4724,10 +4724,10 @@ static void css_free_rcu_fn(struct rcu_h
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -4464,8 +4464,8 @@ static void css_release(struct percpu_re
+@@ -4766,8 +4766,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5069,6 +5069,7 @@ static int __init cgroup_wq_init(void)
+@@ -5363,6 +5363,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
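
css_release() fires from percpu_ref teardown in atomic context; queueing a regular work item there is fine mainline, but on RT the workqueue internals take sleeping locks, hence the detour through the simple-work infrastructure whose queueing path is raw-lock based. A usage sketch of the work-simple API assumed here (swork_get() must be called once from process context to bring up the worker thread):

#include <linux/work-simple.h>	/* added by this patch series */

static struct swork_event cleanup_event;

static void cleanup_fn(struct swork_event *sev)
{
	/* runs in the sworker thread: process context, may sleep */
}

static int example_setup(void)
{
	int ret = swork_get();	/* bring up the worker thread once */

	if (ret)
		return ret;
	INIT_SWORK(&cleanup_event, cleanup_fn);
	return 0;
}

/* callable from atomic context, e.g. a percpu_ref release */
static void example_release(void)
{
	swork_queue(&cleanup_event);
}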
diff --git a/patches/clocksource-tclib-allow-higher-clockrates.patch b/patches/clocksource-tclib-allow-higher-clockrates.patch
index 92c47ba80adb8..38d7ee4ae32b1 100644
--- a/patches/clocksource-tclib-allow-higher-clockrates.patch
+++ b/patches/clocksource-tclib-allow-higher-clockrates.patch
Add a compile-time selection to allow higher clock resolution.
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- drivers/clocksource/tcb_clksrc.c | 37 ++++++++++++++++++++++---------------
+ drivers/clocksource/tcb_clksrc.c | 36 +++++++++++++++++++++---------------
drivers/misc/Kconfig | 12 ++++++++++--
- 2 files changed, 32 insertions(+), 17 deletions(-)
+ 2 files changed, 31 insertions(+), 17 deletions(-)
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -47,45 +47,42 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- */
static u32 timer_clock;
- static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
-@@ -111,11 +104,12 @@ static void tc_mode(enum clock_event_mod
- case CLOCK_EVT_MODE_PERIODIC:
- clk_enable(tcd->clk);
-
-- /* slow clock, count up to RC, then irq and restart */
-+ /* count up to RC, then irq and restart */
- __raw_writel(timer_clock
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
-- __raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
-+ __raw_writel((tcd->freq + HZ / 2) / HZ,
-+ tcaddr + ATMEL_TC_REG(2, RC));
-
- /* Enable clock and interrupts on RC compare */
- __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -128,7 +122,7 @@ static void tc_mode(enum clock_event_mod
- case CLOCK_EVT_MODE_ONESHOT:
- clk_enable(tcd->clk);
-
-- /* slow clock, count up to RC, then irq and stop */
-+ /* count up to RC, then irq and stop */
- __raw_writel(timer_clock | ATMEL_TC_CPCSTOP
- | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
- regs + ATMEL_TC_REG(2, CMR));
-@@ -157,8 +151,12 @@ static struct tc_clkevt_device clkevt =
- .name = "tc_clkevt",
- .features = CLOCK_EVT_FEAT_PERIODIC
- | CLOCK_EVT_FEAT_ONESHOT,
-+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ static int tc_shutdown(struct clock_event_device *d)
+@@ -113,7 +106,7 @@ static int tc_set_oneshot(struct clock_e
+
+ clk_enable(tcd->clk);
+
+- /* slow clock, count up to RC, then irq and stop */
++ /* count up to RC, then irq and stop */
+ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
+ ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+@@ -135,10 +128,10 @@ static int tc_set_periodic(struct clock_
+ */
+ clk_enable(tcd->clk);
+
+- /* slow clock, count up to RC, then irq and restart */
++ /* count up to RC, then irq and restart */
+ __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+ regs + ATMEL_TC_REG(2, CMR));
+- __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
++ __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+
+ /* Enable clock and interrupts on RC compare */
+ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+@@ -165,7 +158,11 @@ static struct tc_clkevt_device clkevt =
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
- .rating = 125,
++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+ .rating = 125,
+#else
-+ .rating = 200,
++ .rating = 200,
+#endif
- .set_next_event = tc_next_event,
- .set_mode = tc_mode,
- },
-@@ -178,8 +176,9 @@ static irqreturn_t ch2_irq(int irq, void
+ .set_next_event = tc_next_event,
+ .set_state_shutdown = tc_shutdown,
+ .set_state_periodic = tc_set_periodic,
+@@ -187,8 +184,9 @@ static irqreturn_t ch2_irq(int irq, void
return IRQ_NONE;
}
@@ -96,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
-@@ -193,7 +192,11 @@ static int __init setup_clkevents(struct
+@@ -209,7 +207,11 @@ static int __init setup_clkevents(struct
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
@@ -109,7 +106,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clkevt.clkevt.cpumask = cpumask_of(0);
-@@ -203,7 +206,7 @@ static int __init setup_clkevents(struct
+@@ -220,7 +222,7 @@ static int __init setup_clkevents(struct
return ret;
}
@@ -118,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -340,7 +343,11 @@ static int __init tcb_clksrc_init(void)
+@@ -357,7 +359,11 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
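
The reload value computed above is simply input-clock cycles per scheduler tick, rounded to nearest, so the clockevent resolution scales directly with the selected clock. A worked example with HZ = 100 (the 5 MHz figure is illustrative):

/* RC = input clock cycles per scheduler tick, rounded to nearest */
#define RC_FOR(freq)	(((freq) + HZ / 2) / HZ)

/*
 * HZ = 100:
 *   32.768 kHz slow clock -> RC =   328, one timer step ~ 30.5 us
 *   5 MHz divided clock   -> RC = 50000, one timer step =  200 ns
 */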
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index f1ec51e273128..616a4e87f0ccc 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -198,7 +198,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2809,7 +2809,10 @@ void migrate_disable(void)
+@@ -3143,7 +3143,10 @@ void migrate_disable(void)
}
#ifdef CONFIG_SCHED_DEBUG
@@ -210,7 +210,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -2839,7 +2842,10 @@ void migrate_enable(void)
+@@ -3173,7 +3176,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cond-resched-lock-rt-tweak.patch b/patches/cond-resched-lock-rt-tweak.patch
index d4e1e9614b4d2..b810dbbd0b3e6 100644
--- a/patches/cond-resched-lock-rt-tweak.patch
+++ b/patches/cond-resched-lock-rt-tweak.patch
@@ -7,12 +7,12 @@ locked. Update PREEMPT_LOCK_OFFSET for that case.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/preempt_mask.h | 4 ++++
+ include/linux/preempt.h | 4 ++++
1 file changed, 4 insertions(+)
---- a/include/linux/preempt_mask.h
-+++ b/include/linux/preempt_mask.h
-@@ -83,7 +83,11 @@
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -91,7 +91,11 @@
/*
* The preempt_count offset after spin_lock()
*/
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index 4bb8f75ccfa45..fb2f3d7e89b19 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2931,12 +2931,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -2984,12 +2984,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4486,6 +4486,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -4832,6 +4832,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4499,6 +4500,7 @@ int __sched __cond_resched_softirq(void)
+@@ -4845,6 +4846,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
index 5a6e1e5222caf..d03d541aae4fd 100644
--- a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
+++ b/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -109,6 +109,14 @@ struct hotplug_pcp {
+@@ -110,6 +110,14 @@ struct hotplug_pcp {
int grab_lock;
struct completion synced;
#ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index 3ddd623d38dd5..4dd05bfd9dc30 100644
--- a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -19,12 +19,12 @@ Cc: Clark Williams <clark.williams@gmail.com>
Link: http://lkml.kernel.org/r/1330702617.25686.265.camel@gandalf.stny.rr.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/cpu.c | 38 +++++++++++++++++++++++++++++---------
- 1 file changed, 29 insertions(+), 9 deletions(-)
+ kernel/cpu.c | 34 +++++++++++++++++++++++++++-------
+ 1 file changed, 27 insertions(+), 7 deletions(-)
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -59,10 +59,16 @@ static int cpu_hotplug_disabled;
+@@ -60,10 +60,16 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -75,12 +81,26 @@ static struct {
+@@ -76,12 +82,26 @@ static struct {
} cpu_hotplug = {
.active_writer = NULL,
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -117,8 +137,8 @@ void pin_current_cpu(void)
+@@ -118,8 +138,8 @@ void pin_current_cpu(void)
return;
}
preempt_enable();
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
goto retry;
}
-@@ -191,9 +211,9 @@ void get_online_cpus(void)
+@@ -192,9 +212,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -91,21 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -201,11 +221,11 @@ bool try_get_online_cpus(void)
- {
- if (cpu_hotplug.active_writer == current)
- return true;
-- if (!mutex_trylock(&cpu_hotplug.lock))
-+ if (!hotplug_trylock())
- return false;
- cpuhp_lock_acquire_tryread();
- atomic_inc(&cpu_hotplug.refcount);
-- mutex_unlock(&cpu_hotplug.lock);
-+ hotplug_unlock();
- return true;
- }
- EXPORT_SYMBOL_GPL(try_get_online_cpus);
-@@ -259,11 +279,11 @@ void cpu_hotplug_begin(void)
+@@ -247,11 +267,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -119,7 +105,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -272,7 +292,7 @@ void cpu_hotplug_begin(void)
+@@ -260,7 +280,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
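
The hunks above funnel all accesses through hotplug_lock()/hotplug_unlock() wrappers so that RT can substitute a sleeping spinlock (an rtmutex with priority inheritance) for the hotplug mutex while non-RT keeps the mutex. Roughly, as a sketch of what the wrappers expand to:

#ifdef CONFIG_PREEMPT_RT_FULL
# define hotplug_lock()		rt_spin_lock(&cpu_hotplug.lock)
# define hotplug_trylock()	rt_spin_trylock(&cpu_hotplug.lock)
# define hotplug_unlock()	rt_spin_unlock(&cpu_hotplug.lock)
#else
# define hotplug_lock()		mutex_lock(&cpu_hotplug.lock)
# define hotplug_trylock()	mutex_trylock(&cpu_hotplug.lock)
# define hotplug_unlock()	mutex_unlock(&cpu_hotplug.lock)
#endif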
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 3a06653a12a1f..39543b48345f3 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -50,13 +50,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/sched.h | 7 +
- kernel/cpu.c | 244 ++++++++++++++++++++++++++++++++++++++++----------
- kernel/sched/core.c | 82 ++++++++++++++++
- 3 files changed, 285 insertions(+), 48 deletions(-)
+ kernel/cpu.c | 240 ++++++++++++++++++++++++++++++++++++++++----------
+ kernel/sched/core.c | 82 ++++++++++++++++-
+ 3 files changed, 283 insertions(+), 46 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2217,6 +2217,10 @@ extern void do_set_cpus_allowed(struct t
+@@ -2282,6 +2282,10 @@ extern void do_set_cpus_allowed(struct t
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
@@ -67,7 +67,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
-@@ -2229,6 +2233,9 @@ static inline int set_cpus_allowed_ptr(s
+@@ -2294,6 +2298,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_NO_HZ_COMMON
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -59,16 +59,10 @@ static int cpu_hotplug_disabled;
+@@ -60,16 +60,10 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -80,27 +74,13 @@ static struct {
+@@ -81,27 +75,13 @@ static struct {
#endif
} cpu_hotplug = {
.active_writer = NULL,
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -108,12 +88,42 @@ static struct {
+@@ -109,12 +89,42 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
-@@ -127,18 +137,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -128,18 +138,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
void pin_current_cpu(void)
{
struct hotplug_pcp *hp;
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
goto retry;
}
-@@ -159,26 +190,84 @@ void unpin_current_cpu(void)
+@@ -160,26 +191,84 @@ void unpin_current_cpu(void)
wake_up_process(hp->unplug);
}
@@ -304,7 +304,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
-@@ -186,23 +275,83 @@ static int sync_unplug_thread(void *data
+@@ -187,23 +276,83 @@ static int sync_unplug_thread(void *data
static int cpu_unplug_begin(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
@@ -395,7 +395,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void get_online_cpus(void)
-@@ -211,9 +360,9 @@ void get_online_cpus(void)
+@@ -212,9 +361,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -407,21 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -221,11 +370,11 @@ bool try_get_online_cpus(void)
- {
- if (cpu_hotplug.active_writer == current)
- return true;
-- if (!hotplug_trylock())
-+ if (!mutex_trylock(&cpu_hotplug.lock))
- return false;
- cpuhp_lock_acquire_tryread();
- atomic_inc(&cpu_hotplug.refcount);
-- hotplug_unlock();
-+ mutex_unlock(&cpu_hotplug.lock);
- return true;
- }
- EXPORT_SYMBOL_GPL(try_get_online_cpus);
-@@ -279,11 +428,11 @@ void cpu_hotplug_begin(void)
+@@ -267,11 +416,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -435,7 +421,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -292,7 +441,7 @@ void cpu_hotplug_begin(void)
+@@ -280,7 +429,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
@@ -444,7 +430,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpuhp_lock_release();
}
-@@ -527,6 +676,9 @@ static int __ref _cpu_down(unsigned int
+@@ -516,6 +665,9 @@ static int _cpu_down(unsigned int cpu, i
smpboot_park_threads(cpu);
@@ -452,30 +438,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ cpu_unplug_sync(cpu);
+
/*
- * So now all preempt/rcu users must observe !cpu_active().
- */
+ * Prevent irq alloc/free while the dying cpu reorganizes the
+ * interrupt affinities.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2761,7 +2761,7 @@ void migrate_disable(void)
- {
- struct task_struct *p = current;
-
-- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
-+ if (in_atomic()) {
- #ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic++;
- #endif
-@@ -2794,7 +2794,7 @@ void migrate_enable(void)
- unsigned long flags;
- struct rq *rq;
-
-- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
-+ if (in_atomic()) {
- #ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic--;
- #endif
-@@ -4967,6 +4967,84 @@ void do_set_cpus_allowed(struct task_str
- cpumask_copy(&p->cpus_allowed, new_mask);
+@@ -1220,6 +1220,84 @@ void do_set_cpus_allowed(struct task_str
+ enqueue_task(rq, p, ENQUEUE_RESTORE);
}
+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
@@ -557,5 +525,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+
/*
- * This is how migration works:
- *
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+@@ -3085,7 +3163,7 @@ void migrate_disable(void)
+ {
+ struct task_struct *p = current;
+
+- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
++ if (in_atomic()) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+ #endif
+@@ -3118,7 +3196,7 @@ void migrate_enable(void)
+ unsigned long flags;
+ struct rq *rq;
+
+- if (in_atomic() || p->flags & PF_NO_SETAFFINITY) {
++ if (in_atomic()) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+ #endif
diff --git a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index edf10aca00f4d..e40b98dd6011e 100644
--- a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1746,12 +1746,13 @@ void hrtimer_init_sleeper(struct hrtimer
+@@ -1657,12 +1657,13 @@ void hrtimer_init_sleeper(struct hrtimer
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -47,9 +47,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(state);
hrtimer_start_expires(&t->timer, mode);
- if (!hrtimer_active(&t->timer))
- t->task = NULL;
-@@ -1795,7 +1796,8 @@ long __sched hrtimer_nanosleep_restart(s
+
+ if (likely(t->task))
+@@ -1704,7 +1705,8 @@ long __sched hrtimer_nanosleep_restart(s
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
rmtp = restart->nanosleep.rmtp;
-@@ -1812,8 +1814,10 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1721,8 +1723,10 @@ long __sched hrtimer_nanosleep_restart(s
return ret;
}
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct restart_block *restart;
struct hrtimer_sleeper t;
-@@ -1826,7 +1830,7 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1735,7 +1739,7 @@ long hrtimer_nanosleep(struct timespec *
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
/* Absolute timers do not update the rmtp value and restart: */
-@@ -1853,6 +1857,12 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1762,6 +1766,12 @@ long hrtimer_nanosleep(struct timespec *
return ret;
}
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
-@@ -1879,7 +1889,8 @@ void cpu_chill(void)
+@@ -1788,7 +1798,8 @@ void cpu_chill(void)
unsigned int freeze_flag = current->flags & PF_NOFREEZE;
current->flags |= PF_NOFREEZE;
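
The state parameter added above exists so that cpu_chill() can sleep in TASK_UNINTERRUPTIBLE: it is called from trylock retry loops on RT, and an interruptible sleep there could be cut short by a pending signal, turning the loop back into a busy spin. A reconstructed sketch of the resulting cpu_chill() (the 1 ms period is illustrative):

void cpu_chill(void)
{
	struct timespec tu = {
		.tv_nsec = NSEC_PER_MSEC,
	};
	unsigned int freeze_flag = current->flags & PF_NOFREEZE;

	current->flags |= PF_NOFREEZE;
	__hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC,
			    TASK_UNINTERRUPTIBLE);
	if (!freeze_flag)
		current->flags &= ~PF_NOFREEZE;
}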
diff --git a/patches/cpu_down_move_migrate_enable_back.patch b/patches/cpu_down_move_migrate_enable_back.patch
index 2d39eb47ee972..40d0699a8260e 100644
--- a/patches/cpu_down_move_migrate_enable_back.patch
+++ b/patches/cpu_down_move_migrate_enable_back.patch
@@ -34,7 +34,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -668,6 +668,7 @@ static int __ref _cpu_down(unsigned int
+@@ -657,6 +657,7 @@ static int _cpu_down(unsigned int cpu, i
err = -EBUSY;
goto restore_cpus;
}
@@ -42,7 +42,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
cpu_hotplug_begin();
err = cpu_unplug_begin(cpu);
-@@ -744,7 +745,6 @@ static int __ref _cpu_down(unsigned int
+@@ -741,7 +742,6 @@ static int _cpu_down(unsigned int cpu, i
out_release:
cpu_unplug_done(cpu);
out_cancel:
diff --git a/patches/cpufreq-Remove-cpufreq_rwsem.patch b/patches/cpufreq-Remove-cpufreq_rwsem.patch
deleted file mode 100644
index 434d35083ee03..0000000000000
--- a/patches/cpufreq-Remove-cpufreq_rwsem.patch
+++ /dev/null
@@ -1,195 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 21 Jul 2015 15:28:49 +0200
-Subject: cpufreq: Remove cpufreq_rwsem
-
-cpufreq_rwsem was introduced in commit 6eed9404ab3c4 ("cpufreq: Use
-rwsem for protecting critical sections") in order to replace
-try_module_get() on the cpu-freq driver. That try_module_get() worked
-well until the refcount was so heavily used that module removal became
-more or less impossible.
-
-Though when looking at the various (undocumented) protection
-mechanisms in that code, the randomly sprinkled-around cpufreq_rwsem
-locking sites are superfluous.
-
-The policy, which is acquired in cpufreq_cpu_get() and released in
-cpufreq_cpu_put() is sufficiently protected already.
-
- cpufreq_cpu_get(cpu)
- /* Protects against concurrent driver removal */
- read_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = per_cpu(cpufreq_cpu_data, cpu);
- kobject_get(&policy->kobj);
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-The reference on the policy serializes versus module unload already:
-
- cpufreq_unregister_driver()
- subsys_interface_unregister()
- __cpufreq_remove_dev_finish()
- per_cpu(cpufreq_cpu_data) = NULL;
- cpufreq_policy_put_kobj()
-
-If there is a reference held on the policy, i.e. obtained prior to the
-unregister call, then cpufreq_policy_put_kobj() will wait until that
-reference is dropped. So once subsys_interface_unregister() returns
-there is no policy pointer in flight and no new reference can be
-obtained. So that rwsem protection is useless.
-
-The other usage of cpufreq_rwsem in show()/store() of the sysfs
-interface is redundant as well because sysfs already does the proper
-kobject_get()/put() pairs.
-
-That leaves CPU hotplug versus module removal. The current
-down_write() around the write_lock() in cpufreq_unregister_driver() is
-silly at best as it actually protects nothing.
-
-The trivial solution to this is to prevent hotplug across
-cpufreq_unregister_driver() completely.
-
-[upstream: rafael/linux-pm 454d3a2500a4eb33be85dde3bfba9e5f6b5efadc]
-[fixes: "cpufreq_stat_notifier_trans: No policy found" since v4.0-rt]
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/cpufreq/cpufreq.c | 35 +++--------------------------------
- 1 file changed, 3 insertions(+), 32 deletions(-)
-
---- a/drivers/cpufreq/cpufreq.c
-+++ b/drivers/cpufreq/cpufreq.c
-@@ -64,12 +64,6 @@ static inline bool has_target(void)
- return cpufreq_driver->target_index || cpufreq_driver->target;
- }
-
--/*
-- * rwsem to guarantee that cpufreq driver module doesn't unload during critical
-- * sections
-- */
--static DECLARE_RWSEM(cpufreq_rwsem);
--
- /* internal prototypes */
- static int __cpufreq_governor(struct cpufreq_policy *policy,
- unsigned int event);
-@@ -215,9 +209,6 @@ struct cpufreq_policy *cpufreq_cpu_get(u
- if (cpu >= nr_cpu_ids)
- return NULL;
-
-- if (!down_read_trylock(&cpufreq_rwsem))
-- return NULL;
--
- /* get the cpufreq driver */
- read_lock_irqsave(&cpufreq_driver_lock, flags);
-
-@@ -230,9 +221,6 @@ struct cpufreq_policy *cpufreq_cpu_get(u
-
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-- if (!policy)
-- up_read(&cpufreq_rwsem);
--
- return policy;
- }
- EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
-@@ -240,7 +228,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
- void cpufreq_cpu_put(struct cpufreq_policy *policy)
- {
- kobject_put(&policy->kobj);
-- up_read(&cpufreq_rwsem);
- }
- EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-
-@@ -765,9 +752,6 @@ static ssize_t show(struct kobject *kobj
- struct freq_attr *fattr = to_attr(attr);
- ssize_t ret;
-
-- if (!down_read_trylock(&cpufreq_rwsem))
-- return -EINVAL;
--
- down_read(&policy->rwsem);
-
- if (fattr->show)
-@@ -776,7 +760,6 @@ static ssize_t show(struct kobject *kobj
- ret = -EIO;
-
- up_read(&policy->rwsem);
-- up_read(&cpufreq_rwsem);
-
- return ret;
- }
-@@ -793,9 +776,6 @@ static ssize_t store(struct kobject *kob
- if (!cpu_online(policy->cpu))
- goto unlock;
-
-- if (!down_read_trylock(&cpufreq_rwsem))
-- goto unlock;
--
- down_write(&policy->rwsem);
-
- if (fattr->store)
-@@ -804,8 +784,6 @@ static ssize_t store(struct kobject *kob
- ret = -EIO;
-
- up_write(&policy->rwsem);
--
-- up_read(&cpufreq_rwsem);
- unlock:
- put_online_cpus();
-
-@@ -1117,16 +1095,12 @@ static int __cpufreq_add_dev(struct devi
- if (unlikely(policy))
- return 0;
-
-- if (!down_read_trylock(&cpufreq_rwsem))
-- return 0;
--
- /* Check if this cpu was hot-unplugged earlier and has siblings */
- read_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_policy(policy) {
- if (cpumask_test_cpu(cpu, policy->related_cpus)) {
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_add_policy_cpu(policy, cpu, dev);
-- up_read(&cpufreq_rwsem);
- return ret;
- }
- }
-@@ -1269,8 +1243,6 @@ static int __cpufreq_add_dev(struct devi
-
- kobject_uevent(&policy->kobj, KOBJ_ADD);
-
-- up_read(&cpufreq_rwsem);
--
- /* Callback for handling stuff after policy is ready */
- if (cpufreq_driver->ready)
- cpufreq_driver->ready(policy);
-@@ -1304,8 +1276,6 @@ static int __cpufreq_add_dev(struct devi
- cpufreq_policy_free(policy);
-
- nomem_out:
-- up_read(&cpufreq_rwsem);
--
- return ret;
- }
-
-@@ -2499,19 +2469,20 @@ int cpufreq_unregister_driver(struct cpu
-
- pr_debug("unregistering driver %s\n", driver->name);
-
-+ /* Protect against concurrent cpu hotplug */
-+ get_online_cpus();
- subsys_interface_unregister(&cpufreq_interface);
- if (cpufreq_boost_supported())
- cpufreq_sysfs_remove_file(&boost.attr);
-
- unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
-
-- down_write(&cpufreq_rwsem);
- write_lock_irqsave(&cpufreq_driver_lock, flags);
-
- cpufreq_driver = NULL;
-
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-- up_write(&cpufreq_rwsem);
-+ put_online_cpus();
-
- return 0;
- }
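For illustration, the protection protocol described in the removal message
above boils down to a few lines of plain C. The following is a user-space
sketch (a pthread/stdatomic analogue; all names are invented and this is
not kernel code): the reader locks only around lookup plus reference grab,
and teardown unpublishes the pointer before waiting for the refcount to
drain, which is exactly why the extra rwsem buys nothing.

  #include <pthread.h>
  #include <stdatomic.h>
  #include <unistd.h>

  struct policy {
  	atomic_int refs;                /* stands in for the policy kobject */
  };

  static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
  static struct policy *cur_policy;   /* per_cpu(cpufreq_cpu_data) stand-in */

  /* Reader: short lock only around lookup + reference grab. */
  static struct policy *policy_get(void)
  {
  	struct policy *p;

  	pthread_mutex_lock(&lookup_lock);
  	p = cur_policy;
  	if (p)
  		atomic_fetch_add(&p->refs, 1);  /* kobject_get() */
  	pthread_mutex_unlock(&lookup_lock);
  	return p;
  }

  static void policy_put(struct policy *p)
  {
  	atomic_fetch_sub(&p->refs, 1);          /* kobject_put() */
  }

  /* Teardown: unpublish first, then wait for references to drain. */
  static void driver_unregister(struct policy *p)
  {
  	pthread_mutex_lock(&lookup_lock);
  	cur_policy = NULL;              /* no new references from here on */
  	pthread_mutex_unlock(&lookup_lock);

  	while (atomic_load(&p->refs))   /* cpufreq_policy_put_kobj() wait */
  		usleep(1000);
  }

  int main(void)
  {
  	static struct policy pol;
  	struct policy *p;

  	cur_policy = &pol;
  	p = policy_get();
  	/* ... use p ... */
  	policy_put(p);
  	driver_unregister(&pol);        /* returns once all refs are gone */
  	return 0;
  }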
diff --git a/patches/cpumask-disable-offstack-on-rt.patch b/patches/cpumask-disable-offstack-on-rt.patch
index 5fcdbbe512f5f..1f6bea8e0f77e 100644
--- a/patches/cpumask-disable-offstack-on-rt.patch
+++ b/patches/cpumask-disable-offstack-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -841,7 +841,7 @@ config IOMMU_HELPER
+@@ -851,7 +851,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
If unsure, say N.
--- a/lib/Kconfig
+++ b/lib/Kconfig
-@@ -391,6 +391,7 @@ config CHECK_SIGNATURE
+@@ -395,6 +395,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
diff --git a/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch
index 52ebb7a3aee1c..0416d434c06fb 100644
--- a/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch
+++ b/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
-@@ -60,7 +60,7 @@ static inline void cast5_fpu_end(bool fp
+@@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fp
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
bool enc)
{
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = CAST5_BLOCK_SIZE;
unsigned int nbytes;
-@@ -76,7 +76,7 @@ static int ecb_crypt(struct blkcipher_de
+@@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_de
u8 *wsrc = walk->src.virt.addr;
u8 *wdst = walk->dst.virt.addr;
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Process multi-block batch */
if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
-@@ -104,10 +104,9 @@ static int ecb_crypt(struct blkcipher_de
+@@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_de
} while (nbytes >= bsize);
done:
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return err;
}
-@@ -228,7 +227,7 @@ static unsigned int __cbc_decrypt(struct
+@@ -227,7 +226,7 @@ static unsigned int __cbc_decrypt(struct
static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct blkcipher_walk walk;
int err;
-@@ -237,12 +236,11 @@ static int cbc_decrypt(struct blkcipher_
+@@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes)) {
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return err;
}
-@@ -312,7 +310,7 @@ static unsigned int __ctr_crypt(struct b
+@@ -311,7 +309,7 @@ static unsigned int __ctr_crypt(struct b
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct blkcipher_walk walk;
int err;
-@@ -321,13 +319,12 @@ static int ctr_crypt(struct blkcipher_de
+@@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_de
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
diff --git a/patches/dm-make-rt-aware.patch b/patches/dm-make-rt-aware.patch
index fb6eccbc040c7..21b28ce1d2673 100644
--- a/patches/dm-make-rt-aware.patch
+++ b/patches/dm-make-rt-aware.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
-@@ -2132,7 +2132,7 @@ static void dm_request_fn(struct request
+@@ -2126,7 +2126,7 @@ static void dm_request_fn(struct request
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
queue_kthread_work(&md->kworker, &tio->work);
diff --git a/patches/drivers-net-fix-livelock-issues.patch b/patches/drivers-net-fix-livelock-issues.patch
index 21af2b1b04a08..06d2a8006144e 100644
--- a/patches/drivers-net-fix-livelock-issues.patch
+++ b/patches/drivers-net-fix-livelock-issues.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
-@@ -2213,11 +2213,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
+@@ -2221,11 +2221,7 @@ static netdev_tx_t atl1c_xmit_frame(stru
}
tpd_req = atl1c_cal_tpd_req(skb);
diff --git a/patches/drivers-random-reduce-preempt-disabled-region.patch b/patches/drivers-random-reduce-preempt-disabled-region.patch
index 31078915df128..1b1132096298e 100644
--- a/patches/drivers-random-reduce-preempt-disabled-region.patch
+++ b/patches/drivers-random-reduce-preempt-disabled-region.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -776,8 +776,6 @@ static void add_timer_randomness(struct
+@@ -796,8 +796,6 @@ static void add_timer_randomness(struct
} sample;
long delta, delta2, delta3;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
-@@ -818,7 +816,6 @@ static void add_timer_randomness(struct
+@@ -838,7 +836,6 @@ static void add_timer_randomness(struct
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
diff --git a/patches/drivers-tty-fix-omap-lock-crap.patch b/patches/drivers-tty-fix-omap-lock-crap.patch
index 5c1d59ee9027d..b6a54199b9e91 100644
--- a/patches/drivers-tty-fix-omap-lock-crap.patch
+++ b/patches/drivers-tty-fix-omap-lock-crap.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
-@@ -1282,13 +1282,10 @@ serial_omap_console_write(struct console
+@@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console
pm_runtime_get_sync(up->dev);
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First save the IER then disable the interrupts
-@@ -1317,8 +1314,7 @@ serial_omap_console_write(struct console
+@@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
diff --git a/patches/drivers-tty-pl011-irq-disable-madness.patch b/patches/drivers-tty-pl011-irq-disable-madness.patch
index 43404cb43d318..34857d9124878 100644
--- a/patches/drivers-tty-pl011-irq-disable-madness.patch
+++ b/patches/drivers-tty-pl011-irq-disable-madness.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
-@@ -2000,13 +2000,19 @@ pl011_console_write(struct console *co,
+@@ -2067,13 +2067,19 @@ pl011_console_write(struct console *co,
clk_enable(uap->clk);
@@ -35,8 +35,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First save the CR then disable the interrupts
-@@ -2028,8 +2034,7 @@ pl011_console_write(struct console *co,
- writew(old_cr, uap->port.membase + UART011_CR);
+@@ -2098,8 +2104,7 @@ pl011_console_write(struct console *co,
+ writew(old_cr, uap->port.membase + UART011_CR);
if (locked)
- spin_unlock(&uap->port.lock);
diff --git a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
index fb381dd566fb9..bcd215a49ffcf 100644
--- a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
+++ b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
@@ -46,13 +46,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -1339,7 +1339,9 @@ i915_gem_ringbuffer_submission(struct dr
- return ret;
- }
+@@ -1264,7 +1264,9 @@ i915_gem_ringbuffer_submission(struct i9
+ if (ret)
+ return ret;
+#ifndef CONFIG_PREEMPT_RT_BASE
- trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), dispatch_flags);
+ trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
+#endif
- i915_gem_execbuffer_move_to_active(vmas, ring);
- i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
+ i915_gem_execbuffer_move_to_active(vmas, params->request);
+ i915_gem_execbuffer_retire_commands(params);
diff --git a/patches/fix-rt-int3-x86_32-3.2-rt.patch b/patches/fix-rt-int3-x86_32-3.2-rt.patch
deleted file mode 100644
index 8f052ac7564ae..0000000000000
--- a/patches/fix-rt-int3-x86_32-3.2-rt.patch
+++ /dev/null
@@ -1,101 +0,0 @@
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: x86: Do not disable preemption in int3 on 32bit
-
-Preemption must be disabled before enabling interrupts in do_trap
-on x86_64 because the stack in use for int3 and debug is a per CPU
-stack set by the IST. But 32bit does not have an IST and the stack
-still belongs to the current task and there is no problem in scheduling
-out the task.
-
-Keep preemption enabled on X86_32 when enabling interrupts for
-do_trap().
-
-The functions are renamed from preempt_conditional_sti/cli() to
-conditional_sti/cli_ist() to annotate that they are used when the
-stack is on the IST.
-
-
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- arch/x86/kernel/traps.c | 28 +++++++++++++++++++++-------
- 1 file changed, 21 insertions(+), 7 deletions(-)
-
---- a/arch/x86/kernel/traps.c
-+++ b/arch/x86/kernel/traps.c
-@@ -88,9 +88,21 @@ static inline void conditional_sti(struc
- local_irq_enable();
- }
-
--static inline void preempt_conditional_sti(struct pt_regs *regs)
-+static inline void conditional_sti_ist(struct pt_regs *regs)
- {
-+#ifdef CONFIG_X86_64
-+ /*
-+ * X86_64 uses a per CPU stack on the IST for certain traps
-+ * like int3. The task can not be preempted when using one
-+ * of these stacks, thus preemption must be disabled, otherwise
-+ * the stack can be corrupted if the task is scheduled out,
-+ * and another task comes in and uses this stack.
-+ *
-+ * On x86_32 the task keeps its own stack and it is OK if the
-+ * task schedules out.
-+ */
- preempt_count_inc();
-+#endif
- if (regs->flags & X86_EFLAGS_IF)
- local_irq_enable();
- }
-@@ -101,11 +113,13 @@ static inline void conditional_cli(struc
- local_irq_disable();
- }
-
--static inline void preempt_conditional_cli(struct pt_regs *regs)
-+static inline void conditional_cli_ist(struct pt_regs *regs)
- {
- if (regs->flags & X86_EFLAGS_IF)
- local_irq_disable();
-+#ifdef CONFIG_X86_64
- preempt_count_dec();
-+#endif
- }
-
- enum ctx_state ist_enter(struct pt_regs *regs)
-@@ -536,9 +550,9 @@ dotraplinkage void notrace do_int3(struc
- * as we may switch to the interrupt stack.
- */
- debug_stack_usage_inc();
-- preempt_conditional_sti(regs);
-+ conditional_sti_ist(regs);
- do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
-- preempt_conditional_cli(regs);
-+ conditional_cli_ist(regs);
- debug_stack_usage_dec();
- exit:
- ist_exit(regs, prev_state);
-@@ -668,12 +682,12 @@ dotraplinkage void do_debug(struct pt_re
- debug_stack_usage_inc();
-
- /* It's safe to allow irq's after DR6 has been saved */
-- preempt_conditional_sti(regs);
-+ conditional_sti_ist(regs);
-
- if (v8086_mode(regs)) {
- handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
- X86_TRAP_DB);
-- preempt_conditional_cli(regs);
-+ conditional_cli_ist(regs);
- debug_stack_usage_dec();
- goto exit;
- }
-@@ -693,7 +707,7 @@ dotraplinkage void do_debug(struct pt_re
- si_code = get_si_code(tsk->thread.debugreg6);
- if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
- send_sigtrap(tsk, regs, error_code, si_code);
-- preempt_conditional_cli(regs);
-+ conditional_cli_ist(regs);
- debug_stack_usage_dec();
-
- exit:
diff --git a/patches/fs-aio-simple-simple-work.patch b/patches/fs-aio-simple-simple-work.patch
index b08c65f861482..01339f8a744ee 100644
--- a/patches/fs-aio-simple-simple-work.patch
+++ b/patches/fs-aio-simple-simple-work.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
-@@ -559,9 +561,9 @@ static int kiocb_cancel(struct aio_kiocb
+@@ -568,9 +570,9 @@ static int kiocb_cancel(struct aio_kiocb
return cancel(&kiocb->common);
}
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_debug("freeing %p\n", ctx);
-@@ -580,8 +582,8 @@ static void free_ioctx_reqs(struct percp
+@@ -589,8 +591,8 @@ static void free_ioctx_reqs(struct percp
if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
complete(&ctx->rq_wait->comp);
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -589,9 +591,9 @@ static void free_ioctx_reqs(struct percp
+@@ -598,9 +600,9 @@ static void free_ioctx_reqs(struct percp
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
-@@ -610,6 +612,14 @@ static void free_ioctx_users(struct perc
+@@ -619,6 +621,14 @@ static void free_ioctx_users(struct perc
percpu_ref_put(&ctx->reqs);
}
diff --git a/patches/fs-block-rt-support.patch b/patches/fs-block-rt-support.patch
index 8f124097ccff8..9e0ba604887ee 100644
--- a/patches/fs-block-rt-support.patch
+++ b/patches/fs-block-rt-support.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -194,7 +194,7 @@ EXPORT_SYMBOL(blk_delay_queue);
+@@ -217,7 +217,7 @@ EXPORT_SYMBOL(blk_delay_queue);
**/
void blk_start_queue(struct request_queue *q)
{
diff --git a/patches/fs-jbd-pull-plug-when-waiting-for-space.patch b/patches/fs-jbd-pull-plug-when-waiting-for-space.patch
deleted file mode 100644
index 0dd4c95d3ec30..0000000000000
--- a/patches/fs-jbd-pull-plug-when-waiting-for-space.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From: Mike Galbraith <mgalbraith@suse.de>
-Date: Wed, 11 Jul 2012 22:05:20 +0000
-Subject: fs, jbd: pull your plug when waiting for space
-
-With an -rt kernel and a heavy sync IO load, tasks can jam
-up on journal locks without unplugging, which can lead to
-terminal IO starvation. Unplug and schedule when waiting for space.
-
-Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Theodore Tso <tytso@mit.edu>
-Link: http://lkml.kernel.org/r/1341812414.7370.73.camel@marge.simpson.net
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- fs/jbd/checkpoint.c | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/fs/jbd/checkpoint.c
-+++ b/fs/jbd/checkpoint.c
-@@ -129,6 +129,8 @@ void __log_wait_for_space(journal_t *jou
- if (journal->j_flags & JFS_ABORT)
- return;
- spin_unlock(&journal->j_state_lock);
-+ if (current->plug)
-+ io_schedule();
- mutex_lock(&journal->j_checkpoint_mutex);
-
- /*
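The reason io_schedule() helps here: before a task sleeps, the scheduler
submits any block IO the task has plugged. Condensed from the
sched_submit_work() of kernel/sched/core.c of this era (quoted from memory,
not verbatim):

  static inline void sched_submit_work(struct task_struct *tsk)
  {
  	if (!tsk->state || tsk_is_pi_blocked(tsk))
  		return;
  	/*
  	 * If we are going to sleep and we have plugged IO queued,
  	 * make sure to submit it to avoid deadlocks.
  	 */
  	if (blk_needs_flush_plug(tsk))
  		blk_schedule_flush_plug(tsk);
  }

io_schedule() ends up in schedule(), which runs this before blocking, so
the queued writes go out instead of being held back while the task sleeps
on j_checkpoint_mutex.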
diff --git a/patches/fs-jbd-replace-bh_state-lock.patch b/patches/fs-jbd-replace-bh_state-lock.patch
index bd240b1081a8e..4496a7ad9a3ce 100644
--- a/patches/fs-jbd-replace-bh_state-lock.patch
+++ b/patches/fs-jbd-replace-bh_state-lock.patch
@@ -6,42 +6,38 @@ bit_spin_locks break under RT.
Based on a previous patch from Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
--
-
- include/linux/buffer_head.h | 10 ++++++++++
- include/linux/jbd_common.h | 24 ++++++++++++++++++++++++
- 2 files changed, 34 insertions(+)
+ include/linux/buffer_head.h | 8 ++++++++
+ include/linux/jbd2.h | 24 ++++++++++++++++++++++++
+ 2 files changed, 32 insertions(+)
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
-@@ -77,6 +77,11 @@ struct buffer_head {
+@@ -77,6 +77,10 @@ struct buffer_head {
atomic_t b_count; /* users using this buffer_head */
#ifdef CONFIG_PREEMPT_RT_BASE
spinlock_t b_uptodate_lock;
-+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
-+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
++#if IS_ENABLED(CONFIG_JBD2)
+ spinlock_t b_state_lock;
+ spinlock_t b_journal_head_lock;
+#endif
#endif
};
-@@ -108,6 +113,11 @@ static inline void buffer_head_init_lock
+@@ -108,6 +112,10 @@ static inline void buffer_head_init_lock
{
#ifdef CONFIG_PREEMPT_RT_BASE
spin_lock_init(&bh->b_uptodate_lock);
-+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
-+ defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
++#if IS_ENABLED(CONFIG_JBD2)
+ spin_lock_init(&bh->b_state_lock);
+ spin_lock_init(&bh->b_journal_head_lock);
+#endif
#endif
}
---- a/include/linux/jbd_common.h
-+++ b/include/linux/jbd_common.h
-@@ -15,32 +15,56 @@ static inline struct journal_head *bh2jh
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -352,32 +352,56 @@ static inline struct journal_head *bh2jh
static inline void jbd_lock_bh_state(struct buffer_head *bh)
{
@@ -97,4 +93,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
}
- #endif
+ #define J_ASSERT(assert) BUG_ON(!(assert))
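The bodies of the jbd_lock_bh_state() hunks are elided by the context
above; the substitution has roughly this shape (a sketch, not the verbatim
patch):

  static inline void jbd_lock_bh_state(struct buffer_head *bh)
  {
  #ifndef CONFIG_PREEMPT_RT_BASE
  	bit_spin_lock(BH_State, &bh->b_state);
  #else
  	spin_lock(&bh->b_state_lock);
  #endif
  }

  static inline void jbd_unlock_bh_state(struct buffer_head *bh)
  {
  #ifndef CONFIG_PREEMPT_RT_BASE
  	bit_spin_unlock(BH_State, &bh->b_state);
  #else
  	spin_unlock(&bh->b_state_lock);
  #endif
  }

A bit_spin_lock() spins with preemption disabled and therefore cannot be
turned into a sleeping lock on RT without giving it a real spinlock_t to
live in, which is what the new b_state_lock member provides.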
diff --git a/patches/fs-replace-bh_uptodate_lock-for-rt.patch b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
index aff9e498fbde0..109d35cb70758 100644
--- a/patches/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -301,8 +301,7 @@ static void end_buffer_async_read(struct
+@@ -305,8 +305,7 @@ static void end_buffer_async_read(struct
* decide that the page is now completely done.
*/
first = page_buffers(page);
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -315,8 +314,7 @@ static void end_buffer_async_read(struct
+@@ -319,8 +318,7 @@ static void end_buffer_async_read(struct
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If none of the buffers had errors and they are all
-@@ -328,9 +326,7 @@ static void end_buffer_async_read(struct
+@@ -332,9 +330,7 @@ static void end_buffer_async_read(struct
return;
still_busy:
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffe
+@@ -362,8 +358,7 @@ void end_buffer_async_write(struct buffe
}
first = page_buffers(page);
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_write(bh);
unlock_buffer(bh);
-@@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffe
+@@ -375,15 +370,12 @@ void end_buffer_async_write(struct buffe
}
tmp = tmp->b_this_page;
}
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index 4a4412259955f..1c037d6d795a6 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -4,14 +4,14 @@ Subject: trace: Add migrate-disabled counter to tracing output
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/ftrace_event.h | 2 ++
+ include/linux/trace_events.h | 2 ++
kernel/trace/trace.c | 9 ++++++---
kernel/trace/trace_events.c | 2 ++
kernel/trace/trace_output.c | 5 +++++
4 files changed, 15 insertions(+), 3 deletions(-)
---- a/include/linux/ftrace_event.h
-+++ b/include/linux/ftrace_event.h
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
@@ -66,6 +66,8 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
@@ -20,10 +20,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ unsigned short padding;
};
- #define FTRACE_MAX_EVENT \
+ #define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1641,6 +1641,8 @@ tracing_generic_entry_update(struct trac
+@@ -1663,6 +1663,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -2563,9 +2565,10 @@ static void print_lat_help_header(struct
+@@ -2560,9 +2562,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
-@@ -162,6 +162,8 @@ static int trace_define_common_fields(vo
+@@ -186,6 +186,8 @@ static int trace_define_common_fields(vo
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
__common_field(int, pid);
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
-@@ -472,6 +472,11 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -428,6 +428,11 @@ int trace_print_lat_fmt(struct trace_seq
else
trace_seq_putc(s, '.');
diff --git a/patches/futex-avoid-double-wake-up-in-PI-futex-wait-wake-on-.patch b/patches/futex-avoid-double-wake-up-in-PI-futex-wait-wake-on-.patch
deleted file mode 100644
index ec0496037c012..0000000000000
--- a/patches/futex-avoid-double-wake-up-in-PI-futex-wait-wake-on-.patch
+++ /dev/null
@@ -1,223 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed, 18 Feb 2015 20:17:31 +0100
-Subject: futex: avoid double wake up in PI futex wait / wake on -RT
-
-The boosted priority is reverted after the unlock but before the
-futex_hash_bucket (hb) has been accessed. The result is that we boost the
-task, deboost the task, boost again for the hb lock, deboost again.
-A sched trace of this scenario looks like the following:
-
-| med_prio-93 sched_wakeup: comm=high_prio pid=92 prio=9 success=1 target_cpu=000
-| med_prio-93 sched_switch: prev_comm=med_prio prev_pid=93 prev_prio=29 prev_state=R ==> next_comm=high_prio next_pid=92 next_prio=9
-|high_prio-92 sched_pi_setprio: comm=low_prio pid=91 oldprio=120 newprio=9
-|high_prio-92 sched_switch: prev_comm=high_prio prev_pid=92 prev_prio=9 prev_state=S ==> next_comm=low_prio next_pid=91 next_prio=9
-| low_prio-91 sched_wakeup: comm=high_prio pid=92 prio=9 success=1 target_cpu=000
-| low_prio-91 sched_pi_setprio: comm=low_prio pid=91 oldprio=9 newprio=120
-| low_prio-91 sched_switch: prev_comm=low_prio prev_pid=91 prev_prio=120 prev_state=R+ ==> next_comm=high_prio next_pid=92 next_prio=9
-|high_prio-92 sched_pi_setprio: comm=low_prio pid=91 oldprio=120 newprio=9
-|high_prio-92 sched_switch: prev_comm=high_prio prev_pid=92 prev_prio=9 prev_state=D ==> next_comm=low_prio next_pid=91 next_prio=9
-| low_prio-91 sched_wakeup: comm=high_prio pid=92 prio=9 success=1 target_cpu=000
-| low_prio-91 sched_pi_setprio: comm=low_prio pid=91 oldprio=9 newprio=120
-| low_prio-91 sched_switch: prev_comm=low_prio prev_pid=91 prev_prio=120 prev_state=R+ ==> next_comm=high_prio next_pid=92 next_prio=9
-
-We see four sched_pi_setprio() invocation but ideally two would be enough.
-The patch tries to avoid the double wakeup by a wake up once the hb lock is
-released. The same test case:
-
-| med_prio-21 sched_wakeup: comm=high_prio pid=20 prio=9 success=1 target_cpu=000
-| med_prio-21 sched_switch: prev_comm=med_prio prev_pid=21 prev_prio=29 prev_state=R ==> next_comm=high_prio next_pid=20 next_prio=9
-|high_prio-20 sched_pi_setprio: comm=low_prio pid=19 oldprio=120 newprio=9
-|high_prio-20 sched_switch: prev_comm=high_prio prev_pid=20 prev_prio=9 prev_state=S ==> next_comm=low_prio next_pid=19 next_prio=9
-| low_prio-19 sched_wakeup: comm=high_prio pid=20 prio=9 success=1 target_cpu=000
-| low_prio-19 sched_pi_setprio: comm=low_prio pid=19 oldprio=9 newprio=120
-| low_prio-19 sched_switch: prev_comm=low_prio prev_pid=19 prev_prio=120 prev_state=R+ ==> next_comm=high_prio next_pid=20 next_prio=9
-
-only two sched_pi_setprio() invocations as one would expect and see
-without -RT.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/futex.c | 32 +++++++++++++++++++++++++++++---
- kernel/locking/rtmutex.c | 40 +++++++++++++++++++++++++++++-----------
- kernel/locking/rtmutex_common.h | 4 ++++
- 3 files changed, 62 insertions(+), 14 deletions(-)
-
---- a/kernel/futex.c
-+++ b/kernel/futex.c
-@@ -1117,11 +1117,13 @@ static void mark_wake_futex(struct wake_
- q->lock_ptr = NULL;
- }
-
--static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
-+static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
-+ struct futex_hash_bucket *hb)
- {
- struct task_struct *new_owner;
- struct futex_pi_state *pi_state = this->pi_state;
- u32 uninitialized_var(curval), newval;
-+ bool deboost;
- int ret = 0;
-
- if (!pi_state)
-@@ -1173,7 +1175,17 @@ static int wake_futex_pi(u32 __user *uad
- raw_spin_unlock_irq(&new_owner->pi_lock);
-
- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-- rt_mutex_unlock(&pi_state->pi_mutex);
-+
-+ deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex);
-+
-+ /*
-+ * We deboost after dropping hb->lock. That prevents a double
-+ * wakeup on RT.
-+ */
-+ spin_unlock(&hb->lock);
-+
-+ if (deboost)
-+ rt_mutex_adjust_prio(current);
-
- return 0;
- }
-@@ -2413,13 +2425,26 @@ static int futex_unlock_pi(u32 __user *u
- */
- match = futex_top_waiter(hb, &key);
- if (match) {
-- ret = wake_futex_pi(uaddr, uval, match);
-+ ret = wake_futex_pi(uaddr, uval, match, hb);
-+
-+ /*
-+ * In case of success wake_futex_pi dropped the hash
-+ * bucket lock.
-+ */
-+ if (!ret)
-+ goto out_putkey;
-+
- /*
- * The atomic access to the futex value generated a
- * pagefault, so retry the user-access and the wakeup:
- */
- if (ret == -EFAULT)
- goto pi_faulted;
-+
-+ /*
-+ * wake_futex_pi has detected invalid state. Tell user
-+ * space.
-+ */
- goto out_unlock;
- }
-
-@@ -2440,6 +2465,7 @@ static int futex_unlock_pi(u32 __user *u
-
- out_unlock:
- spin_unlock(&hb->lock);
-+out_putkey:
- put_futex_key(&key);
- return ret;
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -300,7 +300,7 @@ static void __rt_mutex_adjust_prio(struc
- * of task. We do not use the spin_xx_mutex() variants here as we are
- * outside of the debug path.)
- */
--static void rt_mutex_adjust_prio(struct task_struct *task)
-+void rt_mutex_adjust_prio(struct task_struct *task)
- {
- unsigned long flags;
-
-@@ -957,8 +957,9 @@ static int task_blocks_on_rt_mutex(struc
- /*
- * Wake up the next waiter on the lock.
- *
-- * Remove the top waiter from the current tasks pi waiter list and
-- * wake it up.
-+ * Remove the top waiter from the current tasks pi waiter list,
-+ * wake it up and return whether the current task needs to undo
-+ * a potential priority boosting.
- *
- * Called with lock->wait_lock held.
- */
-@@ -1255,7 +1256,7 @@ static inline int rt_mutex_slowtrylock(s
- /*
- * Slow path to release a rt-mutex:
- */
--static void __sched
-+static bool __sched
- rt_mutex_slowunlock(struct rt_mutex *lock)
- {
- raw_spin_lock(&lock->wait_lock);
-@@ -1298,7 +1299,7 @@ rt_mutex_slowunlock(struct rt_mutex *loc
- while (!rt_mutex_has_waiters(lock)) {
- /* Drops lock->wait_lock ! */
- if (unlock_rt_mutex_safe(lock) == true)
-- return;
-+ return false;
- /* Relock the rtmutex and try again */
- raw_spin_lock(&lock->wait_lock);
- }
-@@ -1311,8 +1312,7 @@ rt_mutex_slowunlock(struct rt_mutex *loc
-
- raw_spin_unlock(&lock->wait_lock);
-
-- /* Undo pi boosting if necessary: */
-- rt_mutex_adjust_prio(current);
-+ return true;
- }
-
- /*
-@@ -1363,12 +1363,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
-
- static inline void
- rt_mutex_fastunlock(struct rt_mutex *lock,
-- void (*slowfn)(struct rt_mutex *lock))
-+ bool (*slowfn)(struct rt_mutex *lock))
- {
-- if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
-+ if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
- rt_mutex_deadlock_account_unlock(current);
-- else
-- slowfn(lock);
-+ } else if (slowfn(lock)) {
-+ /* Undo pi boosting if necessary: */
-+ rt_mutex_adjust_prio(current);
-+ }
- }
-
- /**
-@@ -1463,6 +1465,22 @@ void __sched rt_mutex_unlock(struct rt_m
- EXPORT_SYMBOL_GPL(rt_mutex_unlock);
-
- /**
-+ * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
-+ * @lock: the rt_mutex to be unlocked
-+ *
-+ * Returns: true/false indicating whether priority adjustment is
-+ * required or not.
-+ */
-+bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
-+{
-+ if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
-+ rt_mutex_deadlock_account_unlock(current);
-+ return false;
-+ }
-+ return rt_mutex_slowunlock(lock);
-+}
-+
-+/**
- * rt_mutex_destroy - mark a mutex unusable
- * @lock: the mutex to be destroyed
- *
---- a/kernel/locking/rtmutex_common.h
-+++ b/kernel/locking/rtmutex_common.h
-@@ -132,6 +132,10 @@ extern int rt_mutex_finish_proxy_lock(st
- struct rt_mutex_waiter *waiter);
- extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
-
-+extern bool rt_mutex_futex_unlock(struct rt_mutex *lock);
-+
-+extern void rt_mutex_adjust_prio(struct task_struct *task);
-+
- #ifdef CONFIG_DEBUG_RT_MUTEXES
- # include "rtmutex-debug.h"
- #else
diff --git a/patches/futex-requeue-pi-fix.patch b/patches/futex-requeue-pi-fix.patch
index d2938959eaf7f..8bdb0b62e1bbc 100644
--- a/patches/futex-requeue-pi-fix.patch
+++ b/patches/futex-requeue-pi-fix.patch
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1603,6 +1604,35 @@ int rt_mutex_start_proxy_lock(struct rt_
+@@ -1631,6 +1632,35 @@ int rt_mutex_start_proxy_lock(struct rt_
return 1;
}
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
RT_MUTEX_FULL_CHAINWALK);
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -120,6 +120,7 @@ enum rtmutex_chainwalk {
+@@ -98,6 +98,7 @@ enum rtmutex_chainwalk {
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
diff --git a/patches/genirq--Handle-interrupts-with-primary-and-threaded-handler-gracefully b/patches/genirq--Handle-interrupts-with-primary-and-threaded-handler-gracefully
deleted file mode 100644
index 34a3e86abf72c..0000000000000
--- a/patches/genirq--Handle-interrupts-with-primary-and-threaded-handler-gracefully
+++ /dev/null
@@ -1,333 +0,0 @@
-Subject: genirq: Handle force threading of interrupts with primary and thread handler
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sat, 19 Sep 2015 11:56:20 +0200
-
-Force threading of interrupts does not deal with interrupts which are
-requested with a primary and a threaded handler. The current policy is
-to leave them alone and let the primary handler run in interrupt
-context, but we set the ONESHOT flag for those interrupts as well.
-
-Kohji Okuno debugged a problem with the SDHCI driver where the
-interrupt thread waits for a hardware interrupt to trigger, which cannot
-work well because the hardware interrupt is masked due to the ONESHOT
-flag being set. He proposed to set the ONESHOT flag only if the
-interrupt does not provide a thread handler.
-
-Though that does not work either because these interrupts can be
-shared. So the other interrupt would rightfully get the ONESHOT flag
-set and therefore the same situation would happen again.
-
-To deal with this properly, we need to force-thread the primary handler
-of such interrupts as well. That means that the primary interrupt
-handler is treated as any other primary interrupt handler which is not
-marked IRQF_NO_THREAD. The threaded handler becomes a separate thread
-so the SDHCI flow logic can be handled gracefully.
-
-The same issue was reported against 4.1-rt.
-
-Reported-by: Kohji Okuno <okuno.kohji@jp.panasonic.com>
-Reported-By: Michal Šmucr <msmucr@gmail.com>
-Reported-and-tested-by: Nathan Sullivan <nathan.sullivan@ni.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: stable-rt@vger.kernel.org
----
-
-This requires reverting commit d04ea10ba1ea 'mmc: sdhci: don't provide
-hard irq handler'
-
- include/linux/interrupt.h | 2
- kernel/irq/manage.c | 160 +++++++++++++++++++++++++++++++++-------------
- 2 files changed, 120 insertions(+), 42 deletions(-)
-
---- a/include/linux/interrupt.h
-+++ b/include/linux/interrupt.h
-@@ -102,6 +102,7 @@ typedef irqreturn_t (*irq_handler_t)(int
- * @flags: flags (see IRQF_* above)
- * @thread_fn: interrupt handler function for threaded interrupts
- * @thread: thread pointer for threaded interrupts
-+ * @secondary: pointer to secondary irqaction (force threading)
- * @thread_flags: flags related to @thread
- * @thread_mask: bitmask for keeping track of @thread activity
- * @dir: pointer to the proc/irq/NN/name entry
-@@ -113,6 +114,7 @@ struct irqaction {
- struct irqaction *next;
- irq_handler_t thread_fn;
- struct task_struct *thread;
-+ struct irqaction *secondary;
- unsigned int irq;
- unsigned int flags;
- unsigned long thread_flags;
---- a/kernel/irq/manage.c
-+++ b/kernel/irq/manage.c
-@@ -697,6 +697,12 @@ static irqreturn_t irq_nested_primary_ha
- return IRQ_NONE;
- }
-
-+static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
-+{
-+ WARN(1, "Secondary action handler called for irq %d\n", irq);
-+ return IRQ_NONE;
-+}
-+
- static int irq_wait_for_interrupt(struct irqaction *action)
- {
- set_current_state(TASK_INTERRUPTIBLE);
-@@ -723,7 +729,8 @@ static int irq_wait_for_interrupt(struct
- static void irq_finalize_oneshot(struct irq_desc *desc,
- struct irqaction *action)
- {
-- if (!(desc->istate & IRQS_ONESHOT))
-+ if (!(desc->istate & IRQS_ONESHOT) ||
-+ action->handler == irq_forced_secondary_handler)
- return;
- again:
- chip_bus_lock(desc);
-@@ -877,6 +884,18 @@ static void irq_thread_dtor(struct callb
- irq_finalize_oneshot(desc, action);
- }
-
-+static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
-+{
-+ struct irqaction *secondary = action->secondary;
-+
-+ if (WARN_ON_ONCE(!secondary))
-+ return;
-+
-+ raw_spin_lock_irq(&desc->lock);
-+ __irq_wake_thread(desc, secondary);
-+ raw_spin_unlock_irq(&desc->lock);
-+}
-+
- /*
- * Interrupt handler thread
- */
-@@ -907,6 +926,8 @@ static int irq_thread(void *data)
- action_ret = handler_fn(desc, action);
- if (action_ret == IRQ_HANDLED)
- atomic_inc(&desc->threads_handled);
-+ if (action_ret == IRQ_WAKE_THREAD)
-+ irq_wake_secondary(desc, action);
-
- wake_threads_waitq(desc);
- }
-@@ -951,20 +972,36 @@ void irq_wake_thread(unsigned int irq, v
- }
- EXPORT_SYMBOL_GPL(irq_wake_thread);
-
--static void irq_setup_forced_threading(struct irqaction *new)
-+static int irq_setup_forced_threading(struct irqaction *new)
- {
- if (!force_irqthreads)
-- return;
-+ return 0;
- if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
-- return;
-+ return 0;
-
- new->flags |= IRQF_ONESHOT;
-
-- if (!new->thread_fn) {
-- set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
-- new->thread_fn = new->handler;
-- new->handler = irq_default_primary_handler;
-- }
-+ /*
-+ * Handle the case where we have a real primary handler and a
-+ * thread handler. We force thread them as well by creating a
-+ * secondary action.
-+ */
-+ if (new->handler != irq_default_primary_handler && new->thread_fn) {
-+ /* Allocate the secondary action */
-+ new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
-+ if (!new->secondary)
-+ return -ENOMEM;
-+ new->secondary->handler = irq_forced_secondary_handler;
-+ new->secondary->thread_fn = new->thread_fn;
-+ new->secondary->dev_id = new->dev_id;
-+ new->secondary->irq = new->irq;
-+ new->secondary->name = new->name;
-+ }
-+ /* Deal with the primary handler */
-+ set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
-+ new->thread_fn = new->handler;
-+ new->handler = irq_default_primary_handler;
-+ return 0;
- }
-
- static int irq_request_resources(struct irq_desc *desc)
-@@ -984,6 +1021,48 @@ static void irq_release_resources(struct
- c->irq_release_resources(d);
- }
-
-+static int
-+setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
-+{
-+ struct task_struct *t;
-+ struct sched_param param = {
-+ .sched_priority = MAX_USER_RT_PRIO/2,
-+ };
-+
-+ if (!secondary) {
-+ t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
-+ new->name);
-+ } else {
-+ t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
-+ new->name);
-+ param.sched_priority += 1;
-+ }
-+
-+ if (IS_ERR(t))
-+ return PTR_ERR(t);
-+
-+ sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
-+
-+ /*
-+ * We keep the reference to the task struct even if
-+ * the thread dies to avoid that the interrupt code
-+ * references an already freed task_struct.
-+ */
-+ get_task_struct(t);
-+ new->thread = t;
-+ /*
-+ * Tell the thread to set its affinity. This is
-+ * important for shared interrupt handlers as we do
-+ * not invoke setup_affinity() for the secondary
-+ * handlers as everything is already set up. Even for
-+ * interrupts marked with IRQF_NO_BALANCE this is
-+ * correct as we want the thread to move to the cpu(s)
-+ * on which the requesting code placed the interrupt.
-+ */
-+ set_bit(IRQTF_AFFINITY, &new->thread_flags);
-+ return 0;
-+}
-+
- /*
- * Internal function to register an irqaction - typically used to
- * allocate special interrupts that are part of the architecture.
-@@ -1004,6 +1083,8 @@ static int
- if (!try_module_get(desc->owner))
- return -ENODEV;
-
-+ new->irq = irq;
-+
- /*
- * Check whether the interrupt nests into another interrupt
- * thread.
-@@ -1021,8 +1102,11 @@ static int
- */
- new->handler = irq_nested_primary_handler;
- } else {
-- if (irq_settings_can_thread(desc))
-- irq_setup_forced_threading(new);
-+ if (irq_settings_can_thread(desc)) {
-+ ret = irq_setup_forced_threading(new);
-+ if (ret)
-+ goto out_mput;
-+ }
- }
-
- /*
-@@ -1031,37 +1115,14 @@ static int
- * thread.
- */
- if (new->thread_fn && !nested) {
-- struct task_struct *t;
-- static const struct sched_param param = {
-- .sched_priority = MAX_USER_RT_PRIO/2,
-- };
--
-- t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
-- new->name);
-- if (IS_ERR(t)) {
-- ret = PTR_ERR(t);
-+ ret = setup_irq_thread(new, irq, false);
-+ if (ret)
- goto out_mput;
-+ if (new->secondary) {
-+ ret = setup_irq_thread(new->secondary, irq, true);
-+ if (ret)
-+ goto out_thread;
- }
--
-- sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
--
-- /*
-- * We keep the reference to the task struct even if
-- * the thread dies to avoid that the interrupt code
-- * references an already freed task_struct.
-- */
-- get_task_struct(t);
-- new->thread = t;
-- /*
-- * Tell the thread to set its affinity. This is
-- * important for shared interrupt handlers as we do
-- * not invoke setup_affinity() for the secondary
-- * handlers as everything is already set up. Even for
-- * interrupts marked with IRQF_NO_BALANCE this is
-- * correct as we want the thread to move to the cpu(s)
-- * on which the requesting code placed the interrupt.
-- */
-- set_bit(IRQTF_AFFINITY, &new->thread_flags);
- }
-
- if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-@@ -1234,7 +1295,6 @@ static int
- irq, nmsk, omsk);
- }
-
-- new->irq = irq;
- *old_ptr = new;
-
- irq_pm_install_action(desc, new);
-@@ -1260,6 +1320,8 @@ static int
- */
- if (new->thread)
- wake_up_process(new->thread);
-+ if (new->secondary)
-+ wake_up_process(new->secondary->thread);
-
- register_irq_proc(irq, desc);
- new->dir = NULL;
-@@ -1290,6 +1352,13 @@ static int
- kthread_stop(t);
- put_task_struct(t);
- }
-+ if (new->secondary && new->secondary->thread) {
-+ struct task_struct *t = new->secondary->thread;
-+
-+ new->secondary->thread = NULL;
-+ kthread_stop(t);
-+ put_task_struct(t);
-+ }
- out_mput:
- module_put(desc->owner);
- return ret;
-@@ -1397,9 +1466,14 @@ static struct irqaction *__free_irq(unsi
- if (action->thread) {
- kthread_stop(action->thread);
- put_task_struct(action->thread);
-+ if (action->secondary && action->secondary->thread) {
-+ kthread_stop(action->secondary->thread);
-+ put_task_struct(action->secondary->thread);
-+ }
- }
-
- module_put(desc->owner);
-+ kfree(action->secondary);
- return action;
- }
-
-@@ -1543,8 +1617,10 @@ int request_threaded_irq(unsigned int ir
- retval = __setup_irq(irq, desc, action);
- chip_bus_sync_unlock(desc);
-
-- if (retval)
-+ if (retval) {
-+ kfree(action->secondary);
- kfree(action);
-+ }
-
- #ifdef CONFIG_DEBUG_SHIRQ_FIXME
- if (!retval && (irqflags & IRQF_SHARED)) {
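For reference, the configuration the removed patch dealt with is a driver
that installs both a real primary handler and a threaded handler. A minimal
sketch follows; the foo_* device and helpers are hypothetical stand-ins,
only request_threaded_irq() and the irqreturn codes are real API:

  #include <linux/interrupt.h>

  struct foo_dev { int irq; /* ... device state ... */ };

  /* All foo_* helpers are hypothetical. */
  static bool foo_irq_pending(struct foo_dev *foo);
  static void foo_mask_irq(struct foo_dev *foo);
  static void foo_unmask_irq(struct foo_dev *foo);
  static void foo_handle_events(struct foo_dev *foo);

  /* Hard irq context: quiesce the device, decide if the thread must run. */
  static irqreturn_t foo_irq(int irq, void *dev_id)
  {
  	struct foo_dev *foo = dev_id;

  	if (!foo_irq_pending(foo))
  		return IRQ_NONE;        /* shared line, not ours */
  	foo_mask_irq(foo);
  	return IRQ_WAKE_THREAD;         /* run foo_irq_thread() */
  }

  /* Process context: the slow, possibly sleeping work. */
  static irqreturn_t foo_irq_thread(int irq, void *dev_id)
  {
  	struct foo_dev *foo = dev_id;

  	foo_handle_events(foo);
  	foo_unmask_irq(foo);
  	return IRQ_HANDLED;
  }

  /* in foo_probe():
   *	ret = request_threaded_irq(foo->irq, foo_irq, foo_irq_thread,
   *				   IRQF_SHARED, "foo", foo);
   */

With forced threading (threadirqs or RT), the patch gave the primary
handler its own thread and ran thread_fn from a secondary irqaction, hence
the "irq/%d-s-%s" naming visible in the diff above.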
diff --git a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
index ba421a181f166..6617a5fcfc12f 100644
--- a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -181,6 +181,62 @@ static inline void
+@@ -183,6 +183,62 @@ static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
-@@ -220,7 +276,17 @@ int irq_set_affinity_locked(struct irq_d
+@@ -222,7 +278,17 @@ int irq_set_affinity_locked(struct irq_d
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
irqd_set(data, IRQD_AFFINITY_SET);
-@@ -258,10 +324,8 @@ int irq_set_affinity_hint(unsigned int i
+@@ -260,10 +326,8 @@ int irq_set_affinity_hint(unsigned int i
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
-@@ -283,6 +347,13 @@ static void irq_affinity_notify(struct w
+@@ -285,6 +349,13 @@ static void irq_affinity_notify(struct w
kref_put(&notify->kref, notify->release);
}
@@ -133,7 +133,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
-@@ -312,6 +383,8 @@ irq_set_affinity_notifier(unsigned int i
+@@ -314,6 +385,8 @@ irq_set_affinity_notifier(unsigned int i
notify->irq = irq;
kref_init(&notify->kref);
INIT_WORK(&notify->work, irq_affinity_notify);
diff --git a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
index 78d70daec6e33..4789651832413 100644
--- a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+++ b/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -267,7 +267,7 @@ static int sync_unplug_thread(void *data
+@@ -268,7 +268,7 @@ static int sync_unplug_thread(void *data
* we don't want any more work on this CPU.
*/
current->flags &= ~PF_NO_SETAFFINITY;
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 4ddd270805bf1..8f69f74689e9c 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -221,9 +221,6 @@ static inline void smpboot_thread_init(v
+@@ -222,9 +222,6 @@ static inline void smpboot_thread_init(v
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
-@@ -234,6 +231,8 @@ extern bool try_get_online_cpus(void);
+@@ -234,6 +231,8 @@ extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
-@@ -252,6 +251,8 @@ static inline void cpu_hotplug_done(void
+@@ -251,6 +250,8 @@ static inline void cpu_hotplug_done(void
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* These aren't inline functions due to a GCC bug. */
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -88,6 +88,100 @@ static struct {
+@@ -89,6 +89,100 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -149,9 +149,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void get_online_cpus(void)
{
-@@ -349,13 +443,14 @@ static int __ref take_cpu_down(void *_pa
+@@ -338,13 +432,14 @@ static int take_cpu_down(void *_param)
/* Requires cpu_add_remove_lock to be held */
- static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
+ static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
- int err, nr_calls = 0;
+ int mycpu, err, nr_calls = 0;
@@ -165,7 +165,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -363,7 +458,27 @@ static int __ref _cpu_down(unsigned int
+@@ -352,7 +447,27 @@ static int _cpu_down(unsigned int cpu, i
if (!cpu_online(cpu))
return -EINVAL;
@@ -193,7 +193,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
if (err) {
-@@ -427,6 +542,8 @@ static int __ref _cpu_down(unsigned int
+@@ -424,6 +539,8 @@ static int _cpu_down(unsigned int cpu, i
check_for_tasks(cpu);
out_release:
diff --git a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
index cc7d94c5daab6..5da8b011d622d 100644
--- a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+++ b/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -167,7 +167,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -168,7 +168,7 @@ static int cpu_unplug_begin(unsigned int
struct task_struct *tsk;
init_completion(&hp->synced);
diff --git a/patches/hotplug-use-migrate-disable.patch b/patches/hotplug-use-migrate-disable.patch
index 25844ce8e1714..a9175682f6a6c 100644
--- a/patches/hotplug-use-migrate-disable.patch
+++ b/patches/hotplug-use-migrate-disable.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -466,14 +466,13 @@ static int __ref _cpu_down(unsigned int
+@@ -455,14 +455,13 @@ static int _cpu_down(unsigned int cpu, i
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpu_hotplug_begin();
err = cpu_unplug_begin(cpu);
-@@ -546,6 +545,7 @@ static int __ref _cpu_down(unsigned int
+@@ -543,6 +542,7 @@ static int _cpu_down(unsigned int cpu, i
out_release:
cpu_unplug_done(cpu);
out_cancel:
diff --git a/patches/hrtimer-enfore-64byte-alignment.patch b/patches/hrtimer-enfore-64byte-alignment.patch
new file mode 100644
index 0000000000000..336f03719098f
--- /dev/null
+++ b/patches/hrtimer-enfore-64byte-alignment.patch
@@ -0,0 +1,28 @@
+From e35e67cb032e78055b63eae5a3a370664fabfc01 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 23 Dec 2015 20:57:41 +0100
+Subject: [PATCH] hrtimer: enfore 64byte alignment
+
+The patch "hrtimer: Fixup hrtimer callback changes for preempt-rt" adds
+a list_head "expired" to struct hrtimer_clock_base, and with it we run into
+BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -124,11 +124,7 @@ struct hrtimer_sleeper {
+ struct task_struct *task;
+ };
+
+-#ifdef CONFIG_64BIT
+ # define HRTIMER_CLOCK_BASE_ALIGN 64
+-#else
+-# define HRTIMER_CLOCK_BASE_ALIGN 32
+-#endif
+
+ /**
+ * struct hrtimer_clock_base - the timer base for a specific clock
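The size arithmetic behind this change: a struct list_head is two pointers,
which pushes the clock base past 32 bytes even on 32bit. A rough,
compile-anywhere illustration (the fields are stand-ins, not the exact
kernel layout):

  #include <assert.h>
  #include <stdio.h>

  struct list_head_ish { void *next, *prev; };

  struct clock_base_ish {
  	void *cpu_base;                 /* struct hrtimer_cpu_base * */
  	int index;
  	int clockid;
  	void *active;                   /* timerqueue head, condensed */
  	struct list_head_ish expired;   /* the newly added member */
  	long long offset;               /* ktime_t */
  	void *get_time;                 /* function pointer */
  };

  int main(void)
  {
  	printf("sizeof = %zu\n", sizeof(struct clock_base_ish));
  	/* Already 36 bytes on ILP32, so a 32-byte slot cannot hold the
  	 * struct any more; hence 64 bytes unconditionally. */
  	assert(sizeof(struct clock_base_ish) > 32);
  	return 0;
  }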
diff --git a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 7e46774ae69e7..42d03ce29a17b 100644
--- a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -12,17 +12,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
- include/linux/hrtimer.h | 3
+ include/linux/hrtimer.h | 4 +
kernel/sched/core.c | 1
kernel/sched/rt.c | 1
- kernel/time/hrtimer.c | 219 +++++++++++++++++++++++++++++++++++++++++------
+ kernel/time/hrtimer.c | 142 +++++++++++++++++++++++++++++++++++++++++++----
kernel/time/tick-sched.c | 1
kernel/watchdog.c | 1
- 6 files changed, 200 insertions(+), 26 deletions(-)
+ 6 files changed, 139 insertions(+), 11 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -111,6 +111,8 @@ struct hrtimer {
+@@ -102,6 +102,8 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
@@ -31,17 +31,25 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
ktime_t praecox;
#endif
-@@ -150,6 +152,7 @@ struct hrtimer_clock_base {
+@@ -141,6 +143,7 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
+ struct list_head expired;
- ktime_t resolution;
ktime_t (*get_time)(void);
- ktime_t softirq_time;
+ ktime_t offset;
+ } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
+@@ -184,6 +187,7 @@ struct hrtimer_cpu_base {
+ raw_spinlock_t lock;
+ seqcount_t seq;
+ struct hrtimer *running;
++ struct hrtimer *running_soft;
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set_seq;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -461,6 +461,7 @@ static void init_rq_hrtick(struct rq *rq
+@@ -438,6 +438,7 @@ static void init_rq_hrtick(struct rq *rq
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
@@ -51,7 +59,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
static inline void hrtick_clear(struct rq *rq)
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -44,6 +44,7 @@ void init_rt_bandwidth(struct rt_bandwid
+@@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwid
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -61,136 +69,42 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -577,8 +577,7 @@ static int hrtimer_reprogram(struct hrti
- * When the callback is running, we do not reprogram the clock event
- * device. The timer callback is either running on a different CPU or
- * the callback is executed in the hrtimer_interrupt context. The
-- * reprogramming is handled either by the softirq, which called the
-- * callback or at the end of the hrtimer_interrupt.
-+ * reprogramming is handled at the end of the hrtimer_interrupt.
- */
- if (hrtimer_callback_running(timer))
- return 0;
-@@ -622,6 +621,9 @@ static int hrtimer_reprogram(struct hrti
- return res;
- }
-
-+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now);
-+static int hrtimer_rt_defer(struct hrtimer *timer);
-+
- /*
- * Initialize the high resolution related parts of cpu_base
- */
-@@ -631,6 +633,21 @@ static inline void hrtimer_init_hres(str
- base->hres_active = 0;
- }
-
-+static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-+ struct hrtimer_clock_base *base,
-+ int wakeup)
-+{
-+ if (!hrtimer_reprogram(timer, base))
-+ return 0;
-+ if (!wakeup)
-+ return -ETIME;
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ if (!hrtimer_rt_defer(timer))
-+ return -ETIME;
-+#endif
-+ return 1;
-+}
-+
- static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
- {
- ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
-@@ -712,6 +729,13 @@ static inline int hrtimer_is_hres_enable
- static inline int hrtimer_switch_to_hres(void) { return 0; }
+@@ -730,11 +730,8 @@ static inline int hrtimer_is_hres_enable
+ static inline void hrtimer_switch_to_hres(void) { }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
-+static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-+ struct hrtimer_clock_base *base,
-+ int wakeup)
-+{
-+ return 0;
-+}
-+
- static inline int hrtimer_reprogram(struct hrtimer *timer,
- struct hrtimer_clock_base *base)
- {
-@@ -719,7 +743,6 @@ static inline int hrtimer_reprogram(stru
- }
+-static inline int hrtimer_reprogram(struct hrtimer *timer,
+- struct hrtimer_clock_base *base)
+-{
+- return 0;
+-}
++static inline void hrtimer_reprogram(struct hrtimer *timer,
++ struct hrtimer_clock_base *base) { }
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
--
- #endif /* CONFIG_HIGH_RES_TIMERS */
- /*
-@@ -854,9 +877,9 @@ void hrtimer_wait_for_timer(const struct
+@@ -883,7 +880,7 @@ void hrtimer_wait_for_timer(const struct
{
struct hrtimer_clock_base *base = timer->base;
- if (base && base->cpu_base && !hrtimer_hres_active())
+ if (base && base->cpu_base && !timer->irqsafe)
wait_event(base->cpu_base->wait,
-- !(timer->state & HRTIMER_STATE_CALLBACK));
-+ !(timer->state & HRTIMER_STATE_CALLBACK));
+ !(hrtimer_callback_running(timer)));
}
-
- #else
-@@ -906,6 +929,11 @@ static void __remove_hrtimer(struct hrti
- if (!(timer->state & HRTIMER_STATE_ENQUEUED))
- goto out;
+@@ -933,6 +930,11 @@ static void __remove_hrtimer(struct hrti
+ if (!(state & HRTIMER_STATE_ENQUEUED))
+ return;
+ if (unlikely(!list_empty(&timer->cb_entry))) {
+ list_del_init(&timer->cb_entry);
-+ goto out;
++ return;
+ }
+
- next_timer = timerqueue_getnext(&base->active);
- timerqueue_del(&base->active, &timer->node);
- if (&timer->node == next_timer) {
-@@ -1016,15 +1044,26 @@ int __hrtimer_start_range_ns(struct hrti
- * on dynticks target.
- */
- wake_up_nohz_cpu(new_base->cpu_base->cpu);
-- } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
-- hrtimer_reprogram(timer, new_base)) {
-+ } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases)) {
-+
-+ ret = hrtimer_enqueue_reprogram(timer, new_base, wakeup);
-+ if (ret < 0) {
-+ /*
-+ * In case we failed to reprogram the timer (mostly
-+ * because out current timer is already elapsed),
-+ * remove it again and report a failure. This avoids
-+ * stale base->first entries.
-+ */
-+ debug_deactivate(timer);
-+ __remove_hrtimer(timer, new_base,
-+ timer->state & HRTIMER_STATE_CALLBACK, 0);
-+ } else if (ret > 0) {
- /*
- * Only allow reprogramming if the new base is on this CPU.
- * (it might still be on another CPU if the timer was pending)
- *
- * XXX send_remote_softirq() ?
- */
-- if (wakeup) {
- /*
- * We need to drop cpu_base->lock to avoid a
- * lock ordering issue vs. rq->lock.
-@@ -1032,9 +1071,7 @@ int __hrtimer_start_range_ns(struct hrti
- raw_spin_unlock(&new_base->cpu_base->lock);
- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- local_irq_restore(flags);
-- return ret;
-- } else {
-- __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+ return 0;
- }
- }
+ if (!timerqueue_del(&base->active, &timer->node))
+ cpu_base->active_bases &= ~(1 << base->index);
-@@ -1189,6 +1226,7 @@ static void __hrtimer_init(struct hrtime
+@@ -1162,6 +1164,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
@@ -198,61 +112,44 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
timerqueue_init(&timer->node);
#ifdef CONFIG_TIMER_STATS
-@@ -1272,10 +1310,128 @@ static void __run_hrtimer(struct hrtimer
- timer->state &= ~HRTIMER_STATE_CALLBACK;
- }
+@@ -1202,6 +1205,7 @@ bool hrtimer_active(const struct hrtimer
+ seq = raw_read_seqcount_begin(&cpu_base->seq);
--#ifdef CONFIG_HIGH_RES_TIMERS
--
- static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+ if (timer->state != HRTIMER_STATE_INACTIVE ||
++ cpu_base->running_soft == timer ||
+ cpu_base->running == timer)
+ return true;
+
+@@ -1292,10 +1296,111 @@ static void __run_hrtimer(struct hrtimer
+ cpu_base->running = NULL;
+ }
+#ifdef CONFIG_PREEMPT_RT_BASE
+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer,
+ struct hrtimer_clock_base *base)
+{
-+ /*
-+ * Note, we clear the callback flag before we requeue the
-+ * timer otherwise we trigger the callback_running() check
-+ * in hrtimer_reprogram().
-+ */
-+ timer->state &= ~HRTIMER_STATE_CALLBACK;
-+
-+ if (restart != HRTIMER_NORESTART) {
-+ BUG_ON(hrtimer_active(timer));
-+ /*
-+ * Enqueue the timer, if it's the leftmost timer then
-+ * we need to reprogram it.
-+ */
-+ if (!enqueue_hrtimer(timer, base))
-+ return;
++ int leftmost;
+
-+#ifndef CONFIG_HIGH_RES_TIMERS
-+ }
-+#else
-+ if (base->cpu_base->hres_active &&
-+ hrtimer_reprogram(timer, base))
-+ goto requeue;
++ if (restart != HRTIMER_NORESTART &&
++ !(timer->state & HRTIMER_STATE_ENQUEUED)) {
+
-+ } else if (hrtimer_active(timer)) {
-+ /*
-+ * If the timer was rearmed on another CPU, reprogram
-+ * the event device.
-+ */
-+ if (&timer->node == base->active.next &&
-+ base->cpu_base->hres_active &&
-+ hrtimer_reprogram(timer, base))
-+ goto requeue;
-+ }
-+ return;
++ leftmost = enqueue_hrtimer(timer, base);
++ if (!leftmost)
++ return;
++#ifdef CONFIG_HIGH_RES_TIMERS
++ if (!hrtimer_is_hres_active(timer)) {
++ /*
++ * Kick to reschedule the next tick to handle the new timer
++ * on dynticks target.
++ */
++ if (base->cpu_base->nohz_active)
++ wake_up_nohz_cpu(base->cpu_base->cpu);
++ } else {
+
-+requeue:
-+ /*
-+ * Timer is expired. Thus move it from tree to pending list
-+ * again.
-+ */
-+ __remove_hrtimer(timer, base, timer->state, 0);
-+ list_add_tail(&timer->cb_entry, &base->expired);
++ hrtimer_reprogram(timer, base);
++ }
+#endif
++ }
+}
+
+/*
@@ -285,8 +182,11 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+ * Same as the above __run_hrtimer function,
+ * except that we run with interrupts enabled.
+ */
-+ debug_hrtimer_deactivate(timer);
-+ __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
++ debug_deactivate(timer);
++ cpu_base->running_soft = timer;
++ raw_write_seqcount_barrier(&cpu_base->seq);
++
++ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
+ timer_stats_account_hrtimer(timer);
+ fn = timer->function;
+
@@ -295,6 +195,10 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+ raw_spin_lock_irq(&cpu_base->lock);
+
+ hrtimer_rt_reprogram(restart, timer, base);
++ raw_write_seqcount_barrier(&cpu_base->seq);
++
++ WARN_ON_ONCE(cpu_base->running_soft != timer);
++ cpu_base->running_soft = NULL;
+ }
+ }
+
@@ -315,106 +219,53 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+#else
+
-+static inline void hrtimer_rt_run_pending(void)
-+{
-+ hrtimer_peek_ahead_timers();
-+}
-+
+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
+
+#endif
+
-+#ifdef CONFIG_HIGH_RES_TIMERS
+
- /*
- * High resolution timer interrupt
- * Called with interrupts disabled
-@@ -1284,7 +1440,7 @@ void hrtimer_interrupt(struct clock_even
+ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- ktime_t expires_next, now, entry_time, delta;
-- int i, retries = 0;
-+ int i, retries = 0, raise = 0;
+ struct hrtimer_clock_base *base = cpu_base->clock_base;
+ unsigned int active = cpu_base->active_bases;
++ int raise = 0;
- BUG_ON(!cpu_base->hres_active);
- cpu_base->nr_events++;
-@@ -1343,7 +1499,10 @@ void hrtimer_interrupt(struct clock_even
+ for (; active; base++, active >>= 1) {
+ struct timerqueue_node *node;
+@@ -1335,15 +1440,20 @@ static void __hrtimer_run_queues(struct
if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
break;
-- __run_hrtimer(timer, &basenow);
-+ if (!hrtimer_rt_defer(timer))
-+ __run_hrtimer(timer, &basenow);
-+ else
-+ raise = 1;
+- __run_hrtimer(cpu_base, base, timer, &basenow);
++ if (!hrtimer_rt_defer(timer))
++ __run_hrtimer(cpu_base, base, timer, &basenow);
++ else
++ raise = 1;
}
}
- /* Reevaluate the clock bases for the next expiry */
-@@ -1360,6 +1519,10 @@ void hrtimer_interrupt(struct clock_even
- if (expires_next.tv64 == KTIME_MAX ||
- !tick_program_event(expires_next, 0)) {
- cpu_base->hang_detected = 0;
-+
-+ if (raise)
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+
- return;
- }
-
-@@ -1439,18 +1602,18 @@ void hrtimer_peek_ahead_timers(void)
- __hrtimer_peek_ahead_timers();
- local_irq_restore(flags);
++ if (raise)
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
--
--static void run_hrtimer_softirq(struct softirq_action *h)
--{
-- hrtimer_peek_ahead_timers();
--}
--
- #else /* CONFIG_HIGH_RES_TIMERS */
-
- static inline void __hrtimer_peek_ahead_timers(void) { }
- #endif /* !CONFIG_HIGH_RES_TIMERS */
+-#ifdef CONFIG_HIGH_RES_TIMERS
+-
+ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
-+
-+static void run_hrtimer_softirq(struct softirq_action *h)
-+{
-+ hrtimer_rt_run_pending();
-+}
++#ifdef CONFIG_HIGH_RES_TIMERS
+
/*
- * Called from timer softirq every jiffy, expire hrtimers:
- *
-@@ -1483,7 +1646,7 @@ void hrtimer_run_queues(void)
- struct timerqueue_node *node;
- struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
- struct hrtimer_clock_base *base;
-- int index, gettime = 1;
-+ int index, gettime = 1, raise = 0;
-
- if (hrtimer_hres_active())
- return;
-@@ -1508,12 +1671,16 @@ void hrtimer_run_queues(void)
- hrtimer_get_expires_tv64(timer))
- break;
-
-- __run_hrtimer(timer, &base->softirq_time);
-+ if (!hrtimer_rt_defer(timer))
-+ __run_hrtimer(timer, &base->softirq_time);
-+ else
-+ raise = 1;
- }
- raw_spin_unlock(&cpu_base->lock);
- }
-
+ * High resolution timer interrupt
+ * Called with interrupts disabled
+@@ -1481,8 +1591,6 @@ void hrtimer_run_queues(void)
+ now = hrtimer_update_base(cpu_base);
+ __hrtimer_run_queues(cpu_base, now);
+ raw_spin_unlock(&cpu_base->lock);
+-
- wake_up_timer_waiters(cpu_base);
-+ if (raise)
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
/*
-@@ -1535,6 +1702,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1504,6 +1612,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@@ -422,7 +273,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1671,6 +1839,7 @@ static void init_hrtimers_cpu(int cpu)
+@@ -1638,6 +1747,7 @@ static void init_hrtimers_cpu(int cpu)
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -430,19 +281,31 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
cpu_base->cpu = cpu;
-@@ -1783,9 +1952,7 @@ void __init hrtimers_init(void)
+@@ -1742,11 +1852,21 @@ static struct notifier_block hrtimers_nb
+ .notifier_call = hrtimer_cpu_notify,
+ };
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++static void run_hrtimer_softirq(struct softirq_action *h)
++{
++ hrtimer_rt_run_pending();
++}
++#endif
++
+ void __init hrtimers_init(void)
+ {
hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
(void *)(long)smp_processor_id());
register_cpu_notifier(&hrtimers_nb);
--#ifdef CONFIG_HIGH_RES_TIMERS
- open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
--#endif
++#ifdef CONFIG_PREEMPT_RT_BASE
++ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
++#endif
}
/**
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -1159,6 +1159,7 @@ void tick_setup_sched_timer(void)
+@@ -1105,6 +1105,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -452,7 +315,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/* Get the next period (per cpu) */
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
-@@ -454,6 +454,7 @@ static void watchdog_enable(unsigned int
+@@ -507,6 +507,7 @@ static void watchdog_enable(unsigned int
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
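The shape of the resulting expiry path can be modelled in plain C. In the toy
below every toy_* name is invented for illustration; only the decision
(irqsafe timers run in hard irq context, the rest go onto the per-base
expired list and HRTIMER_SOFTIRQ is raised) follows the hunks above.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the RT expiry split: irqsafe timers run immediately in
 * "hard irq" context, everything else is pushed onto an expired list
 * (the kernel uses list_add_tail on base->expired) and handled by a
 * later "softirq" pass. */
struct toy_timer { bool irqsafe; const char *name; struct toy_timer *next; };

static struct toy_timer *expired;	/* stands in for base->expired */

static bool toy_rt_defer(struct toy_timer *t)
{
	if (t->irqsafe)
		return false;		/* run now, in hard irq context */
	t->next = expired;		/* defer to the softirq pass */
	expired = t;
	return true;			/* caller sets raise = 1 */
}

static void toy_hardirq(struct toy_timer *t)
{
	bool raise = false;

	if (!toy_rt_defer(t))
		printf("%s: ran in hard irq\n", t->name);
	else
		raise = true;

	if (raise)			/* raise_softirq_irqoff(HRTIMER_SOFTIRQ) */
		printf("softirq raised\n");
}

int main(void)
{
	struct toy_timer a = { .irqsafe = true,  .name = "sched_timer" };
	struct toy_timer b = { .irqsafe = false, .name = "user_timer" };

	toy_hardirq(&a);
	toy_hardirq(&b);
	for (; expired; expired = expired->next)	/* softirq pass */
		printf("%s: ran from softirq\n", expired->name);
	return 0;
}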
diff --git a/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch b/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
deleted file mode 100644
index 3b19d7e0e3f64..0000000000000
--- a/patches/hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-Subject: hrtimer: Raise softirq if hrtimer irq stalled
-From: Watanabe <shunsuke.watanabe@tel.com>
-Date: Sun, 28 Oct 2012 11:13:44 +0100
-
-When the hrtimer stall detection hits the softirq is not raised.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- kernel/time/hrtimer.c | 9 ++++-----
- 1 file changed, 4 insertions(+), 5 deletions(-)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1519,11 +1519,7 @@ void hrtimer_interrupt(struct clock_even
- if (expires_next.tv64 == KTIME_MAX ||
- !tick_program_event(expires_next, 0)) {
- cpu_base->hang_detected = 0;
--
-- if (raise)
-- raise_softirq_irqoff(HRTIMER_SOFTIRQ);
--
-- return;
-+ goto out;
- }
-
- /*
-@@ -1567,6 +1563,9 @@ void hrtimer_interrupt(struct clock_even
- tick_program_event(expires_next, 1);
- printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
- ktime_to_ns(delta));
-+out:
-+ if (raise)
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
- }
-
- /*
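The raise-on-stall logic folded back into the callback patch boils down to one
control-flow detail: the early return on the fast path must not skip a pending
softirq raise. A minimal sketch with invented names:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the bug the deleted patch fixed: the early-return path for
 * a successfully programmed event skipped the pending softirq raise.
 * Routing both exits through one label keeps the raise on every path. */
static void toy_interrupt(bool program_ok, bool raise)
{
	if (program_ok) {
		puts("hang_detected = 0");
		goto out;	/* previously a plain return: raise was lost */
	}
	puts("handle stall, reprogram");
out:
	if (raise)
		puts("raise_softirq_irqoff(HRTIMER_SOFTIRQ)");
}

int main(void)
{
	toy_interrupt(true, true);	/* fast path must still raise */
	toy_interrupt(false, true);	/* stall path raises as before */
	return 0;
}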
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index 3edbdde07a7bc..bbcaa28560733 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -9,26 +9,26 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/hrtimer.h | 10 ++++++++++
+ include/linux/hrtimer.h | 12 +++++++++++-
kernel/time/hrtimer.c | 33 ++++++++++++++++++++++++++++++++-
kernel/time/itimer.c | 1 +
kernel/time/posix-timers.c | 33 +++++++++++++++++++++++++++++++++
- 4 files changed, 76 insertions(+), 1 deletion(-)
+ 4 files changed, 77 insertions(+), 2 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -197,6 +197,9 @@ struct hrtimer_cpu_base {
- unsigned long nr_hangs;
- ktime_t max_hang_time;
+@@ -204,6 +204,9 @@ struct hrtimer_cpu_base {
+ unsigned int nr_hangs;
+ unsigned int max_hang_time;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+ wait_queue_head_t wait;
+#endif
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
- };
+ } ____cacheline_aligned;
-@@ -384,6 +387,13 @@ static inline int hrtimer_restart(struct
- return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+@@ -392,6 +395,13 @@ static inline void hrtimer_restart(struc
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
+/* Softirq preemption could deadlock timer removal */
@@ -40,10 +40,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
- extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
+
+@@ -411,7 +421,7 @@ static inline int hrtimer_is_queued(stru
+ * Helper function to check, whether the timer is running the callback
+ * function
+ */
+-static inline int hrtimer_callback_running(struct hrtimer *timer)
++static inline int hrtimer_callback_running(const struct hrtimer *timer)
+ {
+ return timer->base->cpu_base->running == timer;
+ }
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -837,6 +837,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -866,6 +866,32 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -66,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+ if (base && base->cpu_base && !hrtimer_hres_active())
+ wait_event(base->cpu_base->wait,
-+ !(timer->state & HRTIMER_STATE_CALLBACK));
++ !(hrtimer_callback_running(timer)));
+}
+
+#else
@@ -76,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -1099,7 +1125,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1076,7 +1102,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
@@ -85,16 +94,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1486,6 +1512,8 @@ void hrtimer_run_queues(void)
- }
- raw_spin_unlock(&cpu_base->lock);
- }
+@@ -1455,6 +1481,8 @@ void hrtimer_run_queues(void)
+ now = hrtimer_update_base(cpu_base);
+ __hrtimer_run_queues(cpu_base, now);
+ raw_spin_unlock(&cpu_base->lock);
+
+ wake_up_timer_waiters(cpu_base);
}
/*
-@@ -1647,6 +1675,9 @@ static void init_hrtimers_cpu(int cpu)
+@@ -1614,6 +1642,9 @@ static void init_hrtimers_cpu(int cpu)
cpu_base->cpu = cpu;
hrtimer_init_hres(cpu_base);
@@ -116,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
expires = timeval_to_ktime(value->it_value);
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
-@@ -821,6 +821,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
+@@ -828,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
return overrun;
}
@@ -137,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
-@@ -898,6 +912,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
+@@ -905,6 +919,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
if (!timr)
return -EINVAL;
@@ -145,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kc = clockid_to_kclock(timr->it_clock);
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
-@@ -906,9 +921,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
+@@ -913,9 +928,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
@@ -158,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (old_setting && !error &&
copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-@@ -946,10 +964,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
+@@ -953,10 +971,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
if (!timer)
return -EINVAL;
@@ -174,7 +183,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
-@@ -975,8 +998,18 @@ static void itimer_delete(struct k_itime
+@@ -982,8 +1005,18 @@ static void itimer_delete(struct k_itime
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
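The wait_event()-based removal path added here can be modelled with pthreads;
everything below is an invented stand-in, only the sleep-until-callback-done
shape follows the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Toy model of the wait-for-callback scheme: the canceller sleeps on a
 * condition variable until the callback is no longer running, instead
 * of busy-spinning on a "callback running" flag. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;  /* cpu_base->wait */
static bool callback_running;

static void *softirq_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	callback_running = true;
	pthread_mutex_unlock(&lock);

	puts("callback: running (preemptible on RT)");
	usleep(100 * 1000);

	pthread_mutex_lock(&lock);
	callback_running = false;
	pthread_cond_broadcast(&wait_q);	/* wake_up_timer_waiters() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void toy_wait_for_timer(void)
{
	pthread_mutex_lock(&lock);
	while (callback_running)		/* wait_event(..., !running) */
		pthread_cond_wait(&wait_q, &lock);
	pthread_mutex_unlock(&lock);
	puts("cancel: callback finished, safe to remove the timer");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, softirq_thread, NULL);
	usleep(10 * 1000);	/* crude: give the callback time to start */
	toy_wait_for_timer();
	pthread_join(t, NULL);
	return 0;
}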
diff --git a/patches/hwlatdetect.patch b/patches/hwlatdetect.patch
index b77f79ffff7bd..aa24630f492c0 100644
--- a/patches/hwlatdetect.patch
+++ b/patches/hwlatdetect.patch
@@ -122,7 +122,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
depends on PCI
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
-@@ -38,6 +38,7 @@ obj-$(CONFIG_C2PORT) += c2port/
+@@ -39,6 +39,7 @@ obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
diff --git a/patches/i2c-omap-drop-the-lock-hard-irq-context.patch b/patches/i2c-omap-drop-the-lock-hard-irq-context.patch
index d0d9194020682..cf711e0e9237e 100644
--- a/patches/i2c-omap-drop-the-lock-hard-irq-context.patch
+++ b/patches/i2c-omap-drop-the-lock-hard-irq-context.patch
@@ -14,19 +14,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
-@@ -996,15 +996,12 @@ omap_i2c_isr(int irq, void *dev_id)
+@@ -995,15 +995,12 @@ omap_i2c_isr(int irq, void *dev_id)
u16 mask;
u16 stat;
-- spin_lock(&dev->lock);
-- mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
-+ mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
+- spin_lock(&omap->lock);
+- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
+ stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG);
++ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG);
if (stat & mask)
ret = IRQ_WAKE_THREAD;
-- spin_unlock(&dev->lock);
+- spin_unlock(&omap->lock);
-
return ret;
}
diff --git a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index cb7557ee15eff..c3a203e8eb581 100644
--- a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -18,9 +18,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -10088,7 +10088,7 @@ void intel_check_page_flip(struct drm_de
- struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+@@ -11366,7 +11366,7 @@ void intel_check_page_flip(struct drm_de
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
- WARN_ON(!in_interrupt());
+ WARN_ON_NONRT(!in_interrupt());
diff --git a/patches/idr-use-local-lock-for-protection.patch b/patches/idr-use-local-lock-for-protection.patch
index b5fec80070742..32dc36c69e92c 100644
--- a/patches/idr-use-local-lock-for-protection.patch
+++ b/patches/idr-use-local-lock-for-protection.patch
@@ -7,8 +7,8 @@ We need to protect the per cpu variable and prevent migration.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/idr.h | 4 ++++
- lib/idr.c | 36 +++++++++++++++++++++++++++++++++---
- 2 files changed, 37 insertions(+), 3 deletions(-)
+ lib/idr.c | 43 +++++++++++++++++++++++++++++++++++++------
+ 2 files changed, 41 insertions(+), 6 deletions(-)
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -37,9 +37,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
-@@ -366,6 +367,35 @@ static void idr_fill_slot(struct idr *id
- idr_mark_full(pa, id);
- }
+@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *
+ static DEFINE_PER_CPU(int, idr_preload_cnt);
+ static DEFINE_SPINLOCK(simple_ida_lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
@@ -70,19 +70,46 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_enable();
+}
+#endif
++
++
+ /* the maximum ID which can be allocated given idr->layers */
+ static int idr_max(int layers)
+ {
+@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc
+ * context. See idr_preload() for details.
+ */
+ if (!in_interrupt()) {
+- preempt_disable();
++ idr_preload_lock();
+ new = __this_cpu_read(idr_preload_head);
+ if (new) {
+ __this_cpu_write(idr_preload_head, new->ary[0]);
+ __this_cpu_dec(idr_preload_cnt);
+ new->ary[0] = NULL;
+ }
+- preempt_enable();
++ idr_preload_unlock();
+ if (new)
+ return new;
+ }
+@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *id
+ idr_mark_full(pa, id);
+ }
+-
/**
* idr_preload - preload for idr_alloc()
-@@ -401,7 +431,7 @@ void idr_preload(gfp_t gfp_mask)
+ * @gfp_mask: allocation mask to use for preloading
+@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
WARN_ON_ONCE(in_interrupt());
- might_sleep_if(gfp_mask & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(gfp_mask));
- preempt_disable();
+ idr_preload_lock();
/*
* idr_alloc() is likely to succeed w/o full idr_layer buffer and
-@@ -413,9 +443,9 @@ void idr_preload(gfp_t gfp_mask)
+@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
struct idr_layer *new;
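The substitution itself follows a fixed pattern: preempt_disable() around the
per-CPU preload data becomes a per-CPU lock on RT. A stand-alone sketch, with
a plain mutex standing in for DEFINE_LOCAL_IRQ_LOCK()/local_lock():

#include <pthread.h>
#include <stdio.h>

/* Sketch of the local-lock substitution. On non-RT the critical section
 * is protected by disabling preemption; on RT that becomes a per-CPU
 * lock, so the section stays preemptible yet serialized. The macro and
 * mutex below are illustrative stand-ins. */
#define TOY_PREEMPT_RT_FULL 1

static pthread_mutex_t idr_lock = PTHREAD_MUTEX_INITIALIZER; /* per-CPU in reality */
static int idr_preload_cnt;	/* the per-CPU data being protected */

static void idr_preload_lock(void)
{
#if TOY_PREEMPT_RT_FULL
	pthread_mutex_lock(&idr_lock);		/* local_lock(idr_lock) */
#else
	/* preempt_disable(); */
#endif
}

static void idr_preload_unlock(void)
{
#if TOY_PREEMPT_RT_FULL
	pthread_mutex_unlock(&idr_lock);	/* local_unlock(idr_lock) */
#else
	/* preempt_enable(); */
#endif
}

int main(void)
{
	idr_preload_lock();
	idr_preload_cnt++;			/* touch the "per-CPU" state */
	printf("preload_cnt = %d\n", idr_preload_cnt);
	idr_preload_unlock();
	return 0;
}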
diff --git a/patches/infiniband-mellanox-ib-use-nort-irq.patch b/patches/infiniband-mellanox-ib-use-nort-irq.patch
index b282436e6f694..90b28b986e129 100644
--- a/patches/infiniband-mellanox-ib-use-nort-irq.patch
+++ b/patches/infiniband-mellanox-ib-use-nort-irq.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
-@@ -821,7 +821,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -847,7 +847,7 @@ void ipoib_mcast_restart_task(struct wor
ipoib_dbg_mcast(priv, "restarting multicast task\n");
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netif_addr_lock(dev);
spin_lock(&priv->lock);
-@@ -903,7 +903,7 @@ void ipoib_mcast_restart_task(struct wor
+@@ -929,7 +929,7 @@ void ipoib_mcast_restart_task(struct wor
spin_unlock(&priv->lock);
netif_addr_unlock(dev);
diff --git a/patches/inpt-gameport-use-local-irq-nort.patch b/patches/inpt-gameport-use-local-irq-nort.patch
index 97e0e68484971..94f34ce39d7f5 100644
--- a/patches/inpt-gameport-use-local-irq-nort.patch
+++ b/patches/inpt-gameport-use-local-irq-nort.patch
@@ -8,11 +8,27 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- drivers/input/gameport/gameport.c | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
+ drivers/input/gameport/gameport.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
+@@ -91,13 +91,13 @@ static int gameport_measure_speed(struct
+ tx = ~0;
+
+ for (i = 0; i < 50; i++) {
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ t1 = ktime_get_ns();
+ for (t = 0; t < 50; t++)
+ gameport_read(gameport);
+ t2 = ktime_get_ns();
+ t3 = ktime_get_ns();
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ udelay(i * 10);
+ t = (t2 - t1) - (t3 - t2);
+ if (t < tx)
@@ -124,12 +124,12 @@ static int old_gameport_measure_speed(st
tx = 1 << 30;
@@ -34,9 +50,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for(i = 0; i < 50; i++) {
- local_irq_save(flags);
+ local_irq_save_nort(flags);
- rdtscl(t1);
+ t1 = rdtsc();
for (t = 0; t < 50; t++) gameport_read(gameport);
- rdtscl(t2);
+ t2 = rdtsc();
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
udelay(i * 10);
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
index d39d066156e6c..5be4b994c05d1 100644
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ b/patches/introduce_migrate_disable_cpu_light.patch
@@ -34,14 +34,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/preempt.h | 9 +++
include/linux/sched.h | 29 +++++++++-
include/linux/smp.h | 3 +
- kernel/sched/core.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++--
+ kernel/sched/core.c | 132 +++++++++++++++++++++++++++++++++++++++++++++++-
kernel/sched/debug.c | 7 ++
lib/smp_processor_id.c | 5 +
- 7 files changed, 178 insertions(+), 9 deletions(-)
+ 7 files changed, 182 insertions(+), 6 deletions(-)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -221,6 +221,9 @@ static inline void smpboot_thread_init(v
+@@ -222,6 +222,9 @@ static inline void smpboot_thread_init(v
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -154,11 +154,20 @@ do { \
+@@ -257,11 +257,20 @@ do { \
# define preempt_enable_rt() preempt_enable()
# define preempt_disable_nort() barrier()
# define preempt_enable_nort() barrier()
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT_NOTIFIERS
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1371,6 +1371,12 @@ struct task_struct {
+@@ -1413,6 +1413,12 @@ struct task_struct {
#endif
unsigned int policy;
@@ -89,9 +89,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1781,9 +1787,6 @@ struct task_struct {
- int pagefault_disabled;
- };
+@@ -1836,9 +1842,6 @@ extern int arch_task_struct_size __read_
+ # define arch_task_struct_size (sizeof(struct task_struct))
+ #endif
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
-@@ -3066,6 +3069,26 @@ static inline void set_task_cpu(struct t
+@@ -3114,6 +3117,26 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
@@ -140,14 +140,48 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2703,6 +2703,125 @@ static inline void schedule_debug(struct
- schedstat_inc(this_rq(), sched_count);
+@@ -1164,6 +1164,15 @@ void set_cpus_allowed_common(struct task
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
}
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+#define MIGRATE_DISABLE_SET_AFFIN (1<<30) /* Can't make a negative */
+#define migrate_disabled_updated(p) ((p)->migrate_disable & MIGRATE_DISABLE_SET_AFFIN)
+#define migrate_disable_count(p) ((p)->migrate_disable & ~MIGRATE_DISABLE_SET_AFFIN)
++#else
++static inline void update_migrate_disable(struct task_struct *p) { }
++#define migrate_disabled_updated(p) 0
++#endif
++
+ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+ struct rq *rq = task_rq(p);
+@@ -1171,6 +1180,11 @@ void do_set_cpus_allowed(struct task_str
+
+ lockdep_assert_held(&p->pi_lock);
+
++ if (migrate_disabled_updated(p)) {
++ cpumask_copy(&p->cpus_allowed, new_mask);
++ return;
++ }
++
+ queued = task_on_rq_queued(p);
+ running = task_current(rq, p);
+
+@@ -1232,7 +1246,7 @@ static int __set_cpus_allowed_ptr(struct
+ do_set_cpus_allowed(p, new_mask);
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask))
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ goto out;
+
+ dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+@@ -3022,6 +3036,120 @@ static inline void schedule_debug(struct
+ schedstat_inc(this_rq(), sched_count);
+ }
+
++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP)
+
+static inline void update_migrate_disable(struct task_struct *p)
+{
@@ -239,7 +273,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * about locking.
+ */
+ rq = this_rq();
-+ raw_spin_lock_irqsave(&rq->lock, flags);
++ raw_spin_lock_irqsave(&current->pi_lock, flags);
++ raw_spin_lock(&rq->lock);
+
+ /*
+ * Clearing migrate_disable causes tsk_cpus_allowed to
@@ -247,10 +282,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ */
+ p->migrate_disable = 0;
+ mask = tsk_cpus_allowed(p);
-+ if (p->sched_class->set_cpus_allowed)
-+ p->sched_class->set_cpus_allowed(p, mask);
-+ p->nr_cpus_allowed = cpumask_weight(mask);
-+ raw_spin_unlock_irqrestore(&rq->lock, flags);
++ do_set_cpus_allowed(p, mask);
++
++ raw_spin_unlock(&rq->lock);
++ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+ } else
+ p->migrate_disable = 0;
+
@@ -258,52 +293,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_enable();
+}
+EXPORT_SYMBOL(migrate_enable);
-+#else
-+static inline void update_migrate_disable(struct task_struct *p) { }
-+#define migrate_disabled_updated(p) 0
+#endif
+
/*
* Pick up the highest-prio task:
*/
-@@ -2809,6 +2928,8 @@ static void __sched __schedule(void)
- smp_mb__before_spinlock();
+@@ -3137,6 +3265,8 @@ static void __sched notrace __schedule(b
raw_spin_lock_irq(&rq->lock);
+ lockdep_pin_lock(&rq->lock);
+ update_migrate_disable(prev);
+
rq->clock_skip_update <<= 1; /* promote REQ to ACT */
switch_count = &prev->nivcsw;
-@@ -4810,11 +4931,13 @@ static struct rq *move_queued_task(struc
-
- void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
- {
-- if (p->sched_class->set_cpus_allowed)
-- p->sched_class->set_cpus_allowed(p, new_mask);
-+ if (!migrate_disabled_updated(p)) {
-+ if (p->sched_class->set_cpus_allowed)
-+ p->sched_class->set_cpus_allowed(p, new_mask);
-+ p->nr_cpus_allowed = cpumask_weight(new_mask);
-+ }
-
- cpumask_copy(&p->cpus_allowed, new_mask);
-- p->nr_cpus_allowed = cpumask_weight(new_mask);
- }
-
- /*
-@@ -4860,7 +4983,7 @@ int set_cpus_allowed_ptr(struct task_str
- do_set_cpus_allowed(p, new_mask);
-
- /* Can the task run on the task's current CPU? If so, we're done */
-- if (cpumask_test_cpu(task_cpu(p), new_mask))
-+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
- goto out;
-
- dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -260,6 +260,9 @@ void print_rt_rq(struct seq_file *m, int
+@@ -251,6 +251,9 @@ void print_rt_rq(struct seq_file *m, int
P(rt_throttled);
PN(rt_time);
PN(rt_runtime);
@@ -313,7 +319,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#undef PN
#undef P
-@@ -648,6 +651,10 @@ void proc_sched_show_task(struct task_st
+@@ -635,6 +638,10 @@ void proc_sched_show_task(struct task_st
#endif
P(policy);
P(prio);
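The counter handling visible in the hunks nests: only the outermost
migrate_disable() pins the task and only the matching outermost
migrate_enable() restores the affinity mask. A toy sketch with invented
toy_* names:

#include <stdio.h>

/* Toy model of the nesting in migrate_disable()/migrate_enable():
 * only the 0 -> 1 transition pins the task to its CPU and only the
 * final 1 -> 0 transition restores the affinity mask. */
struct toy_task { int migrate_disable; };

static void toy_migrate_disable(struct toy_task *p)
{
	if (p->migrate_disable++ == 0)
		puts("pin to current CPU (affinity reduced to one CPU)");
}

static void toy_migrate_enable(struct toy_task *p)
{
	if (--p->migrate_disable == 0)
		puts("unpin: restore cpus_allowed, migration legal again");
}

int main(void)
{
	struct toy_task t = { 0 };

	toy_migrate_disable(&t);	/* outer level pins */
	toy_migrate_disable(&t);	/* nested level: counter only */
	toy_migrate_enable(&t);
	toy_migrate_enable(&t);		/* outermost enable unpins */
	return 0;
}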
diff --git a/patches/ipc-make-rt-aware.patch b/patches/ipc-make-rt-aware.patch
deleted file mode 100644
index 78f3ed82b8d91..0000000000000
--- a/patches/ipc-make-rt-aware.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From: Ingo Molnar <mingo@elte.hu>
-Date: Fri, 3 Jul 2009 08:30:12 -0500
-Subject: ipc: Make the ipc code -rt aware
-
-RT serializes the code with the (rt)spinlock but keeps preemption
-enabled. Some parts of the code need to be atomic nevertheless.
-
-Protect it with preempt_disable/enable_rt pairts.
-
-Signed-off-by: Ingo Molnar <mingo@elte.hu>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- ipc/msg.c | 16 +++++++++++++++-
- 1 file changed, 15 insertions(+), 1 deletion(-)
-
---- a/ipc/msg.c
-+++ b/ipc/msg.c
-@@ -188,6 +188,12 @@ static void expunge_all(struct msg_queue
- struct msg_receiver *msr, *t;
-
- list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-+ /*
-+ * Make sure that the wakeup doesnt preempt
-+ * this CPU prematurely. (on PREEMPT_RT)
-+ */
-+ preempt_disable_rt();
-+
- msr->r_msg = NULL; /* initialize expunge ordering */
- wake_up_process(msr->r_tsk);
- /*
-@@ -198,6 +204,8 @@ static void expunge_all(struct msg_queue
- */
- smp_mb();
- msr->r_msg = ERR_PTR(res);
-+
-+ preempt_enable_rt();
- }
- }
-
-@@ -574,6 +582,11 @@ static inline int pipelined_send(struct
- if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
- !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
- msr->r_msgtype, msr->r_mode)) {
-+ /*
-+ * Make sure that the wakeup doesnt preempt
-+ * this CPU prematurely. (on PREEMPT_RT)
-+ */
-+ preempt_disable_rt();
-
- list_del(&msr->r_list);
- if (msr->r_maxsize < msg->m_ts) {
-@@ -595,12 +608,13 @@ static inline int pipelined_send(struct
- */
- smp_mb();
- msr->r_msg = msg;
-+ preempt_enable_rt();
-
- return 1;
- }
-+ preempt_enable_rt();
- }
- }
--
- return 0;
- }
-
diff --git a/patches/ipc-msg-Implement-lockless-pipelined-wakeups.patch b/patches/ipc-msg-Implement-lockless-pipelined-wakeups.patch
new file mode 100644
index 0000000000000..be023bfbf596e
--- /dev/null
+++ b/patches/ipc-msg-Implement-lockless-pipelined-wakeups.patch
@@ -0,0 +1,228 @@
+From 9a69dce752915917ecfe06a21f9c826c76f6eb07 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 30 Oct 2015 11:59:07 +0100
+Subject: [PATCH] ipc/msg: Implement lockless pipelined wakeups
+
+This patch moves the wake_up_process() invocation so it is not done under
+the perm->lock by making use of a lockless wake_q. With this change, the
+waiter is woken up once the message has been assigned and it does not
+need to loop on SMP if the message points to NULL. In the signal case we
+still need to check the pointer under the lock to verify the state.
+
+This change also avoids having to introduce preempt_disable() in -RT,
+which would be needed to prevent a busy loop that polls for the
+NULL -> !NULL change if the waiter has a higher priority than the waker.
+
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Manfred Spraul <manfred@colorfullife.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: George Spelvin <linux@horizon.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+
+ ipc/msg.c | 101 +++++++++++++++++---------------------------------------------
+ 1 file changed, 28 insertions(+), 73 deletions(-)
+
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -183,20 +183,14 @@ static void ss_wakeup(struct list_head *
+ }
+ }
+
+-static void expunge_all(struct msg_queue *msq, int res)
++static void expunge_all(struct msg_queue *msq, int res,
++ struct wake_q_head *wake_q)
+ {
+ struct msg_receiver *msr, *t;
+
+ list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
+- msr->r_msg = NULL; /* initialize expunge ordering */
+- wake_up_process(msr->r_tsk);
+- /*
+- * Ensure that the wakeup is visible before setting r_msg as
+- * the receiving end depends on it: either spinning on a nil,
+- * or dealing with -EAGAIN cases. See lockless receive part 1
+- * and 2 in do_msgrcv().
+- */
+- smp_wmb(); /* barrier (B) */
++
++ wake_q_add(wake_q, msr->r_tsk);
+ msr->r_msg = ERR_PTR(res);
+ }
+ }
+@@ -213,11 +207,13 @@ static void freeque(struct ipc_namespace
+ {
+ struct msg_msg *msg, *t;
+ struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
++ WAKE_Q(wake_q);
+
+- expunge_all(msq, -EIDRM);
++ expunge_all(msq, -EIDRM, &wake_q);
+ ss_wakeup(&msq->q_senders, 1);
+ msg_rmid(ns, msq);
+ ipc_unlock_object(&msq->q_perm);
++ wake_up_q(&wake_q);
+ rcu_read_unlock();
+
+ list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
+@@ -342,6 +338,7 @@ static int msgctl_down(struct ipc_namesp
+ struct kern_ipc_perm *ipcp;
+ struct msqid64_ds uninitialized_var(msqid64);
+ struct msg_queue *msq;
++ WAKE_Q(wake_q);
+ int err;
+
+ if (cmd == IPC_SET) {
+@@ -389,7 +386,7 @@ static int msgctl_down(struct ipc_namesp
+ /* sleeping receivers might be excluded by
+ * stricter permissions.
+ */
+- expunge_all(msq, -EAGAIN);
++ expunge_all(msq, -EAGAIN, &wake_q);
+ /* sleeping senders might be able to send
+ * due to a larger queue size.
+ */
+@@ -402,6 +399,7 @@ static int msgctl_down(struct ipc_namesp
+
+ out_unlock0:
+ ipc_unlock_object(&msq->q_perm);
++ wake_up_q(&wake_q);
+ out_unlock1:
+ rcu_read_unlock();
+ out_up:
+@@ -566,7 +564,8 @@ static int testmsg(struct msg_msg *msg,
+ return 0;
+ }
+
+-static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
++static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
++ struct wake_q_head *wake_q)
+ {
+ struct msg_receiver *msr, *t;
+
+@@ -577,27 +576,13 @@ static inline int pipelined_send(struct
+
+ list_del(&msr->r_list);
+ if (msr->r_maxsize < msg->m_ts) {
+- /* initialize pipelined send ordering */
+- msr->r_msg = NULL;
+- wake_up_process(msr->r_tsk);
+- /* barrier (B) see barrier comment below */
+- smp_wmb();
++ wake_q_add(wake_q, msr->r_tsk);
+ msr->r_msg = ERR_PTR(-E2BIG);
+ } else {
+- msr->r_msg = NULL;
+ msq->q_lrpid = task_pid_vnr(msr->r_tsk);
+ msq->q_rtime = get_seconds();
+- wake_up_process(msr->r_tsk);
+- /*
+- * Ensure that the wakeup is visible before
+- * setting r_msg, as the receiving can otherwise
+- * exit - once r_msg is set, the receiver can
+- * continue. See lockless receive part 1 and 2
+- * in do_msgrcv(). Barrier (B).
+- */
+- smp_wmb();
++ wake_q_add(wake_q, msr->r_tsk);
+ msr->r_msg = msg;
+-
+ return 1;
+ }
+ }
+@@ -613,6 +598,7 @@ long do_msgsnd(int msqid, long mtype, vo
+ struct msg_msg *msg;
+ int err;
+ struct ipc_namespace *ns;
++ WAKE_Q(wake_q);
+
+ ns = current->nsproxy->ipc_ns;
+
+@@ -698,7 +684,7 @@ long do_msgsnd(int msqid, long mtype, vo
+ msq->q_lspid = task_tgid_vnr(current);
+ msq->q_stime = get_seconds();
+
+- if (!pipelined_send(msq, msg)) {
++ if (!pipelined_send(msq, msg, &wake_q)) {
+ /* no one is waiting for this message, enqueue it */
+ list_add_tail(&msg->m_list, &msq->q_messages);
+ msq->q_cbytes += msgsz;
+@@ -712,6 +698,7 @@ long do_msgsnd(int msqid, long mtype, vo
+
+ out_unlock0:
+ ipc_unlock_object(&msq->q_perm);
++ wake_up_q(&wake_q);
+ out_unlock1:
+ rcu_read_unlock();
+ if (msg != NULL)
+@@ -932,57 +919,25 @@ long do_msgrcv(int msqid, void __user *b
+ rcu_read_lock();
+
+ /* Lockless receive, part 2:
+- * Wait until pipelined_send or expunge_all are outside of
+- * wake_up_process(). There is a race with exit(), see
+- * ipc/mqueue.c for the details. The correct serialization
+- * ensures that a receiver cannot continue without the wakeup
+- * being visibible _before_ setting r_msg:
++ * The work in pipelined_send() and expunge_all():
++ * - Set pointer to message
++ * - Queue the receiver task for later wakeup
++ * - Wake up the process after the lock is dropped.
+ *
+- * CPU 0 CPU 1
+- * <loop receiver>
+- * smp_rmb(); (A) <-- pair -. <waker thread>
+- * <load ->r_msg> | msr->r_msg = NULL;
+- * | wake_up_process();
+- * <continue> `------> smp_wmb(); (B)
+- * msr->r_msg = msg;
+- *
+- * Where (A) orders the message value read and where (B) orders
+- * the write to the r_msg -- done in both pipelined_send and
+- * expunge_all.
++ * Should the process wake up before this wakeup (due to a
++ * signal), it will either see the message and continue …
+ */
+- for (;;) {
+- /*
+- * Pairs with writer barrier in pipelined_send
+- * or expunge_all.
+- */
+- smp_rmb(); /* barrier (A) */
+- msg = (struct msg_msg *)msr_d.r_msg;
+- if (msg)
+- break;
+
+- /*
+- * The cpu_relax() call is a compiler barrier
+- * which forces everything in this loop to be
+- * re-loaded.
+- */
+- cpu_relax();
+- }
+-
+- /* Lockless receive, part 3:
+- * If there is a message or an error then accept it without
+- * locking.
+- */
++ msg = (struct msg_msg *)msr_d.r_msg;
+ if (msg != ERR_PTR(-EAGAIN))
+ goto out_unlock1;
+
+- /* Lockless receive, part 3:
+- * Acquire the queue spinlock.
+- */
++ /*
++ * … or see -EAGAIN, acquire the lock to check the message
++ * again.
++ */
+ ipc_lock_object(&msq->q_perm);
+
+- /* Lockless receive, part 4:
+- * Repeat test after acquiring the spinlock.
+- */
+ msg = (struct msg_msg *)msr_d.r_msg;
+ if (msg != ERR_PTR(-EAGAIN))
+ goto out_unlock0;
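The WAKE_Q pattern adopted here is small enough to model stand-alone: wakeups
are collected while the lock is held and issued after it is dropped, so a
woken, possibly higher-priority waiter never contends on q_perm.lock. All
names below are illustrative re-implementations, not the kernel API.

#include <stdio.h>

/* Toy model of the WAKE_Q pattern: record wakeups under the lock,
 * perform them only after the lock is dropped. The tail pointer keeps
 * wake_q_add() O(1) while preserving FIFO order. */
struct toy_task { const char *name; struct toy_task *wake_next; };

struct wake_q { struct toy_task *head, **tail; };

#define WAKE_Q(n) struct wake_q n = { NULL, &n.head }

static void wake_q_add(struct wake_q *q, struct toy_task *t)
{
	t->wake_next = NULL;
	*q->tail = t;
	q->tail = &t->wake_next;
}

static void wake_up_q(struct wake_q *q)
{
	for (struct toy_task *t = q->head; t; t = t->wake_next)
		printf("wake_up_process(%s)\n", t->name);  /* lock already dropped */
}

int main(void)
{
	struct toy_task a = { .name = "receiver-1" };
	struct toy_task b = { .name = "receiver-2" };
	WAKE_Q(wake_q);

	/* ipc_lock_object(&msq->q_perm); */
	wake_q_add(&wake_q, &a);	/* expunge_all()/pipelined_send() */
	wake_q_add(&wake_q, &b);
	/* ipc_unlock_object(&msq->q_perm); */
	wake_up_q(&wake_q);
	return 0;
}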
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index 729cd55447972..b3349045220c7 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -43,28 +43,28 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* it from the spurious interrupt detection
* mechanism and from core side polling.
+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT)
+ * IRQ_DISABLE_UNLAZY - Disable lazy irq disable
*/
enum {
- IRQ_TYPE_NONE = 0x00000000,
-@@ -97,13 +98,14 @@ enum {
- IRQ_NOTHREAD = (1 << 16),
+@@ -99,13 +100,14 @@ enum {
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
-+ IRQ_NO_SOFTIRQ_CALL = (1 << 19),
+ IRQ_DISABLE_UNLAZY = (1 << 19),
++ IRQ_NO_SOFTIRQ_CALL = (1 << 20),
};
#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-- IRQ_IS_POLLED)
-+ IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL)
+- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
++ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL)
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -907,7 +907,15 @@ irq_forced_thread_fn(struct irq_desc *de
+@@ -940,7 +940,15 @@ irq_forced_thread_fn(struct irq_desc *de
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -1357,6 +1365,9 @@ static int
+@@ -1390,6 +1398,9 @@ static int
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
@@ -89,27 +89,27 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ irq_settings_set_no_softirq_call(desc);
+
/* Set default affinity mask once everything is setup */
- setup_affinity(irq, desc, mask);
+ setup_affinity(desc, mask);
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
-@@ -15,6 +15,7 @@ enum {
- _IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
+@@ -16,6 +16,7 @@ enum {
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQ_IS_POLLED = IRQ_IS_POLLED,
+ _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
-@@ -28,6 +29,7 @@ enum {
- #define IRQ_NESTED_THREAD GOT_YOU_MORON
+@@ -30,6 +31,7 @@ enum {
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#define IRQ_IS_POLLED GOT_YOU_MORON
+ #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
-@@ -38,6 +40,16 @@ irq_settings_clr_and_set(struct irq_desc
+@@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
diff --git a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
index 57a542ac3562d..8bd359a81417f 100644
--- a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
+++ b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
@@ -55,7 +55,7 @@ Cc: stable-rt@vger.kernel.org
* Synchronize against the irq_work @entry, ensures the entry is not
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1455,7 +1455,7 @@ void update_process_times(int user_tick)
+@@ -1484,7 +1484,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -64,9 +64,9 @@ Cc: stable-rt@vger.kernel.org
if (in_irq())
irq_work_tick();
#endif
-@@ -1471,9 +1471,7 @@ static void run_timer_softirq(struct sof
-
- hrtimer_run_pending();
+@@ -1498,9 +1498,7 @@ static void run_timer_softirq(struct sof
+ {
+ struct tvec_base *base = this_cpu_ptr(&tvec_bases);
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
- irq_work_tick();
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index b4ffd72b23ed8..fe30b1f69222a 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -90,6 +90,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
+@@ -94,6 +94,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
rt_rq->push_cpu = nr_cpu_ids;
raw_spin_lock_init(&rt_rq->push_lock);
init_irq_work(&rt_rq->push_work, push_irq_work_func);
@@ -165,7 +165,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* sched_clock_tick() needs us? */
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/*
-@@ -227,6 +232,7 @@ static void nohz_full_kick_work_func(str
+@@ -209,6 +214,7 @@ static void nohz_full_kick_work_func(str
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_work_func,
@@ -175,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1455,7 +1455,7 @@ void update_process_times(int user_tick)
+@@ -1484,7 +1484,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -184,9 +184,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (in_irq())
irq_work_tick();
#endif
-@@ -1471,6 +1471,10 @@ static void run_timer_softirq(struct sof
-
- hrtimer_run_pending();
+@@ -1498,6 +1498,10 @@ static void run_timer_softirq(struct sof
+ {
+ struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
+ irq_work_tick();
diff --git a/patches/jump-label-rt.patch b/patches/jump-label-rt.patch
index b5ea36db24b1e..45dec0e56269d 100644
--- a/patches/jump-label-rt.patch
+++ b/patches/jump-label-rt.patch
@@ -24,12 +24,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -31,7 +31,7 @@ config ARM
+@@ -33,7 +33,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
-- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
-+ select HAVE_ARCH_JUMP_LABEL if (!XIP_KERNEL && !PREEMPT_RT_BASE)
- select HAVE_ARCH_KGDB
+- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !PREEMPT_RT_BASE
+ select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_TRACEHOOK
diff --git a/patches/kconfig-disable-a-few-options-rt.patch b/patches/kconfig-disable-a-few-options-rt.patch
index 10df569c51a07..872e2aff7fffe 100644
--- a/patches/kconfig-disable-a-few-options-rt.patch
+++ b/patches/kconfig-disable-a-few-options-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/Kconfig
+++ b/arch/Kconfig
-@@ -6,6 +6,7 @@ config OPROFILE
+@@ -9,6 +9,7 @@ config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
depends on HAVE_OPROFILE
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
help
--- a/mm/Kconfig
+++ b/mm/Kconfig
-@@ -409,7 +409,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+@@ -392,7 +392,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
config TRANSPARENT_HUGEPAGE
bool "Transparent Hugepage Support"
diff --git a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index 421b4966210bb..4bf7247eb49c8 100644
--- a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -108,6 +108,7 @@ struct hotplug_pcp {
+@@ -109,6 +109,7 @@ struct hotplug_pcp {
int refcount;
int grab_lock;
struct completion synced;
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT_RT_FULL
/*
* Note, on PREEMPT_RT, the hotplug lock must save the state of
-@@ -211,6 +212,7 @@ static int sync_unplug_thread(void *data
+@@ -212,6 +213,7 @@ static int sync_unplug_thread(void *data
{
struct hotplug_pcp *hp = data;
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_disable();
hp->unplug = current;
wait_for_pinned_cpus(hp);
-@@ -276,6 +278,14 @@ static void __cpu_unplug_sync(struct hot
+@@ -277,6 +279,14 @@ static void __cpu_unplug_sync(struct hot
wait_for_completion(&hp->synced);
}
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
-@@ -299,6 +309,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -300,6 +310,7 @@ static int cpu_unplug_begin(unsigned int
tell_sched_cpu_down_begin(cpu);
init_completion(&hp->synced);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
if (IS_ERR(hp->sync_tsk)) {
-@@ -314,8 +325,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -315,8 +326,7 @@ static int cpu_unplug_begin(unsigned int
* wait for tasks that are going to enter these sections and
* we must not have them block.
*/
@@ -75,9 +75,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -682,6 +692,7 @@ static int __ref _cpu_down(unsigned int
- #endif
- synchronize_rcu();
+@@ -671,6 +681,7 @@ static int _cpu_down(unsigned int cpu, i
+ else
+ synchronize_rcu();
+ __cpu_unplug_wait(cpu);
smpboot_park_threads(cpu);
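The added completion makes _cpu_down() block until sync_unplug_thread() has
actually started. A pthread sketch of that handshake; the flag and names are
invented, only the complete()/wait_for_completion() ordering follows the
patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the completion: cpu down waits until the sync thread has
 * really started before parking threads, so the kthread cannot be left
 * behind on the dying CPU. The completion is a mutex/condvar pair. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool unplug_started;

static void *sync_unplug_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	unplug_started = true;		/* complete(...) at thread start */
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	puts("sync_unplug: running on the dying CPU");
	return NULL;
}

static void cpu_unplug_wait(void)
{
	pthread_mutex_lock(&lock);
	while (!unplug_started)		/* wait_for_completion(...) */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("_cpu_down: sync thread started, safe to park threads");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sync_unplug_thread, NULL);
	cpu_unplug_wait();
	pthread_join(t, NULL);
	return 0;
}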
diff --git a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index 44da96651dfe0..e8720e3d0e40c 100644
--- a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -640,6 +640,7 @@ static int __ref _cpu_down(unsigned int
+@@ -629,6 +629,7 @@ static int _cpu_down(unsigned int cpu, i
.hcpu = hcpu,
};
cpumask_var_t cpumask;
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -650,6 +651,12 @@ static int __ref _cpu_down(unsigned int
+@@ -639,6 +640,12 @@ static int _cpu_down(unsigned int cpu, i
/* Move the downtaker off the unplug cpu */
if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
-@@ -658,7 +665,8 @@ static int __ref _cpu_down(unsigned int
+@@ -647,7 +654,8 @@ static int _cpu_down(unsigned int cpu, i
if (mycpu == cpu) {
printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
migrate_enable();
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
cpu_hotplug_begin();
-@@ -740,6 +748,9 @@ static int __ref _cpu_down(unsigned int
+@@ -737,6 +745,9 @@ static int _cpu_down(unsigned int cpu, i
cpu_hotplug_done();
if (!err)
cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch
index 77eb4fe7bf596..2cb029728551e 100644
--- a/patches/kgb-serial-hackaround.patch
+++ b/patches/kgb-serial-hackaround.patch
@@ -18,22 +18,22 @@ Thanks,
Jason.
---
- drivers/tty/serial/8250/8250_core.c | 3 ++-
+ drivers/tty/serial/8250/8250_port.c | 3 ++-
include/linux/kdb.h | 2 ++
kernel/debug/kdb/kdb_io.c | 6 ++----
3 files changed, 6 insertions(+), 5 deletions(-)
---- a/drivers/tty/serial/8250/8250_core.c
-+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -36,6 +36,7 @@
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -35,6 +35,7 @@
#include <linux/nmi.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/kdb.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
- #ifdef CONFIG_SPARC
-@@ -3381,7 +3382,7 @@ static void serial8250_console_write(str
+
+@@ -2851,7 +2852,7 @@ void serial8250_console_write(struct uar
if (port->sysrq)
locked = 0;
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index c28447dd6830d..557aaefe80aec 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -16,14 +16,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Documentation/trace/histograms.txt | 186 +++++
include/linux/hrtimer.h | 3
include/linux/sched.h | 6
- include/trace/events/hist.h | 74 ++
+ include/trace/events/hist.h | 72 ++
include/trace/events/latency_hist.h | 29
kernel/time/hrtimer.c | 21
kernel/trace/Kconfig | 104 +++
kernel/trace/Makefile | 4
kernel/trace/latency_hist.c | 1178 ++++++++++++++++++++++++++++++++++++
kernel/trace/trace_irqsoff.c | 11
- 10 files changed, 1616 insertions(+)
+ 10 files changed, 1614 insertions(+)
--- /dev/null
+++ b/Documentation/trace/histograms.txt
@@ -216,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+These data are also reset when the wakeup histogram is reset.
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -111,6 +111,9 @@ struct hrtimer {
+@@ -102,6 +102,9 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
@@ -228,7 +228,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *start_site;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1753,6 +1753,12 @@ struct task_struct {
+@@ -1794,6 +1794,12 @@ struct task_struct {
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
@@ -240,10 +240,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG
- struct memcg_oom_info {
+ struct mem_cgroup *memcg_in_oom;
--- /dev/null
+++ b/include/trace/events/hist.h
-@@ -0,0 +1,74 @@
+@@ -0,0 +1,72 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hist
+
@@ -255,7 +255,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
+#define trace_preemptirqsoff_hist(a, b)
-+#define trace_preemptirqsoff_hist_rcuidle(a, b)
+#else
+TRACE_EVENT(preemptirqsoff_hist,
+
@@ -280,7 +279,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
+#define trace_hrtimer_interrupt(a, b, c, d)
-+#define trace_hrtimer_interrupt_rcuidle(a, b, c, d)
+#else
+TRACE_EVENT(hrtimer_interrupt,
+
@@ -360,7 +358,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "tick-internal.h"
-@@ -966,7 +967,16 @@ int __hrtimer_start_range_ns(struct hrti
+@@ -994,7 +995,16 @@ void hrtimer_start_range_ns(struct hrtim
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
timer_stats_hrtimer_set_start_info(timer);
@@ -375,18 +373,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ }
+#endif
leftmost = enqueue_hrtimer(timer, new_base);
-
- if (!leftmost) {
-@@ -1238,6 +1248,8 @@ static void __run_hrtimer(struct hrtimer
-
- #ifdef CONFIG_HIGH_RES_TIMERS
-
-+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
-+
- /*
- * High resolution timer interrupt
- * Called with interrupts disabled
-@@ -1281,6 +1293,15 @@ void hrtimer_interrupt(struct clock_even
+ if (!leftmost)
+ goto unlock;
+@@ -1275,6 +1285,15 @@ static void __hrtimer_run_queues(struct
timer = container_of(node, struct hrtimer, node);
@@ -402,6 +391,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The immediate goal for using the softexpires is
* minimizing wakeups, not running timers at the
+@@ -1297,6 +1316,8 @@ static void __hrtimer_run_queues(struct
+
+ #ifdef CONFIG_HIGH_RES_TIMERS
+
++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
++
+ /*
+ * High resolution timer interrupt
+ * Called with interrupts disabled
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -187,6 +187,24 @@ config IRQSOFF_TRACER
@@ -1733,7 +1731,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "trace.h"
-@@ -433,11 +434,13 @@ void start_critical_timings(void)
+@@ -420,11 +421,13 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -1747,23 +1745,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -447,6 +450,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+@@ -434,6 +437,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
-+ trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0);
++ trace_preemptirqsoff_hist(IRQS_ON, 0);
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
-@@ -455,6 +459,7 @@ void time_hardirqs_off(unsigned long a0,
+@@ -442,6 +446,7 @@ void time_hardirqs_off(unsigned long a0,
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
-+ trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1);
++ trace_preemptirqsoff_hist(IRQS_OFF, 1);
}
#else /* !CONFIG_PROVE_LOCKING */
-@@ -480,6 +485,7 @@ inline void print_irqtrace_events(struct
+@@ -467,6 +472,7 @@ inline void print_irqtrace_events(struct
*/
void trace_hardirqs_on(void)
{
@@ -1771,7 +1769,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -489,11 +495,13 @@ void trace_hardirqs_off(void)
+@@ -476,11 +482,13 @@ void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -1785,7 +1783,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
-@@ -503,6 +511,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
+@@ -490,6 +498,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -1793,7 +1791,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
-@@ -512,12 +521,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
+@@ -499,12 +508,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
diff --git a/patches/lglocks-rt.patch b/patches/lglocks-rt.patch
index 2998b6da43963..b9f2e05f920f2 100644
--- a/patches/lglocks-rt.patch
+++ b/patches/lglocks-rt.patch
@@ -11,20 +11,20 @@ owner and boost if needed.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/lglock.h | 21 ++++++++++++++++--
- kernel/locking/lglock.c | 54 ++++++++++++++++++++++++++++++++----------------
- 2 files changed, 55 insertions(+), 20 deletions(-)
+ include/linux/lglock.h | 18 +++++++++++++
+ kernel/locking/lglock.c | 62 ++++++++++++++++++++++++++++++------------------
+ 2 files changed, 58 insertions(+), 22 deletions(-)
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
-@@ -34,22 +34,39 @@
+@@ -34,13 +34,30 @@
#endif
struct lglock {
-+#ifndef CONFIG_PREEMPT_RT_FULL
- arch_spinlock_t __percpu *lock;
-+#else
++#ifdef CONFIG_PREEMPT_RT_FULL
+ struct rt_mutex __percpu *lock;
++#else
+ arch_spinlock_t __percpu *lock;
+#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lock_key;
@@ -32,20 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
};
--#define DEFINE_LGLOCK(name) \
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+# define DEFINE_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
- struct lglock name = { .lock = &name ## _lock }
-
--#define DEFINE_STATIC_LGLOCK(name) \
-+# define DEFINE_STATIC_LGLOCK(name) \
- static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
- = __ARCH_SPIN_LOCK_UNLOCKED; \
- static struct lglock name = { .lock = &name ## _lock }
-+#else
-+
++#ifdef CONFIG_PREEMPT_RT_FULL
+# define DEFINE_LGLOCK(name) \
+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
+ = __RT_MUTEX_INITIALIZER( name ## _lock); \
@@ -55,10 +42,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \
+ = __RT_MUTEX_INITIALIZER( name ## _lock); \
+ static struct lglock name = { .lock = &name ## _lock }
++
++#else
++
+ #define DEFINE_LGLOCK(name) \
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+@@ -50,6 +67,7 @@ struct lglock {
+ static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
+ = __ARCH_SPIN_LOCK_UNLOCKED; \
+ static struct lglock name = { .lock = &name ## _lock }
+#endif
void lg_lock_init(struct lglock *lg, char *name);
- void lg_local_lock(struct lglock *lg);
+
--- a/kernel/locking/lglock.c
+++ b/kernel/locking/lglock.c
@@ -4,6 +4,15 @@
@@ -150,7 +147,27 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(lg_local_unlock_cpu);
-@@ -64,12 +82,12 @@ void lg_global_lock(struct lglock *lg)
+@@ -70,15 +88,15 @@ void lg_double_lock(struct lglock *lg, i
+
+ preempt_disable();
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+- arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
+- arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
++ lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
++ lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
+ }
+
+ void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
+ {
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
+- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
++ lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
++ lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
+ preempt_enable();
+ }
+
+@@ -86,12 +104,12 @@ void lg_global_lock(struct lglock *lg)
{
int i;
@@ -166,7 +183,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL(lg_global_lock);
-@@ -80,10 +98,10 @@ void lg_global_unlock(struct lglock *lg)
+@@ -102,10 +120,10 @@ void lg_global_unlock(struct lglock *lg)
lock_release(&lg->lock_dep_map, 1, _RET_IP_);
for_each_possible_cpu(i) {
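
Aside, since several hunks above rely on it: under CONFIG_PREEMPT_RT_FULL the
lglock's per-CPU lock is a struct rt_mutex rather than an arch_spinlock_t, and
lg_double_lock()/lg_double_unlock() are rerouted through the same
lg_do_lock()/lg_do_unlock() helpers as the per-CPU paths. A rough sketch of
what such helpers dispatch to (the bodies here are an assumption for
illustration; the patch uses its own RT spinlock primitives):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define lg_do_lock(l)      rt_mutex_lock(l)     /* sleeping, PI-aware */
    # define lg_do_unlock(l)    rt_mutex_unlock(l)
    #else
    # define lg_do_lock(l)      arch_spin_lock(l)    /* raw, spinning */
    # define lg_do_unlock(l)    arch_spin_unlock(l)
    #endif

The point is that lg_global_lock() then takes one sleeping lock per CPU on RT
instead of spinning with preemption disabled.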
diff --git a/patches/list_bl.h-make-list-head-locking-RT-safe.patch b/patches/list_bl.h-make-list-head-locking-RT-safe.patch
index b5c3490b6f2ca..52df640232617 100644
--- a/patches/list_bl.h-make-list-head-locking-RT-safe.patch
+++ b/patches/list_bl.h-make-list-head-locking-RT-safe.patch
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
-@@ -117,12 +127,26 @@ static inline void hlist_bl_del_init(str
+@@ -118,12 +128,26 @@ static inline void hlist_bl_del_init(str
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
diff --git a/patches/localversion.patch b/patches/localversion.patch
index a3b81e783166e..1e8ff31d716fd 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -1,4 +1,4 @@
-Subject: v4.1.15-rt17
+Subject: v4.4-rc6-rt1
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt17
++-rt1
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index 9404763acf4a4..818b66bff1e09 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_IRQSOFF_TRACER) || \
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -3563,6 +3563,7 @@ static void check_flags(unsigned long fl
+@@ -3525,6 +3525,7 @@ static void check_flags(unsigned long fl
}
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3577,6 +3578,7 @@ static void check_flags(unsigned long fl
+@@ -3539,6 +3540,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/patches/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
index 66849317cfb81..f825a7f4911d7 100644
--- a/patches/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
+++ b/patches/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
@@ -16,9 +16,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
-@@ -24,7 +24,6 @@
- #include <linux/module.h>
+@@ -26,7 +26,6 @@
#include <linux/kthread.h>
+ #include <linux/sched/rt.h>
#include <linux/spinlock.h>
-#include <linux/rwlock.h>
#include <linux/mutex.h>
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index f5cf653d319ab..67c66a5ace93b 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -20,7 +20,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -1918,8 +1918,9 @@ static void raid_run_ops(struct stripe_h
+@@ -1929,8 +1929,9 @@ static void raid_run_ops(struct stripe_h
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -31,7 +31,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -1975,7 +1976,8 @@ static void raid_run_ops(struct stripe_h
+@@ -1986,7 +1987,8 @@ static void raid_run_ops(struct stripe_h
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -41,7 +41,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
}
static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
-@@ -6363,6 +6365,7 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6411,6 +6413,7 @@ static int raid5_alloc_percpu(struct r5c
__func__, cpu);
break;
}
@@ -51,7 +51,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
-@@ -495,6 +495,7 @@ struct r5conf {
+@@ -504,6 +504,7 @@ struct r5conf {
int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch
index 64bc2829ced98..fd7d32e663797 100644
--- a/patches/mips-disable-highmem-on-rt.patch
+++ b/patches/mips-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2366,7 +2366,7 @@ config CPU_R4400_WORKAROUNDS
+@@ -2409,7 +2409,7 @@ config CPU_R4400_WORKAROUNDS
#
config HIGHMEM
bool "High Memory Support"
diff --git a/patches/mm-bounce-local-irq-save-nort.patch b/patches/mm-bounce-local-irq-save-nort.patch
index fb0f5b2b573a8..de36519d141db 100644
--- a/patches/mm-bounce-local-irq-save-nort.patch
+++ b/patches/mm-bounce-local-irq-save-nort.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/block/bounce.c
+++ b/block/bounce.c
-@@ -54,11 +54,11 @@ static void bounce_copy_vec(struct bio_v
+@@ -55,11 +55,11 @@ static void bounce_copy_vec(struct bio_v
unsigned long flags;
unsigned char *vto;
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index aec16dd380bad..d880226f03963 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -14,15 +14,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -32,6 +32,7 @@
+@@ -31,6 +31,7 @@
+ #include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
- #include <linux/hugetlb.h>
+#include <linux/locallock.h>
+ #include <linux/hugetlb.h>
+ #include <linux/page_idle.h>
- #include "internal.h"
-
-@@ -45,6 +46,9 @@ static DEFINE_PER_CPU(struct pagevec, lr
+@@ -46,6 +47,9 @@ static DEFINE_PER_CPU(struct pagevec, lr
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -628,13 +633,13 @@ EXPORT_SYMBOL(mark_page_accessed);
+@@ -630,13 +635,13 @@ EXPORT_SYMBOL(mark_page_accessed);
static void __lru_cache_add(struct page *page)
{
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -814,9 +819,9 @@ void lru_add_drain_cpu(int cpu)
+@@ -816,9 +821,9 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -844,18 +849,19 @@ void deactivate_file_page(struct page *p
+@@ -846,18 +851,19 @@ void deactivate_file_page(struct page *p
return;
if (likely(get_page_unless_zero(page))) {
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index bbeab6257d117..d2ad7c35e9643 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1688,6 +1688,7 @@ choice
+@@ -1719,6 +1719,7 @@ choice
config SLAB
bool "SLAB"
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
-@@ -1706,6 +1707,7 @@ config SLUB
+@@ -1737,6 +1738,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch
index 3a1e45bb4c687..4d3a46dd9db19 100644
--- a/patches/mm-enable-slub.patch
+++ b/patches/mm-enable-slub.patch
@@ -7,13 +7,13 @@ move the freeing out of the lock held region.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- mm/slab.h | 4 ++
- mm/slub.c | 118 +++++++++++++++++++++++++++++++++++++++++++++++---------------
- 2 files changed, 95 insertions(+), 27 deletions(-)
+ mm/slab.h | 4 +
+ mm/slub.c | 125 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
+ 2 files changed, 102 insertions(+), 27 deletions(-)
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -330,7 +330,11 @@ static inline struct kmem_cache *cache_f
+@@ -324,7 +324,11 @@ static inline struct kmem_cache *cache_f
* The slab lists for all objects.
*/
struct kmem_cache_node {
@@ -27,16 +27,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct list_head slabs_partial; /* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1069,7 +1069,7 @@ static noinline struct kmem_cache_node *
- {
- struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+@@ -1075,7 +1075,7 @@ static noinline struct kmem_cache_node *
+ void *object = head;
+ int cnt = 0;
- spin_lock_irqsave(&n->list_lock, *flags);
+ raw_spin_lock_irqsave(&n->list_lock, *flags);
slab_lock(page);
if (!check_slab(s, page))
-@@ -1116,7 +1116,7 @@ static noinline struct kmem_cache_node *
+@@ -1136,7 +1136,7 @@ static noinline struct kmem_cache_node *
fail:
slab_unlock(page);
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
slab_fix(s, "Object at 0x%p not freed", object);
return NULL;
}
-@@ -1242,6 +1242,12 @@ static inline void dec_slabs_node(struct
+@@ -1263,6 +1263,12 @@ static inline void dec_slabs_node(struct
#endif /* CONFIG_SLUB_DEBUG */
@@ -58,32 +58,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1352,7 +1358,11 @@ static struct page *allocate_slab(struct
+@@ -1402,7 +1408,11 @@ static struct page *allocate_slab(struct
flags &= gfp_allowed_mask;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (system_state == SYSTEM_RUNNING)
+#else
- if (flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(flags))
+#endif
local_irq_enable();
flags |= s->allocflags;
-@@ -1421,7 +1431,11 @@ static struct page *allocate_slab(struct
+@@ -1473,7 +1483,11 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
+#ifdef CONFIG_PREEMPT_RT_FULL
+ if (system_state == SYSTEM_RUNNING)
+#else
- if (flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(flags))
+#endif
local_irq_disable();
if (!page)
return NULL;
-@@ -1478,6 +1492,16 @@ static void __free_slab(struct kmem_cach
- memcg_uncharge_slab(s, order);
+@@ -1529,6 +1543,16 @@ static void __free_slab(struct kmem_cach
+ __free_kmem_pages(page, order);
}
+static void free_delayed(struct list_head *h)
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1512,6 +1536,12 @@ static void free_slab(struct kmem_cache
+@@ -1560,6 +1584,12 @@ static void free_slab(struct kmem_cache
}
call_rcu(head, rcu_free_slab);
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else
__free_slab(s, page);
}
-@@ -1625,7 +1655,7 @@ static void *get_partial_node(struct kme
+@@ -1673,7 +1703,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1650,7 +1680,7 @@ static void *get_partial_node(struct kme
+@@ -1698,7 +1728,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return object;
}
-@@ -1896,7 +1926,7 @@ static void deactivate_slab(struct kmem_
+@@ -1944,7 +1974,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
m = M_FULL;
-@@ -1907,7 +1937,7 @@ static void deactivate_slab(struct kmem_
+@@ -1955,7 +1985,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -1942,7 +1972,7 @@ static void deactivate_slab(struct kmem_
+@@ -1990,7 +2020,7 @@ static void deactivate_slab(struct kmem_
goto redo;
if (lock)
@@ -157,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -1974,10 +2004,10 @@ static void unfreeze_partials(struct kme
+@@ -2022,10 +2052,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
do {
-@@ -2006,7 +2036,7 @@ static void unfreeze_partials(struct kme
+@@ -2054,7 +2084,7 @@ static void unfreeze_partials(struct kme
}
if (n)
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (discard_page) {
page = discard_page;
-@@ -2045,14 +2075,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2093,14 +2123,21 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -201,7 +201,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2124,7 +2161,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2172,7 +2209,22 @@ static bool has_cpu_slab(int cpu, void *
static void flush_all(struct kmem_cache *s)
{
@@ -224,7 +224,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2160,10 +2212,10 @@ static unsigned long count_partial(struc
+@@ -2208,10 +2260,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
@@ -237,53 +237,61 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2300,9 +2352,11 @@ static inline void *get_freelist(struct
- static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- unsigned long addr, struct kmem_cache_cpu *c)
+@@ -2349,8 +2401,10 @@ static inline void *get_freelist(struct
+ * already disabled (which is the case for bulk allocation).
+ */
+ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
+- unsigned long addr, struct kmem_cache_cpu *c)
++ unsigned long addr, struct kmem_cache_cpu *c,
++ struct list_head *to_free)
{
+ struct slub_free_list *f;
void *freelist;
struct page *page;
- unsigned long flags;
-+ LIST_HEAD(tofree);
- local_irq_save(flags);
- #ifdef CONFIG_PREEMPT
-@@ -2370,7 +2424,13 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2410,6 +2464,13 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
++
+out:
+ f = this_cpu_ptr(&slub_free_list);
+ raw_spin_lock(&f->lock);
-+ list_splice_init(&f->list, &tofree);
++ list_splice_init(&f->list, to_free);
+ raw_spin_unlock(&f->lock);
- local_irq_restore(flags);
-+ free_delayed(&tofree);
++
return freelist;
new_slab:
-@@ -2387,8 +2447,7 @@ static void *__slab_alloc(struct kmem_ca
-
- if (unlikely(!freelist)) {
- slab_out_of_memory(s, gfpflags, node);
-- local_irq_restore(flags);
-- return NULL;
-+ goto out;
- }
-
- page = c->page;
-@@ -2403,8 +2462,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2441,7 +2502,7 @@ static void *___slab_alloc(struct kmem_c
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
-- local_irq_restore(flags);
- return freelist;
+ goto out;
}
/*
-@@ -2588,7 +2646,7 @@ static void __slab_free(struct kmem_cach
+@@ -2453,6 +2514,7 @@ static void *__slab_alloc(struct kmem_ca
+ {
+ void *p;
+ unsigned long flags;
++ LIST_HEAD(tofree);
+
+ local_irq_save(flags);
+ #ifdef CONFIG_PREEMPT
+@@ -2464,8 +2526,9 @@ static void *__slab_alloc(struct kmem_ca
+ c = this_cpu_ptr(s->cpu_slab);
+ #endif
+
+- p = ___slab_alloc(s, gfpflags, node, addr, c);
++ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
+ local_irq_restore(flags);
++ free_delayed(&tofree);
+ return p;
+ }
+
+@@ -2652,7 +2715,7 @@ static void __slab_free(struct kmem_cach
do {
if (unlikely(n)) {
@@ -292,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
n = NULL;
}
prior = page->freelist;
-@@ -2620,7 +2678,7 @@ static void __slab_free(struct kmem_cach
+@@ -2684,7 +2747,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -301,7 +309,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -2662,7 +2720,7 @@ static void __slab_free(struct kmem_cach
+@@ -2726,7 +2789,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -310,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
slab_empty:
-@@ -2677,7 +2735,7 @@ static void __slab_free(struct kmem_cach
+@@ -2741,7 +2804,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}
@@ -319,7 +327,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -2876,7 +2934,7 @@ static void
+@@ -2913,6 +2976,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+ void **p)
+ {
+ struct kmem_cache_cpu *c;
++ LIST_HEAD(to_free);
+ int i;
+
+ /* memcg and kmem_cache debug support */
+@@ -2936,7 +3000,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+ * of re-populating per CPU c->freelist
+ */
+ p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
+- _RET_IP_, c);
++ _RET_IP_, c, &to_free);
+ if (unlikely(!p[i]))
+ goto error;
+
+@@ -2948,6 +3012,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+ }
+ c->tid = next_tid(c->tid);
+ local_irq_enable();
++ free_delayed(&to_free);
+
+ /* Clear memory outside IRQ disabled fastpath loop */
+ if (unlikely(flags & __GFP_ZERO)) {
+@@ -3095,7 +3160,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -328,7 +361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3458,7 +3516,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3677,7 +3742,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -337,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -3489,7 +3547,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3708,7 +3773,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -346,7 +379,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -3665,6 +3723,12 @@ void __init kmem_cache_init(void)
+@@ -3884,6 +3949,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
@@ -359,7 +392,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (debug_guardpage_minorder())
slub_max_order = 0;
-@@ -3907,7 +3971,7 @@ static int validate_slab_node(struct kme
+@@ -4127,7 +4198,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
@@ -368,7 +401,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -3929,7 +3993,7 @@ static int validate_slab_node(struct kme
+@@ -4149,7 +4220,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -377,7 +410,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return count;
}
-@@ -4117,12 +4181,12 @@ static int list_locations(struct kmem_ca
+@@ -4337,12 +4408,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
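
The recurring shape in the slub hunks above: ___slab_alloc() gains a to_free
list so that slab pages which become free while interrupts are disabled are
only handed back to the page allocator after local_irq_restore(), where
taking sleeping locks is legal on RT. The caller side, condensed (a sketch
mirroring the __slab_alloc() hunk, not a drop-in replacement):

    static void *slab_alloc_slow(struct kmem_cache *s, gfp_t gfpflags,
                                 int node, unsigned long addr)
    {
            LIST_HEAD(tofree);
            unsigned long flags;
            void *p;

            local_irq_save(flags);
            p = ___slab_alloc(s, gfpflags, node, addr,
                              this_cpu_ptr(s->cpu_slab), &tofree);
            local_irq_restore(flags);
            free_delayed(&tofree);  /* frees may sleep on RT: do them here */
            return p;
    }

kmem_cache_alloc_bulk() follows the same pattern with its own to_free list,
as the last hunks show.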
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 1c6864305d47f..a1c70c329121e 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2147,7 +2147,7 @@ static void drain_all_stock(struct mem_c
+@@ -1933,7 +1933,7 @@ static void drain_all_stock(struct mem_c
return;
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -2164,7 +2164,7 @@ static void drain_all_stock(struct mem_c
+@@ -1950,7 +1950,7 @@ static void drain_all_stock(struct mem_c
schedule_work_on(cpu, &stock->work);
}
}
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index ea16c08471729..1ac923195c74e 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -9,9 +9,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/swap.h | 1 +
mm/compaction.c | 6 ++++--
- mm/memcontrol.c | 18 ++++++++++++------
+ mm/memcontrol.c | 20 ++++++++++++++------
mm/swap.c | 2 +-
- 4 files changed, 18 insertions(+), 9 deletions(-)
+ 4 files changed, 20 insertions(+), 9 deletions(-)
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -25,10 +25,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void lru_cache_add_file(struct page *page);
--- a/mm/compaction.c
+++ b/mm/compaction.c
-@@ -1406,10 +1406,12 @@ static int compact_zone(struct zone *zon
+@@ -1443,10 +1443,12 @@ static int compact_zone(struct zone *zon
cc->migrate_pfn & ~((1UL << cc->order) - 1);
- if (last_migrated_pfn < current_block_start) {
+ if (cc->last_migrated_pfn < current_block_start) {
- cpu = get_cpu();
+ cpu = get_cpu_light();
+ local_lock_irq(swapvec_lock);
@@ -38,11 +38,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- put_cpu();
+ put_cpu_light();
/* No more flushing until we migrate again */
- last_migrated_pfn = 0;
+ cc->last_migrated_pfn = 0;
}
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -66,6 +66,8 @@
+@@ -67,6 +67,8 @@
#include <net/sock.h>
#include <net/ip.h>
#include <net/tcp_memcontrol.h>
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "slab.h"
#include <asm/uaccess.h>
-@@ -85,6 +87,7 @@ int do_swap_account __read_mostly;
+@@ -87,6 +89,7 @@ int do_swap_account __read_mostly;
#define do_swap_account 0
#endif
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static const char * const mem_cgroup_stat_names[] = {
"cache",
"rss",
-@@ -4802,12 +4805,12 @@ static int mem_cgroup_move_account(struc
+@@ -4584,12 +4587,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5544,10 +5547,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5341,10 +5344,10 @@ void mem_cgroup_commit_charge(struct pag
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
}
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_swap_account && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5603,14 +5606,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5400,14 +5403,14 @@ static void uncharge_batch(struct mem_cg
memcg_oom_recover(memcg);
}
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5814,6 +5817,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5599,6 +5602,7 @@ void mem_cgroup_swapout(struct page *pag
{
struct mem_cgroup *memcg;
unsigned short oldid;
@@ -112,12 +112,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5836,9 +5840,11 @@ void mem_cgroup_swapout(struct page *pag
- if (!mem_cgroup_is_root(memcg))
- page_counter_uncharge(&memcg->memory, 1);
-
+@@ -5627,9 +5631,13 @@ void mem_cgroup_swapout(struct page *pag
+ * important here to have the interrupts disabled because it is the
+ * only synchronisation we have for udpating the per-CPU variables.
+ */
+ local_lock_irqsave(event_lock, flags);
- /* Caller disabled preemption with mapping->tree_lock */
++#ifndef CONFIG_PREEMPT_RT_BASE
+ VM_BUG_ON(!irqs_disabled());
++#endif
mem_cgroup_charge_statistics(memcg, page, -1);
memcg_check_events(memcg, page);
+ local_unlock_irqrestore(event_lock, flags);
@@ -126,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -47,7 +47,7 @@ static DEFINE_PER_CPU(struct pagevec, lr
+@@ -48,7 +48,7 @@ static DEFINE_PER_CPU(struct pagevec, lr
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_LOCAL_IRQ_LOCK(rotate_lock);
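
The event_lock added above is the standard RT local-lock idiom: on a non-RT
kernel local_lock_irqsave() compiles down to local_irq_save(), while on RT it
takes a per-CPU sleeping lock, so the protected per-CPU statistics update
stays preemptible. In use it reads like the hunk above (sketch):

    static DEFINE_LOCAL_IRQ_LOCK(event_lock);

    unsigned long flags;

    local_lock_irqsave(event_lock, flags);    /* !RT: local_irq_save() */
    mem_cgroup_charge_statistics(memcg, page, -1);
    memcg_check_events(memcg, page);
    local_unlock_irqrestore(event_lock, flags);

The VM_BUG_ON(!irqs_disabled()) is compiled out on RT because with a sleeping
lock interrupts legitimately stay enabled.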
diff --git a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
index f3205d67aef3f..8465350896d4e 100644
--- a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -238,9 +238,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+@@ -269,9 +269,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
#ifdef CONFIG_PREEMPT_RT_BASE
# define cpu_lock_irqsave(cpu, flags) \
diff --git a/patches/mm-page_alloc-reduce-lock-sections-further.patch b/patches/mm-page_alloc-reduce-lock-sections-further.patch
index 1c3d915db4d0f..3b1ae808485e9 100644
--- a/patches/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/patches/mm-page_alloc-reduce-lock-sections-further.patch
@@ -8,12 +8,12 @@ call free_pages_bulk() outside of the percpu page allocator locks.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- mm/page_alloc.c | 85 ++++++++++++++++++++++++++++++++++++++++----------------
- 1 file changed, 61 insertions(+), 24 deletions(-)
+ mm/page_alloc.c | 89 +++++++++++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 63 insertions(+), 26 deletions(-)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -694,7 +694,7 @@ static inline int free_pages_check(struc
+@@ -777,7 +777,7 @@ static inline int free_pages_check(struc
}
/*
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -705,18 +705,51 @@ static inline int free_pages_check(struc
+@@ -788,18 +788,53 @@ static inline int free_pages_check(struc
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -49,11 +49,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ /* must delete as __free_one_page list manipulates */
+ list_del(&page->lru);
+
-+ mt = get_freepage_migratetype(page);
++ mt = get_pcppage_migratetype(page);
++ /* MIGRATE_ISOLATE page should not go to pcplists */
++ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
++ /* Pageblock could have been isolated meanwhile */
+ if (unlikely(has_isolate_pageblock(zone)))
+ mt = get_pageblock_migratetype(page);
+
-+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
+ trace_mm_page_pcpu_drain(page, 0, mt);
+ to_free--;
@@ -78,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (to_free) {
struct page *page;
struct list_head *list;
-@@ -732,7 +765,7 @@ static void free_pcppages_bulk(struct zo
+@@ -815,7 +850,7 @@ static void free_pcppages_bulk(struct zo
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
@@ -87,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -740,21 +773,11 @@ static void free_pcppages_bulk(struct zo
+@@ -823,24 +858,12 @@ static void free_pcppages_bulk(struct zo
batch_free = to_free;
do {
@@ -97,11 +99,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- /* must delete as __free_one_page list manipulates */
+ page = list_last_entry(list, struct page, lru);
list_del(&page->lru);
-- mt = get_freepage_migratetype(page);
+
+- mt = get_pcppage_migratetype(page);
+- /* MIGRATE_ISOLATE page should not go to pcplists */
+- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+- /* Pageblock could have been isolated meanwhile */
- if (unlikely(has_isolate_pageblock(zone)))
- mt = get_pageblock_migratetype(page);
-
-- /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
- __free_one_page(page, page_to_pfn(page), zone, 0, mt);
- trace_mm_page_pcpu_drain(page, 0, mt);
+ list_add(&page->lru, dst);
@@ -111,7 +116,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -763,7 +786,9 @@ static void free_one_page(struct zone *z
+@@ -849,7 +872,9 @@ static void free_one_page(struct zone *z
int migratetype)
{
unsigned long nr_scanned;
@@ -122,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
if (nr_scanned)
__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
-@@ -773,7 +798,7 @@ static void free_one_page(struct zone *z
+@@ -859,7 +884,7 @@ static void free_one_page(struct zone *z
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype);
@@ -131,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int free_tail_pages_check(struct page *head_page, struct page *page)
-@@ -1384,16 +1409,18 @@ static int rmqueue_bulk(struct zone *zon
+@@ -1870,16 +1895,18 @@ static int rmqueue_bulk(struct zone *zon
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -151,7 +156,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -1409,16 +1436,21 @@ static void drain_pages_zone(unsigned in
+@@ -1895,16 +1922,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -175,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1596,8 +1628,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2082,8 +2114,13 @@ void free_hot_cold_page(struct page *pag
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 01237e5369708..31ecdd4469da6 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -23,9 +23,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/sched/rt.h>
+#include <linux/locallock.h>
#include <linux/page_owner.h>
+ #include <linux/kthread.h>
- #include <asm/sections.h>
-@@ -233,6 +234,18 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -264,6 +265,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -43,22 +43,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
int page_group_by_mobility_disabled __read_mostly;
- void set_pageblock_migratetype(struct page *page, int migratetype)
-@@ -825,11 +838,11 @@ static void __free_pages_ok(struct page
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+@@ -997,10 +1010,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
__count_vm_events(PGFREE, 1 << order);
- set_freepage_migratetype(page, migratetype);
free_one_page(page_zone(page), page, pfn, order, migratetype);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
}
- void __init __free_pages_bootmem(struct page *page, unsigned int order)
-@@ -1373,14 +1386,14 @@ void drain_zone_pages(struct zone *zone,
+ static void __init __free_pages_boot_core(struct page *page,
+@@ -1859,14 +1872,14 @@ void drain_zone_pages(struct zone *zone,
unsigned long flags;
int to_drain, batch;
@@ -75,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -1397,7 +1410,7 @@ static void drain_pages_zone(unsigned in
+@@ -1883,7 +1896,7 @@ static void drain_pages_zone(unsigned in
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -84,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -1405,7 +1418,7 @@ static void drain_pages_zone(unsigned in
+@@ -1891,7 +1904,7 @@ static void drain_pages_zone(unsigned in
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
@@ -93,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1491,8 +1504,17 @@ void drain_all_pages(struct zone *zone)
+@@ -1977,8 +1990,17 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -111,16 +110,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#ifdef CONFIG_HIBERNATION
-@@ -1548,7 +1570,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2034,7 +2056,7 @@ void free_hot_cold_page(struct page *pag
migratetype = get_pfnblock_migratetype(page, pfn);
- set_freepage_migratetype(page, migratetype);
+ set_pcppage_migratetype(page, migratetype);
- local_irq_save(flags);
+ local_lock_irqsave(pa_lock, flags);
__count_vm_event(PGFREE);
/*
-@@ -1579,7 +1601,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2065,7 +2087,7 @@ void free_hot_cold_page(struct page *pag
}
out:
@@ -129,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1710,7 +1732,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2200,7 +2222,7 @@ struct page *buffered_rmqueue(struct zon
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -138,13 +137,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
-@@ -1742,13 +1764,15 @@ struct page *buffered_rmqueue(struct zon
+@@ -2232,7 +2254,7 @@ struct page *buffered_rmqueue(struct zon
*/
WARN_ON_ONCE(order > 1);
}
- spin_lock_irqsave(&zone->lock, flags);
+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
- page = __rmqueue(zone, order, migratetype);
+
+ page = NULL;
+ if (alloc_flags & ALLOC_HARDER) {
+@@ -2242,11 +2264,13 @@ struct page *buffered_rmqueue(struct zon
+ }
+ if (!page)
+ page = __rmqueue(zone, order, migratetype, gfp_flags);
- spin_unlock(&zone->lock);
- if (!page)
+ if (!page) {
@@ -152,12 +157,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto failed;
+ }
__mod_zone_freepage_state(zone, -(1 << order),
- get_freepage_migratetype(page));
+ get_pcppage_migratetype(page));
+ spin_unlock(&zone->lock);
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-@@ -1758,13 +1782,13 @@ struct page *buffered_rmqueue(struct zon
+@@ -2256,13 +2280,13 @@ struct page *buffered_rmqueue(struct zon
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
@@ -173,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -5653,6 +5677,7 @@ static int page_alloc_cpu_notify(struct
+@@ -5928,6 +5952,7 @@ static int page_alloc_cpu_notify(struct
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -181,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -6547,7 +6572,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -6822,7 +6847,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -190,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -6556,7 +6581,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -6831,7 +6856,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/mm-protect-activate-switch-mm.patch b/patches/mm-protect-activate-switch-mm.patch
index 383e79fdb2651..01118d85e11dd 100644
--- a/patches/mm-protect-activate-switch-mm.patch
+++ b/patches/mm-protect-activate-switch-mm.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -859,12 +859,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -865,12 +865,14 @@ static int exec_mmap(struct mm_struct *m
}
}
task_lock(tsk);
diff --git a/patches/mm-rmap-retry-lock-check-in-anon_vma_free.patch_vma_free.patch b/patches/mm-rmap-retry-lock-check-in-anon_vma_free.patch_vma_free.patch
new file mode 100644
index 0000000000000..4ea33c7c6bd0b
--- /dev/null
+++ b/patches/mm-rmap-retry-lock-check-in-anon_vma_free.patch_vma_free.patch
@@ -0,0 +1,52 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 1 Dec 2015 17:57:02 +0100
+Subject: mm/rmap: retry lock check in anon_vma_free()
+
+anon_vma_free() checks if the rwsem is locked and if so performs a
+rw lock + unlock operation. It seems the purpose is to flush the current
+reader out.
+From testing it seems that after the anon_vma_unlock_write() the
+rt_mutex's owner field still has the waiter bit set. It does not seem
+right to leave and kfree() that memory while there is still a waiter
+on that lock. The msleep() is there in case the anon_vma_free() caller
+has the highest priority and the waiter never gets scheduled.
+
+XXX
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/rmap.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -89,8 +89,10 @@ static inline struct anon_vma *anon_vma_
+ return anon_vma;
+ }
+
+-static inline void anon_vma_free(struct anon_vma *anon_vma)
++#include <linux/delay.h>
++static void anon_vma_free(struct anon_vma *anon_vma)
+ {
++ int cnt = 0;
+ VM_BUG_ON(atomic_read(&anon_vma->refcount));
+
+ /*
+@@ -111,9 +113,17 @@ static inline void anon_vma_free(struct
+ * happen _before_ what follows.
+ */
+ might_sleep();
++retry:
+ if (rwsem_is_locked(&anon_vma->root->rwsem)) {
+ anon_vma_lock_write(anon_vma);
+ anon_vma_unlock_write(anon_vma);
++
++ if (rwsem_is_locked(&anon_vma->root->rwsem)) {
++ cnt++;
++ if (cnt > 3)
++ msleep(1);
++ }
++ goto retry;
+ }
+
+ kmem_cache_free(anon_vma_cachep, anon_vma);
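
Restated without the goto, the new free path is a bounded busy-wait with a
late msleep() back-off, equivalent in shape to (illustration only):

    int cnt = 0;

    while (rwsem_is_locked(&anon_vma->root->rwsem)) {
            anon_vma_lock_write(anon_vma);    /* flush the reader out */
            anon_vma_unlock_write(anon_vma);
            if (rwsem_is_locked(&anon_vma->root->rwsem) && ++cnt > 3)
                    msleep(1);                /* let a starved waiter run */
    }
    kmem_cache_free(anon_vma_cachep, anon_vma);

The msleep(1) matters on RT: if the freeing task has the highest priority,
plain retrying could starve the waiter forever.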
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index ad4843a6c8baf..43fa53f1d2e80 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -74,7 +74,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/*
* switch_to(x,y) should switch tasks from x to y.
-@@ -292,6 +322,8 @@ EXPORT_SYMBOL_GPL(start_thread);
+@@ -286,6 +316,8 @@ EXPORT_SYMBOL_GPL(start_thread);
task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
__switch_to_xtra(prev_p, next_p, tss);
@@ -163,7 +163,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
}
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
-@@ -87,32 +87,51 @@ static inline void __kunmap_atomic(void
+@@ -86,32 +86,51 @@ static inline void __kunmap_atomic(void
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
@@ -224,12 +224,12 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
@@ -26,6 +26,7 @@ struct sched_param {
#include <linux/nodemask.h>
#include <linux/mm_types.h>
- #include <linux/preempt_mask.h>
+ #include <linux/preempt.h>
+#include <asm/kmap_types.h>
#include <asm/page.h>
#include <asm/ptrace.h>
-@@ -1796,6 +1797,12 @@ struct task_struct {
+@@ -1847,6 +1848,12 @@ struct task_struct {
int softirq_nestcnt;
unsigned int softirqs_raised;
#endif
diff --git a/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch b/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch
index 6da9ab217b4c6..3990c550b7c66 100644
--- a/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch
+++ b/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
-@@ -592,7 +592,7 @@ void sg_miter_stop(struct sg_mapping_ite
+@@ -620,7 +620,7 @@ void sg_miter_stop(struct sg_mapping_ite
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
-@@ -637,7 +637,7 @@ static size_t sg_copy_buffer(struct scat
+@@ -664,7 +664,7 @@ size_t sg_copy_buffer(struct scatterlist
if (!sg_miter_skip(&miter, skip))
return false;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (sg_miter_next(&miter) && offset < buflen) {
unsigned int len;
-@@ -654,7 +654,7 @@ static size_t sg_copy_buffer(struct scat
+@@ -681,7 +681,7 @@ size_t sg_copy_buffer(struct scatterlist
sg_miter_stop(&miter);
@@ -40,4 +40,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ local_irq_restore_nort(flags);
return offset;
}
-
+ EXPORT_SYMBOL(sg_copy_buffer);
diff --git a/patches/mm-slub-move-slab-initialization-into-irq-enabled-region.patch b/patches/mm-slub-move-slab-initialization-into-irq-enabled-region.patch
deleted file mode 100644
index 5104424d13df2..0000000000000
--- a/patches/mm-slub-move-slab-initialization-into-irq-enabled-region.patch
+++ /dev/null
@@ -1,162 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: mm/slub: move slab initialization into irq enabled region
-
-Initializing a new slab can introduce rather large latencies because most
-of the initialization runs always with interrupts disabled.
-
-There is no point in doing so. The newly allocated slab is not visible
-yet, so there is no reason to protect it against concurrent alloc/free.
-
-Move the expensive parts of the initialization into allocate_slab(), so
-for all allocations with GFP_WAIT set, interrupts are enabled.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Christoph Lameter <cl@linux.com>
-Cc: Pekka Enberg <penberg@kernel.org>
-Cc: David Rientjes <rientjes@google.com>
-Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
-Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
----
-
- mm/slub.c | 89 +++++++++++++++++++++++++++++---------------------------------
- 1 file changed, 42 insertions(+), 47 deletions(-)
-
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -1306,6 +1306,17 @@ static inline void slab_free_hook(struct
- kasan_slab_free(s, x);
- }
-
-+static void setup_object(struct kmem_cache *s, struct page *page,
-+ void *object)
-+{
-+ setup_object_debug(s, page, object);
-+ if (unlikely(s->ctor)) {
-+ kasan_unpoison_object_data(s, object);
-+ s->ctor(object);
-+ kasan_poison_object_data(s, object);
-+ }
-+}
-+
- /*
- * Slab allocation and freeing
- */
-@@ -1336,6 +1347,8 @@ static struct page *allocate_slab(struct
- struct page *page;
- struct kmem_cache_order_objects oo = s->oo;
- gfp_t alloc_gfp;
-+ void *start, *p;
-+ int idx, order;
-
- flags &= gfp_allowed_mask;
-
-@@ -1359,13 +1372,13 @@ static struct page *allocate_slab(struct
- * Try a lower order alloc if possible
- */
- page = alloc_slab_page(s, alloc_gfp, node, oo);
--
-- if (page)
-- stat(s, ORDER_FALLBACK);
-+ if (unlikely(!page))
-+ goto out;
-+ stat(s, ORDER_FALLBACK);
- }
-
-- if (kmemcheck_enabled && page
-- && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
-+ if (kmemcheck_enabled &&
-+ !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
- int pages = 1 << oo_order(oo);
-
- kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
-@@ -1380,51 +1393,9 @@ static struct page *allocate_slab(struct
- kmemcheck_mark_unallocated_pages(page, pages);
- }
-
-- if (flags & __GFP_WAIT)
-- local_irq_disable();
-- if (!page)
-- return NULL;
--
- page->objects = oo_objects(oo);
-- mod_zone_page_state(page_zone(page),
-- (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-- NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-- 1 << oo_order(oo));
--
-- return page;
--}
--
--static void setup_object(struct kmem_cache *s, struct page *page,
-- void *object)
--{
-- setup_object_debug(s, page, object);
-- if (unlikely(s->ctor)) {
-- kasan_unpoison_object_data(s, object);
-- s->ctor(object);
-- kasan_poison_object_data(s, object);
-- }
--}
--
--static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
--{
-- struct page *page;
-- void *start;
-- void *p;
-- int order;
-- int idx;
--
-- if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-- pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-- BUG();
-- }
--
-- page = allocate_slab(s,
-- flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-- if (!page)
-- goto out;
-
- order = compound_order(page);
-- inc_slabs_node(s, page_to_nid(page), page->objects);
- page->slab_cache = s;
- __SetPageSlab(page);
- if (page_is_pfmemalloc(page))
-@@ -1448,10 +1419,34 @@ static struct page *new_slab(struct kmem
- page->freelist = start;
- page->inuse = page->objects;
- page->frozen = 1;
-+
- out:
-+ if (flags & __GFP_WAIT)
-+ local_irq_disable();
-+ if (!page)
-+ return NULL;
-+
-+ mod_zone_page_state(page_zone(page),
-+ (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-+ NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-+ 1 << oo_order(oo));
-+
-+ inc_slabs_node(s, page_to_nid(page), page->objects);
-+
- return page;
- }
-
-+static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
-+{
-+ if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
-+ pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
-+ BUG();
-+ }
-+
-+ return allocate_slab(s,
-+ flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
-+}
-+
- static void __free_slab(struct kmem_cache *s, struct page *page)
- {
- int order = compound_order(page);
diff --git a/patches/mm-vmalloc-use-get-cpu-light.patch b/patches/mm-vmalloc-use-get-cpu-light.patch
index fa6fad8e976fd..1344dd0245137 100644
--- a/patches/mm-vmalloc-use-get-cpu-light.patch
+++ b/patches/mm-vmalloc-use-get-cpu-light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -819,7 +819,7 @@ static void *new_vmap_block(unsigned int
+@@ -821,7 +821,7 @@ static void *new_vmap_block(unsigned int
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *vaddr;
node = numa_node_id();
-@@ -862,11 +862,12 @@ static void *new_vmap_block(unsigned int
+@@ -864,11 +864,12 @@ static void *new_vmap_block(unsigned int
BUG_ON(err);
radix_tree_preload_end();
@@ -36,15 +36,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -935,6 +936,7 @@ static void *vb_alloc(unsigned long size
+@@ -937,6 +938,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
+ int cpu;
- BUG_ON(size & ~PAGE_MASK);
+ BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -949,7 +951,8 @@ static void *vb_alloc(unsigned long size
+@@ -951,7 +953,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -972,7 +975,7 @@ static void *vb_alloc(unsigned long size
+@@ -974,7 +977,7 @@ static void *vb_alloc(unsigned long size
break;
}
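
get_cpu_light()/put_cpu_light(), used throughout this patch, are the RT
replacements for get_cpu()/put_cpu(): they disable migration rather than
preemption, so the task is guaranteed to stay on its CPU while remaining
preemptible. Usage mirrors the plain API (sketch of the hunk above):

    int cpu;

    cpu = get_cpu_light();                    /* RT: migrate_disable() */
    vbq = &per_cpu(vmap_block_queue, cpu);    /* stable: no migration */
    /* ... preemptible per-CPU work ... */
    put_cpu_light();                          /* RT: migrate_enable() */

On a non-RT kernel the _light variants fall back to the ordinary
preempt-disabling get_cpu()/put_cpu().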
diff --git a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index 9c388ef7fcd03..61efb48c4d2fa 100644
--- a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/mm/filemap.c
+++ b/mm/filemap.c
-@@ -167,7 +167,9 @@ static void page_cache_tree_delete(struc
+@@ -168,7 +168,9 @@ static void page_cache_tree_delete(struc
if (!workingset_node_pages(node) &&
list_empty(&node->private_list)) {
node->private_data = mapping;
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -533,9 +535,12 @@ static int page_cache_tree_insert(struct
+@@ -597,9 +599,12 @@ static int page_cache_tree_insert(struct
* node->private_list is protected by
* mapping->tree_lock.
*/
diff --git a/patches/move_sched_delayed_work_to_helper.patch b/patches/move_sched_delayed_work_to_helper.patch
index 044382974da4a..b1f7b3393881b 100644
--- a/patches/move_sched_delayed_work_to_helper.patch
+++ b/patches/move_sched_delayed_work_to_helper.patch
@@ -33,8 +33,8 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
#include <linux/math64.h>
#include <linux/timex.h>
#include <linux/time.h>
-@@ -529,10 +530,52 @@ static void sync_cmos_clock(struct work_
- &sync_cmos_work, timespec_to_jiffies(&next));
+@@ -562,10 +563,52 @@ static void sync_cmos_clock(struct work_
+ &sync_cmos_work, timespec64_to_jiffies(&next));
}
+#ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
index 884c953c1aba7..19a6826d8af0a 100644
--- a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -19,21 +19,22 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <net/protocol.h>
#include <net/dst.h>
-@@ -356,6 +357,7 @@ struct netdev_alloc_cache {
- };
- static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
- static DEFINE_PER_CPU(struct netdev_alloc_cache, napi_alloc_cache);
+@@ -349,6 +350,7 @@ EXPORT_SYMBOL(build_skb);
+
+ static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
+ static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache);
+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
- static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
- gfp_t gfp_mask)
-@@ -433,9 +435,9 @@ static void *__netdev_alloc_frag(unsigne
+ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
+ {
+@@ -356,10 +358,10 @@ static void *__netdev_alloc_frag(unsigne
unsigned long flags;
void *data;
- local_irq_save(flags);
+ local_lock_irqsave(netdev_alloc_lock, flags);
- data = __alloc_page_frag(&netdev_alloc_cache, fragsz, gfp_mask);
+ nc = this_cpu_ptr(&netdev_alloc_cache);
+ data = __alloc_page_frag(nc, fragsz, gfp_mask);
- local_irq_restore(flags);
+ local_unlock_irqrestore(netdev_alloc_lock, flags);
return data;
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 8a7ce0c52a6bd..8d94fd0e39319 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -7181,7 +7181,7 @@ static int dev_cpu_callback(struct notif
+@@ -7463,7 +7463,7 @@ static int dev_cpu_callback(struct notif
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index 45c32002b1c51..35b702cbfd548 100644
--- a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -16,10 +16,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
-@@ -3,6 +3,7 @@
-
+@@ -4,6 +4,7 @@
#include <linux/netdevice.h>
+ #include <linux/static_key.h>
+#include <linux/locallock.h>
#include <uapi/linux/netfilter/x_tables.h>
@@ -30,10 +30,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
+
- /**
- * xt_write_recseq_begin - start of a write section
+ /* xt_tee_enabled - true if x_tables needs to handle reentrancy
*
-@@ -296,6 +299,9 @@ static inline unsigned int xt_write_recs
+ * Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
+@@ -302,6 +305,9 @@ static inline unsigned int xt_write_recs
{
unsigned int addend;
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
-@@ -326,6 +332,7 @@ static inline void xt_write_recseq_end(u
+@@ -332,6 +338,7 @@ static inline void xt_write_recseq_end(u
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
diff --git a/patches/net-gianfar-do-not-disable-interrupts.patch b/patches/net-gianfar-do-not-disable-interrupts.patch
deleted file mode 100644
index 2533c46fc0e95..0000000000000
--- a/patches/net-gianfar-do-not-disable-interrupts.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 25 Mar 2014 18:34:20 +0100
-Subject: net: gianfar: Do not disable interrupts
-
-Each per-queue lock is taken with spin_lock_irqsave(), except in the case
-where all of them are taken for some kind of serialisation. As an
-optimisation, local_irq_save() is used there so that lock_tx_qs() and
-lock_rx_qs() can use just the spin_lock() variant instead.
-On RT local_irq_save() behaves differently, so we use the _nort()
-variant.
-Lockdep screams easily, e.g. when triggered via "ethtool -K eth0 rx off tx off".
-
-What remains is a missing lockdep annotation, which makes lockdep think
-lock_tx_qs() may cause a deadlock.
-
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/net/ethernet/freescale/gianfar.c | 12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
---- a/drivers/net/ethernet/freescale/gianfar.c
-+++ b/drivers/net/ethernet/freescale/gianfar.c
-@@ -1540,7 +1540,7 @@ static int gfar_suspend(struct device *d
-
- if (netif_running(ndev)) {
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- lock_tx_qs(priv);
-
- gfar_halt_nodisable(priv);
-@@ -1556,7 +1556,7 @@ static int gfar_suspend(struct device *d
- gfar_write(&regs->maccfg1, tempval);
-
- unlock_tx_qs(priv);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- disable_napi(priv);
-
-@@ -1598,7 +1598,7 @@ static int gfar_resume(struct device *de
- /* Disable Magic Packet mode, in case something
- * else woke us up.
- */
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- lock_tx_qs(priv);
-
- tempval = gfar_read(&regs->maccfg2);
-@@ -1608,7 +1608,7 @@ static int gfar_resume(struct device *de
- gfar_start(priv);
-
- unlock_tx_qs(priv);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
-
- netif_device_attach(ndev);
-
-@@ -3418,14 +3418,14 @@ static irqreturn_t gfar_error(int irq, v
- dev->stats.tx_dropped++;
- atomic64_inc(&priv->extra_stats.tx_underrun);
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- lock_tx_qs(priv);
-
- /* Reactivate the Tx Queues */
- gfar_write(&regs->tstat, gfargrp->tstat);
-
- unlock_tx_qs(priv);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
- netif_dbg(priv, tx_err, dev, "Transmit Error\n");
- }
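The locking pattern the removed gianfar patch described, as a minimal
standalone sketch (hypothetical queue and lock names, assuming the
_nort() helpers from the RT series; not the actual driver code):

/* Sketch of the pattern only -- hypothetical names. */
#include <linux/spinlock.h>

#define NQUEUES 8

struct xq {
	spinlock_t lock;
};

static struct xq queues[NQUEUES];

/* Fast path: a single queue lock, taken with the irqsave variant. */
static void lock_one(struct xq *q, unsigned long *flags)
{
	spin_lock_irqsave(&q->lock, *flags);
}

/*
 * Serialisation path: interrupts are disabled once up front so each
 * per-queue lock can use the plain spin_lock() variant. On RT,
 * local_irq_save_nort() leaves interrupts enabled, because the
 * spinlocks below are sleeping locks and must stay preemptible.
 */
static void lock_all(unsigned long *flags)
{
	int i;

	local_irq_save_nort(*flags);
	for (i = 0; i < NQUEUES; i++)
		spin_lock(&queues[i].lock);
}

static void unlock_all(unsigned long *flags)
{
	int i;

	for (i = NQUEUES - 1; i >= 0; i--)
		spin_unlock(&queues[i].lock);
	local_irq_restore_nort(*flags);
}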
diff --git a/patches/net-make-devnet_rename_seq-a-mutex.patch b/patches/net-make-devnet_rename_seq-a-mutex.patch
index cd81c4fd63207..cb15f31b218ed 100644
--- a/patches/net-make-devnet_rename_seq-a-mutex.patch
+++ b/patches/net-make-devnet_rename_seq-a-mutex.patch
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -184,6 +184,7 @@ static unsigned int napi_gen_id;
+@@ -186,6 +186,7 @@ static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void dev_base_seq_inc(struct net *net)
{
-@@ -852,7 +853,8 @@ int netdev_get_name(struct net *net, cha
+@@ -884,7 +885,8 @@ int netdev_get_name(struct net *net, cha
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto retry;
}
-@@ -1121,20 +1123,17 @@ int dev_change_name(struct net_device *d
+@@ -1153,20 +1155,17 @@ int dev_change_name(struct net_device *d
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1147,11 +1146,12 @@ int dev_change_name(struct net_device *d
+@@ -1179,11 +1178,12 @@ int dev_change_name(struct net_device *d
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netdev_adjacent_rename_links(dev, oldname);
-@@ -1172,7 +1172,8 @@ int dev_change_name(struct net_device *d
+@@ -1204,7 +1204,8 @@ int dev_change_name(struct net_device *d
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
-@@ -1185,6 +1186,11 @@ int dev_change_name(struct net_device *d
+@@ -1217,6 +1218,11 @@ int dev_change_name(struct net_device *d
}
return err;
diff --git a/patches/net-prevent-abba-deadlock.patch b/patches/net-prevent-abba-deadlock.patch
index 223c05704549a..04181bc8387c1 100644
--- a/patches/net-prevent-abba-deadlock.patch
+++ b/patches/net-prevent-abba-deadlock.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/sock.c
+++ b/net/core/sock.c
-@@ -2370,12 +2370,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2435,12 +2435,11 @@ void lock_sock_nested(struct sock *sk, i
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
diff --git a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index 92829e408434c..455a7e7f5af49 100644
--- a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
-@@ -894,7 +894,7 @@ void dev_deactivate_many(struct list_hea
+@@ -888,7 +888,7 @@ void dev_deactivate_many(struct list_hea
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
diff --git a/patches/net-tx-action-avoid-livelock-on-rt.patch b/patches/net-tx-action-avoid-livelock-on-rt.patch
index aa776db4207fd..0edb32ae15308 100644
--- a/patches/net-tx-action-avoid-livelock-on-rt.patch
+++ b/patches/net-tx-action-avoid-livelock-on-rt.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3444,6 +3444,36 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3598,6 +3598,36 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -3485,7 +3515,7 @@ static void net_tx_action(struct softirq
+@@ -3639,7 +3669,7 @@ static void net_tx_action(struct softirq
head = head->next_sched;
root_lock = qdisc_lock(q);
diff --git a/patches/net-use-cpu-chill.patch b/patches/net-use-cpu-chill.patch
index b5e52e5c41b3a..f945d89790e42 100644
--- a/patches/net-use-cpu-chill.patch
+++ b/patches/net-use-cpu-chill.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-@@ -698,7 +699,7 @@ static void prb_retire_rx_blk_timer_expi
+@@ -695,7 +696,7 @@ static void prb_retire_rx_blk_timer_expi
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -960,7 +961,7 @@ static void prb_retire_current_block(str
+@@ -957,7 +958,7 @@ static void prb_retire_current_block(str
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "rds.h"
#include "ib.h"
-@@ -286,7 +287,7 @@ static inline void wait_clean_list_grace
+@@ -313,7 +314,7 @@ static inline void wait_clean_list_grace
for_each_online_cpu(cpu) {
flag = &per_cpu(clean_list_grace, cpu);
while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
diff --git a/patches/net-wireless-warn-nort.patch b/patches/net-wireless-warn-nort.patch
index 4f98708a2dbf7..05d0332697017 100644
--- a/patches/net-wireless-warn-nort.patch
+++ b/patches/net-wireless-warn-nort.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
-@@ -3554,7 +3554,7 @@ void ieee80211_rx(struct ieee80211_hw *h
+@@ -3554,7 +3554,7 @@ void ieee80211_rx_napi(struct ieee80211_
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
diff --git a/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt b/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt
index 73c7de4066796..56c2621517cae 100644
--- a/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt
+++ b/patches/net__Make_synchronize-rcu_expedited_conditional-on-non-rt
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -6930,7 +6930,7 @@ EXPORT_SYMBOL(free_netdev);
+@@ -7212,7 +7212,7 @@ EXPORT_SYMBOL(free_netdev);
void synchronize_net(void)
{
might_sleep();
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 21b69c1a7672e..7222dd3ca80d3 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -30,12 +30,28 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
+ arch/x86/entry/common.c | 7 +++++++
arch/x86/include/asm/signal.h | 13 +++++++++++++
- arch/x86/kernel/signal.c | 8 ++++++++
include/linux/sched.h | 4 ++++
kernel/signal.c | 37 +++++++++++++++++++++++++++++++++++--
- 4 files changed, 60 insertions(+), 2 deletions(-)
+ 4 files changed, 59 insertions(+), 2 deletions(-)
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -239,6 +239,13 @@ static void exit_to_usermode_loop(struct
+ if (cached_flags & _TIF_NEED_RESCHED)
+ schedule();
+
++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
++ if (unlikely(current->forced_info.si_signo)) {
++ struct task_struct *t = current;
++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
++ t->forced_info.si_signo = 0;
++ }
++#endif
+ if (cached_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
@@ -23,6 +23,19 @@ typedef struct {
@@ -58,26 +74,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifndef CONFIG_COMPAT
typedef sigset_t compat_sigset_t;
#endif
---- a/arch/x86/kernel/signal.c
-+++ b/arch/x86/kernel/signal.c
-@@ -723,6 +723,14 @@ do_notify_resume(struct pt_regs *regs, v
- {
- user_exit();
-
-+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
-+ if (unlikely(current->forced_info.si_signo)) {
-+ struct task_struct *t = current;
-+ force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
-+ t->forced_info.si_signo = 0;
-+ }
-+#endif
-+
- if (thread_info_flags & _TIF_UPROBE)
- uprobe_notify_resume(regs);
-
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1538,6 +1538,10 @@ struct task_struct {
+@@ -1577,6 +1577,10 @@ struct task_struct {
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
@@ -90,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
size_t sas_ss_size;
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1282,8 +1282,8 @@ int do_send_sig_info(int sig, struct sig
+@@ -1216,8 +1216,8 @@ int do_send_sig_info(int sig, struct sig
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
@@ -101,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned long int flags;
int ret, blocked, ignored;
-@@ -1308,6 +1308,39 @@ force_sig_info(int sig, struct siginfo *
+@@ -1242,6 +1242,39 @@ force_sig_info(int sig, struct siginfo *
return ret;
}
diff --git a/patches/panic-disable-random-on-rt.patch b/patches/panic-disable-random-on-rt.patch
index 3f2f7fd466ad0..14b3402e3fe08 100644
--- a/patches/panic-disable-random-on-rt.patch
+++ b/patches/panic-disable-random-on-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -387,9 +387,11 @@ static u64 oops_id;
+@@ -401,9 +401,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
diff --git a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index 2c3aed8dae6ea..97ce2141e9bcf 100644
--- a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -23,30 +23,28 @@ Link: http://lkml.kernel.org/r/20111005184518.GA21601@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/rcupdate.h | 6 ------
+ include/linux/rcupdate.h | 4 ----
kernel/rcu/tree.c | 9 ++++++++-
- kernel/rcu/tree_plugin.h | 9 +++++++--
- 3 files changed, 15 insertions(+), 9 deletions(-)
+ kernel/rcu/tree_plugin.h | 8 +++++++-
+ 3 files changed, 15 insertions(+), 6 deletions(-)
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -300,13 +300,7 @@ static inline int rcu_preempt_depth(void
+@@ -334,11 +334,7 @@ static inline int rcu_preempt_depth(void
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
--
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline void rcu_bh_qs(void) { }
-#else
void rcu_bh_qs(void);
-#endif
--
void rcu_check_callbacks(int user);
struct notifier_block;
- void rcu_idle_enter(void);
+ int rcu_cpu_notify(struct notifier_block *self,
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -220,7 +220,14 @@ void rcu_sched_qs(void)
+@@ -266,7 +266,14 @@ void rcu_sched_qs(void)
}
}
@@ -61,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#else
void rcu_bh_qs(void)
{
- if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
+ if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -28,6 +28,7 @@
@@ -72,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "../time/tick-internal.h"
#ifdef CONFIG_RCU_BOOST
-@@ -1356,7 +1357,7 @@ static void rcu_prepare_kthreads(int cpu
+@@ -1346,7 +1347,7 @@ static void rcu_prepare_kthreads(int cpu
#endif /* #else #ifdef CONFIG_RCU_BOOST */
@@ -81,17 +79,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Check to see if any future RCU-related work will need to be done
-@@ -1374,7 +1375,9 @@ int rcu_needs_cpu(unsigned long *delta_j
- return rcu_cpu_has_callbacks(NULL);
+@@ -1363,7 +1364,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+ return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
+ ? 0 : rcu_cpu_has_callbacks(NULL);
}
- #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
-@@ -1472,6 +1475,8 @@ static bool __maybe_unused rcu_try_advan
+@@ -1459,6 +1462,8 @@ static bool __maybe_unused rcu_try_advan
return cbs_ready;
}
@@ -100,12 +98,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
-@@ -1512,7 +1517,7 @@ int rcu_needs_cpu(unsigned long *dj)
+@@ -1504,6 +1509,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+ *nextevt = basemono + dj * TICK_NSEC;
return 0;
}
- #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
--
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
+
/*
* Prepare a CPU for idle from an RCU perspective. The first major task
- * is to sense whether nohz mode has been enabled or disabled via sysfs.
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
index 5d525e03d5eae..da73668e16e81 100644
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/patches/perf-make-swevent-hrtimer-irqsafe.patch
@@ -58,7 +58,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -6925,6 +6925,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -7243,6 +7243,7 @@ static void perf_swevent_init_hrtimer(st
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
diff --git a/patches/peter_zijlstra-frob-rcu.patch b/patches/peter_zijlstra-frob-rcu.patch
index 695632b01453a..40bb2650f983c 100644
--- a/patches/peter_zijlstra-frob-rcu.patch
+++ b/patches/peter_zijlstra-frob-rcu.patch
@@ -155,7 +155,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -291,7 +291,7 @@ void rcu_read_unlock_special(struct task
+@@ -432,7 +432,7 @@ void rcu_read_unlock_special(struct task
}
/* Hardware IRQ handlers cannot block, complain if they get here. */
@@ -163,4 +163,4 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
lockdep_rcu_suspicious(__FILE__, __LINE__,
"rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
- pr_alert("->rcu_read_unlock_special: %#x (b: %d, nq: %d)\n",
+ pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
diff --git a/patches/peterz-srcu-crypto-chain.patch b/patches/peterz-srcu-crypto-chain.patch
index 77d83b184494f..b612cb52ccb31 100644
--- a/patches/peterz-srcu-crypto-chain.patch
+++ b/patches/peterz-srcu-crypto-chain.patch
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
-@@ -695,13 +695,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+@@ -719,13 +719,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
int crypto_register_notifier(struct notifier_block *nb)
{
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ok;
--- a/crypto/internal.h
+++ b/crypto/internal.h
-@@ -48,7 +48,7 @@ struct crypto_larval {
+@@ -47,7 +47,7 @@ struct crypto_larval {
extern struct list_head crypto_alg_list;
extern struct rw_semaphore crypto_alg_sem;
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
-@@ -142,7 +142,7 @@ static inline int crypto_is_moribund(str
+@@ -143,7 +143,7 @@ static inline int crypto_is_moribund(str
static inline void crypto_notify(unsigned long val, void *v)
{
diff --git a/patches/ping-sysrq.patch b/patches/ping-sysrq.patch
index 276a332fcb299..d4aa3f4b0b0ed 100644
--- a/patches/ping-sysrq.patch
+++ b/patches/ping-sysrq.patch
@@ -42,7 +42,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
'b' - Will immediately reboot the system without syncing or unmounting
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
-@@ -69,6 +69,7 @@ struct netns_ipv4 {
+@@ -70,6 +70,7 @@ struct netns_ipv4 {
int sysctl_icmp_echo_ignore_all;
int sysctl_icmp_echo_ignore_broadcasts;
@@ -60,7 +60,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
-@@ -867,6 +868,30 @@ static bool icmp_redirect(struct sk_buff
+@@ -891,6 +892,30 @@ static bool icmp_redirect(struct sk_buff
}
/*
@@ -91,7 +91,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
-@@ -893,6 +918,11 @@ static bool icmp_echo(struct sk_buff *sk
+@@ -917,6 +942,11 @@ static bool icmp_echo(struct sk_buff *sk
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
@@ -105,7 +105,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
return true;
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
-@@ -779,6 +779,13 @@ static struct ctl_table ipv4_net_table[]
+@@ -818,6 +818,13 @@ static struct ctl_table ipv4_net_table[]
.proc_handler = proc_dointvec
},
{
diff --git a/patches/posix-timers-no-broadcast.patch b/patches/posix-timers-no-broadcast.patch
index 21adaced5a86b..287b8f083d48c 100644
--- a/patches/posix-timers-no-broadcast.patch
+++ b/patches/posix-timers-no-broadcast.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
-@@ -499,6 +499,7 @@ static enum hrtimer_restart posix_timer_
+@@ -506,6 +506,7 @@ static enum hrtimer_restart posix_timer_
static struct pid *good_sigevent(sigevent_t * event)
{
struct task_struct *rtn = current->group_leader;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-@@ -507,7 +508,8 @@ static struct pid *good_sigevent(sigeven
+@@ -514,7 +515,8 @@ static struct pid *good_sigevent(sigeven
return NULL;
if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 5d276e6e61eeb..b150e65775d6c 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -14,12 +14,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/init_task.h | 7 +
include/linux/sched.h | 3
kernel/fork.c | 3
- kernel/time/posix-cpu-timers.c | 198 +++++++++++++++++++++++++++++++++++++++--
- 4 files changed, 205 insertions(+), 6 deletions(-)
+ kernel/time/posix-cpu-timers.c | 193 ++++++++++++++++++++++++++++++++++++++++-
+ 4 files changed, 202 insertions(+), 4 deletions(-)
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
-@@ -147,6 +147,12 @@ extern struct task_group root_task_group
+@@ -148,6 +148,12 @@ extern struct task_group root_task_group
# define INIT_PERF_EVENTS(tsk)
#endif
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
.vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock), \
-@@ -239,6 +245,7 @@ extern struct task_group root_task_group
+@@ -240,6 +246,7 @@ extern struct task_group root_task_group
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1501,6 +1501,9 @@ struct task_struct {
+@@ -1542,6 +1542,9 @@ struct task_struct {
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
const struct cred __rcu *real_cred; /* objective and real subjective task
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1214,6 +1214,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1218,6 +1218,9 @@ static void rt_mutex_init_task(struct ta
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
-@@ -626,7 +627,7 @@ static int posix_cpu_timer_set(struct k_
+@@ -650,7 +651,7 @@ static int posix_cpu_timer_set(struct k_
/*
* Disarm any old timer after extracting its expiry time.
*/
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = 0;
old_incr = timer->it.cpu.incr;
-@@ -1047,7 +1048,7 @@ void posix_cpu_timer_schedule(struct k_i
+@@ -1091,7 +1092,7 @@ void posix_cpu_timer_schedule(struct k_i
/*
* Now re-arm for the new expiry time.
*/
@@ -92,21 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arm_timer(timer);
unlock_task_sighand(p, &flags);
-@@ -1113,10 +1114,11 @@ static inline int fastpath_timer_check(s
- sig = tsk->signal;
- if (sig->cputimer.running) {
- struct task_cputime group_sample;
-+ unsigned long flags;
-
-- raw_spin_lock(&sig->cputimer.lock);
-+ raw_spin_lock_irqsave(&sig->cputimer.lock, flags);
- group_sample = sig->cputimer.cputime;
-- raw_spin_unlock(&sig->cputimer.lock);
-+ raw_spin_unlock_irqrestore(&sig->cputimer.lock, flags);
-
- if (task_cputime_expired(&group_sample, &sig->cputime_expires))
- return 1;
-@@ -1130,13 +1132,13 @@ static inline int fastpath_timer_check(s
+@@ -1182,13 +1183,13 @@ static inline int fastpath_timer_check(s
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
@@ -122,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The fast path checks that there are no expired thread or thread
-@@ -1194,6 +1196,190 @@ void run_posix_cpu_timers(struct task_st
+@@ -1242,6 +1243,190 @@ void run_posix_cpu_timers(struct task_st
}
}
diff --git a/patches/power-disable-highmem-on-rt.patch b/patches/power-disable-highmem-on-rt.patch
index 725a8b4aa4e4d..33863c99cc0d1 100644
--- a/patches/power-disable-highmem-on-rt.patch
+++ b/patches/power-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -313,7 +313,7 @@ menu "Kernel options"
+@@ -320,7 +320,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index b12421a4c448d..b7effe28ad235 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -139,6 +139,7 @@ config PPC
+@@ -142,6 +142,7 @@ config PPC
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
-@@ -813,7 +813,14 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -818,7 +818,14 @@ user_exc_return: /* r10 contains MSR_KE
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
-@@ -824,11 +831,11 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -829,11 +836,11 @@ user_exc_return: /* r10 contains MSR_KE
*/
bl trace_hardirqs_off
#endif
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
-@@ -1149,7 +1156,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -1154,7 +1161,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1170,7 +1177,7 @@ do_resched: /* r10 contains MSR_KERNEL
+@@ -1175,7 +1182,7 @@ do_resched: /* r10 contains MSR_KERNEL
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq restore_user
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
-@@ -636,7 +636,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+@@ -683,7 +683,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#else
beq restore
#endif
@@ -142,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq 2f
bl restore_interrupts
SCHEDULE_USER
-@@ -698,10 +698,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+@@ -745,10 +745,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#ifdef CONFIG_PREEMPT
/* Check if we need to preempt */
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cmpwi cr1,r8,0
ld r0,SOFTE(r1)
cmpdi r0,0
-@@ -718,7 +726,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
+@@ -765,7 +773,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
/* Re-test flags and eventually loop */
CURRENT_THREAD_INFO(r9, r1)
ld r4,TI_FLAGS(r9)
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index decc31c4ede17..0b15414f106cf 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -53,10 +53,10 @@ performance.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/include/asm/preempt.h | 18 +++++++++++++-
- include/linux/ftrace_event.h | 1
include/linux/preempt.h | 29 ++++++++++++++++++++++-
include/linux/sched.h | 37 ++++++++++++++++++++++++++++++
include/linux/thread_info.h | 12 +++++++++
+ include/linux/trace_events.h | 1
kernel/Kconfig.preempt | 6 ++++
kernel/sched/core.c | 50 ++++++++++++++++++++++++++++++++++++++++-
kernel/sched/fair.c | 16 ++++++-------
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
-@@ -82,17 +82,33 @@ static __always_inline void __preempt_co
+@@ -79,17 +79,33 @@ static __always_inline void __preempt_co
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
*/
@@ -104,19 +104,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#ifdef CONFIG_PREEMPT
---- a/include/linux/ftrace_event.h
-+++ b/include/linux/ftrace_event.h
-@@ -68,6 +68,7 @@ struct trace_entry {
- int pid;
- unsigned short migrate_disable;
- unsigned short padding;
-+ unsigned char preempt_lazy_count;
- };
-
- #define FTRACE_MAX_EVENT \
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -34,6 +34,20 @@ extern void preempt_count_sub(int val);
+@@ -153,6 +153,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
@@ -137,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
-@@ -42,6 +56,12 @@ do { \
+@@ -161,6 +175,12 @@ do { \
barrier(); \
} while (0)
@@ -150,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
-@@ -70,6 +90,13 @@ do { \
+@@ -198,6 +218,13 @@ do { \
__preempt_schedule(); \
} while (0)
@@ -161,10 +151,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_check_resched(); \
+} while (0)
+
- #else
+ #else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
-@@ -148,7 +175,7 @@ do { \
+@@ -264,7 +291,7 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
@@ -175,7 +165,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2898,6 +2898,43 @@ static inline int test_tsk_need_resched(
+@@ -2961,6 +2961,43 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -240,6 +230,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -68,6 +68,7 @@ struct trace_entry {
+ int pid;
+ unsigned short migrate_disable;
+ unsigned short padding;
++ unsigned char preempt_lazy_count;
+ };
+
+ #define TRACE_EVENT_TYPE_MAX \
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -6,6 +6,12 @@ config PREEMPT_RT_BASE
@@ -257,7 +257,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -623,6 +623,38 @@ void resched_curr(struct rq *rq)
+@@ -603,6 +603,38 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -296,7 +296,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2018,6 +2050,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2353,6 +2385,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -306,7 +306,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -2781,6 +2816,7 @@ void migrate_disable(void)
+@@ -3183,6 +3218,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -314,15 +314,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
p->migrate_disable = 1;
preempt_enable();
-@@ -2838,6 +2874,7 @@ void migrate_enable(void)
+@@ -3241,6 +3277,7 @@ void migrate_enable(void)
unpin_current_cpu();
preempt_enable();
+ preempt_lazy_enable();
}
EXPORT_SYMBOL(migrate_enable);
- #else
-@@ -2971,6 +3008,7 @@ static void __sched __schedule(void)
+ #endif
+@@ -3380,6 +3417,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev);
clear_tsk_need_resched(prev);
@@ -330,7 +330,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
rq->clock_skip_update = 0;
-@@ -3115,6 +3153,14 @@ asmlinkage __visible void __sched notrac
+@@ -3525,6 +3563,14 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -339,13 +339,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * Check for lazy preemption
+ */
+ if (current_thread_info()->preempt_lazy_count &&
-+ !test_thread_flag(TIF_NEED_RESCHED))
++ !test_thread_flag(TIF_NEED_RESCHED))
+ return;
+#endif
do {
- __preempt_count_add(PREEMPT_ACTIVE);
+ preempt_disable_notrace();
/*
-@@ -4838,7 +4884,9 @@ void init_idle(struct task_struct *idle,
+@@ -5265,7 +5311,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -358,7 +358,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -3201,7 +3201,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3135,7 +3135,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -3225,7 +3225,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3159,7 +3159,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -376,7 +376,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -3366,7 +3366,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -3299,7 +3299,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -385,7 +385,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -3557,7 +3557,7 @@ static void __account_cfs_rq_runtime(str
+@@ -3481,7 +3481,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -394,7 +394,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -4180,7 +4180,7 @@ static void hrtick_start_fair(struct rq
+@@ -4093,7 +4093,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -403,7 +403,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -5076,7 +5076,7 @@ static void check_preempt_wakeup(struct
+@@ -5177,7 +5177,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -412,7 +412,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -7869,7 +7869,7 @@ static void task_fork_fair(struct task_s
+@@ -7928,7 +7928,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -421,7 +421,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -7894,7 +7894,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -7953,7 +7953,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -432,7 +432,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
-@@ -52,6 +52,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
+@@ -47,6 +47,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
#ifdef CONFIG_PREEMPT_RT_FULL
SCHED_FEAT(TTWU_QUEUE, false)
@@ -444,7 +444,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1291,6 +1291,15 @@ extern void init_sched_dl_class(void);
+@@ -1300,6 +1300,15 @@ extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -462,7 +462,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1630,6 +1630,7 @@ tracing_generic_entry_update(struct trac
+@@ -1652,6 +1652,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -470,7 +470,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1639,7 +1640,8 @@ tracing_generic_entry_update(struct trac
+@@ -1661,7 +1662,8 @@ tracing_generic_entry_update(struct trac
#endif
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -480,7 +480,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -2560,15 +2562,17 @@ get_total_entries(struct trace_buffer *b
+@@ -2557,15 +2559,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
@@ -507,7 +507,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2594,11 +2598,14 @@ static void print_func_help_header_irq(s
+@@ -2591,11 +2595,14 @@ static void print_func_help_header_irq(s
print_event_info(buf, m);
seq_puts(m, "# _-----=> irqs-off\n"
"# / _----=> need-resched\n"
@@ -529,7 +529,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -120,6 +120,7 @@ struct kretprobe_trace_entry_head {
+@@ -117,6 +117,7 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
@@ -537,7 +537,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -128,6 +129,7 @@ enum trace_flag_type {
+@@ -125,6 +126,7 @@ enum trace_flag_type {
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
@@ -547,7 +547,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_BUF_SIZE 1024
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
-@@ -430,6 +430,7 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq
{
char hardsoft_irq;
char need_resched;
@@ -555,7 +555,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
char irqs_off;
int hardirq;
int softirq;
-@@ -457,6 +458,8 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -413,6 +414,8 @@ int trace_print_lat_fmt(struct trace_seq
need_resched = '.';
break;
}
@@ -564,7 +564,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
hardsoft_irq =
(hardirq && softirq) ? 'H' :
-@@ -464,14 +467,20 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -420,14 +423,20 @@ int trace_print_lat_fmt(struct trace_seq
softirq ? 's' :
'.';
diff --git a/patches/preempt-nort-rt-variants.patch b/patches/preempt-nort-rt-variants.patch
index 1f4b5e6d2cd18..f34f4418a2218 100644
--- a/patches/preempt-nort-rt-variants.patch
+++ b/patches/preempt-nort-rt-variants.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -48,7 +48,11 @@ do { \
+@@ -154,7 +154,11 @@ do { \
preempt_count_dec(); \
} while (0)
@@ -24,9 +24,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+# define preempt_enable_no_resched() preempt_enable()
+#endif
- #ifdef CONFIG_PREEMPT
- #define preempt_enable() \
-@@ -145,6 +149,18 @@ do { \
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+
+@@ -248,6 +252,18 @@ do { \
set_preempt_need_resched(); \
} while (0)
diff --git a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
index bd2ea50c2be89..0ad66cbb99fcb 100644
--- a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+++ b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -15,7 +15,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1637,6 +1637,13 @@ asmlinkage void early_printk(const char
+@@ -1686,6 +1686,13 @@ asmlinkage void early_printk(const char
*/
static bool __read_mostly printk_killswitch;
diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch
index a6698f6f9c533..7715c6ccc9a90 100644
--- a/patches/printk-kill.patch
+++ b/patches/printk-kill.patch
@@ -9,12 +9,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/printk.h | 2 +
kernel/printk/printk.c | 76 ++++++++++++++++++++++++++++++++++++-------------
- kernel/watchdog.c | 14 +++++++--
- 3 files changed, 70 insertions(+), 22 deletions(-)
+ kernel/watchdog.c | 10 ++++++
+ 3 files changed, 68 insertions(+), 20 deletions(-)
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -115,9 +115,11 @@ int no_printk(const char *fmt, ...)
+@@ -117,9 +117,11 @@ int no_printk(const char *fmt, ...)
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
@@ -25,10 +25,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+static inline void printk_kill(void) { }
#endif
- typedef int(*printk_func_t)(const char *fmt, va_list args);
+ typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args);
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1607,6 +1607,55 @@ static size_t cont_print_text(char *text
+@@ -1656,6 +1656,55 @@ static size_t cont_print_text(char *text
return textlen;
}
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage int vprintk_emit(int facility, int level,
const char *dict, size_t dictlen,
const char *fmt, va_list args)
-@@ -1623,6 +1672,13 @@ asmlinkage int vprintk_emit(int facility
+@@ -1672,6 +1721,13 @@ asmlinkage int vprintk_emit(int facility
/* cpu currently holding logbuf_lock in this function */
static unsigned int logbuf_cpu = UINT_MAX;
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
in_sched = true;
-@@ -1902,26 +1958,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
+@@ -1961,26 +2017,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
#endif /* CONFIG_PRINTK */
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
-@@ -262,6 +262,8 @@ static int is_softlockup(unsigned long t
+@@ -299,6 +299,8 @@ static int is_softlockup(unsigned long t
#ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static struct perf_event_attr wd_hw_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES,
-@@ -295,13 +297,21 @@ static void watchdog_overflow_callback(s
+@@ -333,6 +335,13 @@ static void watchdog_overflow_callback(s
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
@@ -145,18 +145,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * lock up in printk() and kill console logging:
+ */
+ printk_kill();
++
++ raw_spin_lock(&watchdog_output_lock);
+
+ pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
+ print_modules();
+@@ -350,6 +359,7 @@ static void watchdog_overflow_callback(s
+ !test_and_set_bit(0, &hardlockup_allcpu_dumped))
+ trigger_allbutself_cpu_backtrace();
-- if (hardlockup_panic)
-+ if (hardlockup_panic) {
- panic("Watchdog detected hard LOCKUP on cpu %d",
- this_cpu);
-- else
-+ } else {
-+ raw_spin_lock(&watchdog_output_lock);
- WARN(1, "Watchdog detected hard LOCKUP on cpu %d",
- this_cpu);
-+ raw_spin_unlock(&watchdog_output_lock);
-+ }
++ raw_spin_unlock(&watchdog_output_lock);
+ if (hardlockup_panic)
+ panic("Hard LOCKUP");
- __this_cpu_write(hard_watchdog_warn, true);
- return;
diff --git a/patches/printk-rt-aware.patch b/patches/printk-rt-aware.patch
index 2396f51bb83ea..8fa8586d52049 100644
--- a/patches/printk-rt-aware.patch
+++ b/patches/printk-rt-aware.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1401,6 +1401,7 @@ static void call_console_drivers(int lev
+@@ -1443,6 +1443,7 @@ static void call_console_drivers(int lev
if (!console_drivers)
return;
@@ -20,15 +20,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
-@@ -1413,6 +1414,7 @@ static void call_console_drivers(int lev
- continue;
- con->write(con, text, len);
+@@ -1458,6 +1459,7 @@ static void call_console_drivers(int lev
+ else
+ con->write(con, text, len);
}
+ migrate_enable();
}
/*
-@@ -1473,6 +1475,15 @@ static inline int can_use_console(unsign
+@@ -1518,6 +1520,15 @@ static inline int can_use_console(unsign
static int console_trylock_for_printk(void)
{
unsigned int cpu = smp_processor_id();
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!console_trylock())
return 0;
-@@ -1827,8 +1838,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -1876,8 +1887,7 @@ asmlinkage int vprintk_emit(int facility
* console_sem which would prevent anyone from printing to
* console
*/
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Try to acquire and then immediately release the console
* semaphore. The release will print out buffers and wake up
-@@ -1836,7 +1846,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -1885,7 +1895,7 @@ asmlinkage int vprintk_emit(int facility
*/
if (console_trylock_for_printk())
console_unlock();
@@ -63,35 +63,35 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lockdep_on();
}
-@@ -2186,11 +2196,16 @@ static void console_cont_flush(char *tex
+@@ -2245,11 +2255,16 @@ static void console_cont_flush(char *tex
goto out;
len = cont_print_text(text, size);
-+#ifndef CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT_FULL
++ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
++ call_console_drivers(cont.level, NULL, 0, text, len);
++#else
raw_spin_unlock(&logbuf_lock);
stop_critical_timings();
- call_console_drivers(cont.level, text, len);
+ call_console_drivers(cont.level, NULL, 0, text, len);
start_critical_timings();
local_irq_restore(flags);
-+#else
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ call_console_drivers(cont.level, text, len);
+#endif
return;
out:
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2278,12 +2293,17 @@ void console_unlock(void)
+@@ -2348,12 +2363,17 @@ void console_unlock(void)
console_idx = log_next(console_idx);
console_seq++;
console_prev = msg->flags;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ call_console_drivers(level, text, len);
++ call_console_drivers(level, ext_text, ext_len, text, len);
+#else
raw_spin_unlock(&logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
- call_console_drivers(level, text, len);
+ call_console_drivers(level, ext_text, ext_len, text, len);
start_critical_timings();
local_irq_restore(flags);
+#endif
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 7592c4b302877..3592ac0874320 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -25,12 +25,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/sched.h | 48 +++++++++++++++++++++++++++++++++++++++++++++---
kernel/ptrace.c | 7 ++++++-
- kernel/sched/core.c | 19 ++++++++++++++++---
- 3 files changed, 67 insertions(+), 7 deletions(-)
+ kernel/sched/core.c | 17 +++++++++++++++--
+ 3 files changed, 66 insertions(+), 6 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -234,10 +234,7 @@ extern char ___assert_task_state[1 - 2*!
+@@ -242,10 +242,7 @@ extern char ___assert_task_state[1 - 2*!
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
@@ -40,8 +40,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FROZEN) == 0)
-@@ -2918,6 +2915,51 @@ static inline int signal_pending_state(l
+ (task->flags & PF_FROZEN) == 0 && \
+@@ -2981,6 +2978,51 @@ static inline int signal_pending_state(l
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
@@ -111,9 +111,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1219,6 +1219,18 @@ struct migration_arg {
-
- static int migration_cpu_stop(void *data);
+@@ -1435,6 +1435,18 @@ int migrate_swap(struct task_struct *cur
+ return ret;
+ }
+static bool check_task_state(struct task_struct *p, long match_state)
+{
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1263,7 +1275,7 @@ unsigned long wait_task_inactive(struct
+@@ -1479,7 +1491,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1278,7 +1290,8 @@ unsigned long wait_task_inactive(struct
+@@ -1494,7 +1506,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
@@ -149,12 +149,3 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &flags);
-@@ -1833,7 +1846,7 @@ static void try_to_wake_up_local(struct
- */
- int wake_up_process(struct task_struct *p)
- {
-- WARN_ON(task_is_stopped_or_traced(p));
-+ WARN_ON(__task_is_stopped_or_traced(p));
- return try_to_wake_up(p, TASK_NORMAL, 0);
- }
- EXPORT_SYMBOL(wake_up_process);
diff --git a/patches/radix-tree-rt-aware.patch b/patches/radix-tree-rt-aware.patch
index 78762efd6a5a6..e2b4d350dc1c2 100644
--- a/patches/radix-tree-rt-aware.patch
+++ b/patches/radix-tree-rt-aware.patch
@@ -39,22 +39,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
-@@ -195,12 +195,13 @@ radix_tree_node_alloc(struct radix_tree_
+@@ -196,13 +196,14 @@ radix_tree_node_alloc(struct radix_tree_
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
- rtp = this_cpu_ptr(&radix_tree_preloads);
+ rtp = &get_cpu_var(radix_tree_preloads);
if (rtp->nr) {
- ret = rtp->nodes[rtp->nr - 1];
- rtp->nodes[rtp->nr - 1] = NULL;
+ ret = rtp->nodes;
+ rtp->nodes = ret->private_data;
+ ret->private_data = NULL;
rtp->nr--;
}
+ put_cpu_var(radix_tree_preloads);
/*
* Update the allocation stack trace as this is more useful
* for debugging.
-@@ -240,6 +241,7 @@ radix_tree_node_free(struct radix_tree_n
+@@ -242,6 +243,7 @@ radix_tree_node_free(struct radix_tree_n
call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
@@ -62,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
-@@ -305,6 +307,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
+@@ -310,6 +312,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
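The conversion above trades the implicit preemption safety of this_cpu_ptr() for an explicit get_cpu_var()/put_cpu_var() pair, so the per-CPU preload pool stays pinned to one CPU for exactly the span that manipulates it. A minimal sketch of the pattern:

	struct radix_tree_preload *rtp;

	rtp = &get_cpu_var(radix_tree_preloads);	/* disables preemption */
	/* ... take one preloaded node off the per-CPU list ... */
	put_cpu_var(radix_tree_preloads);		/* enables preemption again */
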
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index 299e0bbaa9e8d..4acd883f210f9 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -868,28 +868,27 @@ static __u32 get_reg(struct fast_pool *f
+@@ -888,28 +888,27 @@ static __u32 get_reg(struct fast_pool *f
return *(ptr + f->reg_idx++);
}
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
add_interrupt_bench(cycles);
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
-@@ -63,6 +63,7 @@ struct irq_desc {
+@@ -61,6 +61,7 @@ struct irq_desc {
unsigned int irqs_unhandled;
atomic_t threads_handled;
int threads_handled_last;
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/include/linux/random.h
+++ b/include/linux/random.h
-@@ -11,7 +11,7 @@
+@@ -20,7 +20,7 @@ struct random_ready_callback {
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
@@ -73,34 +73,34 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
extern void get_random_bytes(void *buf, int nbytes);
- extern void get_random_bytes_arch(void *buf, int nbytes);
+ extern int add_random_ready_callback(struct random_ready_callback *rdy);
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
-@@ -133,6 +133,8 @@ void __irq_wake_thread(struct irq_desc *
- irqreturn_t
- handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
+@@ -134,6 +134,8 @@ void __irq_wake_thread(struct irq_desc *
+
+ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
+ struct pt_regs *regs = get_irq_regs();
+ u64 ip = regs ? instruction_pointer(regs) : 0;
irqreturn_t retval = IRQ_NONE;
unsigned int flags = 0, irq = desc->irq_data.irq;
-
-@@ -173,7 +175,11 @@ handle_irq_event_percpu(struct irq_desc
+ struct irqaction *action = desc->action;
+@@ -175,7 +177,11 @@ irqreturn_t handle_irq_event_percpu(stru
action = action->next;
} while (action);
- add_interrupt_randomness(irq, flags);
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+ add_interrupt_randomness(irq, flags, ip);
-+#else
++#ifdef CONFIG_PREEMPT_RT_FULL
+ desc->random_ip = ip;
++#else
++ add_interrupt_randomness(irq, flags, ip);
+#endif
if (!noirqdebug)
- note_interrupt(irq, desc, retval);
+ note_interrupt(desc, retval);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -1012,6 +1012,12 @@ static int irq_thread(void *data)
+@@ -1045,6 +1045,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
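The idea of the patch: add_interrupt_randomness() takes locks which become sleeping locks on RT, so it must not run from the primary (hard-irq) handler. The handler therefore only stashes the instruction pointer in desc->random_ip and the threaded handler feeds the pool later. The thread-side lines added by the final hunk are elided above; per the v4.1 version of this patch they look roughly like:

	#ifdef CONFIG_PREEMPT_RT_FULL
		migrate_disable();
		add_interrupt_randomness(action->irq, 0,
				desc->random_ip ^ (unsigned long) action);
		migrate_enable();
	#endif
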
diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
index ff4adc3db39b3..fc1c537d45765 100644
--- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -16,16 +16,16 @@ Tested-by: Mike Galbraith <bitbucket@online.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/rcu/tree.c | 110 +++++++++++++++++++++++++++++++++---
+ kernel/rcu/tree.c | 110 ++++++++++++++++++++++++++++++---
kernel/rcu/tree.h | 5 -
- kernel/rcu/tree_plugin.h | 141 +++++------------------------------------------
- 3 files changed, 116 insertions(+), 140 deletions(-)
+ kernel/rcu/tree_plugin.h | 153 ++++++-----------------------------------------
+ 3 files changed, 122 insertions(+), 146 deletions(-)
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -56,6 +56,11 @@
#include <linux/random.h>
- #include <linux/ftrace_event.h>
+ #include <linux/trace_events.h>
#include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "tree.h"
#include "rcu.h"
-@@ -2882,18 +2887,17 @@ static void
+@@ -2956,18 +2961,17 @@ static void
/*
* Do RCU core processing for the current CPU.
*/
@@ -56,9 +56,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -2905,18 +2909,105 @@ static void invoke_rcu_callbacks(struct
+@@ -2979,18 +2983,105 @@ static void invoke_rcu_callbacks(struct
{
- if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+ if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
- if (likely(!rsp->boost)) {
- rcu_do_batch(rsp, rdp);
@@ -168,9 +168,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4148,7 +4239,6 @@ void __init rcu_init(void)
- rcu_init_one(&rcu_bh_state, &rcu_bh_data);
- rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+@@ -4609,7 +4700,6 @@ void __init rcu_init(void)
+ if (dump_tree)
+ rcu_dump_rcu_node_tree(&rcu_sched_state);
__rcu_init_preempt();
- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
@@ -178,8 +178,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We don't need protection against CPU-hotplug here because
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -530,12 +530,10 @@ extern struct rcu_state rcu_preempt_stat
- DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
+@@ -563,12 +563,10 @@ extern struct rcu_state rcu_bh_state;
+ extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef RCU_TREE_NONCORE
-@@ -554,10 +552,9 @@ void call_rcu(struct rcu_head *head, voi
+@@ -588,10 +586,9 @@ void call_rcu(struct rcu_head *head, rcu
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -205,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* #ifdef CONFIG_RCU_BOOST */
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -24,28 +24,20 @@
+@@ -24,26 +24,10 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
@@ -220,23 +220,34 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "../locking/rtmutex_common.h"
-+#endif /* #ifdef CONFIG_RCU_BOOST */
-+
- /*
- * Control variables for per-CPU and per-rcu_node kthreads. These
- * handle all flavors of RCU.
- */
+-/*
+- * Control variables for per-CPU and per-rcu_node kthreads. These
+- * handle all flavors of RCU.
+- */
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
- DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
- DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
- DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
--#endif /* #ifdef CONFIG_RCU_BOOST */
+-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
+-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
+-DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
+ #else /* #ifdef CONFIG_RCU_BOOST */
+
+ /*
+@@ -56,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
+
+ #endif /* #else #ifdef CONFIG_RCU_BOOST */
+
++/*
++ * Control variables for per-CPU and per-rcu_node kthreads. These
++ * handle all flavors of RCU.
++ */
++DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
++DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
++DEFINE_PER_CPU(char, rcu_cpu_has_work);
++
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
-@@ -497,15 +489,6 @@ static void rcu_preempt_check_callbacks(
+@@ -646,15 +638,6 @@ static void rcu_preempt_check_callbacks(
t->rcu_read_unlock_special.b.need_qs = true;
}
@@ -244,7 +255,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
-static void rcu_preempt_do_callbacks(void)
-{
-- rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
+- rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
-}
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
@@ -252,7 +263,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Queue a preemptible-RCU callback for invocation after a grace period.
*/
-@@ -940,6 +923,19 @@ void exit_rcu(void)
+@@ -931,6 +914,19 @@ void exit_rcu(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -272,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_RCU_BOOST
#include "../locking/rtmutex_common.h"
-@@ -971,16 +967,6 @@ static void rcu_initiate_boost_trace(str
+@@ -962,16 +958,6 @@ static void rcu_initiate_boost_trace(str
#endif /* #else #ifdef CONFIG_RCU_TRACE */
@@ -289,7 +300,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1126,23 +1112,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1116,23 +1102,6 @@ static void rcu_initiate_boost(struct rc
}
/*
@@ -313,7 +324,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
*/
-@@ -1197,67 +1166,6 @@ static int rcu_spawn_one_boost_kthread(s
+@@ -1187,67 +1156,6 @@ static int rcu_spawn_one_boost_kthread(s
return 0;
}
@@ -381,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
-@@ -1287,26 +1195,12 @@ static void rcu_boost_kthread_setaffinit
+@@ -1277,26 +1185,12 @@ static void rcu_boost_kthread_setaffinit
free_cpumask_var(cm);
}
@@ -408,7 +419,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_for_each_leaf_node(rcu_state_p, rnp)
(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
-@@ -1329,11 +1223,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1319,11 +1213,6 @@ static void rcu_initiate_boost(struct rc
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
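Most of the churn in this refresh is v4.4 moving the kthread control variables in tree_plugin.h out of the RCU_BOOST-only block, which the patch now has to reshuffle instead of simply deleting. The substance is unchanged: rcu_process_callbacks() loses its softirq signature and runs from a per-CPU smpboot kthread, so RCU core processing is preemptible on RT. A condensed sketch of the kthread body (the real one also counts loops and spins a few rounds before going back to sleep):

	static void rcu_cpu_kthread(unsigned int cpu)
	{
		unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
		char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);

		*statusp = RCU_KTHREAD_RUNNING;
		local_irq_disable();
		work = *workp;
		*workp = 0;
		local_irq_enable();
		if (work)
			rcu_process_callbacks();
		*statusp = RCU_KTHREAD_WAITING;
	}
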
diff --git a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
index 12053fabce993..4f5760e1dee12 100644
--- a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -13,12 +13,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -637,7 +637,7 @@ config RCU_FANOUT_EXACT
+@@ -614,7 +614,7 @@ config RCU_FANOUT_LEAF
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
-- depends on NO_HZ_COMMON && SMP
-+ depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL
+- depends on NO_HZ_COMMON && SMP && RCU_EXPERT
++ depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL
default n
help
This option permits CPUs to enter dynticks-idle state even if
diff --git a/patches/rcu-make-RCU_BOOST-default-on-RT.patch b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
index 71fa55a7fa776..72d8a1dd7df81 100644
--- a/patches/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -15,10 +15,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -664,7 +664,7 @@ config TREE_RCU_TRACE
+@@ -641,7 +641,7 @@ config TREE_RCU_TRACE
config RCU_BOOST
bool "Enable RCU priority boosting"
- depends on RT_MUTEXES && PREEMPT_RCU
+ depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
- default n
+ default y if PREEMPT_RT_FULL
help
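With both hunks applied the entry reads:

	config RCU_BOOST
		bool "Enable RCU priority boosting"
		depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
		default y if PREEMPT_RT_FULL

i.e. the RCU_EXPERT dependency is new upstream v4.4 context; the RT delta remains the default value only.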
diff --git a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index 1d3b4a2c4fc12..68b0992c89f1a 100644
--- a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -24,15 +24,15 @@ Link: http://lkml.kernel.org/r/20111005185938.GA20403@linux.vnet.ibm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/rcupdate.h | 25 +++++++++++++++++++++++++
+ include/linux/rcupdate.h | 23 +++++++++++++++++++++++
include/linux/rcutree.h | 18 ++++++++++++++++--
kernel/rcu/tree.c | 16 ++++++++++++++++
kernel/rcu/update.c | 2 ++
- 4 files changed, 59 insertions(+), 2 deletions(-)
+ 4 files changed, 57 insertions(+), 2 deletions(-)
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -167,6 +167,9 @@ void call_rcu(struct rcu_head *head,
+@@ -169,6 +169,9 @@ void call_rcu(struct rcu_head *head,
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -42,29 +42,27 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -190,6 +193,7 @@ void call_rcu(struct rcu_head *head,
+@@ -192,6 +195,7 @@ void call_rcu(struct rcu_head *head,
*/
void call_rcu_bh(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+ rcu_callback_t func);
+#endif
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
-@@ -296,7 +300,13 @@ static inline int rcu_preempt_depth(void
+@@ -330,7 +334,11 @@ static inline int rcu_preempt_depth(void
void rcu_init(void);
void rcu_end_inkernel_boot(void);
void rcu_sched_qs(void);
-+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void rcu_bh_qs(void) { }
+#else
void rcu_bh_qs(void);
+#endif
-+
void rcu_check_callbacks(int user);
struct notifier_block;
- void rcu_idle_enter(void);
-@@ -470,7 +480,14 @@ extern struct lockdep_map rcu_callback_m
+ int rcu_cpu_notify(struct notifier_block *self,
+@@ -496,7 +504,14 @@ extern struct lockdep_map rcu_callback_m
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
@@ -79,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -997,10 +1014,14 @@ static inline void rcu_read_unlock(void)
+@@ -944,10 +959,14 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
@@ -88,21 +86,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#else
__acquire(RCU_BH);
rcu_lock_acquire(&rcu_bh_lock_map);
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_lock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_lock_bh() used illegally while idle");
+#endif
}
/*
-@@ -1010,10 +1031,14 @@ static inline void rcu_read_lock_bh(void
+@@ -957,10 +976,14 @@ static inline void rcu_read_lock_bh(void
*/
static inline void rcu_read_unlock_bh(void)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
+ rcu_read_unlock();
+#else
- rcu_lockdep_assert(rcu_is_watching(),
- "rcu_read_unlock_bh() used illegally while idle");
+ RCU_LOCKDEP_WARN(!rcu_is_watching(),
+ "rcu_read_unlock_bh() used illegally while idle");
rcu_lock_release(&rcu_bh_lock_map);
__release(RCU_BH);
+#endif
@@ -111,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
-@@ -46,7 +46,11 @@ static inline void rcu_virt_note_context
+@@ -44,7 +44,11 @@ static inline void rcu_virt_note_context
rcu_note_context_switch();
}
@@ -123,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void synchronize_sched_expedited(void);
void synchronize_rcu_expedited(void);
-@@ -74,7 +78,11 @@ static inline void synchronize_rcu_bh_ex
+@@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_ex
}
void rcu_barrier(void);
@@ -147,8 +145,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
- void exit_rcu(void);
-@@ -100,6 +106,14 @@ extern int rcu_scheduler_active __read_m
+ void rcu_idle_enter(void);
+@@ -105,6 +111,14 @@ extern int rcu_scheduler_active __read_m
bool rcu_is_watching(void);
@@ -165,23 +163,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* __LINUX_RCUTREE_H */
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -220,6 +220,7 @@ void rcu_sched_qs(void)
+@@ -266,6 +266,7 @@ void rcu_sched_qs(void)
}
}
+#ifndef CONFIG_PREEMPT_RT_FULL
void rcu_bh_qs(void)
{
- if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
-@@ -229,6 +230,7 @@ void rcu_bh_qs(void)
- __this_cpu_write(rcu_bh_data.passed_quiesce, 1);
+ if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
+@@ -275,6 +276,7 @@ void rcu_bh_qs(void)
+ __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
}
+#endif
static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-@@ -404,6 +406,7 @@ unsigned long rcu_batches_completed_sche
+@@ -459,6 +461,7 @@ unsigned long rcu_batches_completed_sche
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
@@ -189,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Return the number of RCU BH batches completed thus far for debug & stats.
*/
-@@ -431,6 +434,13 @@ void rcu_bh_force_quiescent_state(void)
+@@ -486,6 +489,13 @@ void rcu_bh_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
@@ -203,7 +201,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Force a quiescent state for RCU-sched.
*/
-@@ -3040,6 +3050,7 @@ void call_rcu_sched(struct rcu_head *hea
+@@ -3114,6 +3124,7 @@ void call_rcu_sched(struct rcu_head *hea
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -211,7 +209,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
-@@ -3048,6 +3059,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -3122,6 +3133,7 @@ void call_rcu_bh(struct rcu_head *head,
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -219,7 +217,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for lazy invocation after a grace period.
-@@ -3139,6 +3151,7 @@ void synchronize_sched(void)
+@@ -3213,6 +3225,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -227,7 +225,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -3165,6 +3178,7 @@ void synchronize_rcu_bh(void)
+@@ -3239,6 +3252,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -235,7 +233,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* get_state_synchronize_rcu - Snapshot current RCU state
-@@ -3677,6 +3691,7 @@ static void _rcu_barrier(struct rcu_stat
+@@ -4101,6 +4115,7 @@ static void _rcu_barrier(struct rcu_stat
mutex_unlock(&rsp->barrier_mutex);
}
@@ -243,7 +241,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -3685,6 +3700,7 @@ void rcu_barrier_bh(void)
+@@ -4109,6 +4124,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -253,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
-@@ -227,6 +227,7 @@ int rcu_read_lock_held(void)
+@@ -276,6 +276,7 @@ int rcu_read_lock_held(void)
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -261,7 +259,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
-@@ -253,6 +254,7 @@ int rcu_read_lock_bh_held(void)
+@@ -302,6 +303,7 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
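The net effect on RT: the rcu_bh flavor ceases to exist as a separate state machine, and every *_bh entry point is either compiled out (the tree.c/update.c hunks above) or aliased to the preempt flavor in the headers. The block added to rcutree.h by the `@@ -105,6 +111,14` hunk is elided above; sketched from the v4.1 version of this patch, it is along the lines of:

	#ifdef CONFIG_PREEMPT_RT_FULL
	static inline void rcu_bh_force_quiescent_state(void)
	{
		rcu_force_quiescent_state();
	}
	#else
	void rcu_bh_force_quiescent_state(void);
	#endif
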
diff --git a/patches/rcu-more-swait-conversions.patch b/patches/rcu-more-swait-conversions.patch
index 3ec0dc8b7916e..1b7e5657972b4 100644
--- a/patches/rcu-more-swait-conversions.patch
+++ b/patches/rcu-more-swait-conversions.patch
@@ -16,15 +16,15 @@ Merged Steven's
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/rcu/tree.c | 8 ++++----
- kernel/rcu/tree.h | 7 ++++---
+ kernel/rcu/tree.c | 16 ++++++++--------
+ kernel/rcu/tree.h | 9 +++++----
kernel/rcu/tree_plugin.h | 18 +++++++++---------
- 3 files changed, 17 insertions(+), 16 deletions(-)
+ 3 files changed, 22 insertions(+), 21 deletions(-)
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -1567,7 +1567,7 @@ static void rcu_gp_kthread_wake(struct r
- !ACCESS_ONCE(rsp->gp_flags) ||
+@@ -1633,7 +1633,7 @@ static void rcu_gp_kthread_wake(struct r
+ !READ_ONCE(rsp->gp_flags) ||
!rsp->gp_kthread)
return;
- wake_up(&rsp->gp_wq);
@@ -32,53 +32,82 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2008,7 +2008,7 @@ static int __noreturn rcu_gp_kthread(voi
- ACCESS_ONCE(rsp->gpnum),
+@@ -2098,7 +2098,7 @@ static int __noreturn rcu_gp_kthread(voi
+ READ_ONCE(rsp->gpnum),
TPS("reqwait"));
rsp->gp_state = RCU_GP_WAIT_GPS;
- wait_event_interruptible(rsp->gp_wq,
+ swait_event_interruptible(rsp->gp_wq,
- ACCESS_ONCE(rsp->gp_flags) &
+ READ_ONCE(rsp->gp_flags) &
RCU_GP_FLAG_INIT);
- /* Locking provides needed memory barrier. */
-@@ -2037,7 +2037,7 @@ static int __noreturn rcu_gp_kthread(voi
- ACCESS_ONCE(rsp->gpnum),
+ rsp->gp_state = RCU_GP_DONE_GPS;
+@@ -2128,7 +2128,7 @@ static int __noreturn rcu_gp_kthread(voi
+ READ_ONCE(rsp->gpnum),
TPS("fqswait"));
rsp->gp_state = RCU_GP_WAIT_FQS;
- ret = wait_event_interruptible_timeout(rsp->gp_wq,
+ ret = swait_event_interruptible_timeout(rsp->gp_wq,
- ((gf = ACCESS_ONCE(rsp->gp_flags)) &
- RCU_GP_FLAG_FQS) ||
- (!ACCESS_ONCE(rnp->qsmask) &&
-@@ -4049,7 +4049,7 @@ static void __init rcu_init_one(struct r
+ rcu_gp_fqs_check_wake(rsp, &gf), j);
+ rsp->gp_state = RCU_GP_DOING_FQS;
+ /* Locking provides needed memory barriers. */
+@@ -3550,7 +3550,7 @@ static void __rcu_report_exp_rnp(struct
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ if (wake) {
+ smp_mb(); /* EGP done before wake_up(). */
+- wake_up(&rsp->expedited_wq);
++ swait_wake(&rsp->expedited_wq);
+ }
+ break;
+ }
+@@ -3807,7 +3807,7 @@ static void synchronize_sched_expedited_
+ jiffies_start = jiffies;
+
+ for (;;) {
+- ret = wait_event_interruptible_timeout(
++ ret = swait_event_interruptible_timeout(
+ rsp->expedited_wq,
+ sync_rcu_preempt_exp_done(rnp_root),
+ jiffies_stall);
+@@ -3815,7 +3815,7 @@ static void synchronize_sched_expedited_
+ return;
+ if (ret < 0) {
+ /* Hit a signal, disable CPU stall warnings. */
+- wait_event(rsp->expedited_wq,
++ swait_event(rsp->expedited_wq,
+ sync_rcu_preempt_exp_done(rnp_root));
+ return;
+ }
+@@ -4483,8 +4483,8 @@ static void __init rcu_init_one(struct r
}
}
- init_waitqueue_head(&rsp->gp_wq);
+- init_waitqueue_head(&rsp->expedited_wq);
+ init_swait_head(&rsp->gp_wq);
++ init_swait_head(&rsp->expedited_wq);
rnp = rsp->level[rcu_num_lvls - 1];
for_each_possible_cpu(i) {
while (i > rnp->grphi)
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -27,6 +27,7 @@
- #include <linux/threads.h>
+@@ -28,6 +28,7 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
+ #include <linux/stop_machine.h>
+#include <linux/wait-simple.h>
/*
* Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
-@@ -210,7 +211,7 @@ struct rcu_node {
+@@ -241,7 +242,7 @@ struct rcu_node {
+ /* Refused to boost: not sure why, though. */
/* This can happen due to race conditions. */
- #endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_NOCB_CPU
- wait_queue_head_t nocb_gp_wq[2];
+ struct swait_head nocb_gp_wq[2];
/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
int need_future_gp[2];
-@@ -349,7 +350,7 @@ struct rcu_data {
+@@ -393,7 +394,7 @@ struct rcu_data {
atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */
struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
struct rcu_head **nocb_follower_tail;
@@ -87,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct task_struct *nocb_kthread;
int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */
-@@ -438,7 +439,7 @@ struct rcu_state {
+@@ -472,7 +473,7 @@ struct rcu_state {
unsigned long gpnum; /* Current gp number. */
unsigned long completed; /* # of last completed gp. */
struct task_struct *gp_kthread; /* Task for grace periods. */
@@ -96,9 +125,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
short gp_flags; /* Commands for GP task. */
short gp_state; /* GP kthread sleep state. */
+@@ -504,7 +505,7 @@ struct rcu_state {
+ atomic_long_t expedited_workdone3; /* # done by others #3. */
+ atomic_long_t expedited_normal; /* # fallbacks to normal. */
+ atomic_t expedited_need_qs; /* # CPUs left to check in. */
+- wait_queue_head_t expedited_wq; /* Wait for check-ins. */
++ struct swait_head expedited_wq; /* Wait for check-ins. */
+ int ncpus_snap; /* # CPUs seen last time. */
+
+ unsigned long jiffies_force_qs; /* Time at which to invoke */
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -1864,7 +1864,7 @@ early_param("rcu_nocb_poll", parse_rcu_n
+@@ -1830,7 +1830,7 @@ early_param("rcu_nocb_poll", parse_rcu_n
*/
static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
@@ -107,7 +145,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1882,8 +1882,8 @@ static void rcu_nocb_gp_set(struct rcu_n
+@@ -1848,8 +1848,8 @@ static void rcu_nocb_gp_set(struct rcu_n
static void rcu_init_one_nocb(struct rcu_node *rnp)
{
@@ -118,34 +156,34 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifndef CONFIG_RCU_NOCB_CPU_ALL
-@@ -1908,7 +1908,7 @@ static void wake_nocb_leader(struct rcu_
- if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
+@@ -1874,7 +1874,7 @@ static void wake_nocb_leader(struct rcu_
+ if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
/* Prior smp_mb__after_atomic() orders against prior enqueue. */
- ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
+ WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
- wake_up(&rdp_leader->nocb_wq);
+ swait_wake(&rdp_leader->nocb_wq);
}
}
-@@ -2121,7 +2121,7 @@ static void rcu_nocb_wait_gp(struct rcu_
+@@ -2087,7 +2087,7 @@ static void rcu_nocb_wait_gp(struct rcu_
*/
trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
for (;;) {
- wait_event_interruptible(
+ swait_event_interruptible(
rnp->nocb_gp_wq[c & 0x1],
- (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
+ (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
if (likely(d))
-@@ -2149,7 +2149,7 @@ static void nocb_leader_wait(struct rcu_
+@@ -2115,7 +2115,7 @@ static void nocb_leader_wait(struct rcu_
/* Wait for callbacks to appear. */
if (!rcu_nocb_poll) {
trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
- wait_event_interruptible(my_rdp->nocb_wq,
+ swait_event_interruptible(my_rdp->nocb_wq,
- !ACCESS_ONCE(my_rdp->nocb_leader_sleep));
+ !READ_ONCE(my_rdp->nocb_leader_sleep));
/* Memory barrier handled by smp_mb() calls below and repoll. */
} else if (firsttime) {
-@@ -2224,7 +2224,7 @@ static void nocb_leader_wait(struct rcu_
+@@ -2190,7 +2190,7 @@ static void nocb_leader_wait(struct rcu_
* List was empty, wake up the follower.
* Memory barriers supplied by atomic_long_add().
*/
@@ -154,16 +192,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2245,7 +2245,7 @@ static void nocb_follower_wait(struct rc
+@@ -2211,7 +2211,7 @@ static void nocb_follower_wait(struct rc
if (!rcu_nocb_poll) {
trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
"FollowerSleep");
- wait_event_interruptible(rdp->nocb_wq,
+ swait_event_interruptible(rdp->nocb_wq,
- ACCESS_ONCE(rdp->nocb_follower_head));
+ READ_ONCE(rdp->nocb_follower_head));
} else if (firsttime) {
/* Don't drown trace log with "Poll"! */
-@@ -2404,7 +2404,7 @@ void __init rcu_init_nohz(void)
+@@ -2370,7 +2370,7 @@ void __init rcu_init_nohz(void)
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
rdp->nocb_tail = &rdp->nocb_head;
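All conversions above are mechanical: wait_queue_head_t becomes struct swait_head and the wait/wake calls grow the s-prefix; new in this refresh is v4.4's expedited_wq, which gets the same treatment. The simple waitqueue API deliberately mirrors the stock one, e.g. (names as used in the hunks above):

	init_swait_head(&rsp->gp_wq);
	/* sleeper */
	swait_event_interruptible(rsp->gp_wq,
			READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_INIT);
	/* waker: bounded work under a raw lock, hence usable on RT */
	swait_wake(&rsp->gp_wq);
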
diff --git a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index b1c160a14230c..017e4f31aa69d 100644
--- a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -225,7 +225,12 @@ static void rcu_preempt_qs(void);
+@@ -271,7 +271,12 @@ static void rcu_preempt_qs(void);
void rcu_bh_qs(void)
{
diff --git a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
index dc5416f0b255a..15422647c7ffb 100644
--- a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
+++ b/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -106,9 +106,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -107,9 +107,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
*/
void pin_current_cpu(void)
{
diff --git a/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
index a89c4d0733237..206dfeb9786b6 100644
--- a/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+++ b/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
-@@ -290,6 +290,30 @@ unsigned long arch_randomize_brk(struct
+@@ -319,6 +319,30 @@ unsigned long arch_randomize_brk(struct
}
#ifdef CONFIG_MMU
diff --git a/patches/relay-fix-timer-madness.patch b/patches/relay-fix-timer-madness.patch
index e872958b3a1c8..c5d30f9621204 100644
--- a/patches/relay-fix-timer-madness.patch
+++ b/patches/relay-fix-timer-madness.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/relay.c
+++ b/kernel/relay.c
-@@ -339,6 +339,10 @@ static void wakeup_readers(unsigned long
+@@ -336,6 +336,10 @@ static void wakeup_readers(unsigned long
{
struct rchan_buf *buf = (struct rchan_buf *)data;
wake_up_interruptible(&buf->read_wait);
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -356,6 +360,7 @@ static void __relay_reset(struct rchan_b
+@@ -353,6 +357,7 @@ static void __relay_reset(struct rchan_b
init_waitqueue_head(&buf->read_wait);
kref_init(&buf->kref);
setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else
del_timer_sync(&buf->timer);
-@@ -739,15 +744,6 @@ size_t relay_switch_subbuf(struct rchan_
+@@ -736,15 +741,6 @@ size_t relay_switch_subbuf(struct rchan_
else
buf->early_bytes += buf->chan->subbuf_size -
buf->padding[old_subbuf];
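Only the hunk offsets move here. The patch itself stops arming the relay timer from relay_switch_subbuf() (which may run from hard-irq context, where the timer base lock is a sleeping lock on RT) and falls back to dumb 1-jiffy polling instead. The added lines hidden by the context above are, per the v4.1 version:

	static void wakeup_readers(unsigned long data)
	{
		struct rchan_buf *buf = (struct rchan_buf *)data;
		wake_up_interruptible(&buf->read_wait);
		/*
		 * Stupid polling for now:
		 */
		mod_timer(&buf->timer, jiffies + 1);
	}
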
diff --git a/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch b/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
index 89c0f9cb51045..beec55cc03094 100644
--- a/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
+++ b/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
@@ -74,7 +74,7 @@ This issue was first reported in:
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
-@@ -213,8 +213,6 @@ int __cpu_disable(void)
+@@ -230,8 +230,6 @@ int __cpu_disable(void)
flush_cache_louis();
local_flush_tlb_all();
@@ -83,7 +83,7 @@ This issue was first reported in:
return 0;
}
-@@ -230,6 +228,9 @@ void __cpu_die(unsigned int cpu)
+@@ -247,6 +245,9 @@ void __cpu_die(unsigned int cpu)
pr_err("CPU%u: cpu didn't die\n", cpu);
return;
}
diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch
index 0f7e02028131d..e250c1329b85b 100644
--- a/patches/rt-add-rt-locks.patch
+++ b/patches/rt-add-rt-locks.patch
@@ -13,25 +13,26 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/locallock.h | 6
include/linux/mutex.h | 20 +
include/linux/mutex_rt.h | 84 ++++++
- include/linux/rtmutex.h | 27 +-
- include/linux/rwlock_rt.h | 99 ++++++++
+ include/linux/rtmutex.h | 29 +-
+ include/linux/rwlock_rt.h | 99 +++++++
include/linux/rwlock_types_rt.h | 33 ++
include/linux/rwsem.h | 6
- include/linux/rwsem_rt.h | 140 +++++++++++
- include/linux/sched.h | 5
+ include/linux/rwsem_rt.h | 152 ++++++++++++
+ include/linux/sched.h | 19 +
include/linux/spinlock.h | 12
include/linux/spinlock_api_smp.h | 4
- include/linux/spinlock_rt.h | 173 ++++++++++++++
+ include/linux/spinlock_rt.h | 173 +++++++++++++
include/linux/spinlock_types.h | 11
include/linux/spinlock_types_rt.h | 51 ++++
- kernel/futex.c | 5
+ kernel/futex.c | 10
kernel/locking/Makefile | 9
- kernel/locking/rt.c | 461 ++++++++++++++++++++++++++++++++++++++
- kernel/locking/rtmutex.c | 395 ++++++++++++++++++++++++++++++--
- kernel/locking/rtmutex_common.h | 11
+ kernel/locking/rt.c | 476 ++++++++++++++++++++++++++++++++++++++
+ kernel/locking/rtmutex.c | 423 +++++++++++++++++++++++++++++++--
+ kernel/locking/rtmutex_common.h | 14 +
kernel/locking/spinlock.c | 7
kernel/locking/spinlock_debug.c | 5
- 22 files changed, 1525 insertions(+), 43 deletions(-)
+ kernel/sched/core.c | 7
+ 23 files changed, 1598 insertions(+), 56 deletions(-)
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -203,7 +204,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
-@@ -18,6 +18,10 @@
+@@ -13,11 +13,15 @@
+ #define __LINUX_RT_MUTEX_H
+
+ #include <linux/linkage.h>
+-#include <linux/rbtree.h>
+ #include <linux/spinlock_types_raw.h>
++#include <linux/rbtree.h>
extern int max_lock_depth; /* for sysctl */
@@ -423,7 +430,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* _LINUX_RWSEM_H */
--- /dev/null
+++ b/include/linux/rwsem_rt.h
-@@ -0,0 +1,140 @@
+@@ -0,0 +1,152 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
+
@@ -477,18 +484,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ __rt_init_rwsem((sem), #sem, &__key); \
+} while (0)
+
-+extern void rt_down_write(struct rw_semaphore *rwsem);
++extern void rt_down_write(struct rw_semaphore *rwsem);
+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
+extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
-+ struct lockdep_map *nest);
-+extern void rt_down_read(struct rw_semaphore *rwsem);
++ struct lockdep_map *nest);
++extern void rt__down_read(struct rw_semaphore *rwsem);
++extern void rt_down_read(struct rw_semaphore *rwsem);
+extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
++extern int rt__down_read_trylock(struct rw_semaphore *rwsem);
+extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
-+extern void __rt_up_read(struct rw_semaphore *rwsem);
-+extern void rt_up_read(struct rw_semaphore *rwsem);
-+extern void rt_up_write(struct rw_semaphore *rwsem);
-+extern void rt_downgrade_write(struct rw_semaphore *rwsem);
++extern void __rt_up_read(struct rw_semaphore *rwsem);
++extern void rt_up_read(struct rw_semaphore *rwsem);
++extern void rt_up_write(struct rw_semaphore *rwsem);
++extern void rt_downgrade_write(struct rw_semaphore *rwsem);
+
+#define init_rwsem(sem) rt_init_rwsem(sem)
+#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
@@ -499,11 +508,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ return !RB_EMPTY_ROOT(&sem->lock.waiters);
+}
+
++static inline void __down_read(struct rw_semaphore *sem)
++{
++ rt__down_read(sem);
++}
++
+static inline void down_read(struct rw_semaphore *sem)
+{
+ rt_down_read(sem);
+}
+
++static inline int __down_read_trylock(struct rw_semaphore *sem)
++{
++ return rt__down_read_trylock(sem);
++}
++
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+ return rt_down_read_trylock(sem);
@@ -566,7 +585,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -302,6 +302,11 @@ extern char ___assert_task_state[1 - 2*!
+@@ -311,6 +311,11 @@ extern char ___assert_task_state[1 - 2*!
#endif
@@ -578,9 +597,30 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Task command name length */
#define TASK_COMM_LEN 16
+@@ -968,8 +973,18 @@ struct wake_q_head {
+ struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+
+ extern void wake_q_add(struct wake_q_head *head,
+- struct task_struct *task);
+-extern void wake_up_q(struct wake_q_head *head);
++ struct task_struct *task);
++extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
++
++static inline void wake_up_q(struct wake_q_head *head)
++{
++ __wake_up_q(head, false);
++}
++
++static inline void wake_up_q_sleeper(struct wake_q_head *head)
++{
++ __wake_up_q(head, true);
++}
+
+ /*
+ * sched-domains (multiprocessor balancing) declarations:
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
-@@ -281,7 +281,11 @@ static inline void do_raw_spin_unlock(ra
+@@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(ra
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
@@ -593,7 +633,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
-@@ -292,6 +296,10 @@ static inline void do_raw_spin_unlock(ra
+@@ -282,6 +286,10 @@ static inline void do_raw_spin_unlock(ra
# include <linux/spinlock_api_up.h>
#endif
@@ -604,7 +644,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-@@ -426,4 +434,6 @@ extern int _atomic_dec_and_lock(atomic_t
+@@ -416,4 +424,6 @@ extern int _atomic_dec_and_lock(atomic_t
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
@@ -874,7 +914,33 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2613,10 +2613,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -1212,6 +1212,7 @@ static int wake_futex_pi(u32 __user *uad
+ struct futex_pi_state *pi_state = this->pi_state;
+ u32 uninitialized_var(curval), newval;
+ WAKE_Q(wake_q);
++ WAKE_Q(wake_sleeper_q);
+ bool deboost;
+ int ret = 0;
+
+@@ -1268,7 +1269,8 @@ static int wake_futex_pi(u32 __user *uad
+
+ raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+
+- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
++ &wake_sleeper_q);
+
+ /*
+ * First unlock HB so the waiter does not spin on it once he got woken
+@@ -1278,6 +1280,7 @@ static int wake_futex_pi(u32 __user *uad
+ */
+ spin_unlock(&hb->lock);
+ wake_up_q(&wake_q);
++ wake_up_q_sleeper(&wake_sleeper_q);
+ if (deboost)
+ rt_mutex_adjust_prio(current);
+
+@@ -2709,10 +2712,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -890,8 +956,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+++ b/kernel/locking/Makefile
@@ -1,5 +1,5 @@
--obj-y += mutex.o semaphore.o rwsem.o
-+obj-y += semaphore.o
+-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o
++obj-y += semaphore.o percpu-rwsem.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
@@ -907,21 +973,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -22,8 +26,11 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmute
- obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
+@@ -22,7 +26,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
+endif
- obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
- obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
+ obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
--- /dev/null
+++ b/kernel/locking/rt.c
-@@ -0,0 +1,461 @@
+@@ -0,0 +1,476 @@
+/*
+ * kernel/rt.c
+ *
@@ -1299,7 +1364,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+EXPORT_SYMBOL(rt_down_write_nested_lock);
+
-+int rt_down_read_trylock(struct rw_semaphore *rwsem)
++int rt__down_read_trylock(struct rw_semaphore *rwsem)
+{
+ struct rt_mutex *lock = &rwsem->lock;
+ int ret = 1;
@@ -1314,24 +1379,39 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ else if (!rwsem->read_depth)
+ ret = 0;
+
-+ if (ret) {
++ if (ret)
+ rwsem->read_depth++;
++ return ret;
++
++}
++
++int rt_down_read_trylock(struct rw_semaphore *rwsem)
++{
++ int ret;
++
++ ret = rt__down_read_trylock(rwsem);
++ if (ret)
+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
-+ }
++
+ return ret;
+}
+EXPORT_SYMBOL(rt_down_read_trylock);
+
-+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
++void rt__down_read(struct rw_semaphore *rwsem)
+{
+ struct rt_mutex *lock = &rwsem->lock;
+
-+ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
-+
+ if (rt_mutex_owner(lock) != current)
+ rt_mutex_lock(&rwsem->lock);
+ rwsem->read_depth++;
+}
++EXPORT_SYMBOL(rt__down_read);
++
++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
++{
++ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
++ rt__down_read(rwsem);
++}
+
+void rt_down_read(struct rw_semaphore *rwsem)
+{
@@ -1397,7 +1477,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* See Documentation/locking/rt-mutex-design.txt for details.
*/
-@@ -341,6 +346,14 @@ static bool rt_mutex_cond_detect_deadloc
+@@ -354,6 +359,14 @@ static bool rt_mutex_cond_detect_deadloc
return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
@@ -1412,7 +1492,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -648,13 +661,16 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -661,13 +674,16 @@ static int rt_mutex_adjust_prio_chain(st
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
@@ -1431,7 +1511,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock(&lock->wait_lock);
return 0;
}
-@@ -747,6 +763,25 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -760,6 +776,25 @@ static int rt_mutex_adjust_prio_chain(st
return ret;
}
@@ -1457,8 +1537,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Try to take an rt-mutex
*
-@@ -757,8 +792,9 @@ static int rt_mutex_adjust_prio_chain(st
- * @waiter: The waiter that is queued to the lock's wait list if the
+@@ -770,8 +805,9 @@ static int rt_mutex_adjust_prio_chain(st
+ * @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
*/
-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
@@ -1469,7 +1549,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned long flags;
-@@ -797,8 +833,10 @@ static int try_to_take_rt_mutex(struct r
+@@ -810,8 +846,10 @@ static int try_to_take_rt_mutex(struct r
* If waiter is not the highest priority waiter of
* @lock, give up.
*/
@@ -1481,7 +1561,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We can acquire the lock. Remove the waiter from the
-@@ -816,14 +854,10 @@ static int try_to_take_rt_mutex(struct r
+@@ -829,14 +867,10 @@ static int try_to_take_rt_mutex(struct r
* not need to be dequeued.
*/
if (rt_mutex_has_waiters(lock)) {
@@ -1499,7 +1579,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -872,6 +906,308 @@ static int try_to_take_rt_mutex(struct r
+@@ -885,6 +919,315 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@@ -1512,7 +1592,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+{
+ might_sleep_no_state_check();
+
-+ if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ rt_mutex_deadlock_account_lock(lock, current);
+ else
+ slowfn(lock);
@@ -1521,7 +1601,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
-+ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+ rt_mutex_deadlock_account_unlock(current);
+ else
+ slowfn(lock);
@@ -1620,7 +1700,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ debug_rt_mutex_print_deadlock(&waiter);
+
+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
-+ schedule_rt_mutex(lock);
++ schedule();
+
+ raw_spin_lock(&lock->wait_lock);
+
@@ -1655,12 +1735,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ debug_rt_mutex_free_waiter(&waiter);
+}
+
-+static void wakeup_next_waiter(struct rt_mutex *lock);
++static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q,
++ struct rt_mutex *lock);
+/*
+ * Slow path to release a rt_mutex spin_lock style
+ */
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
++ WAKE_Q(wake_q);
++ WAKE_Q(wake_sleeper_q);
++
+ raw_spin_lock(&lock->wait_lock);
+
+ debug_rt_mutex_unlock(lock);
@@ -1673,9 +1758,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ return;
+ }
+
-+ wakeup_next_waiter(lock);
++ mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
+
+ raw_spin_unlock(&lock->wait_lock);
++ wake_up_q(&wake_q);
++ wake_up_q_sleeper(&wake_sleeper_q);
+
+	/* Undo pi boosting when necessary */
+ rt_mutex_adjust_prio(current);
@@ -1808,16 +1895,27 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Task blocks on lock.
*
-@@ -1021,7 +1357,7 @@ static void wakeup_next_waiter(struct rt
- * long as we hold lock->wait_lock. The waiter task needs to
- * acquire it in order to dequeue the waiter.
- */
-- wake_up_process(waiter->task);
-+ rt_mutex_wake_waiter(waiter);
+@@ -998,6 +1341,7 @@ static int task_blocks_on_rt_mutex(struc
+ * Called with lock->wait_lock held.
+ */
+ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q,
+ struct rt_mutex *lock)
+ {
+ struct rt_mutex_waiter *waiter;
+@@ -1027,7 +1371,10 @@ static void mark_wakeup_next_waiter(stru
+
+ raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+
+- wake_q_add(wake_q, waiter->task);
++ if (waiter->savestate)
++ wake_q_add(wake_sleeper_q, waiter->task);
++ else
++ wake_q_add(wake_q, waiter->task);
}
/*
-@@ -1103,11 +1439,11 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1109,11 +1456,11 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
next_lock = waiter->lock;
@@ -1830,7 +1928,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
next_lock, NULL, task);
}
-@@ -1193,9 +1529,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1199,9 +1546,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
struct rt_mutex_waiter waiter;
int ret = 0;
@@ -1841,7 +1939,66 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_lock(&lock->wait_lock);
-@@ -1554,13 +1888,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -1286,7 +1631,8 @@ static inline int rt_mutex_slowtrylock(s
+ * Return whether the current task needs to undo a potential priority boosting.
+ */
+ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+- struct wake_q_head *wake_q)
++ struct wake_q_head *wake_q,
++ struct wake_q_head *wake_sleeper_q)
+ {
+ raw_spin_lock(&lock->wait_lock);
+
+@@ -1339,7 +1685,7 @@ static bool __sched rt_mutex_slowunlock(
+ *
+ * Queue the next waiter for wakeup once we release the wait_lock.
+ */
+- mark_wakeup_next_waiter(wake_q, lock);
++ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
+
+ raw_spin_unlock(&lock->wait_lock);
+
+@@ -1396,17 +1742,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+ static inline void
+ rt_mutex_fastunlock(struct rt_mutex *lock,
+ bool (*slowfn)(struct rt_mutex *lock,
+- struct wake_q_head *wqh))
++ struct wake_q_head *wqh,
++ struct wake_q_head *wq_sleeper))
+ {
+ WAKE_Q(wake_q);
++ WAKE_Q(wake_sleeper_q);
+
+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+ rt_mutex_deadlock_account_unlock(current);
+
+ } else {
+- bool deboost = slowfn(lock, &wake_q);
++ bool deboost = slowfn(lock, &wake_q, &wake_sleeper_q);
+
+ wake_up_q(&wake_q);
++ wake_up_q_sleeper(&wake_sleeper_q);
+
+ /* Undo pi boosting if necessary: */
+ if (deboost)
+@@ -1543,13 +1892,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+ * required or not.
+ */
+ bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wqh)
++ struct wake_q_head *wqh,
++ struct wake_q_head *wq_sleeper)
+ {
+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
+ rt_mutex_deadlock_account_unlock(current);
+ return false;
+ }
+- return rt_mutex_slowunlock(lock, wqh);
++ return rt_mutex_slowunlock(lock, wqh, wq_sleeper);
+ }
+
+ /**
+@@ -1582,13 +1932,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@@ -1856,7 +2013,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1575,7 +1908,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1603,7 +1952,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@@ -1865,7 +2022,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
rt_mutex_deadlock_account_lock(lock, proxy_owner);
-@@ -1737,3 +2070,25 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -1765,3 +2114,25 @@ int rt_mutex_finish_proxy_lock(struct rt
return ret;
}
@@ -1893,7 +2050,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -49,6 +49,7 @@ struct rt_mutex_waiter {
+@@ -27,6 +27,7 @@ struct rt_mutex_waiter {
struct rb_node pi_tree_entry;
struct task_struct *task;
struct rt_mutex *lock;
@@ -1901,7 +2058,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
-@@ -145,4 +146,14 @@ extern void rt_mutex_adjust_prio(struct
+@@ -113,7 +114,8 @@ extern int rt_mutex_finish_proxy_lock(st
+ struct rt_mutex_waiter *waiter);
+ extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
+ extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
+- struct wake_q_head *wqh);
++ struct wake_q_head *wqh,
++ struct wake_q_head *wq_sleeper);
+ extern void rt_mutex_adjust_prio(struct task_struct *task);
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+@@ -122,4 +124,14 @@ extern void rt_mutex_adjust_prio(struct
# include "rtmutex.h"
#endif
@@ -1980,3 +2147,26 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
+
+#endif
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -547,7 +547,7 @@ void wake_q_add(struct wake_q_head *head
+ head->lastp = &node->next;
+ }
+
+-void wake_up_q(struct wake_q_head *head)
++void __wake_up_q(struct wake_q_head *head, bool sleeper)
+ {
+ struct wake_q_node *node = head->first;
+
+@@ -564,7 +564,10 @@ void wake_up_q(struct wake_q_head *head)
+ * wake_up_process() implies a wmb() to pair with the queueing
+ * in wake_q_add() so as not to miss wakeups.
+ */
+- wake_up_process(task);
++ if (sleeper)
++ wake_up_lock_sleeper(task);
++ else
++ wake_up_process(task);
+ put_task_struct(task);
+ }
+ }
diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch
index 894b7abca8ed5..c802a82edc978 100644
--- a/patches/rt-introduce-cpu-chill.patch
+++ b/patches/rt-introduce-cpu-chill.patch
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* defined(_LINUX_DELAY_H) */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1867,6 +1867,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
+@@ -1776,6 +1776,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
diff --git a/patches/rt-serial-warn-fix.patch b/patches/rt-serial-warn-fix.patch
index cf13097b5e7cf..da9b5306f69c6 100644
--- a/patches/rt-serial-warn-fix.patch
+++ b/patches/rt-serial-warn-fix.patch
@@ -17,9 +17,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
-@@ -80,7 +80,16 @@ static unsigned int skip_txen_test; /* f
- #define DEBUG_INTR(fmt...) do { } while (0)
- #endif
+@@ -58,7 +58,16 @@ static struct uart_driver serial8250_reg
+
+ static unsigned int skip_txen_test; /* force skip of txen test at init time */
-#define PASS_LIMIT 512
+/*
@@ -33,5 +33,5 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+# define PASS_LIMIT 512
+#endif
- #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-
+ #include <asm/serial.h>
+ /*
diff --git a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully
index 745cb63686b3b..4da5c7acc3545 100644
--- a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully
+++ b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully
@@ -21,7 +21,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1575,7 +1575,7 @@ int rt_mutex_start_proxy_lock(struct rt_
+@@ -1617,7 +1617,7 @@ int rt_mutex_start_proxy_lock(struct rt_
ret = 0;
}
diff --git a/patches/rtmutex-Use-chainwalking-control-enum.patch b/patches/rtmutex-Use-chainwalking-control-enum.patch
index 2ecfc54b064ab..dcd12f08db945 100644
--- a/patches/rtmutex-Use-chainwalking-control-enum.patch
+++ b/patches/rtmutex-Use-chainwalking-control-enum.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1007,7 +1007,7 @@ static void noinline __sched rt_spin_lo
+@@ -1020,7 +1020,7 @@ static void noinline __sched rt_spin_lo
__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
pi_unlock(&self->pi_lock);
diff --git a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
index 97c8c8c1cb30b..b279c7249395f 100644
--- a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
#include "rtmutex_common.h"
-@@ -1201,6 +1202,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1221,6 +1222,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
#endif /* PREEMPT_RT_FULL */
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
static inline int
try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
-@@ -1461,7 +1496,8 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1478,7 +1513,8 @@ void rt_mutex_adjust_pi(struct task_stru
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
int ret = 0;
-@@ -1484,6 +1520,12 @@ static int __sched
+@@ -1501,6 +1537,12 @@ static int __sched
break;
}
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1518,13 +1560,90 @@ static void rt_mutex_handle_deadlock(int
+@@ -1535,13 +1577,90 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
struct rt_mutex_waiter waiter;
int ret = 0;
-@@ -1535,6 +1654,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1552,6 +1671,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock(&lock->wait_lock);
return 0;
}
-@@ -1552,13 +1673,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1566,13 +1687,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (likely(!ret))
/* sleep on the mutex */
@@ -226,7 +226,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
/*
-@@ -1682,31 +1813,36 @@ rt_mutex_slowunlock(struct rt_mutex *loc
+@@ -1701,31 +1832,36 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -237,7 +237,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+ enum rtmutex_chainwalk chwalk,
+ struct ww_acquire_ctx *ww_ctx))
{
- if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
rt_mutex_deadlock_account_lock(lock, current);
return 0;
} else
@@ -258,7 +258,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+ struct ww_acquire_ctx *ww_ctx))
{
if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
- likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
rt_mutex_deadlock_account_lock(lock, current);
return 0;
} else
@@ -267,7 +267,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
static inline int
-@@ -1741,7 +1877,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1772,7 +1908,7 @@ void __sched rt_mutex_lock(struct rt_mut
{
might_sleep();
@@ -276,7 +276,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-@@ -1758,7 +1894,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1789,7 +1925,7 @@ int __sched rt_mutex_lock_interruptible(
{
might_sleep();
@@ -285,7 +285,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1771,7 +1907,7 @@ int rt_mutex_timed_futex_lock(struct rt_
+@@ -1802,7 +1938,7 @@ int rt_mutex_timed_futex_lock(struct rt_
might_sleep();
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
@@ -294,7 +294,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
-@@ -1790,7 +1926,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1821,7 +1957,7 @@ int __sched rt_mutex_lock_killable(struc
{
might_sleep();
@@ -303,7 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1814,6 +1950,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1845,6 +1981,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -311,7 +311,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2055,7 +2192,7 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2099,7 +2236,7 @@ int rt_mutex_finish_proxy_lock(struct rt
set_current_state(TASK_INTERRUPTIBLE);
/* sleep on the mutex */
@@ -320,7 +320,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
if (unlikely(ret))
remove_waiter(lock, waiter);
-@@ -2071,24 +2208,88 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2115,24 +2252,88 @@ int rt_mutex_finish_proxy_lock(struct rt
return ret;
}
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index b70f3bb4cdd23..01c724efcfd2a 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1717,6 +1717,16 @@ static int futex_requeue(u32 __user *uad
+@@ -1812,6 +1812,16 @@ static int futex_requeue(u32 __user *uad
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (ret) {
/* -EDEADLK */
this->pi_state = NULL;
-@@ -2576,7 +2586,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2672,7 +2682,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct rt_mutex_waiter rt_waiter;
struct rt_mutex *pi_mutex = NULL;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2635,20 +2645,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2731,20 +2741,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -2657,9 +2702,10 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2753,9 +2798,10 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
/*
-@@ -2672,7 +2718,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2768,7 +2814,8 @@ static int futex_wait_requeue_pi(u32 __u
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
debug_rt_mutex_free_waiter(&rt_waiter);
@@ -143,9 +143,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+
/*
- * We can speed up the acquire/release, if the architecture
- * supports cmpxchg and if there's no debugging state to be set up
-@@ -342,7 +347,8 @@ int max_lock_depth = 1024;
+ * We can speed up the acquire/release, if there's no debugging state to be
+ * set up.
+@@ -355,7 +360,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
@@ -155,7 +155,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -479,7 +485,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -492,7 +498,7 @@ static int rt_mutex_adjust_prio_chain(st
* reached or the state of the chain has changed while we
* dropped the locks.
*/
@@ -164,7 +164,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out_unlock_pi;
/*
-@@ -896,6 +902,23 @@ static int task_blocks_on_rt_mutex(struc
+@@ -909,6 +915,23 @@ static int task_blocks_on_rt_mutex(struc
return -EDEADLK;
raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -188,7 +188,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
-@@ -919,7 +942,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -932,7 +955,7 @@ static int task_blocks_on_rt_mutex(struc
rt_mutex_enqueue_pi(owner, waiter);
__rt_mutex_adjust_prio(owner);
@@ -197,7 +197,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -1011,7 +1034,7 @@ static void remove_waiter(struct rt_mute
+@@ -1017,7 +1040,7 @@ static void remove_waiter(struct rt_mute
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
@@ -206,7 +206,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned long flags;
raw_spin_lock_irqsave(&current->pi_lock, flags);
-@@ -1036,7 +1059,8 @@ static void remove_waiter(struct rt_mute
+@@ -1042,7 +1065,8 @@ static void remove_waiter(struct rt_mute
__rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
@@ -216,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-@@ -1072,7 +1096,7 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1078,7 +1102,7 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
@@ -227,7 +227,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -119,6 +119,8 @@ enum rtmutex_chainwalk {
+@@ -97,6 +97,8 @@ enum rtmutex_chainwalk {
/*
* PI-futex support (proxy locking functions, etc.):
*/
diff --git a/patches/rtmutex-lock-killable.patch b/patches/rtmutex-lock-killable.patch
index 7c8865764abc3..7edd08a80e30f 100644
--- a/patches/rtmutex-lock-killable.patch
+++ b/patches/rtmutex-lock-killable.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1442,6 +1442,25 @@ int rt_mutex_timed_futex_lock(struct rt_
+@@ -1458,6 +1458,25 @@ int rt_mutex_timed_futex_lock(struct rt_
}
/**
diff --git a/patches/rtmutex-trylock-is-okay-on-RT.patch b/patches/rtmutex-trylock-is-okay-on-RT.patch
new file mode 100644
index 0000000000000..55d58870f3895
--- /dev/null
+++ b/patches/rtmutex-trylock-is-okay-on-RT.patch
@@ -0,0 +1,27 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed 02 Dec 2015 11:34:07 +0100
+Subject: rtmutex: trylock is okay on -RT
+
+A non-RT kernel could deadlock on rt_mutex_trylock() in softirq context.
+On -RT we don't run softirqs in IRQ context but in thread context, so it
+is not an issue here.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1469,7 +1469,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+ */
+ int __sched rt_mutex_trylock(struct rt_mutex *lock)
+ {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (WARN_ON(in_irq() || in_nmi()))
++#else
+ if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
++#endif
+ return 0;
+
+ return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
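
For orientation, a minimal sketch of the guard this patch leaves in place, assuming the usual meaning of the context predicates (in_irq(): hard interrupt, in_nmi(): NMI, in_serving_softirq(): a softirq handler is executing). The helper name is ours; the kernel open-codes the condition as shown in the hunk above:

    /* Sketch only: on PREEMPT_RT_FULL softirqs run in thread context,
     * so a trylock from softirq cannot deadlock and only hard IRQ and
     * NMI context are rejected. */
    static bool rt_trylock_context_ok(void)
    {
    #ifdef CONFIG_PREEMPT_RT_FULL
            return !(in_irq() || in_nmi());
    #else
            return !(in_irq() || in_nmi() || in_serving_softirq());
    #endif
    }
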
diff --git a/patches/rtmutex_dont_include_rcu.patch b/patches/rtmutex_dont_include_rcu.patch
new file mode 100644
index 0000000000000..d3774cde4636a
--- /dev/null
+++ b/patches/rtmutex_dont_include_rcu.patch
@@ -0,0 +1,75 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: rbtree: don't include the rcu header
+
+The RCU header pulls in spinlock.h and fails due to not-yet-defined types:
+
+|In file included from include/linux/spinlock.h:275:0,
+| from include/linux/rcupdate.h:38,
+| from include/linux/rbtree.h:34,
+| from include/linux/rtmutex.h:17,
+| from include/linux/spinlock_types.h:18,
+| from kernel/bounds.c:13:
+|include/linux/rwlock_rt.h:16:38: error: unknown type name ‘rwlock_t’
+| extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
+| ^
+
+This patch moves the only RCU user from the header file into the C file so
+the inclusion can be avoided.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rbtree.h | 11 ++---------
+ lib/rbtree.c | 11 +++++++++++
+ 2 files changed, 13 insertions(+), 9 deletions(-)
+
+--- a/include/linux/rbtree.h
++++ b/include/linux/rbtree.h
+@@ -31,7 +31,6 @@
+
+ #include <linux/kernel.h>
+ #include <linux/stddef.h>
+-#include <linux/rcupdate.h>
+
+ struct rb_node {
+ unsigned long __rb_parent_color;
+@@ -86,14 +85,8 @@ static inline void rb_link_node(struct r
+ *rb_link = node;
+ }
+
+-static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
+- struct rb_node **rb_link)
+-{
+- node->__rb_parent_color = (unsigned long)parent;
+- node->rb_left = node->rb_right = NULL;
+-
+- rcu_assign_pointer(*rb_link, node);
+-}
++void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
++ struct rb_node **rb_link);
+
+ #define rb_entry_safe(ptr, type, member) \
+ ({ typeof(ptr) ____ptr = (ptr); \
+--- a/lib/rbtree.c
++++ b/lib/rbtree.c
+@@ -23,6 +23,7 @@
+
+ #include <linux/rbtree_augmented.h>
+ #include <linux/export.h>
++#include <linux/rcupdate.h>
+
+ /*
+ * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
+@@ -590,3 +591,13 @@ struct rb_node *rb_first_postorder(const
+ return rb_left_deepest_node(root->rb_node);
+ }
+ EXPORT_SYMBOL(rb_first_postorder);
++
++void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
++ struct rb_node **rb_link)
++{
++ node->__rb_parent_color = (unsigned long)parent;
++ node->rb_left = node->rb_right = NULL;
++
++ rcu_assign_pointer(*rb_link, node);
++}
++EXPORT_SYMBOL(rb_link_node_rcu);
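
Only the linkage of rb_link_node_rcu() changes here, not its semantics. A hedged sketch of the assumed caller pattern, following the classic rbtree insertion loop (entry_of() and key are hypothetical placeholders for the caller's container lookup):

    /* Insert under the writer's lock; concurrent RCU readers see either
     * the old tree or a fully initialised node, because
     * rb_link_node_rcu() publishes the link via rcu_assign_pointer(). */
    struct rb_node **new = &root->rb_node, *parent = NULL;

    while (*new) {
            parent = *new;
            new = key < entry_of(*new)->key ? &(*new)->rb_left
                                            : &(*new)->rb_right;
    }
    rb_link_node_rcu(&item->node, parent, new);
    rb_insert_color(&item->node, root);
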
diff --git a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
index 7680aaa3249c4..7620c76e13531 100644
--- a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -637,6 +637,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -697,6 +697,7 @@ void init_dl_task_timer(struct sched_dl_
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = dl_task_timer;
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index 2312781575fad..b627092852028 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1788,6 +1788,9 @@ struct task_struct {
+@@ -1829,6 +1829,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
-@@ -1982,6 +1985,15 @@ extern struct pid *cad_pid;
+@@ -2037,6 +2040,15 @@ extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -1989,6 +2001,7 @@ static inline void put_task_struct(struc
+@@ -2044,6 +2056,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
-@@ -260,7 +262,18 @@ void __put_task_struct(struct task_struc
+@@ -261,7 +263,18 @@ void __put_task_struct(struct task_struc
if (!profile_handoff_task(tsk))
free_task(tsk);
}
diff --git a/patches/sched-disable-rt-group-sched-on-rt.patch b/patches/sched-disable-rt-group-sched-on-rt.patch
index b4ed5bc8e5def..fe17c667fbba9 100644
--- a/patches/sched-disable-rt-group-sched-on-rt.patch
+++ b/patches/sched-disable-rt-group-sched-on-rt.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1101,6 +1101,7 @@ config CFS_BANDWIDTH
+@@ -1106,6 +1106,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
diff --git a/patches/sched-disable-ttwu-queue.patch b/patches/sched-disable-ttwu-queue.patch
index 9bb14049d1635..b89c7cd4c7532 100644
--- a/patches/sched-disable-ttwu-queue.patch
+++ b/patches/sched-disable-ttwu-queue.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
-@@ -50,11 +50,16 @@ SCHED_FEAT(LB_BIAS, true)
+@@ -45,11 +45,16 @@ SCHED_FEAT(LB_BIAS, true)
*/
SCHED_FEAT(NONTASK_CAPACITY, true)
diff --git a/patches/sched-introduce-the-27trace_sched_waking-27-tracepoint.patch b/patches/sched-introduce-the-27trace_sched_waking-27-tracepoint.patch
deleted file mode 100644
index 698923d176e08..0000000000000
--- a/patches/sched-introduce-the-27trace_sched_waking-27-tracepoint.patch
+++ /dev/null
@@ -1,172 +0,0 @@
-Subject: sched: Introduce the trace_sched_waking tracepoint
-Date: Sun, 25 Oct 2015 16:35:24 -0400
-From: Peter Zijlstra <peterz@infradead.org>
-
-Upstream commit fbd705a0c6184580d0e2fbcbd47a37b6e5822511
-
-Mathieu reported that since 317f394160e9 ("sched: Move the second half
-of ttwu() to the remote cpu") trace_sched_wakeup() can happen out of
-context of the waker.
-
-This is a problem when you want to analyse wakeup paths because it is
-now very hard to correlate the wakeup event to whoever issued the
-wakeup.
-
-OTOH trace_sched_wakeup() is issued at the point where we set
-p->state = TASK_RUNNING, which is right were we hand the task off to
-the scheduler, so this is an important point when looking at
-scheduling behaviour, up to here its been the wakeup path everything
-hereafter is due to scheduler policy.
-
-To bridge this gap, introduce a second tracepoint: trace_sched_waking.
-It is guaranteed to be called in the waker context.
-
-[ Ported to linux-4.1.y-rt kernel by Mathieu Desnoyers. Resolved
- conflict: try_to_wake_up_local() does not exist in -rt kernel. Removed
- its instrumentation hunk. ]
-
-Reported-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-CC: Julien Desfossez <jdesfossez@efficios.com>
-CC: Peter Zijlstra (Intel) <peterz@infradead.org>
-Cc: Francis Giraldeau <francis.giraldeau@gmail.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Mike Galbraith <efault@gmx.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-CC: Ingo Molnar <mingo@kernel.org>
-Link: http://lkml.kernel.org/r/20150609091336.GQ3644@twins.programming.kicks-ass.net
-Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
----
-Julien Desfossez is currently implementing an in-kernel latency tracker
-module performing automated latency/deadline analysis, which main target
-is the real time kernel.
-
-In order to follow the wakeup chains, we need the sched_waking
-tracepoint. This is a backport of this tracepoint to the 4.1-rt kernel.
-I'm not sure what is the policy regarding patch backport from mainline
-to -rt kernels, hence the RFC.
-
-Thanks,
-
-Mathieu
----
- include/trace/events/sched.h | 30 +++++++++++++++++++++---------
- kernel/sched/core.c | 8 +++++---
- kernel/trace/trace_sched_switch.c | 2 +-
- kernel/trace/trace_sched_wakeup.c | 2 +-
- 4 files changed, 28 insertions(+), 14 deletions(-)
-
---- a/include/trace/events/sched.h
-+++ b/include/trace/events/sched.h
-@@ -55,9 +55,9 @@ TRACE_EVENT(sched_kthread_stop_ret,
- */
- DECLARE_EVENT_CLASS(sched_wakeup_template,
-
-- TP_PROTO(struct task_struct *p, int success),
-+ TP_PROTO(struct task_struct *p),
-
-- TP_ARGS(__perf_task(p), success),
-+ TP_ARGS(__perf_task(p)),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN )
-@@ -71,25 +71,37 @@ DECLARE_EVENT_CLASS(sched_wakeup_templat
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->prio = p->prio;
-- __entry->success = success;
-+ __entry->success = 1; /* rudiment, kill when possible */
- __entry->target_cpu = task_cpu(p);
- ),
-
-- TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
-+ TP_printk("comm=%s pid=%d prio=%d target_cpu=%03d",
- __entry->comm, __entry->pid, __entry->prio,
-- __entry->success, __entry->target_cpu)
-+ __entry->target_cpu)
- );
-
-+/*
-+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
-+ * called from the waking context.
-+ */
-+DEFINE_EVENT(sched_wakeup_template, sched_waking,
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p));
-+
-+/*
-+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
-+ * It it not always called from the waking context.
-+ */
- DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
-- TP_PROTO(struct task_struct *p, int success),
-- TP_ARGS(p, success));
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p));
-
- /*
- * Tracepoint for waking up a new task:
- */
- DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
-- TP_PROTO(struct task_struct *p, int success),
-- TP_ARGS(p, success));
-+ TP_PROTO(struct task_struct *p),
-+ TP_ARGS(p));
-
- #ifdef CREATE_TRACE_POINTS
- static inline long __trace_sched_switch_state(struct task_struct *p)
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -1558,9 +1558,9 @@ static void
- ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
- {
- check_preempt_curr(rq, p, wake_flags);
-- trace_sched_wakeup(p, true);
--
- p->state = TASK_RUNNING;
-+ trace_sched_wakeup(p);
-+
- #ifdef CONFIG_SMP
- if (p->sched_class->task_woken)
- p->sched_class->task_woken(rq, p);
-@@ -1784,6 +1784,8 @@ try_to_wake_up(struct task_struct *p, un
- if (!(wake_flags & WF_LOCK_SLEEPER))
- p->saved_state = TASK_RUNNING;
-
-+ trace_sched_waking(p);
-+
- success = 1; /* we're going to change ->state */
- cpu = task_cpu(p);
-
-@@ -2188,7 +2190,7 @@ void wake_up_new_task(struct task_struct
- rq = __task_rq_lock(p);
- activate_task(rq, p, 0);
- p->on_rq = TASK_ON_RQ_QUEUED;
-- trace_sched_wakeup_new(p, true);
-+ trace_sched_wakeup_new(p);
- check_preempt_curr(rq, p, WF_FORK);
- #ifdef CONFIG_SMP
- if (p->sched_class->task_woken)
---- a/kernel/trace/trace_sched_switch.c
-+++ b/kernel/trace/trace_sched_switch.c
-@@ -26,7 +26,7 @@ probe_sched_switch(void *ignore, struct
- }
-
- static void
--probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
-+probe_sched_wakeup(void *ignore, struct task_struct *wakee)
- {
- if (unlikely(!sched_ref))
- return;
---- a/kernel/trace/trace_sched_wakeup.c
-+++ b/kernel/trace/trace_sched_wakeup.c
-@@ -514,7 +514,7 @@ static void wakeup_reset(struct trace_ar
- }
-
- static void
--probe_wakeup(void *ignore, struct task_struct *p, int success)
-+probe_wakeup(void *ignore, struct task_struct *p)
- {
- struct trace_array_cpu *data;
- int cpu = smp_processor_id();
diff --git a/patches/sched-limit-nr-migrate.patch b/patches/sched-limit-nr-migrate.patch
index 3c52ca37143bf..ac934db3442c8 100644
--- a/patches/sched-limit-nr-migrate.patch
+++ b/patches/sched-limit-nr-migrate.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -282,7 +282,11 @@ late_initcall(sched_init_debug);
+@@ -260,7 +260,11 @@ late_initcall(sched_init_debug);
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index dcca0c492d46e..e5539c5fa3380 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -8,12 +8,12 @@ in might_sleep().
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rcupdate.h | 7 +++++++
- kernel/sched/core.c | 3 ++-
- 2 files changed, 9 insertions(+), 1 deletion(-)
+ kernel/sched/core.c | 2 +-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -260,6 +260,11 @@ void synchronize_rcu(void);
+@@ -292,6 +292,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* #ifdef CONFIG_PREEMPT_RCU */
-@@ -283,6 +288,8 @@ static inline int rcu_preempt_depth(void
+@@ -317,6 +322,8 @@ static inline int rcu_preempt_depth(void
return 0;
}
@@ -36,13 +36,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7511,7 +7511,8 @@ void __init sched_init(void)
+@@ -7729,7 +7729,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
-- int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
-+ int nested = (preempt_count() & ~PREEMPT_ACTIVE) +
-+ sched_rcu_preempt_depth();
+- int nested = preempt_count() + rcu_preempt_depth();
++ int nested = preempt_count() + sched_rcu_preempt_depth();
return (nested == preempt_offset);
}
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index f5357727ee250..65b7088e30d28 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>
-@@ -453,6 +454,9 @@ struct mm_struct {
+@@ -504,6 +505,9 @@ struct mm_struct {
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __user *bd_addr;
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2548,12 +2548,24 @@ extern struct mm_struct * mm_alloc(void)
+@@ -2603,12 +2603,24 @@ extern struct mm_struct * mm_alloc(void)
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Grab a reference to a task's mm, if it is not already going away */
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -693,6 +693,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -702,6 +702,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2286,8 +2286,12 @@ static struct rq *finish_task_switch(str
+@@ -2602,8 +2602,12 @@ static struct rq *finish_task_switch(str
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -98,16 +98,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5132,6 +5136,8 @@ static int migration_cpu_stop(void *data
+@@ -5317,6 +5321,8 @@ void sched_setnuma(struct task_struct *p
+ #endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
-
+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
+
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -5146,7 +5152,11 @@ void idle_task_exit(void)
+@@ -5331,7 +5337,11 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5489,6 +5499,10 @@ migration_call(struct notifier_block *nf
+@@ -5703,6 +5713,10 @@ migration_call(struct notifier_block *nf
case CPU_DEAD:
calc_load_migrate(rq);
diff --git a/patches/sched-reset-task-s-lockless-wake-queues-on-fork.patch b/patches/sched-reset-task-s-lockless-wake-queues-on-fork.patch
new file mode 100644
index 0000000000000..2a2c272b608e9
--- /dev/null
+++ b/patches/sched-reset-task-s-lockless-wake-queues-on-fork.patch
@@ -0,0 +1,34 @@
+From c4c38e7dcd4e925f624cc7fe18aeaad841fd7d6f Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 21 Dec 2015 18:17:10 +0100
+Subject: [PATCH] sched: reset task's lockless wake-queues on fork()
+
+In 7675104990ed ("sched: Implement lockless wake-queues") we gained
+lockless wake-queues. -RT managed to lock itself up with those. There
+could be multiple attempts to enqueue task X for a wakeup _even_ if
+task X is already running.
+The reason is that task X could be runnable but not yet on a CPU. If
+the task performing the wakeup did not leave the CPU, it could perform
+multiple wakeups.
+With the proper timing, task X could be running and still enqueued for
+a wakeup. If this happens while X is performing a fork() then its
+child will have a !NULL `wake_q` member copied.
+This is not a problem as long as the child task does not participate in
+lockless wakeups :)
+
+Fixes: 7675104990ed ("sched: Implement lockless wake-queues")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/fork.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -380,6 +380,7 @@ static struct task_struct *dup_task_stru
+ #endif
+ tsk->splice_pipe = NULL;
+ tsk->task_frag.page = NULL;
++ tsk->wake_q.next = NULL;
+
+ account_kernel_stack(ti, 1);
+
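
The one-line reset restores the invariant that an idle task has wake_q.next == NULL. As a reminder of the API this protects, a sketch of typical in-kernel usage as of v4.4 (usage pattern assumed; names from kernel/sched/core.c):

    WAKE_Q(wq);                /* on-stack wake-queue head */

    /* wake_q_add() cmpxchg()es task->wake_q.next from NULL; a task
     * whose pointer is already non-NULL is silently skipped, so a
     * forked child inheriting a stale non-NULL pointer could never
     * be queued again - hence the reset above. */
    wake_q_add(&wq, task);
    wake_up_q(&wq);            /* wake all queued tasks, clear links */
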
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index c215c9b44ea45..66f46e6896fbc 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1335,6 +1335,7 @@ enum perf_event_task_context {
+@@ -1377,6 +1377,7 @@ struct tlbflush_unmap_batch {
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *stack;
atomic_t usage;
unsigned int flags; /* per process flags, defined below */
-@@ -2432,6 +2433,7 @@ extern void xtime_update(unsigned long t
+@@ -2480,6 +2481,7 @@ extern void xtime_update(unsigned long t
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void kick_process(struct task_struct *tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1721,8 +1721,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -1958,8 +1958,25 @@ try_to_wake_up(struct task_struct *p, un
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -60,9 +60,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ if (!(wake_flags & WF_LOCK_SLEEPER))
+ p->saved_state = TASK_RUNNING;
- success = 1; /* we're going to change ->state */
- cpu = task_cpu(p);
-@@ -1819,6 +1836,18 @@ int wake_up_process(struct task_struct *
+ trace_sched_waking(p);
+
+@@ -2092,6 +2109,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return try_to_wake_up(p, state, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1093,6 +1093,7 @@ static inline void finish_lock_switch(st
+@@ -1100,6 +1100,7 @@ static inline void finish_lock_switch(st
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
diff --git a/patches/sched-ttwu-ensure-success-return-is-correct.patch b/patches/sched-ttwu-ensure-success-return-is-correct.patch
index 5ec55b369b2ce..5c1d0b3a5f7b8 100644
--- a/patches/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/patches/sched-ttwu-ensure-success-return-is-correct.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1728,8 +1728,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -1965,8 +1965,10 @@ try_to_wake_up(struct task_struct *p, un
* if the wakeup condition is true.
*/
if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index 27f5fc0082454..d7f676b5b7d32 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2988,8 +2988,10 @@ static void __sched __schedule(void)
+@@ -3326,8 +3326,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/scsi-fcoe-rt-aware.patch b/patches/scsi-fcoe-rt-aware.patch
index 4cd758c772e9d..731be3a4db011 100644
--- a/patches/scsi-fcoe-rt-aware.patch
+++ b/patches/scsi-fcoe-rt-aware.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
-@@ -1287,7 +1287,7 @@ static void fcoe_percpu_thread_destroy(u
+@@ -1286,7 +1286,7 @@ static void fcoe_percpu_thread_destroy(u
struct sk_buff *skb;
#ifdef CONFIG_SMP
struct fcoe_percpu_s *p0;
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* CONFIG_SMP */
FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
-@@ -1343,7 +1343,7 @@ static void fcoe_percpu_thread_destroy(u
+@@ -1342,7 +1342,7 @@ static void fcoe_percpu_thread_destroy(u
kfree_skb(skb);
spin_unlock_bh(&p->fcoe_rx_list.lock);
}
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
/*
* This a non-SMP scenario where the singular Rx thread is
-@@ -1567,11 +1567,11 @@ static int fcoe_rcv(struct sk_buff *skb,
+@@ -1566,11 +1566,11 @@ static int fcoe_rcv(struct sk_buff *skb,
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return rc;
}
-@@ -1767,11 +1767,11 @@ static inline int fcoe_filter_frames(str
+@@ -1766,11 +1766,11 @@ static inline int fcoe_filter_frames(str
return 0;
}
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return -EINVAL;
}
-@@ -1847,13 +1847,13 @@ static void fcoe_recv_frame(struct sk_bu
+@@ -1846,13 +1846,13 @@ static void fcoe_recv_frame(struct sk_bu
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
diff --git a/patches/seqlock-prevent-rt-starvation.patch b/patches/seqlock-prevent-rt-starvation.patch
index a83fee4a2b8a8..f5b231e4b0705 100644
--- a/patches/seqlock-prevent-rt-starvation.patch
+++ b/patches/seqlock-prevent-rt-starvation.patch
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
-@@ -219,20 +219,30 @@ static inline int read_seqcount_retry(co
+@@ -220,20 +220,30 @@ static inline int read_seqcount_retry(co
return __read_seqcount_retry(s, start);
}
@@ -60,10 +60,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_enable_rt();
+}
+
- /*
- * raw_write_seqcount_latch - redirect readers to even/odd copy
+ /**
+ * raw_write_seqcount_barrier - do a seq write barrier
* @s: pointer to seqcount_t
-@@ -305,10 +315,32 @@ typedef struct {
+@@ -425,10 +435,32 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
-@@ -323,36 +355,36 @@ static inline unsigned read_seqretry(con
+@@ -443,36 +475,36 @@ static inline unsigned read_seqretry(con
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_unlock_irq(&sl->lock);
}
-@@ -361,7 +393,7 @@ static inline unsigned long __write_seql
+@@ -481,7 +513,7 @@ static inline unsigned long __write_seql
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return flags;
}
-@@ -371,7 +403,7 @@ static inline unsigned long __write_seql
+@@ -491,7 +523,7 @@ static inline unsigned long __write_seql
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/net/dst.h
+++ b/include/net/dst.h
-@@ -403,7 +403,7 @@ static inline void dst_confirm(struct ds
+@@ -437,7 +437,7 @@ static inline void dst_confirm(struct ds
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
struct sk_buff *skb)
{
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned long now = jiffies;
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
-@@ -445,7 +445,7 @@ static inline int neigh_hh_bridge(struct
+@@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct
}
#endif
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned int seq;
int hh_len;
-@@ -500,7 +500,7 @@ struct neighbour_cb {
+@@ -501,7 +501,7 @@ struct neighbour_cb {
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
diff --git a/patches/series b/patches/series
index f90b2efe10698..b980cbaccecbb 100644
--- a/patches/series
+++ b/patches/series
@@ -5,40 +5,15 @@
############################################################
# UPSTREAM changes queued
############################################################
-xfs--clean-up-inode-lockdep-annotations
############################################################
# UPSTREAM FIXES, patches pending
############################################################
-0001-arm64-Mark-PMU-interrupt-IRQF_NO_THREAD.patch
-0002-arm64-Allow-forced-irq-threading.patch
-0001-uaccess-count-pagefault_disable-levels-in-pagefault_.patch
-0002-mm-uaccess-trigger-might_sleep-in-might_fault-with-d.patch
-0003-uaccess-clarify-that-uaccess-may-only-sleep-if-pagef.patch
-0004-mm-explicitly-disable-enable-preemption-in-kmap_atom.patch
-0005-mips-kmap_coherent-relies-on-disabled-preemption.patch
-0006-mm-use-pagefault_disable-to-check-for-disabled-pagef.patch
-0007-drm-i915-use-pagefault_disabled-to-check-for-disable.patch
-0008-futex-UP-futex_atomic_op_inuser-relies-on-disabled-p.patch
-0009-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on-dis.patch
-0010-arm-futex-UP-futex_atomic_cmpxchg_inatomic-relies-on.patch
-0011-arm-futex-UP-futex_atomic_op_inuser-relies-on-disabl.patch
-0012-futex-clarify-that-preemption-doesn-t-have-to-be-dis.patch
-0013-mips-properly-lock-access-to-the-fpu.patch
-0014-uaccess-decouple-preemption-from-the-pagefault-logic.patch
-0001-sched-Implement-lockless-wake-queues.patch
-0002-futex-Implement-lockless-wakeups.patch
-0004-ipc-mqueue-Implement-lockless-pipelined-wakeups.patch
-mm-slub-move-slab-initialization-into-irq-enabled-region.patch
-arm64-convert-patch_lock-to-raw-lock.patch
-arm64-replace-read_lock-to-rcu-lock-in-call_break_hook.patch
-bpf-convert-hashtab-lock-to-raw-lock.patch
############################################################
# Stuff broken upstream, patches submitted
############################################################
-cpufreq-Remove-cpufreq_rwsem.patch
-genirq--Handle-interrupts-with-primary-and-threaded-handler-gracefully
+sched-reset-task-s-lockless-wake-queues-on-fork.patch
############################################################
# Stuff which needs addressing upstream, but requires more
@@ -89,27 +64,7 @@ kernel-SRCU-provide-a-static-initializer.patch
############################################################
# Stuff which should go upstream ASAP
############################################################
-0001-gpio-omap-Allow-building-as-a-loadable-module.patch
-0002-gpio-omap-fix-omap_gpio_free-to-not-clean-up-irq-con.patch
-0003-gpio-omap-fix-error-handling-in-omap_gpio_irq_type.patch
-0004-gpio-omap-rework-omap_x_irq_shutdown-to-touch-only-i.patch
-0005-gpio-omap-rework-omap_gpio_request-to-touch-only-gpi.patch
-0006-gpio-omap-rework-omap_gpio_irq_startup-to-handle-cur.patch
-0007-gpio-omap-add-missed-spin_unlock_irqrestore-in-omap_.patch
-0008-gpio-omap-prevent-module-from-being-unloaded-while-i.patch
0009-ARM-OMAP2-Drop-the-concept-of-certain-power-domains-.patch
-0010-gpio-omap-use-raw-locks-for-locking.patch
-0011-gpio-omap-Fix-missing-raw-locks-conversion.patch
-0012-gpio-omap-remove-wrong-irq_domain_remove-usage-in-pr.patch
-0013-gpio-omap-switch-to-use-platform_get_irq.patch
-0014-gpio-omap-fix-omap2_set_gpio_debounce.patch
-0015-gpio-omap-protect-regs-access-in-omap_gpio_irq_handl.patch
-0016-gpio-omap-fix-clk_prepare-unprepare-usage.patch
-0017-gpio-omap-Fix-gpiochip_add-handling-for-deferred-pro.patch
-0018-gpio-omap-Fix-GPIO-numbering-for-deferred-probe.patch
-0019-gpio-omap-fix-static-checker-warning.patch
-0020-gpio-omap-move-pm-runtime-in-irq_chip.irq_bus_lock-s.patch
-0021-gpio-omap-convert-to-use-generic-irq-handler.patch
# SCHED BLOCK/WQ
block-shorten-interrupt-disabled-regions.patch
@@ -160,8 +115,6 @@ net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
# X86
x86-io-apic-migra-no-unmask.patch
-fix-rt-int3-x86_32-3.2-rt.patch
-Revert-x86-Do-not-disable-preemption-in-int3-on-32bi.patch
# RCU
@@ -176,7 +129,6 @@ pci-access-use-__wake_up_all_locked.patch
#####################################################
# Stuff which should go mainline, but wants some care
#####################################################
-futex-avoid-double-wake-up-in-PI-futex-wait-wake-on-.patch
# SEQLOCK
@@ -249,7 +201,6 @@ genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
# DRIVERS NET
drivers-net-fix-livelock-issues.patch
drivers-net-vortex-fix-locking-issues.patch
-net-gianfar-do-not-disable-interrupts.patch
# MM PAGE_ALLOC
mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -277,6 +228,7 @@ slub-disable-SLUB_CPU_PARTIAL.patch
mm-page-alloc-use-local-lock-on-target-cpu.patch
mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
mm-memcontrol-do_not_disable_irq.patch
+mm-rmap-retry-lock-check-in-anon_vma_free.patch_vma_free.patch
# RADIX TREE
radix-tree-rt-aware.patch
@@ -285,7 +237,7 @@ radix-tree-rt-aware.patch
panic-disable-random-on-rt.patch
# IPC
-ipc-make-rt-aware.patch
+ipc-msg-Implement-lockless-pipelined-wakeups.patch
# RELAY
relay-fix-timer-madness.patch
@@ -298,10 +250,10 @@ timers-avoid-the-base-null-otptimization-on-rt.patch
# HRTIMERS
hrtimers-prepare-full-preemption.patch
+hrtimer-enfore-64byte-alignment.patch
hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
timer-fd-avoid-live-lock.patch
-hrtimer-raise-softirq-if-hrtimer-irq-stalled.patch
hrtimer-Move-schedule_work-call-to-helper-thread.patch
# POSIX-CPU-TIMERS
@@ -343,6 +295,7 @@ softirq-preempt-fix-3-re.patch
softirq-disable-softirq-stacks-for-rt.patch
softirq-split-locks.patch
irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+rtmutex-trylock-is-okay-on-RT.patch
# RAID5
md-raid5-percpu-handling-rt-aware.patch
@@ -358,6 +311,7 @@ locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
rtmutex-lock-killable.patch
spinlock-types-separate-raw.patch
rtmutex-avoid-include-hell.patch
+rtmutex_dont_include_rcu.patch
rt-add-rt-locks.patch
rtmutex-Use-chainwalking-control-enum.patch
rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -398,7 +352,6 @@ fs-namespace-preemption-fix.patch
mm-protect-activate-switch-mm.patch
fs-block-rt-support.patch
fs-ntfs-disable-interrupt-non-rt.patch
-fs-jbd-pull-plug-when-waiting-for-space.patch
fs-jbd2-pull-your-plug-when-waiting-for-space.patch
# X86
@@ -424,6 +377,7 @@ cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
# block
blk-mq-revert-raw-locks-post-pone-notifier-to-POST_D.patchto-POST_D.patch
block-blk-mq-use-swait.patch
+# XXX melt
block-mq-drop-per-ctx-cpu_lock.patch
# BLOCK LIVELOCK PREVENTION
@@ -461,7 +415,6 @@ irqwork-Move-irq-safe-work-to-irq-context.patch
# Sound
snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
-ASoC-Intel-sst-use-instead-of-at-the-of-a-C-statemen.patch
# CONSOLE. NEEDS more thought !!!
printk-rt-aware.patch
@@ -476,7 +429,6 @@ powerpc-ps3-device-init.c-adapt-to-completions-using.patch
arm-at91-tclib-default-to-tclib-timer-for-rt.patch
arm-unwind-use_raw_lock.patch
ARM-enable-irq-in-translation-section-permission-fau.patch
-ARM-cmpxchg-define-__HAVE_ARCH_CMPXCHG-for-armv6-and.patch
# ARM64
arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -603,7 +555,6 @@ md-disable-bcache.patch
workqueue-prevent-deadlock-stall.patch
# TRACING
-sched-introduce-the-27trace_sched_waking-27-tracepoint.patch
latency_hist-update-sched_wakeup-probe.patch
# Add RT to version
diff --git a/patches/signal-fix-up-rcu-wreckage.patch b/patches/signal-fix-up-rcu-wreckage.patch
index 97f2a9e8cd238..71a57f38292a3 100644
--- a/patches/signal-fix-up-rcu-wreckage.patch
+++ b/patches/signal-fix-up-rcu-wreckage.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1342,12 +1342,12 @@ struct sighand_struct *__lock_task_sigha
+@@ -1276,12 +1276,12 @@ struct sighand_struct *__lock_task_sigha
* Disable interrupts early to avoid deadlocks.
* See rcu_read_unlock() comment header for details.
*/
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
break;
}
/*
-@@ -1368,7 +1368,7 @@ struct sighand_struct *__lock_task_sigha
+@@ -1302,7 +1302,7 @@ struct sighand_struct *__lock_task_sigha
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();
diff --git a/patches/signal-revert-ptrace-preempt-magic.patch b/patches/signal-revert-ptrace-preempt-magic.patch
index 0dae85433fdeb..7152ebca6ea5b 100644
--- a/patches/signal-revert-ptrace-preempt-magic.patch
+++ b/patches/signal-revert-ptrace-preempt-magic.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1897,15 +1897,7 @@ static void ptrace_stop(int exit_code, i
+@@ -1846,15 +1846,7 @@ static void ptrace_stop(int exit_code, i
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index f123d926a44f6..96e8c4719d24c 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -12,12 +12,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/signal.h | 1
kernel/exit.c | 2 -
kernel/fork.c | 1
- kernel/signal.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++---
- 5 files changed, 84 insertions(+), 5 deletions(-)
+ kernel/signal.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++---
+ 5 files changed, 69 insertions(+), 5 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1527,6 +1527,7 @@ struct task_struct {
+@@ -1566,6 +1566,7 @@ struct task_struct {
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1338,6 +1338,7 @@ static struct task_struct *copy_process(
+@@ -1343,6 +1343,7 @@ static struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
@@ -66,11 +66,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
-@@ -352,13 +353,45 @@ static bool task_participate_group_stop(
+@@ -352,13 +353,30 @@ static bool task_participate_group_stop(
return false;
}
-+#ifdef __HAVE_ARCH_CMPXCHG
+static inline struct sigqueue *get_task_cache(struct task_struct *t)
+{
+ struct sigqueue *q = t->sigqueue_cache;
@@ -87,20 +86,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ return 1;
+}
+
-+#else
-+
-+static inline struct sigqueue *get_task_cache(struct task_struct *t)
-+{
-+ return NULL;
-+}
-+
-+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q)
-+{
-+ return 1;
-+}
-+
-+#endif
-+
/*
* allocate a new signal queue record
* - this may be called without locks if and only if t == current, otherwise an
@@ -113,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
struct sigqueue *q = NULL;
struct user_struct *user;
-@@ -375,7 +408,10 @@ static struct sigqueue *
+@@ -375,7 +393,10 @@ static struct sigqueue *
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
@@ -125,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
print_dropped_signal(sig);
}
-@@ -392,6 +428,13 @@ static struct sigqueue *
+@@ -392,6 +413,13 @@ static struct sigqueue *
return q;
}
@@ -139,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
-@@ -401,6 +444,21 @@ static void __sigqueue_free(struct sigqu
+@@ -401,6 +429,21 @@ static void __sigqueue_free(struct sigqu
kmem_cache_free(sigqueue_cachep, q);
}
@@ -161,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
-@@ -414,6 +472,21 @@ void flush_sigqueue(struct sigpending *q
+@@ -414,6 +457,21 @@ void flush_sigqueue(struct sigpending *q
}
/*
@@ -180,10 +165,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+
+/*
- * Flush all pending signals for a task.
+ * Flush all pending signals for this kthread.
*/
- void __flush_signals(struct task_struct *t)
-@@ -565,7 +638,7 @@ static void collect_signal(int sig, stru
+ void flush_signals(struct task_struct *t)
+@@ -525,7 +583,7 @@ static void collect_signal(int sig, stru
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
@@ -192,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* Ok, it wasn't in the queue. This must be
-@@ -611,6 +684,8 @@ int dequeue_signal(struct task_struct *t
+@@ -560,6 +618,8 @@ int dequeue_signal(struct task_struct *t
{
int signr;
@@ -201,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
-@@ -1536,7 +1611,8 @@ EXPORT_SYMBOL(kill_pid);
+@@ -1485,7 +1545,8 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 7cbfc3f33c044..f4dff423e178a 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2469,6 +2469,7 @@ struct softnet_data {
+@@ -2546,6 +2546,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -187,6 +187,7 @@ struct sk_buff_head {
+@@ -203,6 +203,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct sk_buff;
-@@ -1336,6 +1337,12 @@ static inline void skb_queue_head_init(s
+@@ -1463,6 +1464,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -205,14 +205,14 @@ static inline struct hlist_head *dev_ind
+@@ -207,14 +207,14 @@ static inline struct hlist_head *dev_ind
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -3882,7 +3882,7 @@ static void flush_backlog(void *arg)
+@@ -4045,7 +4045,7 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -3891,10 +3891,13 @@ static void flush_backlog(void *arg)
+@@ -4054,10 +4054,13 @@ static void flush_backlog(void *arg)
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev == dev) {
__skb_unlink(skb, &sd->process_queue);
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int napi_gro_complete(struct sk_buff *skb)
-@@ -7182,6 +7185,9 @@ static int dev_cpu_callback(struct notif
+@@ -7464,6 +7467,9 @@ static int dev_cpu_callback(struct notif
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NOTIFY_OK;
}
-@@ -7483,8 +7489,9 @@ static int __init net_dev_init(void)
+@@ -7765,8 +7771,9 @@ static int __init net_dev_init(void)
for_each_possible_cpu(i) {
struct softnet_data *sd = &per_cpu(softnet_data, i);
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index 12c21be64bf5a..810e48b8a1ef2 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1717,7 +1717,7 @@ endchoice
+@@ -1748,7 +1748,7 @@ endchoice
config SLUB_CPU_PARTIAL
default y
diff --git a/patches/slub-enable-irqs-for-no-wait.patch b/patches/slub-enable-irqs-for-no-wait.patch
index 5da834967e9f7..ef8211a5f5741 100644
--- a/patches/slub-enable-irqs-for-no-wait.patch
+++ b/patches/slub-enable-irqs-for-no-wait.patch
@@ -7,38 +7,39 @@ with GFP_WAIT can happen before that. So use this as an indicator.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- mm/slub.c | 13 +++++--------
- 1 file changed, 5 insertions(+), 8 deletions(-)
+ mm/slub.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1355,14 +1355,15 @@ static struct page *allocate_slab(struct
+@@ -1405,14 +1405,17 @@ static struct page *allocate_slab(struct
gfp_t alloc_gfp;
void *start, *p;
int idx, order;
-+ bool enableirqs;
++ bool enableirqs = false;
flags &= gfp_allowed_mask;
-+ enableirqs = (flags & __GFP_WAIT) != 0;
++ if (gfpflags_allow_blocking(flags))
++ enableirqs = true;
#ifdef CONFIG_PREEMPT_RT_FULL
-- if (system_state == SYSTEM_RUNNING)
+ if (system_state == SYSTEM_RUNNING)
-#else
-- if (flags & __GFP_WAIT)
-+ enableirqs |= system_state == SYSTEM_RUNNING;
+- if (gfpflags_allow_blocking(flags))
++ enableirqs = true;
#endif
+ if (enableirqs)
local_irq_enable();
flags |= s->allocflags;
-@@ -1431,11 +1432,7 @@ static struct page *allocate_slab(struct
+@@ -1483,11 +1486,7 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (system_state == SYSTEM_RUNNING)
-#else
-- if (flags & __GFP_WAIT)
+- if (gfpflags_allow_blocking(flags))
-#endif
+ if (enableirqs)
local_irq_disable();
diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch
index 19f308050e08a..af38648537fa1 100644
--- a/patches/softirq-disable-softirq-stacks-for-rt.patch
+++ b/patches/softirq-disable-softirq-stacks-for-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/powerpc/kernel/misc_64.S | 2 ++
arch/sh/kernel/irq.c | 2 ++
arch/sparc/kernel/irq_64.c | 2 ++
- arch/x86/kernel/entry_64.S | 2 ++
+ arch/x86/entry/entry_64.S | 2 ++
arch/x86/kernel/irq_32.c | 2 ++
include/linux/interrupt.h | 2 +-
8 files changed, 15 insertions(+), 1 deletion(-)
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
-@@ -29,6 +29,7 @@
+@@ -30,6 +30,7 @@
.text
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
-@@ -39,6 +40,7 @@
+@@ -40,6 +41,7 @@
ld r0,16(r1)
mtlr r0
blr
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
-@@ -849,6 +849,7 @@ void __irq_entry handler_irq(int pil, st
+@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, st
set_irq_regs(old_regs);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void)
{
void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-@@ -863,6 +864,7 @@ void do_softirq_own_stack(void)
+@@ -868,6 +869,7 @@ void do_softirq_own_stack(void)
__asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp));
}
@@ -107,19 +107,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
---- a/arch/x86/kernel/entry_64.S
-+++ b/arch/x86/kernel/entry_64.S
-@@ -1118,6 +1118,7 @@ END(native_load_gs_index)
- jmp 2b
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -867,6 +867,7 @@ END(native_load_gs_index)
+ jmp 2b
.previous
+#ifndef CONFIG_PREEMPT_RT_FULL
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
- CFI_STARTPROC
-@@ -1137,6 +1138,7 @@ ENTRY(do_softirq_own_stack)
+ pushq %rbp
+@@ -879,6 +880,7 @@ ENTRY(do_softirq_own_stack)
+ decl PER_CPU_VAR(irq_count)
ret
- CFI_ENDPROC
END(do_softirq_own_stack)
+#endif
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
-@@ -135,6 +135,7 @@ void irq_ctx_init(int cpu)
+@@ -128,6 +128,7 @@ void irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
@@ -135,17 +135,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void)
{
struct thread_info *curstk;
-@@ -153,6 +154,7 @@ void do_softirq_own_stack(void)
+@@ -146,6 +147,7 @@ void do_softirq_own_stack(void)
call_on_stack(__do_softirq, isp);
}
+#endif
- bool handle_irq(unsigned irq, struct pt_regs *regs)
+ bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -445,7 +445,7 @@ struct softirq_action
+@@ -446,7 +446,7 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
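
The refresh above only tracks upstream movement: the x86-64 entry code went from arch/x86/kernel/entry_64.S to arch/x86/entry/entry_64.S and lost its CFI_STARTPROC/CFI_ENDPROC annotations, and the other hunk offsets shifted. The patch's effect is unchanged: every arch-specific do_softirq_own_stack() is compiled out under RT so softirqs never run on a separate stack. A sketch of the include/linux/interrupt.h fallback this relies on (the exact +/- lines are not part of this refresh, so take the condition as an assumption):

#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT_FULL)
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
{
	/* No dedicated softirq stack: run on the current stack. */
	__do_softirq();
}
#endif
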
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index e9a6713ae83f0..48944fd2bfb35 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -50,8 +50,10 @@ do { \
+@@ -160,8 +160,10 @@ do { \
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
@@ -84,18 +84,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+# define preempt_check_resched_rt() barrier();
#endif
- #ifdef CONFIG_PREEMPT
-@@ -126,6 +128,7 @@ do { \
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+@@ -232,6 +234,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
+#define preempt_check_resched_rt() barrier()
+ #define preemptible() 0
#endif /* CONFIG_PREEMPT_COUNT */
-
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2214,6 +2214,7 @@ static inline void __netif_reschedule(st
+@@ -2246,6 +2246,7 @@ static inline void __netif_reschedule(st
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -2295,6 +2296,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2327,6 +2328,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3365,6 +3367,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3519,6 +3521,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -4344,6 +4347,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4507,6 +4510,7 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4357,6 +4361,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4520,6 +4524,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4438,6 +4443,7 @@ void __napi_schedule(struct napi_struct
+@@ -4601,6 +4606,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -7167,6 +7173,7 @@ static int dev_cpu_callback(struct notif
+@@ -7449,6 +7455,7 @@ static int dev_cpu_callback(struct notif
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
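
Every hunk in net/core/dev.c above adds preempt_check_resched_rt() directly after a raise_softirq_irqoff()/local_irq_restore() pair. On RT the softirq runs in a thread, so raising it may set need_resched without an interrupt return to act on it; the macro gives the scheduler that chance, and it folds to a compiler barrier everywhere else (the !CONFIG_PREEMPT_COUNT stub is visible in the second preempt.h hunk). A sketch of the two branches; the RT_BASE side is not shown in the context above and is therefore an assumption:

#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_check_resched_rt()	preempt_check_resched()
#else
# define preempt_check_resched_rt()	barrier()
#endif
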
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 5809d521033de..00d527083e6f1 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -24,21 +24,21 @@ threads.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/bottom_half.h | 34 ++
- include/linux/interrupt.h | 15 +
- include/linux/preempt_mask.h | 15 +
- include/linux/sched.h | 3
- init/main.c | 1
- kernel/softirq.c | 488 ++++++++++++++++++++++++++++++++++++-------
- kernel/time/tick-sched.c | 9
- net/core/dev.c | 6
+ include/linux/bottom_half.h | 34 +++
+ include/linux/interrupt.h | 15 +
+ include/linux/preempt.h | 15 +
+ include/linux/sched.h | 3
+ init/main.c | 1
+ kernel/softirq.c | 488 +++++++++++++++++++++++++++++++++++++-------
+ kernel/time/tick-sched.c | 9
+ net/core/dev.c | 6
8 files changed, 477 insertions(+), 94 deletions(-)
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
-@@ -4,6 +4,39 @@
+@@ -3,6 +3,39 @@
+
#include <linux/preempt.h>
- #include <linux/preempt_mask.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
-@@ -31,5 +64,6 @@ static inline void local_bh_enable(void)
+@@ -30,5 +63,6 @@ static inline void local_bh_enable(void)
{
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* _LINUX_BH_H */
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -442,10 +442,11 @@ struct softirq_action
+@@ -443,10 +443,11 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
-@@ -453,6 +454,9 @@ static inline void do_softirq_own_stack(
+@@ -454,6 +455,9 @@ static inline void do_softirq_own_stack(
__do_softirq();
}
#endif
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-@@ -460,6 +464,7 @@ extern void __raise_softirq_irqoff(unsig
+@@ -461,6 +465,7 @@ extern void __raise_softirq_irqoff(unsig
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-@@ -617,6 +622,12 @@ void tasklet_hrtimer_cancel(struct taskl
+@@ -618,6 +623,12 @@ void tasklet_hrtimer_cancel(struct taskl
tasklet_kill(&ttimer->tasklet);
}
@@ -130,22 +130,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Autoprobing for irqs:
*
---- a/include/linux/preempt_mask.h
-+++ b/include/linux/preempt_mask.h
-@@ -44,16 +44,26 @@
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -50,7 +50,11 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
-+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+#else
-+# define SOFTIRQ_DISABLE_OFFSET (0)
++# define SOFTIRQ_DISABLE_OFFSET (0)
+#endif
- #define PREEMPT_ACTIVE_BITS 1
- #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
- #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+ /* We use the MSB mostly because its available */
+ #define PREEMPT_NEED_RESCHED 0x80000000
+@@ -59,9 +63,15 @@
+ #include <asm/preempt.h>
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
@@ -161,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Are we doing bottom half or hardware interrupt processing?
-@@ -64,7 +74,6 @@
+@@ -72,7 +82,6 @@
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
@@ -171,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Are we in NMI context?
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1791,6 +1791,8 @@ struct task_struct {
+@@ -1832,6 +1832,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
@@ -180,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
-@@ -2041,6 +2043,7 @@ extern void thread_group_cputime_adjuste
+@@ -2096,6 +2098,7 @@ extern void thread_group_cputime_adjuste
/*
* Per process flags
*/
@@ -190,7 +191,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
--- a/init/main.c
+++ b/init/main.c
-@@ -525,6 +525,7 @@ asmlinkage __visible void __init start_k
+@@ -530,6 +530,7 @@ asmlinkage __visible void __init start_k
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
@@ -784,7 +785,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.thread_comm = "ksoftirqd/%u",
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -764,14 +764,7 @@ static bool can_stop_idle_tick(int cpu,
+@@ -758,14 +758,7 @@ static bool can_stop_idle_tick(int cpu,
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -802,7 +803,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3436,11 +3436,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3590,11 +3590,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
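
Most of the churn above is mechanical: v4.4 folded include/linux/preempt_mask.h into include/linux/preempt.h, so the split-locks hunks move with it. The substance is unchanged: with softirqs processed in task context, disabling bottom halves no longer touches preempt_count, so SOFTIRQ_DISABLE_OFFSET collapses to 0 and softirq_count() must be tracked per task. A sketch, assuming the softirq_nestcnt counter this same patch adds to task_struct:

#ifndef CONFIG_PREEMPT_RT_FULL
# define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#else
# define SOFTIRQ_DISABLE_OFFSET	(0)
# define softirq_count()	((unsigned long)current->softirq_nestcnt)
#endif
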
diff --git a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch b/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
index d8dcb4f01d05d..29d7622f5aed8 100644
--- a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
+++ b/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
-@@ -76,6 +76,12 @@ void lg_local_unlock_cpu(struct lglock *
+@@ -82,6 +82,12 @@ void lg_double_unlock(struct lglock *lg,
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
do { \
--- a/kernel/locking/lglock.c
+++ b/kernel/locking/lglock.c
-@@ -105,3 +105,28 @@ void lg_global_unlock(struct lglock *lg)
+@@ -127,3 +127,28 @@ void lg_global_unlock(struct lglock *lg)
preempt_enable_nort();
}
EXPORT_SYMBOL(lg_global_unlock);
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1133,6 +1133,11 @@ void __lockfunc rt_spin_unlock_wait(spin
+@@ -1153,6 +1153,11 @@ void __lockfunc rt_spin_unlock_wait(spin
}
EXPORT_SYMBOL(rt_spin_unlock_wait);
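
The body of the new primitive sits below the kernel/locking/lglock.c hunk and is not visible in this refresh. The idea is a global lglock acquisition that never sleeps on a priority-inverted owner: each per-CPU lock is trylocked in a relax loop. A sketch under that assumption (the __rt_spin_trylock() helper is what the rtmutex.c hunk above is understood to export; treat both the helper name and the per-CPU lock type as assumptions):

#ifdef CONFIG_PREEMPT_RT_FULL
void lg_global_trylock_relax(struct lglock *lg)
{
	int i;

	for_each_possible_cpu(i) {
		struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);

		/* Spin politely instead of blocking on the rtmutex. */
		while (!__rt_spin_trylock(lock))
			cpu_relax();
	}
}
#endif
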
diff --git a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch b/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
index 7a194d728495b..90536050c2ee5 100644
--- a/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
+++ b/patches/stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
@@ -12,13 +12,13 @@ now do that trylock()/relax() across an entire herd of locks. Joy.
Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/stop_machine.c | 24 ++++++++++++++----------
- 1 file changed, 14 insertions(+), 10 deletions(-)
+ kernel/stop_machine.c | 25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -266,7 +266,7 @@ int stop_two_cpus(unsigned int cpu1, uns
- struct irq_cpu_stop_queue_work_info call_args;
+@@ -276,7 +276,7 @@ int stop_two_cpus(unsigned int cpu1, uns
+ struct cpu_stop_work work1, work2;
struct multi_stop_data msdata;
- preempt_disable();
@@ -26,25 +26,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
msdata = (struct multi_stop_data){
.fn = fn,
.data = arg,
-@@ -299,7 +299,7 @@ int stop_two_cpus(unsigned int cpu1, uns
- * This relies on the stopper workqueues to be FIFO.
- */
- if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
+@@ -296,11 +296,11 @@ int stop_two_cpus(unsigned int cpu1, uns
+ if (cpu1 > cpu2)
+ swap(cpu1, cpu2);
+ if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) {
- preempt_enable();
+ preempt_enable_nort();
return -ENOENT;
}
-@@ -313,7 +313,7 @@ int stop_two_cpus(unsigned int cpu1, uns
- &irq_cpu_stop_queue_work,
- &call_args, 1);
- lg_local_unlock(&stop_cpus_lock);
- preempt_enable();
+ preempt_enable_nort();
wait_for_stop_done(&done);
-@@ -347,7 +347,7 @@ static DEFINE_PER_CPU(struct cpu_stop_wo
+@@ -333,17 +333,20 @@ static DEFINE_MUTEX(stop_cpus_mutex);
static void queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg,
@@ -53,8 +49,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct cpu_stop_work *work;
unsigned int cpu;
-@@ -361,11 +361,13 @@ static void queue_stop_cpus_work(const s
- }
/*
- * Disable preemption while queueing to avoid getting
@@ -68,10 +62,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ lg_global_lock(&stop_cpus_lock);
+ else
+ lg_global_trylock_relax(&stop_cpus_lock);
- for_each_cpu(cpu, cpumask)
- cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu));
- lg_global_unlock(&stop_cpus_lock);
-@@ -377,7 +379,7 @@ static int __stop_cpus(const struct cpum
++
+ for_each_cpu(cpu, cpumask) {
+ work = &per_cpu(cpu_stopper.stop_work, cpu);
+ work->fn = fn;
+@@ -360,7 +363,7 @@ static int __stop_cpus(const struct cpum
struct cpu_stop_done done;
cpu_stop_init_done(&done, cpumask_weight(cpumask));
@@ -80,16 +75,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
wait_for_stop_done(&done);
return done.executed ? done.ret : -ENOENT;
}
-@@ -573,6 +575,8 @@ static int __init cpu_stop_init(void)
+@@ -558,6 +561,8 @@ static int __init cpu_stop_init(void)
INIT_LIST_HEAD(&stopper->works);
}
+ lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");
+
BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
+ stop_machine_unpark(raw_smp_processor_id());
stop_machine_initialized = true;
- return 0;
-@@ -668,7 +672,7 @@ int stop_machine_from_inactive_cpu(int (
+@@ -654,7 +659,7 @@ int stop_machine_from_inactive_cpu(cpu_s
set_state(&msdata, MULTI_STOP_PREPARE);
cpu_stop_init_done(&done, num_active_cpus());
queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
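
The rework above follows the v4.4 stop_machine rewrite: the private per-cpu stop_cpus_work array is gone (work items now live in cpu_stopper.stop_work) and stop_two_cpus() queues through cpu_stop_queue_two_works(). Pieced together from the hunks above and the upstream v4.4 body, queue_stop_cpus_work() ends up as:

static void queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done, bool inactive)
{
	struct cpu_stop_work *work;
	unsigned int cpu;

	/*
	 * Queue all work before any CPU may execute it. A not-yet-active
	 * CPU may not sleep in lg_global_lock(), so it busy-waits with
	 * the trylock/relax variant instead.
	 */
	if (!inactive)
		lg_global_lock(&stop_cpus_lock);
	else
		lg_global_trylock_relax(&stop_cpus_lock);

	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		cpu_stop_queue_work(cpu, work);
	}
	lg_global_unlock(&stop_cpus_lock);
}
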
diff --git a/patches/stop-machine-raw-lock.patch b/patches/stop-machine-raw-lock.patch
index ca28bc805664a..6155df97d0e41 100644
--- a/patches/stop-machine-raw-lock.patch
+++ b/patches/stop-machine-raw-lock.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -30,12 +30,12 @@ struct cpu_stop_done {
+@@ -30,14 +30,14 @@ struct cpu_stop_done {
atomic_t nr_todo; /* nr left to execute */
bool executed; /* actually executed? */
int ret; /* collected return value */
@@ -21,12 +21,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
+ struct task_struct *thread;
+
- spinlock_t lock;
+ raw_spinlock_t lock;
bool enabled; /* is this stopper enabled? */
struct list_head works; /* list of pending works */
- };
-@@ -56,7 +56,7 @@ static void cpu_stop_init_done(struct cp
+
+@@ -59,7 +59,7 @@ static void cpu_stop_init_done(struct cp
{
memset(done, 0, sizeof(*done));
atomic_set(&done->nr_todo, nr_todo);
@@ -35,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/* signal completion unless @done is NULL */
-@@ -65,8 +65,10 @@ static void cpu_stop_signal_done(struct
+@@ -68,8 +68,10 @@ static void cpu_stop_signal_done(struct
if (done) {
if (executed)
done->executed = true;
@@ -48,19 +50,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -78,7 +80,7 @@ static void cpu_stop_queue_work(unsigned
-
+@@ -86,12 +88,28 @@ static void cpu_stop_queue_work(unsigned
+ struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
unsigned long flags;
- spin_lock_irqsave(&stopper->lock, flags);
+ raw_spin_lock_irqsave(&stopper->lock, flags);
-
- if (stopper->enabled) {
- list_add_tail(&work->list, &stopper->works);
-@@ -86,7 +88,23 @@ static void cpu_stop_queue_work(unsigned
- } else
+ if (stopper->enabled)
+ __cpu_stop_queue_work(stopper, work);
+ else
cpu_stop_signal_done(work->done, false);
-
- spin_unlock_irqrestore(&stopper->lock, flags);
+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+}
@@ -82,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -120,7 +138,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
+@@ -125,7 +143,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
cpu_stop_init_done(&done, 1);
cpu_stop_queue_work(cpu, &work);
@@ -91,8 +90,30 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return done.executed ? done.ret : -ENOENT;
}
-@@ -297,7 +315,7 @@ int stop_two_cpus(unsigned int cpu1, uns
- lg_local_unlock(&stop_cpus_lock);
+@@ -224,8 +242,8 @@ static int cpu_stop_queue_two_works(int
+ int err;
+
+ lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+- spin_lock_irq(&stopper1->lock);
+- spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
++ raw_spin_lock_irq(&stopper1->lock);
++ raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);
+
+ err = -ENOENT;
+ if (!stopper1->enabled || !stopper2->enabled)
+@@ -235,8 +253,8 @@ static int cpu_stop_queue_two_works(int
+ __cpu_stop_queue_work(stopper1, work1);
+ __cpu_stop_queue_work(stopper2, work2);
+ unlock:
+- spin_unlock(&stopper2->lock);
+- spin_unlock_irq(&stopper1->lock);
++ raw_spin_unlock(&stopper2->lock);
++ raw_spin_unlock_irq(&stopper1->lock);
+ lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+
+ return err;
+@@ -284,7 +302,7 @@ int stop_two_cpus(unsigned int cpu1, uns
+
preempt_enable();
- wait_for_completion(&done.completion);
@@ -100,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return done.executed ? done.ret : -ENOENT;
}
-@@ -360,7 +378,7 @@ static int __stop_cpus(const struct cpum
+@@ -343,7 +361,7 @@ static int __stop_cpus(const struct cpum
cpu_stop_init_done(&done, cpumask_weight(cpumask));
queue_stop_cpus_work(cpumask, fn, arg, &done);
@@ -109,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return done.executed ? done.ret : -ENOENT;
}
-@@ -439,9 +457,9 @@ static int cpu_stop_should_run(unsigned
+@@ -422,9 +440,9 @@ static int cpu_stop_should_run(unsigned
unsigned long flags;
int run;
@@ -121,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return run;
}
-@@ -453,13 +471,13 @@ static void cpu_stopper_thread(unsigned
+@@ -436,13 +454,13 @@ static void cpu_stopper_thread(unsigned
repeat:
work = NULL;
@@ -137,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (work) {
cpu_stop_fn_t fn = work->fn;
-@@ -491,7 +509,13 @@ static void cpu_stopper_thread(unsigned
+@@ -474,7 +492,13 @@ static void cpu_stopper_thread(unsigned
kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
ksym_buf), arg);
@@ -151,32 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto repeat;
}
}
-@@ -510,20 +534,20 @@ static void cpu_stop_park(unsigned int c
- unsigned long flags;
-
- /* drain remaining works */
-- spin_lock_irqsave(&stopper->lock, flags);
-+ raw_spin_lock_irqsave(&stopper->lock, flags);
- list_for_each_entry(work, &stopper->works, list)
- cpu_stop_signal_done(work->done, false);
- stopper->enabled = false;
-- spin_unlock_irqrestore(&stopper->lock, flags);
-+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
- }
-
- static void cpu_stop_unpark(unsigned int cpu)
- {
- struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-
-- spin_lock_irq(&stopper->lock);
-+ raw_spin_lock_irq(&stopper->lock);
- stopper->enabled = true;
-- spin_unlock_irq(&stopper->lock);
-+ raw_spin_unlock_irq(&stopper->lock);
- }
-
- static struct smp_hotplug_thread cpu_stop_threads = {
-@@ -545,7 +569,7 @@ static int __init cpu_stop_init(void)
+@@ -530,7 +554,7 @@ static int __init cpu_stop_init(void)
for_each_possible_cpu(cpu) {
struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
@@ -185,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
INIT_LIST_HEAD(&stopper->works);
}
-@@ -648,7 +672,7 @@ int stop_machine_from_inactive_cpu(int (
+@@ -634,7 +658,7 @@ int stop_machine_from_inactive_cpu(cpu_s
ret = multi_cpu_stop(&msdata);
/* Busy wait for completion. */
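
The conversion above exists because on RT a plain spinlock_t is a sleeping rtmutex, while the stopper lock is taken from contexts that must not sleep (including the busy-wait path at the end of stop_machine_from_inactive_cpu()). The data structure after the patch, per the hunks above:

struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;		/* was: spinlock_t */
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

The completion waits are swapped for wait_for_stop_done() for the same reason: the inactive-CPU caller must be able to poll for completion without scheduling.
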
diff --git a/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch b/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
index 4ec6647893c02..cf212bd2ff1c6 100644
--- a/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
+++ b/patches/stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
-@@ -467,6 +467,16 @@ static void cpu_stopper_thread(unsigned
+@@ -450,6 +450,16 @@ static void cpu_stopper_thread(unsigned
struct cpu_stop_done *done = work->done;
char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
diff --git a/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch b/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
index 91085dd857b73..275b36799b187 100644
--- a/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
+++ b/patches/sunrpc-make-svc_xprt_do_enqueue-use-get_cpu_light.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
-@@ -341,7 +341,7 @@ static void svc_xprt_do_enqueue(struct s
+@@ -340,7 +340,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
goto out;
}
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
atomic_long_inc(&pool->sp_stats.packets);
-@@ -377,7 +377,7 @@ static void svc_xprt_do_enqueue(struct s
+@@ -376,7 +376,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
atomic_long_inc(&pool->sp_stats.threads_woken);
wake_up_process(rqstp->rq_task);
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
}
rcu_read_unlock();
-@@ -398,7 +398,7 @@ static void svc_xprt_do_enqueue(struct s
+@@ -397,7 +397,7 @@ void svc_xprt_do_enqueue(struct svc_xprt
goto redo_search;
}
rqstp = NULL;
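
The substitution above trades get_cpu() for get_cpu_light(): svc_xprt_do_enqueue() only needs a stable CPU number to pick a pool, not a preempt-disabled section across the whole enqueue. For context, the helpers are understood to be defined in the RT tree roughly as follows (an assumption, they are not part of this refresh):

#ifdef CONFIG_PREEMPT_RT_FULL
# define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
# define put_cpu_light()	migrate_enable()
#else
# define get_cpu_light()	get_cpu()
# define put_cpu_light()	put_cpu()
#endif
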
diff --git a/patches/suspend-prevernt-might-sleep-splats.patch b/patches/suspend-prevernt-might-sleep-splats.patch
index 3aeb8501f35ab..089e64f423a98 100644
--- a/patches/suspend-prevernt-might-sleep-splats.patch
+++ b/patches/suspend-prevernt-might-sleep-splats.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -467,6 +467,7 @@ extern enum system_states {
+@@ -473,6 +473,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Enable_cpus:
@@ -555,6 +560,7 @@ int hibernation_platform_enter(void)
- goto Platform_finish;
+ goto Enable_cpus;
local_irq_disable();
+ system_state = SYSTEM_SUSPEND;
@@ -82,11 +82,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
syscore_resume();
+ system_state = SYSTEM_RUNNING;
local_irq_enable();
- enable_nonboot_cpus();
+ Enable_cpus:
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -356,6 +356,8 @@ static int suspend_enter(suspend_state_t
+@@ -359,6 +359,8 @@ static int suspend_enter(suspend_state_t
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
-@@ -370,6 +372,8 @@ static int suspend_enter(suspend_state_t
+@@ -375,6 +377,8 @@ static int suspend_enter(suspend_state_t
syscore_resume();
}
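
Both resume paths bracket their interrupts-off window with the new SYSTEM_SUSPEND state added to the enum in kernel.h above, so RT's sleeping-lock debug checks can tell a legitimate suspend window apart from a real bug. A simplified excerpt of the shape this gives suspend_enter(), error handling elided:

	arch_suspend_disable_irqs();
	BUG_ON(!irqs_disabled());

	system_state = SYSTEM_SUSPEND;

	error = syscore_suspend();
	if (!error) {
		*wakeup = pm_wakeup_pending();
		/* ... enter the platform sleep state ... */
		syscore_resume();
	}

	system_state = SYSTEM_RUNNING;

	arch_suspend_enable_irqs();
	BUG_ON(irqs_disabled());
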
diff --git a/patches/sysfs-realtime-entry.patch b/patches/sysfs-realtime-entry.patch
index 1231bed700866..ec75898877cf9 100644
--- a/patches/sysfs-realtime-entry.patch
+++ b/patches/sysfs-realtime-entry.patch
@@ -21,7 +21,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+++ b/kernel/ksysfs.c
@@ -136,6 +136,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
- #endif /* CONFIG_KEXEC */
+ #endif /* CONFIG_KEXEC_CORE */
+#if defined(CONFIG_PREEMPT_RT_FULL)
+static ssize_t realtime_show(struct kobject *kobj,
diff --git a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index a194d92ee9eff..e683595590572 100644
--- a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -481,8 +481,9 @@ static inline struct task_struct *this_c
+@@ -482,8 +482,9 @@ static inline struct task_struct *this_c
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
-@@ -507,27 +508,36 @@ struct tasklet_struct name = { NULL, 0,
+@@ -508,27 +509,36 @@ struct tasklet_struct name = { NULL, 0,
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
-@@ -576,12 +586,7 @@ static inline void tasklet_disable(struc
+@@ -577,12 +587,7 @@ static inline void tasklet_disable(struc
smp_mb();
}
diff --git a/patches/timekeeping-split-jiffies-lock.patch b/patches/timekeeping-split-jiffies-lock.patch
index f46e80560e645..418bb9fa38aaf 100644
--- a/patches/timekeeping-split-jiffies-lock.patch
+++ b/patches/timekeeping-split-jiffies-lock.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(get_jiffies_64);
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
-@@ -78,13 +78,15 @@ int tick_is_oneshot_available(void)
+@@ -79,13 +79,15 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
update_wall_time();
}
-@@ -146,9 +148,9 @@ void tick_setup_periodic(struct clock_ev
+@@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_ev
ktime_t next;
do {
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
- clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+ clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -120,16 +120,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
- last_update = last_jiffies_update;
- last_jiffies = jiffies;
+ basemono = last_jiffies_update.tv64;
+ basejiff = jiffies;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
+ ts->last_jiffies = basejiff;
- if (rcu_needs_cpu(&rcu_delta_jiffies) ||
- arch_needs_cpu() || irq_work_needs_cpu()) {
+ if (rcu_needs_cpu(basemono, &next_rcu) ||
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
-@@ -2065,8 +2065,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2048,8 +2048,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
-@@ -22,7 +22,8 @@ extern void timekeeping_resume(void);
+@@ -19,7 +19,8 @@ extern void timekeeping_resume(void);
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
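
This is the standard RT split of a seqlock_t into a raw_spinlock_t for writers plus a seqcount_t for readers (the vtime patch further down applies the same treatment to vtime_seqlock): the write side can then run under a raw, non-sleeping lock on RT while readers keep their retry loop. A sketch of both sides for the jiffies case, following the hunks above:

/* Writer: tick path, serialized by the raw lock. */
static void jiffies_update_sketch(unsigned long ticks)
{
	raw_spin_lock(&jiffies_lock);
	write_seqcount_begin(&jiffies_seq);
	jiffies_64 += ticks;
	write_seqcount_end(&jiffies_seq);
	raw_spin_unlock(&jiffies_lock);
}

/* Reader: retries on the seqcount alone, never touches the lock. */
static u64 jiffies_read_sketch(void)
{
	unsigned int seq;
	u64 ret;

	do {
		seq = read_seqcount_begin(&jiffies_seq);
		ret = jiffies_64;
	} while (read_seqcount_retry(&jiffies_seq, seq));
	return ret;
}
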
diff --git a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
index fd3cea82f3591..204dda85151b6 100644
--- a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+++ b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1428,13 +1428,13 @@ void update_process_times(int user_tick)
+@@ -1456,13 +1456,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
diff --git a/patches/timers-avoid-the-base-null-otptimization-on-rt.patch b/patches/timers-avoid-the-base-null-otptimization-on-rt.patch
index 0177c34be8cb8..4dbef98926672 100644
--- a/patches/timers-avoid-the-base-null-otptimization-on-rt.patch
+++ b/patches/timers-avoid-the-base-null-otptimization-on-rt.patch
@@ -7,59 +7,64 @@ base as a preempter would spin forever in lock_timer_base().
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/time/timer.c | 40 ++++++++++++++++++++++++++++++++--------
- 1 file changed, 32 insertions(+), 8 deletions(-)
+ kernel/time/timer.c | 45 +++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 35 insertions(+), 10 deletions(-)
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -771,6 +771,36 @@ static struct tvec_base *lock_timer_base
+@@ -780,6 +780,39 @@ static struct tvec_base *lock_timer_base
+ cpu_relax();
}
}
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
-+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
-+ struct tvec_base *old,
-+ struct tvec_base *new)
-+{
-+ /* See the comment in lock_timer_base() */
-+ timer_set_base(timer, NULL);
-+ spin_unlock(&old->lock);
-+ spin_lock(&new->lock);
-+ timer_set_base(timer, new);
-+ return new;
-+}
-+#else
++#ifdef CONFIG_PREEMPT_RT_FULL
+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
+ struct tvec_base *old,
+ struct tvec_base *new)
+{
+ /*
-+ * We cannot do the above because we might be preempted and
++ * We cannot do the below because we might be preempted and
+ * then the preempter would see NULL and loop forever.
+ */
+ if (spin_trylock(&new->lock)) {
-+ timer_set_base(timer, new);
++ WRITE_ONCE(timer->flags,
++ (timer->flags & ~TIMER_BASEMASK) | new->cpu);
+ spin_unlock(&old->lock);
+ return new;
+ }
+ return old;
+}
-+#endif
+
++#else
++static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
++ struct tvec_base *old,
++ struct tvec_base *new)
++{
++ /* See the comment in lock_timer_base() */
++ timer->flags |= TIMER_MIGRATING;
++
++ spin_unlock(&old->lock);
++ spin_lock(&new->lock);
++ WRITE_ONCE(timer->flags,
++ (timer->flags & ~TIMER_BASEMASK) | new->cpu);
++ return new;
++}
++#endif
+
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
- bool pending_only, int pinned)
-@@ -801,14 +831,8 @@ static inline int
+@@ -810,16 +843,8 @@ static inline int
* handler yet has not finished. This also guarantees that
* the timer is serialized wrt itself.
*/
- if (likely(base->running_timer != timer)) {
- /* See the comment in lock_timer_base() */
-- timer_set_base(timer, NULL);
+- timer->flags |= TIMER_MIGRATING;
+-
- spin_unlock(&base->lock);
- base = new_base;
- spin_lock(&base->lock);
-- timer_set_base(timer, base);
+- WRITE_ONCE(timer->flags,
+- (timer->flags & ~TIMER_BASEMASK) | base->cpu);
- }
+ if (likely(base->running_timer != timer))
+ base = switch_timer_base(timer, base, new_base);
diff --git a/patches/timers-preempt-rt-support.patch b/patches/timers-preempt-rt-support.patch
index 580251f50dec1..2018ed3b3b1bf 100644
--- a/patches/timers-preempt-rt-support.patch
+++ b/patches/timers-preempt-rt-support.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1395,6 +1395,14 @@ unsigned long get_next_timer_interrupt(u
+@@ -1422,6 +1422,14 @@ u64 get_next_timer_interrupt(unsigned lo
if (cpu_is_offline(smp_processor_id()))
return expires;
@@ -29,26 +29,26 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * the base lock to check when the next timer is pending and so
+ * we assume the next jiffy.
+ */
-+ return now + 1;
++ return basej;
+#endif
spin_lock(&base->lock);
if (base->active_timers) {
if (time_before_eq(base->next_timer, base->timer_jiffies))
-@@ -1594,7 +1602,7 @@ static void migrate_timers(int cpu)
+@@ -1621,7 +1629,7 @@ static void migrate_timers(int cpu)
BUG_ON(cpu_online(cpu));
- old_base = per_cpu(tvec_bases, cpu);
-- new_base = get_cpu_var(tvec_bases);
-+ new_base = get_local_var(tvec_bases);
+ old_base = per_cpu_ptr(&tvec_bases, cpu);
+- new_base = get_cpu_ptr(&tvec_bases);
++ new_base = get_local_ptr(&tvec_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
-@@ -1618,7 +1626,7 @@ static void migrate_timers(int cpu)
+@@ -1645,7 +1653,7 @@ static void migrate_timers(int cpu)
spin_unlock(&old_base->lock);
spin_unlock_irq(&new_base->lock);
-- put_cpu_var(tvec_bases);
-+ put_local_var(tvec_bases);
+- put_cpu_ptr(&tvec_bases);
++ put_local_ptr(&tvec_bases);
}
static int timer_cpu_notify(struct notifier_block *self,
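
Two adjustments above: the RT early-return in get_next_timer_interrupt() is rewritten for the new u64 basej/basem calling convention, and migrate_timers() moves from get_cpu_ptr() to get_local_ptr(), the local-lock-aware variant. The early return exists because base->lock is a sleeping lock on RT and this runs in the irqs-off idle path; per the hunk above, the next timer is simply assumed due at the base jiffy instead of now + 1:

u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
{
	struct tvec_base *base = this_cpu_ptr(&tvec_bases);
	u64 expires = KTIME_MAX;

	if (cpu_is_offline(smp_processor_id()))
		return expires;
#ifdef CONFIG_PREEMPT_RT_FULL
	/*
	 * On PREEMPT_RT_FULL we cannot take base->lock here, so
	 * assume a timer is pending at the next jiffy.
	 */
	return basej;
#endif
	/* ... !RT path: lock the base and compute the real expiry ... */
	return expires;
}
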
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index 6d57691d47148..3ba59d4e61e8d 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -11,13 +11,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/timer.h | 2 +-
- kernel/sched/core.c | 8 ++++++--
- kernel/time/timer.c | 37 ++++++++++++++++++++++++++++++++++---
- 3 files changed, 41 insertions(+), 6 deletions(-)
+ kernel/sched/core.c | 9 +++++++--
+ kernel/time/timer.c | 39 +++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 45 insertions(+), 5 deletions(-)
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
-@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list
+@@ -225,7 +225,7 @@ extern void add_timer(struct timer_list
extern int try_to_del_timer_sync(struct timer_list *timer);
@@ -28,25 +28,25 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define del_timer_sync(t) del_timer(t)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -641,12 +641,14 @@ void resched_cpu(int cpu)
+@@ -618,11 +618,14 @@ void resched_cpu(int cpu)
*/
- int get_nohz_timer_target(int pinned)
+ int get_nohz_timer_target(void)
{
-- int cpu = smp_processor_id();
-+ int cpu;
- int i;
+- int i, cpu = smp_processor_id();
++ int i, cpu;
struct sched_domain *sd;
+ preempt_disable_rt();
+ cpu = smp_processor_id();
- if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
++
+ if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
- return cpu;
+ goto preempt_en_rt;
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -659,6 +661,8 @@ int get_nohz_timer_target(int pinned)
- }
+@@ -638,6 +641,8 @@ int get_nohz_timer_target(void)
+ cpu = housekeeping_any_cpu();
unlock:
rcu_read_unlock();
+preempt_en_rt:
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -78,6 +78,9 @@ struct tvec_root {
+@@ -80,6 +80,9 @@ struct tvec_root {
struct tvec_base {
spinlock_t lock;
struct timer_list *running_timer;
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned long timer_jiffies;
unsigned long next_timer;
unsigned long active_timers;
-@@ -979,6 +982,29 @@ void add_timer_on(struct timer_list *tim
+@@ -1006,6 +1009,33 @@ void add_timer_on(struct timer_list *tim
}
EXPORT_SYMBOL_GPL(add_timer_on);
@@ -76,11 +76,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ */
+static void wait_for_running_timer(struct timer_list *timer)
+{
-+ struct tvec_base *base = timer->base;
++ struct tvec_base *base;
++ u32 tf = timer->flags;
++
++ if (tf & TIMER_MIGRATING)
++ return;
+
-+ if (base->running_timer == timer)
-+ wait_event(base->wait_for_running_timer,
-+ base->running_timer != timer);
++ base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
++ wait_event(base->wait_for_running_timer,
++ base->running_timer != timer);
+}
+
+# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
@@ -96,16 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
-@@ -1036,7 +1062,7 @@ int try_to_del_timer_sync(struct timer_l
- }
- EXPORT_SYMBOL(try_to_del_timer_sync);
-
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
-
- /**
-@@ -1098,7 +1124,7 @@ int del_timer_sync(struct timer_list *ti
+@@ -1123,7 +1153,7 @@ int del_timer_sync(struct timer_list *ti
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
@@ -114,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL(del_timer_sync);
-@@ -1219,15 +1245,17 @@ static inline void __run_timers(struct t
+@@ -1248,15 +1278,17 @@ static inline void __run_timers(struct t
if (irqsafe) {
spin_unlock(&base->lock);
call_timer_fn(timer, fn, data);
@@ -133,13 +128,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_unlock_irq(&base->lock);
}
-@@ -1625,6 +1653,9 @@ static void __init init_timer_cpu(struct
+@@ -1645,6 +1677,9 @@ static void __init init_timer_cpu(int cp
+
base->cpu = cpu;
- per_cpu(tvec_bases, cpu) = base;
spin_lock_init(&base->lock);
+#ifdef CONFIG_PREEMPT_RT_FULL
+ init_waitqueue_head(&base->wait_for_running_timer);
+#endif
- for (j = 0; j < TVN_SIZE; j++) {
- INIT_LIST_HEAD(base->tv5.vec + j);
+ base->timer_jiffies = jiffies;
+ base->next_timer = base->timer_jiffies;
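
The refresh above adapts the patch to the v4.4 timer rework: a timer's base is no longer reachable via timer->base but is derived from timer->flags, and a TIMER_MIGRATING timer must not be waited on since its base is changing under us. With wait_for_running_timer() in place, del_timer_sync() on RT sleeps on the base's waitqueue instead of busy-waiting, per the hunk above:

int del_timer_sync_sketch(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);

		if (ret >= 0)
			return ret;
		/*
		 * On RT: sleep until the running callback finishes,
		 * instead of cpu_relax() against a possibly preempted
		 * softirq thread.
		 */
		wait_for_running_timer(timer);
	}
}
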
diff --git a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
index 1368779148c11..344544eb98996 100644
--- a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2977,7 +2977,16 @@ asmlinkage __visible void __sched notrac
+@@ -3306,7 +3306,16 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
@@ -39,8 +39,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * we must force it manually.
+ */
+ start_critical_timings();
- __schedule();
+ __schedule(true);
+ stop_critical_timings();
exception_exit(prev_ctx);
- __preempt_count_sub(PREEMPT_ACTIVE);
+ preempt_enable_no_resched_notrace();
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 5a909aeda83b7..f26739a1185c8 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3386,7 +3386,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3540,7 +3540,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3396,13 +3396,13 @@ static int netif_rx_internal(struct sk_b
+@@ -3550,13 +3550,13 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/usb-use-_nort-in-giveback.patch b/patches/usb-use-_nort-in-giveback.patch
index edcf460aed4dc..df2b37a2e1bfb 100644
--- a/patches/usb-use-_nort-in-giveback.patch
+++ b/patches/usb-use-_nort-in-giveback.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
-@@ -1684,9 +1684,9 @@ static void __usb_hcd_giveback_urb(struc
+@@ -1735,9 +1735,9 @@ static void __usb_hcd_giveback_urb(struc
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
*/
diff --git a/patches/vtime-split-lock-and-seqcount.patch b/patches/vtime-split-lock-and-seqcount.patch
index 5b02e38ea2fb9..1b792f26a42d9 100644
--- a/patches/vtime-split-lock-and-seqcount.patch
+++ b/patches/vtime-split-lock-and-seqcount.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
4 files changed, 46 insertions(+), 25 deletions(-)
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
-@@ -149,7 +149,8 @@ extern struct task_group root_task_group
+@@ -150,7 +150,8 @@ extern struct task_group root_task_group
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
@@ -26,9 +26,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1478,7 +1478,8 @@ struct task_struct {
- struct cputime prev_cputime;
- #endif
+@@ -1519,7 +1519,8 @@ struct task_struct {
+ cputime_t gtime;
+ struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- seqlock_t vtime_seqlock;
+ raw_spinlock_t vtime_lock;
@@ -38,9 +38,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
VTIME_SLEEPING = 0,
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1345,7 +1345,8 @@ static struct task_struct *copy_process(
- p->prev_cputime.utime = p->prev_cputime.stime = 0;
- #endif
+@@ -1349,7 +1349,8 @@ static struct task_struct *copy_process(
+ prev_cputime_init(&p->prev_cputime);
+
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
- seqlock_init(&p->vtime_seqlock);
+ raw_spin_lock_init(&p->vtime_lock);
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
-@@ -675,37 +675,45 @@ static void __vtime_account_system(struc
+@@ -696,37 +696,45 @@ static void __vtime_account_system(struc
void vtime_account_system(struct task_struct *tsk)
{
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void vtime_guest_enter(struct task_struct *tsk)
-@@ -717,19 +725,23 @@ void vtime_guest_enter(struct task_struc
+@@ -738,19 +746,23 @@ void vtime_guest_enter(struct task_struc
* synchronization against the reader (task_gtime())
* that can thus safely catch up with a tickless delta.
*/
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);
-@@ -742,24 +754,30 @@ void vtime_account_idle(struct task_stru
+@@ -763,24 +775,30 @@ void vtime_account_idle(struct task_stru
void arch_vtime_task_switch(struct task_struct *prev)
{
@@ -169,8 +169,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
cputime_t task_gtime(struct task_struct *t)
-@@ -768,13 +786,13 @@ cputime_t task_gtime(struct task_struct
- cputime_t gtime;
+@@ -792,13 +810,13 @@ cputime_t task_gtime(struct task_struct
+ return t->gtime;
do {
- seq = read_seqbegin(&t->vtime_seqlock);
@@ -185,7 +185,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return gtime;
}
-@@ -797,7 +815,7 @@ fetch_task_cputime(struct task_struct *t
+@@ -821,7 +839,7 @@ fetch_task_cputime(struct task_struct *t
*udelta = 0;
*sdelta = 0;
@@ -194,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (u_dst)
*u_dst = *u_src;
-@@ -821,7 +839,7 @@ fetch_task_cputime(struct task_struct *t
+@@ -845,7 +863,7 @@ fetch_task_cputime(struct task_struct *t
if (t->vtime_snap_whence == VTIME_SYS)
*sdelta = delta;
}
diff --git a/patches/wait-simple-implementation.patch b/patches/wait-simple-implementation.patch
index 4bbd029be7634..199b44b0acd01 100644
--- a/patches/wait-simple-implementation.patch
+++ b/patches/wait-simple-implementation.patch
@@ -235,7 +235,7 @@ wakeups vs adding items to the list.
+++ b/kernel/sched/Makefile
@@ -13,7 +13,7 @@ endif
- obj-y += core.o proc.o clock.o cputime.o
+ obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o completion.o idle.o
+obj-y += wait.o wait-simple.o completion.o idle.o
diff --git a/patches/work-queue-work-around-irqsafe-timer-optimization.patch b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
index 20956776fff3b..290127556f05e 100644
--- a/patches/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "workqueue_internal.h"
-@@ -1239,7 +1240,7 @@ static int try_to_grab_pending(struct wo
+@@ -1246,7 +1247,7 @@ static int try_to_grab_pending(struct wo
local_unlock_irqrestore(pendingb_lock, *flags);
if (work_is_canceling(work))
return -ENOENT;
diff --git a/patches/work-simple-Simple-work-queue-implemenation.patch b/patches/work-simple-Simple-work-queue-implemenation.patch
index 38ea28323ca1b..74de1a1a9ebaa 100644
--- a/patches/work-simple-Simple-work-queue-implemenation.patch
+++ b/patches/work-simple-Simple-work-queue-implemenation.patch
@@ -48,7 +48,7 @@ Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+++ b/kernel/sched/Makefile
@@ -13,7 +13,7 @@ endif
- obj-y += core.o proc.o clock.o cputime.o
+ obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o wait-simple.o completion.o idle.o
+obj-y += wait.o wait-simple.o work-simple.o completion.o idle.o
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 07f1cd49d196f..48d83bf589557 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -24,14 +24,14 @@ Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/sched/core.c | 70 +++++++++-----------------------------------
- kernel/workqueue.c | 55 ++++++++++++++--------------------
- kernel/workqueue_internal.h | 5 +--
- 3 files changed, 41 insertions(+), 89 deletions(-)
+ kernel/sched/core.c | 80 ++++++++------------------------------------
+ kernel/workqueue.c | 55 ++++++++++++------------------
+ kernel/workqueue_internal.h | 5 +-
+ 3 files changed, 41 insertions(+), 99 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1517,10 +1517,6 @@ static void ttwu_activate(struct rq *rq,
+@@ -1744,10 +1744,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1797,42 +1793,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2064,52 +2060,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -64,14 +64,24 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- lockdep_assert_held(&rq->lock);
-
- if (!raw_spin_trylock(&p->pi_lock)) {
+- /*
+- * This is OK, because current is on_cpu, which avoids it being
+- * picked for load-balance and preemption/IRQs are still
+- * disabled avoiding further scheduler activity on it and we've
+- * not yet picked a replacement task.
+- */
+- lockdep_unpin_lock(&rq->lock);
- raw_spin_unlock(&rq->lock);
- raw_spin_lock(&p->pi_lock);
- raw_spin_lock(&rq->lock);
+- lockdep_pin_lock(&rq->lock);
- }
-
- if (!(p->state & TASK_NORMAL))
- goto out;
-
+- trace_sched_waking(p);
+-
- if (!task_on_rq_queued(p))
- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
-
@@ -85,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3002,21 +2962,6 @@ static void __sched __schedule(void)
+@@ -3343,21 +3293,6 @@ static void __sched notrace __schedule(b
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -107,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3048,6 +2993,14 @@ static inline void sched_submit_work(str
+@@ -3390,6 +3325,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -122,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3056,6 +3009,12 @@ static inline void sched_submit_work(str
+@@ -3398,6 +3341,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -135,9 +145,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3064,6 +3023,7 @@ asmlinkage __visible void __sched schedu
- do {
- __schedule();
+@@ -3408,6 +3357,7 @@ asmlinkage __visible void __sched schedu
+ __schedule(false);
+ sched_preempt_enable_no_resched();
} while (need_resched());
+ sched_update_worker(tsk);
}
@@ -145,7 +155,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -804,44 +804,31 @@ static void wake_up_worker(struct worker
+@@ -811,44 +811,31 @@ static void wake_up_worker(struct worker
}
/**
@@ -202,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct worker_pool *pool;
/*
-@@ -850,14 +837,15 @@ struct task_struct *wq_worker_sleeping(s
+@@ -857,14 +844,15 @@ struct task_struct *wq_worker_sleeping(s
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
@@ -222,7 +232,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The counterpart of the following dec_and_test, implied mb,
* worklist not empty test sequence is in insert_work().
-@@ -870,9 +858,12 @@ struct task_struct *wq_worker_sleeping(s
+@@ -877,9 +865,12 @@ struct task_struct *wq_worker_sleeping(s
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
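
The large context changes above come from v4.4's __schedule(bool preempt) signature, trace_sched_waking() and the lockdep_pin_lock() annotations; the patch itself still does one thing: try_to_wake_up_local() is removed and the workqueue hooks run from schedule() outside the runqueue lock. The resulting shape, per the hunks above:

static inline void sched_update_worker(struct task_struct *tsk)
{
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_running(tsk);
}

asmlinkage __visible void __sched schedule(void)
{
	struct task_struct *tsk = current;

	sched_submit_work(tsk);	/* calls wq_worker_sleeping() for workers */
	do {
		preempt_disable();
		__schedule(false);
		sched_preempt_enable_no_resched();
	} while (need_resched());
	sched_update_worker(tsk);
}
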
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 8d62354c0b5de..e52b942baea5c 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3029,9 +3029,8 @@ static void __sched __schedule(void)
+@@ -3439,9 +3439,8 @@ static void __sched notrace __schedule(b
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3039,6 +3038,10 @@ static inline void sched_submit_work(str
+@@ -3449,6 +3448,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);
@@ -79,7 +79,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
* A: pool->attach_mutex protected.
*
* PL: wq_pool_mutex protected.
-@@ -405,6 +410,31 @@ static void workqueue_sysfs_unregister(s
+@@ -411,6 +416,31 @@ static void workqueue_sysfs_unregister(s
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
@@ -111,7 +111,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
-@@ -797,10 +827,16 @@ static struct worker *first_idle_worker(
+@@ -804,10 +834,16 @@ static struct worker *first_idle_worker(
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -129,7 +129,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -828,7 +864,7 @@ void wq_worker_running(struct task_struc
+@@ -835,7 +871,7 @@ void wq_worker_running(struct task_struc
*/
void wq_worker_sleeping(struct task_struct *task)
{
@@ -138,7 +138,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
struct worker_pool *pool;
/*
-@@ -845,25 +881,18 @@ void wq_worker_sleeping(struct task_stru
+@@ -852,25 +888,18 @@ void wq_worker_sleeping(struct task_stru
return;
worker->sleeping = 1;
@@ -168,7 +168,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -1554,7 +1583,9 @@ static void worker_enter_idle(struct wor
+@@ -1561,7 +1590,9 @@ static void worker_enter_idle(struct wor
worker->last_active = jiffies;
/* idle_list is LIFO */
@@ -178,7 +178,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1587,7 +1618,9 @@ static void worker_leave_idle(struct wor
+@@ -1594,7 +1625,9 @@ static void worker_leave_idle(struct wor
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
@@ -188,7 +188,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
static struct worker *alloc_worker(int node)
-@@ -1755,7 +1788,9 @@ static void destroy_worker(struct worker
+@@ -1760,7 +1793,9 @@ static void destroy_worker(struct worker
pool->nr_workers--;
pool->nr_idle--;
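
Every idle_list access above is wrapped in rt_lock_idle_list()/rt_unlock_idle_list(). Their definitions are in the 25-line block added at the @@ -411 hunk and are not visible here; they are understood to make the list manipulation atomic with respect to wake_up_worker() by disabling preemption on RT, while !RT keeps relying on pool->lock. A sketch under that assumption:

#ifdef CONFIG_PREEMPT_RT_FULL
static inline void rt_lock_idle_list(struct worker_pool *pool)
{
	preempt_disable();
}
static inline void rt_unlock_idle_list(struct worker_pool *pool)
{
	preempt_enable();
}
#else
static inline void rt_lock_idle_list(struct worker_pool *pool) { }
static inline void rt_unlock_idle_list(struct worker_pool *pool) { }
#endif
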
diff --git a/patches/workqueue-use-locallock.patch b/patches/workqueue-use-locallock.patch
index ce3b20786b72a..ac5b32721ba57 100644
--- a/patches/workqueue-use-locallock.patch
+++ b/patches/workqueue-use-locallock.patch
@@ -20,16 +20,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "workqueue_internal.h"
-@@ -329,6 +330,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
+@@ -331,6 +332,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);
+
static int worker_thread(void *__worker);
- static void copy_workqueue_attrs(struct workqueue_attrs *to,
- const struct workqueue_attrs *from);
-@@ -1065,9 +1068,9 @@ static void put_pwq_unlocked(struct pool
+ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+
+@@ -1072,9 +1075,9 @@ static void put_pwq_unlocked(struct pool
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -1169,7 +1172,7 @@ static int try_to_grab_pending(struct wo
+@@ -1176,7 +1179,7 @@ static int try_to_grab_pending(struct wo
struct worker_pool *pool;
struct pool_workqueue *pwq;
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1233,7 +1236,7 @@ static int try_to_grab_pending(struct wo
+@@ -1240,7 +1243,7 @@ static int try_to_grab_pending(struct wo
spin_unlock(&pool->lock);
fail:
rcu_read_unlock();
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (work_is_canceling(work))
return -ENOENT;
cpu_relax();
-@@ -1305,7 +1308,7 @@ static void __queue_work(int cpu, struct
+@@ -1312,7 +1315,7 @@ static void __queue_work(int cpu, struct
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_work_activate(work);
-@@ -1410,14 +1413,14 @@ bool queue_work_on(int cpu, struct workq
+@@ -1417,14 +1420,14 @@ bool queue_work_on(int cpu, struct workq
bool ret = false;
unsigned long flags;
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1484,14 +1487,14 @@ bool queue_delayed_work_on(int cpu, stru
+@@ -1491,14 +1494,14 @@ bool queue_delayed_work_on(int cpu, stru
unsigned long flags;
/* read the comment in __queue_work() */
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1526,7 +1529,7 @@ bool mod_delayed_work_on(int cpu, struct
+@@ -1533,7 +1536,7 @@ bool mod_delayed_work_on(int cpu, struct
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -2802,7 +2805,7 @@ static bool __cancel_work_timer(struct w
+@@ -2807,7 +2810,7 @@ static bool __cancel_work_timer(struct w
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
flush_work(work);
clear_work_data(work);
-@@ -2857,10 +2860,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -2862,10 +2865,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -2895,7 +2898,7 @@ bool cancel_delayed_work(struct delayed_
+@@ -2900,7 +2903,7 @@ bool cancel_delayed_work(struct delayed_
set_work_pool_and_clear_pending(&dwork->work,
get_work_pool_id(&dwork->work));
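The pattern in every hunk above is the same: the local_irq_save()/local_irq_restore() pairs that mainline uses to guard the PENDING bit become local_lock_irqsave()/local_unlock_irqrestore() on pendingb_lock, so the section stays preemptible on RT. A minimal sketch of the idiom using the locallock API this queue provides (the function around it is invented for illustration):

#include <linux/workqueue.h>
#include <linux/locallock.h>	/* RT-tree header providing the API */

static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);

static void example_mark_pending(struct work_struct *work)
{
	unsigned long flags;

	/* On !RT this behaves like local_irq_save(); on RT it takes a
	 * per-CPU sleeping lock and leaves interrupts enabled. */
	local_lock_irqsave(pendingb_lock, flags);
	test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work));
	local_unlock_irqrestore(pendingb_lock, flags);
}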
diff --git a/patches/workqueue-use-rcu.patch b/patches/workqueue-use-rcu.patch
index e03025ee8146c..42ae05d752f55 100644
--- a/patches/workqueue-use-rcu.patch
+++ b/patches/workqueue-use-rcu.patch
@@ -9,18 +9,22 @@ protected by preempt or irq disabled regions.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/workqueue.c | 92 +++++++++++++++++++++++++++++------------------------
- 1 file changed, 51 insertions(+), 41 deletions(-)
+ kernel/workqueue.c | 96 +++++++++++++++++++++++++++++------------------------
+ 1 file changed, 53 insertions(+), 43 deletions(-)
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -125,11 +125,11 @@ enum {
+@@ -125,7 +125,7 @@ enum {
*
* PL: wq_pool_mutex protected.
*
- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads.
+ * PR: wq_pool_mutex protected for writes. RCU protected for reads.
*
+ * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
+ *
+@@ -134,7 +134,7 @@ enum {
+ *
* WQ: wq->mutex protected.
*
- * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
@@ -28,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* MD: wq_mayday_lock protected.
*/
-@@ -178,7 +178,7 @@ struct worker_pool {
+@@ -183,7 +183,7 @@ struct worker_pool {
atomic_t nr_running ____cacheline_aligned_in_smp;
/*
@@ -37,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* from get_work_pool().
*/
struct rcu_head rcu;
-@@ -207,7 +207,7 @@ struct pool_workqueue {
+@@ -212,7 +212,7 @@ struct pool_workqueue {
/*
* Release of unbound pwq is punted to system_wq. See put_pwq()
* and pwq_unbound_release_workfn() for details. pool_workqueue
@@ -46,26 +50,34 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
-@@ -338,14 +338,14 @@ static void workqueue_sysfs_unregister(s
+@@ -338,20 +338,20 @@ static void workqueue_sysfs_unregister(s
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
-- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-+ rcu_lockdep_assert(rcu_read_lock_held() || \
- lockdep_is_held(&wq_pool_mutex), \
-- "sched RCU or wq_pool_mutex should be held")
-+ "RCU or wq_pool_mutex should be held")
+- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&wq_pool_mutex), \
+- "sched RCU or wq_pool_mutex should be held")
++ "RCU or wq_pool_mutex should be held")
#define assert_rcu_or_wq_mutex(wq) \
-- rcu_lockdep_assert(rcu_read_lock_sched_held() || \
-+ rcu_lockdep_assert(rcu_read_lock_held() || \
- lockdep_is_held(&wq->mutex), \
-- "sched RCU or wq->mutex should be held")
-+ "RCU or wq->mutex should be held")
+- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&wq->mutex), \
+- "sched RCU or wq->mutex should be held")
++ "RCU or wq->mutex should be held")
+
+ #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
+- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !lockdep_is_held(&wq->mutex) && \
+ !lockdep_is_held(&wq_pool_mutex), \
+- "sched RCU, wq->mutex or wq_pool_mutex should be held")
++ "RCU, wq->mutex or wq_pool_mutex should be held")
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
-@@ -357,7 +357,7 @@ static void workqueue_sysfs_unregister(s
+@@ -363,7 +363,7 @@ static void workqueue_sysfs_unregister(s
* @pool: iteration cursor
* @pi: integer used for iteration
*
@@ -74,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
-@@ -389,7 +389,7 @@ static void workqueue_sysfs_unregister(s
+@@ -395,7 +395,7 @@ static void workqueue_sysfs_unregister(s
* @pwq: iteration cursor
* @wq: the target workqueue
*
@@ -83,16 +95,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
-@@ -551,7 +551,7 @@ static int worker_pool_assign_id(struct
+@@ -557,7 +557,7 @@ static int worker_pool_assign_id(struct
* @wq: the target workqueue
* @node: the node ID
*
-- * This must be called either with pwq_lock held or sched RCU read locked.
-+ * This must be called either with pwq_lock held or RCU read locked.
+- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
++ * This must be called with any of wq_pool_mutex, wq->mutex or RCU
+ * read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
- *
-@@ -655,8 +655,8 @@ static struct pool_workqueue *get_work_p
+@@ -662,8 +662,8 @@ static struct pool_workqueue *get_work_p
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -103,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
-@@ -1062,7 +1062,7 @@ static void put_pwq_unlocked(struct pool
+@@ -1069,7 +1069,7 @@ static void put_pwq_unlocked(struct pool
{
if (pwq) {
/*
@@ -112,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* following lock operations are safe.
*/
spin_lock_irq(&pwq->pool->lock);
-@@ -1188,6 +1188,7 @@ static int try_to_grab_pending(struct wo
+@@ -1195,6 +1195,7 @@ static int try_to_grab_pending(struct wo
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
@@ -120,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1226,10 +1227,12 @@ static int try_to_grab_pending(struct wo
+@@ -1233,10 +1234,12 @@ static int try_to_grab_pending(struct wo
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -133,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
-@@ -1310,6 +1313,8 @@ static void __queue_work(int cpu, struct
+@@ -1317,6 +1320,8 @@ static void __queue_work(int cpu, struct
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -142,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = raw_smp_processor_id();
-@@ -1366,10 +1371,8 @@ static void __queue_work(int cpu, struct
+@@ -1373,10 +1378,8 @@ static void __queue_work(int cpu, struct
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@@ -155,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
-@@ -1385,7 +1388,9 @@ static void __queue_work(int cpu, struct
+@@ -1392,7 +1395,9 @@ static void __queue_work(int cpu, struct
insert_work(pwq, work, worklist, work_flags);
@@ -165,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -2672,14 +2677,14 @@ static bool start_flush_work(struct work
+@@ -2677,14 +2682,14 @@ static bool start_flush_work(struct work
might_sleep();
@@ -183,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2706,10 +2711,11 @@ static bool start_flush_work(struct work
+@@ -2711,10 +2716,11 @@ static bool start_flush_work(struct work
else
lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
@@ -196,7 +208,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return false;
}
-@@ -3147,7 +3153,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3122,7 +3128,7 @@ static void rcu_free_pool(struct rcu_hea
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -205,7 +217,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3201,8 +3207,8 @@ static void put_unbound_pool(struct work
+@@ -3176,8 +3182,8 @@ static void put_unbound_pool(struct work
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -216,7 +228,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -3307,14 +3313,14 @@ static void pwq_unbound_release_workfn(s
+@@ -3284,14 +3290,14 @@ static void pwq_unbound_release_workfn(s
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -233,7 +245,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -3920,7 +3926,7 @@ void destroy_workqueue(struct workqueue_
+@@ -3944,7 +3950,7 @@ void destroy_workqueue(struct workqueue_
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
@@ -242,7 +254,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* We're the sole accessor of @wq at this point. Directly
-@@ -4013,7 +4019,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4037,7 +4043,8 @@ bool workqueue_congested(int cpu, struct
struct pool_workqueue *pwq;
bool ret;
@@ -252,7 +264,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4024,7 +4031,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4048,7 +4055,8 @@ bool workqueue_congested(int cpu, struct
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -262,7 +274,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4050,15 +4058,15 @@ unsigned int work_busy(struct work_struc
+@@ -4074,15 +4082,15 @@ unsigned int work_busy(struct work_struc
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -282,7 +294,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4247,7 +4255,7 @@ void show_workqueue_state(void)
+@@ -4271,7 +4279,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
@@ -291,7 +303,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4298,7 +4306,7 @@ void show_workqueue_state(void)
+@@ -4322,7 +4330,7 @@ void show_workqueue_state(void)
spin_unlock_irqrestore(&pool->lock, flags);
}
@@ -300,7 +312,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -4648,16 +4656,16 @@ bool freeze_workqueues_busy(void)
+@@ -4672,16 +4680,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -320,7 +332,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -4771,7 +4779,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4871,7 +4879,8 @@ static ssize_t wq_pool_ids_show(struct d
const char *delim = "";
int node, written = 0;
@@ -330,7 +342,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -4779,7 +4788,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -4879,7 +4888,8 @@ static ssize_t wq_pool_ids_show(struct d
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
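To sum up the conversion running through this file: read sides that mainline covers with sched-RCU (implicitly, via disabled preemption or interrupts) become explicit rcu_read_lock() sections, and preempt_disable() survives only where a stable CPU number is still required. A hedged sketch mirroring the workqueue_congested() hunk above (per-CPU branch only, the unbound case is elided):

#include <linux/workqueue.h>

bool example_congested(int cpu, struct workqueue_struct *wq)
{
	struct pool_workqueue *pwq;
	bool ret;

	rcu_read_lock();	/* was: rcu_read_lock_sched() */
	preempt_disable();	/* smp_processor_id() needs a stable CPU */

	if (cpu == WORK_CPU_UNBOUND)
		cpu = smp_processor_id();
	pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
	ret = !list_empty(&pwq->delayed_works);

	preempt_enable();
	rcu_read_unlock();	/* was: rcu_read_unlock_sched() */
	return ret;
}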
diff --git a/patches/x86-UV-raw_spinlock-conversion.patch b/patches/x86-UV-raw_spinlock-conversion.patch
index f7a749e1894a0..0dcf84e3bdf53 100644
--- a/patches/x86-UV-raw_spinlock-conversion.patch
+++ b/patches/x86-UV-raw_spinlock-conversion.patch
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct uv_blade_info *uv_blade_info;
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
-@@ -949,7 +949,7 @@ void __init uv_system_init(void)
+@@ -947,7 +947,7 @@ void __init uv_system_init(void)
uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
uv_blade_info[blade].nr_online_cpus = 0;
@@ -162,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
-@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct clock_event
+@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event
/* There is one of these allocated per node */
struct uv_rtc_timer_head {
@@ -171,7 +171,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* next cpu waiting for timer, local node relative: */
int next_cpu;
/* number of cpus on this node: */
-@@ -178,7 +178,7 @@ static __init int uv_rtc_allocate_timers
+@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers
uv_rtc_deallocate_timers();
return -ENOMEM;
}
@@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
head->ncpus = uv_blade_nr_possible_cpus(bid);
head->next_cpu = -1;
blade_info[bid] = head;
-@@ -232,7 +232,7 @@ static int uv_rtc_set_timer(int cpu, u64
+@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64
unsigned long flags;
int next_cpu;
@@ -189,7 +189,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
next_cpu = head->next_cpu;
*t = expires;
-@@ -244,12 +244,12 @@ static int uv_rtc_set_timer(int cpu, u64
+@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64
if (uv_setup_intr(cpu, expires)) {
*t = ULLONG_MAX;
uv_rtc_find_next_timer(head, pnode);
@@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -268,7 +268,7 @@ static int uv_rtc_unset_timer(int cpu, i
+@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, i
unsigned long flags;
int rc = 0;
@@ -213,7 +213,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
rc = 1;
-@@ -280,7 +280,7 @@ static int uv_rtc_unset_timer(int cpu, i
+@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, i
uv_rtc_find_next_timer(head, pnode);
}
@@ -222,7 +222,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return rc;
}
-@@ -300,13 +300,18 @@ static int uv_rtc_unset_timer(int cpu, i
+@@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, i
static cycle_t uv_read_rtc(struct clocksource *cs)
{
unsigned long offset;
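The UV changes follow the standard RT recipe: a lock taken from a context that may never sleep (here the RTC programming paths, reached with interrupts hard-disabled) moves from spinlock_t to raw_spinlock_t, which stays a spinning lock even on RT. A generic sketch of the recipe (struct and function invented for illustration):

#include <linux/spinlock.h>

struct example_timer_head {
	raw_spinlock_t	lock;	/* was: spinlock_t lock; */
	int		next_cpu;
};

static void example_set_timer(struct example_timer_head *head, int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&head->lock, flags);  /* was: spin_lock_irqsave() */
	head->next_cpu = cpu;
	/* ... program the hardware compare register ... */
	raw_spin_unlock_irqrestore(&head->lock, flags);
}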
diff --git a/patches/x86-crypto-reduce-preempt-disabled-regions.patch b/patches/x86-crypto-reduce-preempt-disabled-regions.patch
index 0650758353b66..9586322f205ff 100644
--- a/patches/x86-crypto-reduce-preempt-disabled-regions.patch
+++ b/patches/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
-@@ -382,14 +382,14 @@ static int ecb_encrypt(struct blkcipher_
+@@ -383,14 +383,14 @@ static int ecb_encrypt(struct blkcipher_
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -406,14 +406,14 @@ static int ecb_decrypt(struct blkcipher_
+@@ -407,14 +407,14 @@ static int ecb_decrypt(struct blkcipher_
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -430,14 +430,14 @@ static int cbc_encrypt(struct blkcipher_
+@@ -431,14 +431,14 @@ static int cbc_encrypt(struct blkcipher_
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -454,14 +454,14 @@ static int cbc_decrypt(struct blkcipher_
+@@ -455,14 +455,14 @@ static int cbc_decrypt(struct blkcipher_
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return err;
}
-@@ -513,18 +513,20 @@ static int ctr_crypt(struct blkcipher_de
+@@ -514,18 +514,20 @@ static int ctr_crypt(struct blkcipher_de
err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
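All five hunks in this file apply one transformation: rather than a single kernel_fpu_begin()/kernel_fpu_end() pair around the whole blkcipher walk, the FPU section (which implies disabled preemption) shrinks to one chunk per loop iteration. A sketch of the ecb_encrypt() loop after the change, condensed from the function these hunks touch:

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();	/* moved inside the loop */
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();	/* preemptible again between chunks */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}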
diff --git a/patches/x86-io-apic-migra-no-unmask.patch b/patches/x86-io-apic-migra-no-unmask.patch
index 76fadb9a913cb..2d8afd851adf0 100644
--- a/patches/x86-io-apic-migra-no-unmask.patch
+++ b/patches/x86-io-apic-migra-no-unmask.patch
@@ -9,18 +9,18 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/kernel/apic/io_apic.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1891,7 +1891,8 @@ static bool io_apic_level_ack_pending(st
- static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+@@ -1711,7 +1711,8 @@ static bool io_apic_level_ack_pending(st
+ static inline bool ioapic_irqd_mask(struct irq_data *data)
{
/* If we are moving the irq we need to mask it */
- if (unlikely(irqd_is_setaffinity_pending(data))) {
+ if (unlikely(irqd_is_setaffinity_pending(data) &&
+ !irqd_irq_inprogress(data))) {
- mask_ioapic(cfg);
+ mask_ioapic_irq(data);
return true;
}
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 7ef8da21b0c1d..270702b495040 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5813,6 +5813,13 @@ int kvm_arch_init(void *opaque)
+@@ -5782,6 +5782,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
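The hunk body falls outside the visible context; what the patch inserts at this point of kvm_arch_init() is, as far as this queue goes, a hard requirement for an invariant TSC when running RT, roughly:

#ifdef CONFIG_PREEMPT_RT_FULL
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
		return -EOPNOTSUPP;
	}
#endif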
diff --git a/patches/x86-mce-timer-hrtimer.patch b/patches/x86-mce-timer-hrtimer.patch
index af21becd3a7d3..b3b27813d0fcf 100644
--- a/patches/x86-mce-timer-hrtimer.patch
+++ b/patches/x86-mce-timer-hrtimer.patch
@@ -34,7 +34,7 @@ fold in:
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1267,7 +1268,7 @@ void mce_log_therm_throt_event(__u64 sta
+@@ -1225,7 +1226,7 @@ void mce_log_therm_throt_event(__u64 sta
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -43,7 +43,7 @@ fold in:
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1276,32 +1277,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1234,32 +1235,18 @@ static unsigned long mce_adjust_timer_de
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
@@ -82,7 +82,7 @@ fold in:
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1324,7 +1311,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1282,7 +1269,7 @@ static void mce_timer_fn(unsigned long d
done:
__this_cpu_write(mce_next_interval, iv);
@@ -91,7 +91,7 @@ fold in:
}
/*
-@@ -1332,7 +1319,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1290,7 +1277,7 @@ static void mce_timer_fn(unsigned long d
*/
void mce_timer_kick(unsigned long interval)
{
@@ -100,7 +100,7 @@ fold in:
unsigned long iv = __this_cpu_read(mce_next_interval);
__restart_timer(t, interval);
-@@ -1347,7 +1334,7 @@ static void mce_timer_delete_all(void)
+@@ -1305,7 +1292,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -109,7 +109,7 @@ fold in:
}
static void mce_do_trigger(struct work_struct *work)
-@@ -1649,7 +1636,7 @@ static void __mcheck_cpu_init_vendor(str
+@@ -1628,7 +1615,7 @@ static void __mcheck_cpu_clear_vendor(st
}
}
@@ -118,7 +118,7 @@ fold in:
{
unsigned long iv = check_interval * HZ;
-@@ -1658,16 +1645,17 @@ static void mce_start_timer(unsigned int
+@@ -1637,16 +1624,17 @@ static void mce_start_timer(unsigned int
per_cpu(mce_next_interval, cpu) = iv;
@@ -140,7 +140,7 @@ fold in:
mce_start_timer(cpu, t);
}
-@@ -2345,6 +2333,8 @@ static void mce_disable_cpu(void *h)
+@@ -2365,6 +2353,8 @@ static void mce_disable_cpu(void *h)
if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
@@ -148,8 +148,8 @@ fold in:
+
if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
- for (i = 0; i < mca_cfg.banks; i++) {
-@@ -2371,6 +2361,7 @@ static void mce_reenable_cpu(void *h)
+
+@@ -2387,6 +2377,7 @@ static void mce_reenable_cpu(void *h)
if (b->init)
wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
}
@@ -157,7 +157,7 @@ fold in:
}
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2378,7 +2369,6 @@ static int
+@@ -2394,7 +2385,6 @@ static int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -165,7 +165,7 @@ fold in:
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
-@@ -2398,11 +2388,9 @@ mce_cpu_callback(struct notifier_block *
+@@ -2414,11 +2404,9 @@ mce_cpu_callback(struct notifier_block *
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
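Net effect of this file: the per-CPU timer_list MCE poller becomes an hrtimer whose handler re-arms itself, which RT needs because regular timer callbacks run in softirq/thread context there. A condensed sketch of the resulting shape (polling details elided):

#include <linux/hrtimer.h>
#include <linux/jiffies.h>

static DEFINE_PER_CPU(struct hrtimer, mce_timer);

static enum hrtimer_restart example_mce_timer_fn(struct hrtimer *timer)
{
	unsigned long iv = __this_cpu_read(mce_next_interval);

	/* ... poll the MCE banks, possibly adjusting iv ... */

	__this_cpu_write(mce_next_interval, iv);
	hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(iv)));
	return HRTIMER_RESTART;
}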
diff --git a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
index ae8a65288fa47..e0b1abaaa2a0d 100644
--- a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -68,7 +68,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
#include <asm/processor.h>
#include <asm/traps.h>
-@@ -1344,6 +1345,56 @@ static void mce_do_trigger(struct work_s
+@@ -1302,6 +1303,56 @@ static void mce_do_trigger(struct work_s
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -125,7 +125,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
-@@ -1351,19 +1402,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1309,19 +1360,8 @@ static DECLARE_WORK(mce_trigger_work, mc
*/
int mce_notify_irq(void)
{
@@ -146,7 +146,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
return 1;
}
return 0;
-@@ -2429,6 +2469,10 @@ static __init int mcheck_init_device(voi
+@@ -2445,6 +2485,10 @@ static __init int mcheck_init_device(voi
goto err_out;
}
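The 50-line block added near mce_do_trigger() (its body is not part of this context diff) carries the mechanism: since the wakeup may fire from hard interrupt context, it goes through a raw-lock based simple waitqueue, with the actual swake_up() deferred via irq_work where needed. A reduced sketch using the swait API from this tree (all names illustrative):

#include <linux/swait.h>	/* RT-tree simple waitqueues */
#include <linux/irq_work.h>

static DECLARE_SWAIT_QUEUE_HEAD(example_mce_wait);

static void example_wake(struct irq_work *entry)
{
	swake_up(&example_mce_wait);
}
static DEFINE_IRQ_WORK(example_wake_work, example_wake);

int example_notify_irq(void)
{
	if (swait_active(&example_mce_wait)) {
		irq_work_queue(&example_wake_work);	/* defer to a safe context */
		return 1;
	}
	return 0;
}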
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 01f0d19a1bad5..ff3829dc9d966 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -7,33 +7,98 @@ Implement the x86 pieces for lazy preempt.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/Kconfig | 1 +
+ arch/x86/entry/common.c | 2 +-
+ arch/x86/entry/entry_32.S | 16 ++++++++++++++++
+ arch/x86/entry/entry_64.S | 16 ++++++++++++++++
arch/x86/include/asm/thread_info.h | 6 ++++++
arch/x86/kernel/asm-offsets.c | 2 ++
- arch/x86/kernel/entry_32.S | 20 ++++++++++++++++++--
- arch/x86/kernel/entry_64.S | 24 ++++++++++++++++++++----
- 5 files changed, 47 insertions(+), 6 deletions(-)
+ 6 files changed, 42 insertions(+), 1 deletion(-)
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -22,6 +22,7 @@ config X86_64
+@@ -17,6 +17,7 @@ config X86_64
### Arch settings
config X86
def_bool y
+ select HAVE_PREEMPT_LAZY
- select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
- select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
- select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+ select ACPI_LEGACY_TABLES_LOOKUP if ACPI
+ select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+ select ANON_INODES
+--- a/arch/x86/entry/common.c
++++ b/arch/x86/entry/common.c
+@@ -236,7 +236,7 @@ static void exit_to_usermode_loop(struct
+ /* We have work to do. */
+ local_irq_enable();
+
+- if (cached_flags & _TIF_NEED_RESCHED)
++ if (cached_flags & _TIF_NEED_RESCHED_MASK)
+ schedule();
+
+ #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+--- a/arch/x86/entry/entry_32.S
++++ b/arch/x86/entry/entry_32.S
+@@ -278,8 +278,24 @@ END(ret_from_exception)
+ ENTRY(resume_kernel)
+ DISABLE_INTERRUPTS(CLBR_ANY)
+ need_resched:
++ # preempt count == 0 + NEED_RS set?
+ cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz restore_all
++#else
++ jz test_int_off
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jne restore_all
++
++ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all
++
++ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
++ jz restore_all
++test_int_off:
++#endif
+ testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
+ jz restore_all
+ call preempt_schedule_irq
+--- a/arch/x86/entry/entry_64.S
++++ b/arch/x86/entry/entry_64.S
+@@ -579,7 +579,23 @@ GLOBAL(retint_user)
+ bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ jnc 1f
+ 0: cmpl $0, PER_CPU_VAR(__preempt_count)
++#ifndef CONFIG_PREEMPT_LAZY
+ jnz 1f
++#else
++ jz do_preempt_schedule_irq
++
++ # at least preempt count == 0 ?
++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
++ jnz 1f
++
++ GET_THREAD_INFO(%rcx)
++ cmpl $0, TI_preempt_lazy_count(%rcx)
++ jnz 1f
++
++ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
++ jnc 1f
++do_preempt_schedule_irq:
++#endif
+ call preempt_schedule_irq
+ jmp 0b
+ 1:
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
-@@ -55,6 +55,8 @@ struct thread_info {
+@@ -58,6 +58,8 @@ struct thread_info {
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
- int saved_preempt_count;
-+ int preempt_lazy_count; /* 0 => lazy preemptable
-+ <0 => BUG */
mm_segment_t addr_limit;
- void __user *sysenter_return;
++ int preempt_lazy_count; /* 0 => lazy preemptable
++ <0 => BUG */
unsigned int sig_on_uaccess_error:1;
+ unsigned int uaccess_err:1; /* uaccess failed */
+ };
@@ -95,6 +97,7 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
@@ -50,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_NOTSC (1 << TIF_NOTSC)
-@@ -168,6 +172,8 @@ struct thread_info {
+@@ -152,6 +156,8 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
@@ -69,102 +134,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BLANK();
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
-@@ -71,4 +72,5 @@ void common(void) {
+@@ -89,4 +90,5 @@ void common(void) {
BLANK();
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
}
---- a/arch/x86/kernel/entry_32.S
-+++ b/arch/x86/kernel/entry_32.S
-@@ -359,8 +359,24 @@ END(ret_from_exception)
- ENTRY(resume_kernel)
- DISABLE_INTERRUPTS(CLBR_ANY)
- need_resched:
-+ # preempt count == 0 + NEED_RS set?
- cmpl $0,PER_CPU_VAR(__preempt_count)
-+#ifndef CONFIG_PREEMPT_LAZY
- jnz restore_all
-+#else
-+ jz test_int_off
-+
-+ # atleast preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
-+ jne restore_all
-+
-+ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
-+ jnz restore_all
-+
-+ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
-+ jz restore_all
-+test_int_off:
-+#endif
- testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
- jz restore_all
- call preempt_schedule_irq
-@@ -594,7 +610,7 @@ ENDPROC(system_call)
- ALIGN
- RING0_PTREGS_FRAME # can't unwind into user space anyway
- work_pending:
-- testb $_TIF_NEED_RESCHED, %cl
-+ testl $_TIF_NEED_RESCHED_MASK, %ecx
- jz work_notifysig
- work_resched:
- call schedule
-@@ -607,7 +623,7 @@ ENDPROC(system_call)
- andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
- # than syscall tracing?
- jz restore_all
-- testb $_TIF_NEED_RESCHED, %cl
-+ testl $_TIF_NEED_RESCHED_MASK, %ecx
- jnz work_resched
-
- work_notifysig: # deal with pending signals and
---- a/arch/x86/kernel/entry_64.S
-+++ b/arch/x86/kernel/entry_64.S
-@@ -370,8 +370,8 @@ GLOBAL(int_with_check)
- /* First do a reschedule test. */
- /* edx: work, edi: workmask */
- int_careful:
-- bt $TIF_NEED_RESCHED,%edx
-- jnc int_very_careful
-+ testl $_TIF_NEED_RESCHED_MASK,%edx
-+ jz int_very_careful
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
- pushq_cfi %rdi
-@@ -776,7 +776,23 @@ retint_swapgs: /* return to user-space
- bt $9,EFLAGS(%rsp) /* interrupts were off? */
- jnc 1f
- 0: cmpl $0,PER_CPU_VAR(__preempt_count)
-+#ifndef CONFIG_PREEMPT_LAZY
- jnz 1f
-+#else
-+ jz do_preempt_schedule_irq
-+
-+ # atleast preempt count == 0 ?
-+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
-+ jnz 1f
-+
-+ GET_THREAD_INFO(%rcx)
-+ cmpl $0, TI_preempt_lazy_count(%rcx)
-+ jnz 1f
-+
-+ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
-+ jnc 1f
-+do_preempt_schedule_irq:
-+#endif
- call preempt_schedule_irq
- jmp 0b
- 1:
-@@ -844,8 +860,8 @@ ENTRY(native_iret)
- /* edi: workmask, edx: work */
- retint_careful:
- CFI_RESTORE_STATE
-- bt $TIF_NEED_RESCHED,%edx
-- jnc retint_signal
-+ testl $_TIF_NEED_RESCHED_MASK,%edx
-+ jz retint_signal
- TRACE_IRQS_ON
- ENABLE_INTERRUPTS(CLBR_NONE)
- pushq_cfi %rdi
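For reference against these assembly hunks: the lazy flag only works because every "need to schedule?" test checks a combined mask. Assuming the usual definition from this queue's preempt-lazy core patch (not shown in this file), the C side looks like:

#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

	/* exit-to-usermode loop, cf. the common.c hunk above */
	if (cached_flags & _TIF_NEED_RESCHED_MASK)
		schedule();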
diff --git a/patches/x86-stackprot-no-random-on-rt.patch b/patches/x86-stackprot-no-random-on-rt.patch
index 5c3ca09908af5..5a110414f9709 100644
--- a/patches/x86-stackprot-no-random-on-rt.patch
+++ b/patches/x86-stackprot-no-random-on-rt.patch
@@ -14,12 +14,12 @@ Reported-by: Carsten Emde <carsten.emde@osadl.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- arch/x86/include/asm/stackprotector.h | 10 +++++++++-
- 1 file changed, 9 insertions(+), 1 deletion(-)
+ arch/x86/include/asm/stackprotector.h | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
-@@ -57,7 +57,7 @@
+@@ -59,7 +59,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
{
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
u64 tsc;
#ifdef CONFIG_X86_64
-@@ -68,8 +68,16 @@ static __always_inline void boot_init_st
+@@ -70,8 +70,15 @@ static __always_inline void boot_init_st
* of randomness. The TSC only matters for very early init,
* there it already has some randomness on most systems. Later
* on during the bootup the random pool has true entropy too.
@@ -36,12 +36,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * For preempt-rt we need to weaken the randomness a bit, as
+ * we can't call into the random generator from atomic context
+ * due to locking constraints. We just leave canary
-+ * uninitialized and use the TSC based randomness on top of
-+ * it.
++ * uninitialized and use the TSC based randomness on top of it.
*/
+#ifndef CONFIG_PREEMPT_RT_FULL
get_random_bytes(&canary, sizeof(canary));
+#endif
- tsc = __native_read_tsc();
+ tsc = rdtsc();
canary += tsc + (tsc << 32UL);
diff --git a/patches/x86-use-gen-rwsem-spinlocks-rt.patch b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
index a32937dd889c7..7643217743fad 100644
--- a/patches/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -203,8 +203,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -212,8 +212,11 @@ config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API
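The visible hunk only shifts context lines; the substance of the patch is a Kconfig switch that sends x86 rwsems to the generic spinlock-based implementation when RT is enabled. A hedged sketch of that switch, with the exact symbols inferred from the patch title and the generic rwsem options rather than quoted from the body:

config RWSEM_GENERIC_SPINLOCK
	def_bool PREEMPT_RT_FULL

config RWSEM_XCHGADD_ALGORITHM
	def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL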
diff --git a/patches/xfs--clean-up-inode-lockdep-annotations b/patches/xfs--clean-up-inode-lockdep-annotations
deleted file mode 100644
index 110db4a4672f2..0000000000000
--- a/patches/xfs--clean-up-inode-lockdep-annotations
+++ /dev/null
@@ -1,281 +0,0 @@
-Subject: xfs: clean up inode lockdep annotations
-From: Dave Chinner <dchinner@redhat.com>
-Date: Wed Aug 19 10:32:49 2015 +1000
-
-Upstream commit 0952c8183c1575a78dc416b5e168987ff98728bb
-
-Lockdep annotations are a maintenance nightmare. Locking has to be
-modified to suit the limitations of the annotations, and we're
-always having to fix the annotations because they are unable to
-express the complexity of locking hierarchies correctly.
-
-So, next up, we've got more issues with lockdep annotations for
-inode locking w.r.t. XFS_LOCK_PARENT:
-
- - lockdep classes are exclusive and can't be ORed together
- to form new classes.
- - IOLOCK needs multiple PARENT subclasses to express the
- changes needed for the readdir locking rework needed to
- stop the endless flow of lockdep false positives involving
- readdir calling filldir under the ILOCK.
- - there are only 8 unique lockdep subclasses available,
- so we can't create a generic solution.
-
-IOWs we need to treat the 3-bit space available to each lock type
-differently:
-
- - IOLOCK uses xfs_lock_two_inodes(), so needs:
- - at least 2 IOLOCK subclasses
- - at least 2 IOLOCK_PARENT subclasses
- - MMAPLOCK uses xfs_lock_two_inodes(), so needs:
- - at least 2 MMAPLOCK subclasses
- - ILOCK uses xfs_lock_inodes with up to 5 inodes, so needs:
- - at least 5 ILOCK subclasses
- - one ILOCK_PARENT subclass
- - one RTBITMAP subclass
- - one RTSUM subclass
-
-For the IOLOCK, split the space into two sets of subclasses.
-For the MMAPLOCK, just use half the space for the one subclass to
-match the non-parent lock classes of the IOLOCK.
-For the ILOCK, use 0-4 as the ILOCK subclasses, 5-7 for the
-remaining individual subclasses.
-
-Because they are now all different, modify xfs_lock_inumorder() to
-handle the nested subclasses, and to assert fail if passed an
-invalid subclass. Further, annotate xfs_lock_inodes() to assert fail
-if an invalid combination of lock primitives and inode counts are
-passed that would result in a lockdep subclass annotation overflow.
-
-Signed-off-by: Dave Chinner <dchinner@redhat.com>
-Reviewed-by: Brian Foster <bfoster@redhat.com>
-Signed-off-by: Dave Chinner <david@fromorbit.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- fs/xfs/xfs_inode.c | 68 ++++++++++++++++++++++++++++++++++-----------
- fs/xfs/xfs_inode.h | 79 +++++++++++++++++++++++++++++++++++++----------------
- 2 files changed, 107 insertions(+), 40 deletions(-)
-
---- a/fs/xfs/xfs_inode.c
-+++ b/fs/xfs/xfs_inode.c
-@@ -164,7 +164,7 @@ xfs_ilock(
- (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
- ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
- (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-- ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-+ ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
-
- if (lock_flags & XFS_IOLOCK_EXCL)
- mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-@@ -212,7 +212,7 @@ xfs_ilock_nowait(
- (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
- ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
- (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-- ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-+ ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
-
- if (lock_flags & XFS_IOLOCK_EXCL) {
- if (!mrtryupdate(&ip->i_iolock))
-@@ -281,7 +281,7 @@ xfs_iunlock(
- (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
- ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
- (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
-- ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
-+ ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
- ASSERT(lock_flags != 0);
-
- if (lock_flags & XFS_IOLOCK_EXCL)
-@@ -364,30 +364,38 @@ int xfs_lock_delays;
-
- /*
- * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
-- * value. This shouldn't be called for page fault locking, but we also need to
-- * ensure we don't overrun the number of lockdep subclasses for the iolock or
-- * mmaplock as that is limited to 12 by the mmap lock lockdep annotations.
-+ * value. This can be called for any type of inode lock combination, including
-+ * parent locking. Care must be taken to ensure we don't overrun the subclass
-+ * storage fields in the class mask we build.
- */
- static inline int
- xfs_lock_inumorder(int lock_mode, int subclass)
- {
-+ int class = 0;
-+
-+ ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
-+ XFS_ILOCK_RTSUM)));
-+
- if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
-- ASSERT(subclass + XFS_LOCK_INUMORDER <
-- (1 << (XFS_MMAPLOCK_SHIFT - XFS_IOLOCK_SHIFT)));
-- lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
-+ ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
-+ ASSERT(subclass + XFS_IOLOCK_PARENT_VAL <
-+ MAX_LOCKDEP_SUBCLASSES);
-+ class += subclass << XFS_IOLOCK_SHIFT;
-+ if (lock_mode & XFS_IOLOCK_PARENT)
-+ class += XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT;
- }
-
- if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
-- ASSERT(subclass + XFS_LOCK_INUMORDER <
-- (1 << (XFS_ILOCK_SHIFT - XFS_MMAPLOCK_SHIFT)));
-- lock_mode |= (subclass + XFS_LOCK_INUMORDER) <<
-- XFS_MMAPLOCK_SHIFT;
-+ ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
-+ class += subclass << XFS_MMAPLOCK_SHIFT;
- }
-
-- if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
-- lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
-+ if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
-+ ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
-+ class += subclass << XFS_ILOCK_SHIFT;
-+ }
-
-- return lock_mode;
-+ return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
- }
-
- /*
-@@ -399,6 +407,11 @@ xfs_lock_inumorder(int lock_mode, int su
- * transaction (such as truncate). This can result in deadlock since the long
- * running trans might need to wait for the inode we just locked in order to
- * push the tail and free space in the log.
-+ *
-+ * xfs_lock_inodes() can only be used to lock one type of lock at a time -
-+ * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
-+ * lock more than one at a time, lockdep will report false positives saying we
-+ * have violated locking orders.
- */
- void
- xfs_lock_inodes(
-@@ -409,8 +422,29 @@ xfs_lock_inodes(
- int attempts = 0, i, j, try_lock;
- xfs_log_item_t *lp;
-
-- /* currently supports between 2 and 5 inodes */
-+ /*
-+ * Currently supports between 2 and 5 inodes with exclusive locking. We
-+ * support an arbitrary depth of locking here, but absolute limits on
-+ * inodes depend on the type of locking and the limits placed by
-+ * lockdep annotations in xfs_lock_inumorder. These are all checked by
-+ * the asserts.
-+ */
- ASSERT(ips && inodes >= 2 && inodes <= 5);
-+ ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
-+ XFS_ILOCK_EXCL));
-+ ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
-+ XFS_ILOCK_SHARED)));
-+ ASSERT(!(lock_mode & XFS_IOLOCK_EXCL) ||
-+ inodes <= XFS_IOLOCK_MAX_SUBCLASS + 1);
-+ ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
-+ inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
-+ ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
-+ inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
-+
-+ if (lock_mode & XFS_IOLOCK_EXCL) {
-+ ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
-+ } else if (lock_mode & XFS_MMAPLOCK_EXCL)
-+ ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
-
- try_lock = 0;
- i = 0;
---- a/fs/xfs/xfs_inode.h
-+++ b/fs/xfs/xfs_inode.h
-@@ -284,9 +284,9 @@ static inline int xfs_isiflocked(struct
- * Flags for lockdep annotations.
- *
- * XFS_LOCK_PARENT - for directory operations that require locking a
-- * parent directory inode and a child entry inode. The parent gets locked
-- * with this flag so it gets a lockdep subclass of 1 and the child entry
-- * lock will have a lockdep subclass of 0.
-+ * parent directory inode and a child entry inode. IOLOCK requires nesting,
-+ * MMAPLOCK does not support this class, ILOCK requires a single subclass
-+ * to differentiate parent from child.
- *
- * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
- * inodes do not participate in the normal lock order, and thus have their
-@@ -295,30 +295,63 @@ static inline int xfs_isiflocked(struct
- * XFS_LOCK_INUMORDER - for locking several inodes at the same time
- * with xfs_lock_inodes(). This flag is used as the starting subclass
- * and each subsequent lock acquired will increment the subclass by one.
-- * So the first lock acquired will have a lockdep subclass of 4, the
-- * second lock will have a lockdep subclass of 5, and so on. It is
-- * the responsibility of the class builder to shift this to the correct
-- * portion of the lock_mode lockdep mask.
-+ * However, MAX_LOCKDEP_SUBCLASSES == 8, which means we are greatly
-+ * limited to the subclasses we can represent via nesting. We need at least
-+ * 5 inodes nest depth for the ILOCK through rename, and we also have to support
-+ * XFS_ILOCK_PARENT, which gives 6 subclasses. Then we have XFS_ILOCK_RTBITMAP
-+ * and XFS_ILOCK_RTSUM, which are another 2 unique subclasses, so that's all
-+ * 8 subclasses supported by lockdep.
-+ *
-+ * This also means we have to number the sub-classes in the lowest bits of
-+ * the mask we keep, and we have to ensure we never exceed 3 bits of lockdep
-+ * mask and we can't use bit-masking to build the subclasses. What a mess.
-+ *
-+ * Bit layout:
-+ *
-+ * Bit Lock Region
-+ * 16-19 XFS_IOLOCK_SHIFT dependencies
-+ * 20-23 XFS_MMAPLOCK_SHIFT dependencies
-+ * 24-31 XFS_ILOCK_SHIFT dependencies
-+ *
-+ * IOLOCK values
-+ *
-+ * 0-3 subclass value
-+ * 4-7 PARENT subclass values
-+ *
-+ * MMAPLOCK values
-+ *
-+ * 0-3 subclass value
-+ * 4-7 unused
-+ *
-+ * ILOCK values
-+ * 0-4 subclass values
-+ * 5 PARENT subclass (not nestable)
-+ * 6 RTBITMAP subclass (not nestable)
-+ * 7 RTSUM subclass (not nestable)
-+ *
- */
--#define XFS_LOCK_PARENT 1
--#define XFS_LOCK_RTBITMAP 2
--#define XFS_LOCK_RTSUM 3
--#define XFS_LOCK_INUMORDER 4
--
--#define XFS_IOLOCK_SHIFT 16
--#define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
-+#define XFS_IOLOCK_SHIFT 16
-+#define XFS_IOLOCK_PARENT_VAL 4
-+#define XFS_IOLOCK_MAX_SUBCLASS (XFS_IOLOCK_PARENT_VAL - 1)
-+#define XFS_IOLOCK_DEP_MASK 0x000f0000
-+#define XFS_IOLOCK_PARENT (XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)
-
--#define XFS_MMAPLOCK_SHIFT 20
-+#define XFS_MMAPLOCK_SHIFT 20
-+#define XFS_MMAPLOCK_NUMORDER 0
-+#define XFS_MMAPLOCK_MAX_SUBCLASS 3
-+#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
-
--#define XFS_ILOCK_SHIFT 24
--#define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
--#define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT)
--#define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT)
-+#define XFS_ILOCK_SHIFT 24
-+#define XFS_ILOCK_PARENT_VAL 5
-+#define XFS_ILOCK_MAX_SUBCLASS (XFS_ILOCK_PARENT_VAL - 1)
-+#define XFS_ILOCK_RTBITMAP_VAL 6
-+#define XFS_ILOCK_RTSUM_VAL 7
-+#define XFS_ILOCK_DEP_MASK 0xff000000
-+#define XFS_ILOCK_PARENT (XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
-+#define XFS_ILOCK_RTBITMAP (XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
-+#define XFS_ILOCK_RTSUM (XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)
-
--#define XFS_IOLOCK_DEP_MASK 0x000f0000
--#define XFS_MMAPLOCK_DEP_MASK 0x00f00000
--#define XFS_ILOCK_DEP_MASK 0xff000000
--#define XFS_LOCK_DEP_MASK (XFS_IOLOCK_DEP_MASK | \
-+#define XFS_LOCK_SUBCLASS_MASK (XFS_IOLOCK_DEP_MASK | \
- XFS_MMAPLOCK_DEP_MASK | \
- XFS_ILOCK_DEP_MASK)
-