author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-04-01 23:19:31 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-04-01 23:19:31 +0200
commit    78eaf7bbf2853f0726c445dbf90234ba299c6f32 (patch)
tree      6357dc3e58559adbaef20d0c1c24d333749a3a4d
parent    1df1c6d97b8a65305fdbd9d8d90fecd48718fc83 (diff)
download  4.9-rt-patches-78eaf7bbf2853f0726c445dbf90234ba299c6f32.tar.gz
[ANNOUNCE] 4.4.6-rt13
Dear RT folks!

I'm pleased to announce the v4.4.6-rt13 patch set.

Changes since v4.4.6-rt12:

- Alexandre Belloni sent a patch for the AT91 to get rid of the
  free_irq() warning.

- Yang Shi sent a patch to address a "sleeping while atomic" warning in
  a writeback tracepoint. Until now the tracepoint was disabled to avoid
  the warning; now it can be used again.

- Rik van Riel sent a patch to make the KVM async pagefault code use a
  simple wait queue.

- Mike Galbraith sent a patch to address a "sleeping while atomic"
  warning in zsmalloc.

- Network packets sent by an RT task could be delayed (but would not
  block the RT task) if a task with lower priority was interrupted while
  sending a packet. This is addressed by always taking the qdisc's
  busylock so the high-priority task can boost the task with lower
  priority.

- Clark Williams reported a swait-related complete_all() warning while
  coming out of suspend. Suspend to RAM (and hibernation) are now
  filtered out from the warning.

- Mike Galbraith sent a patch to address a "sleeping while atomic"
  warning in the zram driver.

- Josh Cartwright sent a patch to fix a lockdep splat in list_bl which
  was reported by Luis Claudio R. Goncalves.

Known issues:
- CPU hotplug got a little better but can deadlock.

The delta patch against 4.4.6-rt12 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/incr/patch-4.4.6-rt12-rt13.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.4.6-rt13

The RT patch against 4.4.6 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patch-4.4.6-rt13.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.4/patches-4.4.6-rt13.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
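A note on the qdisc busylock item above: on PREEMPT_RT the busylock is a
sleeping lock with priority inheritance, so an RT sender that blocks on it
boosts the preempted low-priority lock holder instead of leaving the NIC
idle behind it. The snippet below is only a userspace sketch of that
boosting behaviour, using a hypothetical PI pthread mutex as a stand-in
for the busylock; it is not code from the patch (the actual kernel change
is the small __dev_xmit_skb() hunk in the delta appended below).

/* pi_busylock_demo.c -- illustrative only, not from the patch.
 *
 * A waiter blocked on a PI mutex lends its priority to the current
 * owner, so the owner cannot be starved by middle-priority work.
 * Build: gcc -O2 -pthread pi_busylock_demo.c
 * (Giving the threads real SCHED_FIFO priorities needs privileges and
 *  is omitted to keep the sketch short.)
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t busylock;

static void *low_prio_sender(void *arg)
{
	pthread_mutex_lock(&busylock);
	/* While the RT sender below blocks on the PI mutex, this owner
	 * runs with the waiter's priority instead of being preempted
	 * indefinitely. */
	usleep(100 * 1000);		/* pretend to push a packet out */
	pthread_mutex_unlock(&busylock);
	return NULL;
}

static void *rt_sender(void *arg)
{
	pthread_mutex_lock(&busylock);	/* blocking here boosts the owner */
	puts("RT sender acquired the busylock");
	pthread_mutex_unlock(&busylock);
	return NULL;
}

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_t low, high;

	pthread_mutexattr_init(&attr);
	/* PTHREAD_PRIO_INHERIT is the userspace analogue of the
	 * rtmutex-based sleeping spinlocks used on PREEMPT_RT. */
	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
	pthread_mutex_init(&busylock, &attr);

	pthread_create(&low, NULL, low_prio_sender, NULL);
	usleep(10 * 1000);		/* let the low-prio thread grab the lock */
	pthread_create(&high, NULL, rt_sender, NULL);

	pthread_join(high, NULL);
	pthread_join(low, NULL);
	return 0;
}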
-rw-r--r--patches/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch45
-rw-r--r--patches/completion-use-simple-wait-queues.patch93
-rw-r--r--patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch175
-rw-r--r--patches/kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch157
-rw-r--r--patches/list_bl-fixup-bogus-lockdep-warning.patch97
-rw-r--r--patches/localversion.patch4
-rw-r--r--patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch34
-rw-r--r--patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch34
-rw-r--r--patches/net-tx-action-avoid-livelock-on-rt.patch4
-rw-r--r--patches/series11
-rw-r--r--patches/trace-writeback--Block-cgroup-path-tracing-on-RT.patch55
-rw-r--r--patches/tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch392
-rw-r--r--patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch4
13 files changed, 1039 insertions, 66 deletions
diff --git a/patches/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch b/patches/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
new file mode 100644
index 00000000000000..9ffe99dbd954f1
--- /dev/null
+++ b/patches/clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
@@ -0,0 +1,45 @@
+From: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Date: Thu, 17 Mar 2016 21:09:43 +0100
+Subject: [PATCH] clockevents/drivers/timer-atmel-pit: fix double free_irq
+
+clockevents_exchange_device() changes the state from detached to shutdown
+and so at that point the IRQ has not yet been requested.
+
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/clocksource/timer-atmel-pit.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/clocksource/timer-atmel-pit.c
++++ b/drivers/clocksource/timer-atmel-pit.c
+@@ -46,6 +46,7 @@ struct pit_data {
+ u32 cycle;
+ u32 cnt;
+ unsigned int irq;
++ bool irq_requested;
+ struct clk *mck;
+ };
+
+@@ -96,7 +97,10 @@ static int pit_clkevt_shutdown(struct cl
+
+ /* disable irq, leaving the clocksource active */
+ pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN);
+- free_irq(data->irq, data);
++ if (data->irq_requested) {
++ free_irq(data->irq, data);
++ data->irq_requested = false;
++ }
+ return 0;
+ }
+
+@@ -115,6 +119,8 @@ static int pit_clkevt_set_periodic(struc
+ if (ret)
+ panic(pr_fmt("Unable to setup IRQ\n"));
+
++ data->irq_requested = true;
++
+ /* update clocksource counter */
+ data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
+ pit_write(data->base, AT91_PIT_MR,
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 4b128206a83e0c..17ef483be110dc 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -12,12 +12,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
drivers/usb/gadget/function/f_fs.c | 2 -
drivers/usb/gadget/legacy/inode.c | 4 +--
include/linux/completion.h | 9 +++-----
+ include/linux/suspend.h | 6 +++++
include/linux/swait.h | 1
include/linux/uprobes.h | 1
+ kernel/power/hibernate.c | 7 ++++++
+ kernel/power/suspend.c | 5 ++++
kernel/sched/completion.c | 32 ++++++++++++++---------------
kernel/sched/core.c | 10 +++++++--
- kernel/sched/swait.c | 17 +++++++++++++++
- 9 files changed, 51 insertions(+), 27 deletions(-)
+ kernel/sched/swait.c | 20 ++++++++++++++++++
+ 12 files changed, 72 insertions(+), 27 deletions(-)
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -96,6 +99,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
+--- a/include/linux/suspend.h
++++ b/include/linux/suspend.h
+@@ -194,6 +194,12 @@ struct platform_freeze_ops {
+ void (*end)(void);
+ };
+
++#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION)
++extern bool pm_in_action;
++#else
++# define pm_in_action false
++#endif
++
+ #ifdef CONFIG_SUSPEND
+ /**
+ * suspend_set_ops - set platform dependent suspend operations
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -87,6 +87,7 @@ static inline int swait_active(struct sw
@@ -116,6 +134,64 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct vm_area_struct;
struct mm_struct;
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -648,6 +648,10 @@ static void power_down(void)
+ cpu_relax();
+ }
+
++#ifndef CONFIG_SUSPEND
++bool pm_in_action;
++#endif
++
+ /**
+ * hibernate - Carry out system hibernation, including saving the image.
+ */
+@@ -660,6 +664,8 @@ int hibernate(void)
+ return -EPERM;
+ }
+
++ pm_in_action = true;
++
+ lock_system_sleep();
+ /* The snapshot device should not be opened while we're running */
+ if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
+@@ -725,6 +731,7 @@ int hibernate(void)
+ atomic_inc(&snapshot_device_available);
+ Unlock:
+ unlock_system_sleep();
++ pm_in_action = false;
+ return error;
+ }
+
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -522,6 +522,8 @@ static int enter_state(suspend_state_t s
+ return error;
+ }
+
++bool pm_in_action;
++
+ /**
+ * pm_suspend - Externally visible function for suspending the system.
+ * @state: System sleep state to enter.
+@@ -536,6 +538,8 @@ int pm_suspend(suspend_state_t state)
+ if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
+ return -EINVAL;
+
++ pm_in_action = true;
++
+ error = enter_state(state);
+ if (error) {
+ suspend_stats.fail++;
+@@ -543,6 +547,7 @@ int pm_suspend(suspend_state_t state)
+ } else {
+ suspend_stats.success++;
+ }
++ pm_in_action = false;
+ return error;
+ }
+ EXPORT_SYMBOL(pm_suspend);
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
@@ -30,10 +30,10 @@ void complete(struct completion *x)
@@ -236,7 +312,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
-@@ -29,6 +29,23 @@ void swake_up_locked(struct swait_queue_
+@@ -1,5 +1,6 @@
+ #include <linux/sched.h>
+ #include <linux/swait.h>
++#include <linux/suspend.h>
+
+ void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
+ struct lock_class_key *key)
+@@ -29,6 +30,25 @@ void swake_up_locked(struct swait_queue_
}
EXPORT_SYMBOL(swake_up_locked);
@@ -253,7 +336,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ list_del_init(&curr->task_list);
+ wakes++;
+ }
-+ WARN_ON(wakes > 2);
++ if (pm_in_action)
++ return;
++ WARN(wakes > 2, "complate_all() with %d waiters\n", wakes);
+}
+EXPORT_SYMBOL(swake_up_all_locked);
+
diff --git a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
new file mode 100644
index 00000000000000..72c412350f53c9
--- /dev/null
+++ b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -0,0 +1,175 @@
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Thu, 31 Mar 2016 04:08:28 +0200
+Subject: [PATCH] drivers/block/zram: Replace bit spinlocks with rtmutex
+ for -rt
+
+They're nondeterministic, and lead to ___might_sleep() splats in -rt.
+OTOH, they're a lot less wasteful than an rtmutex per page.
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/block/zram/zram_drv.c | 30 ++++++++++++++++--------------
+ drivers/block/zram/zram_drv.h | 41 +++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 57 insertions(+), 14 deletions(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -520,6 +520,8 @@ static struct zram_meta *zram_meta_alloc
+ goto out_error;
+ }
+
++ zram_meta_init_table_locks(meta, disksize);
++
+ return meta;
+
+ out_error:
+@@ -568,12 +570,12 @@ static int zram_decompress_page(struct z
+ unsigned long handle;
+ size_t size;
+
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ handle = meta->table[index].handle;
+ size = zram_get_obj_size(meta, index);
+
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ clear_page(mem);
+ return 0;
+ }
+@@ -584,7 +586,7 @@ static int zram_decompress_page(struct z
+ else
+ ret = zcomp_decompress(zram->comp, cmem, size, mem);
+ zs_unmap_object(meta->mem_pool, handle);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ /* Should NEVER happen. Return bio error if it does. */
+ if (unlikely(ret)) {
+@@ -604,14 +606,14 @@ static int zram_bvec_read(struct zram *z
+ struct zram_meta *meta = zram->meta;
+ page = bvec->bv_page;
+
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ if (unlikely(!meta->table[index].handle) ||
+ zram_test_flag(meta, index, ZRAM_ZERO)) {
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ handle_zero_page(bvec);
+ return 0;
+ }
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ if (is_partial_io(bvec))
+ /* Use a temporary buffer to decompress the page */
+@@ -689,10 +691,10 @@ static int zram_bvec_write(struct zram *
+ if (user_mem)
+ kunmap_atomic(user_mem);
+ /* Free memory associated with this sector now. */
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+ zram_set_flag(meta, index, ZRAM_ZERO);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ atomic64_inc(&zram->stats.zero_pages);
+ ret = 0;
+@@ -752,12 +754,12 @@ static int zram_bvec_write(struct zram *
+ * Free memory associated with this sector
+ * before overwriting unused sectors.
+ */
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+
+ meta->table[index].handle = handle;
+ zram_set_obj_size(meta, index, clen);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+
+ /* Update stats */
+ atomic64_add(clen, &zram->stats.compr_data_size);
+@@ -800,9 +802,9 @@ static void zram_bio_discard(struct zram
+ }
+
+ while (n >= PAGE_SIZE) {
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ atomic64_inc(&zram->stats.notify_free);
+ index++;
+ n -= PAGE_SIZE;
+@@ -928,9 +930,9 @@ static void zram_slot_free_notify(struct
+ zram = bdev->bd_disk->private_data;
+ meta = zram->meta;
+
+- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_lock_table(&meta->table[index]);
+ zram_free_page(zram, index);
+- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
++ zram_unlock_table(&meta->table[index]);
+ atomic64_inc(&zram->stats.notify_free);
+ }
+
+--- a/drivers/block/zram/zram_drv.h
++++ b/drivers/block/zram/zram_drv.h
+@@ -72,6 +72,9 @@ enum zram_pageflags {
+ struct zram_table_entry {
+ unsigned long handle;
+ unsigned long value;
++#ifdef CONFIG_PREEMPT_RT_BASE
++ spinlock_t lock;
++#endif
+ };
+
+ struct zram_stats {
+@@ -119,4 +122,42 @@ struct zram {
+ */
+ bool claim; /* Protected by bdev->bd_mutex */
+ };
++
++#ifndef CONFIG_PREEMPT_RT_BASE
++static inline void zram_lock_table(struct zram_table_entry *table)
++{
++ bit_spin_lock(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_unlock_table(struct zram_table_entry *table)
++{
++ bit_spin_unlock(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { }
++#else /* CONFIG_PREEMPT_RT_BASE */
++static inline void zram_lock_table(struct zram_table_entry *table)
++{
++ spin_lock(&table->lock);
++ __set_bit(ZRAM_ACCESS, &table->value);
++}
++
++static inline void zram_unlock_table(struct zram_table_entry *table)
++{
++ __clear_bit(ZRAM_ACCESS, &table->value);
++ spin_unlock(&table->lock);
++}
++
++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize)
++{
++ size_t num_pages = disksize >> PAGE_SHIFT;
++ size_t index;
++
++ for (index = 0; index < num_pages; index++) {
++ spinlock_t *lock = &meta->table[index].lock;
++ spin_lock_init(lock);
++ }
++}
++#endif /* CONFIG_PREEMPT_RT_BASE */
++
+ #endif
diff --git a/patches/kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch b/patches/kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch
new file mode 100644
index 00000000000000..3ac4693d127542
--- /dev/null
+++ b/patches/kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch
@@ -0,0 +1,157 @@
+From: Rik van Riel <riel@redhat.com>
+Date: Mon, 21 Mar 2016 15:13:27 +0100
+Subject: [PATCH] kvm, rt: change async pagefault code locking for PREEMPT_RT
+
+The async pagefault wake code can run from the idle task in exception
+context, so everything here needs to be made non-preemptible.
+
+Conversion to a simple wait queue and raw spinlock does the trick.
+
+Signed-off-by: Rik van Riel <riel@redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/kvm.c | 37 +++++++++++++++++++------------------
+ 1 file changed, 19 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -36,6 +36,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/debugfs.h>
+ #include <linux/nmi.h>
++#include <linux/swait.h>
+ #include <asm/timer.h>
+ #include <asm/cpu.h>
+ #include <asm/traps.h>
+@@ -91,14 +92,14 @@ static void kvm_io_delay(void)
+
+ struct kvm_task_sleep_node {
+ struct hlist_node link;
+- wait_queue_head_t wq;
++ struct swait_queue_head wq;
+ u32 token;
+ int cpu;
+ bool halted;
+ };
+
+ static struct kvm_task_sleep_head {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ struct hlist_head list;
+ } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
+
+@@ -122,17 +123,17 @@ void kvm_async_pf_task_wait(u32 token)
+ u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+ struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+ struct kvm_task_sleep_node n, *e;
+- DEFINE_WAIT(wait);
++ DECLARE_SWAITQUEUE(wait);
+
+ rcu_irq_enter();
+
+- spin_lock(&b->lock);
++ raw_spin_lock(&b->lock);
+ e = _find_apf_task(b, token);
+ if (e) {
+ /* dummy entry exist -> wake up was delivered ahead of PF */
+ hlist_del(&e->link);
+ kfree(e);
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+
+ rcu_irq_exit();
+ return;
+@@ -141,13 +142,13 @@ void kvm_async_pf_task_wait(u32 token)
+ n.token = token;
+ n.cpu = smp_processor_id();
+ n.halted = is_idle_task(current) || preempt_count() > 1;
+- init_waitqueue_head(&n.wq);
++ init_swait_queue_head(&n.wq);
+ hlist_add_head(&n.link, &b->list);
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+
+ for (;;) {
+ if (!n.halted)
+- prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
++ prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+ if (hlist_unhashed(&n.link))
+ break;
+
+@@ -166,7 +167,7 @@ void kvm_async_pf_task_wait(u32 token)
+ }
+ }
+ if (!n.halted)
+- finish_wait(&n.wq, &wait);
++ finish_swait(&n.wq, &wait);
+
+ rcu_irq_exit();
+ return;
+@@ -178,8 +179,8 @@ static void apf_task_wake_one(struct kvm
+ hlist_del_init(&n->link);
+ if (n->halted)
+ smp_send_reschedule(n->cpu);
+- else if (waitqueue_active(&n->wq))
+- wake_up(&n->wq);
++ else if (swait_active(&n->wq))
++ swake_up(&n->wq);
+ }
+
+ static void apf_task_wake_all(void)
+@@ -189,14 +190,14 @@ static void apf_task_wake_all(void)
+ for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
+ struct hlist_node *p, *next;
+ struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
+- spin_lock(&b->lock);
++ raw_spin_lock(&b->lock);
+ hlist_for_each_safe(p, next, &b->list) {
+ struct kvm_task_sleep_node *n =
+ hlist_entry(p, typeof(*n), link);
+ if (n->cpu == smp_processor_id())
+ apf_task_wake_one(n);
+ }
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+ }
+ }
+
+@@ -212,7 +213,7 @@ void kvm_async_pf_task_wake(u32 token)
+ }
+
+ again:
+- spin_lock(&b->lock);
++ raw_spin_lock(&b->lock);
+ n = _find_apf_task(b, token);
+ if (!n) {
+ /*
+@@ -225,17 +226,17 @@ void kvm_async_pf_task_wake(u32 token)
+ * Allocation failed! Busy wait while other cpu
+ * handles async PF.
+ */
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+ cpu_relax();
+ goto again;
+ }
+ n->token = token;
+ n->cpu = smp_processor_id();
+- init_waitqueue_head(&n->wq);
++ init_swait_queue_head(&n->wq);
+ hlist_add_head(&n->link, &b->list);
+ } else
+ apf_task_wake_one(n);
+- spin_unlock(&b->lock);
++ raw_spin_unlock(&b->lock);
+ return;
+ }
+ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+@@ -486,7 +487,7 @@ void __init kvm_guest_init(void)
+ paravirt_ops_setup();
+ register_reboot_notifier(&kvm_pv_reboot_nb);
+ for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
+- spin_lock_init(&async_pf_sleepers[i].lock);
++ raw_spin_lock_init(&async_pf_sleepers[i].lock);
+ if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
+ x86_init.irqs.trap_init = kvm_apf_trap_init;
+
diff --git a/patches/list_bl-fixup-bogus-lockdep-warning.patch b/patches/list_bl-fixup-bogus-lockdep-warning.patch
new file mode 100644
index 00000000000000..1583b5afad8613
--- /dev/null
+++ b/patches/list_bl-fixup-bogus-lockdep-warning.patch
@@ -0,0 +1,97 @@
+From: Josh Cartwright <joshc@ni.com>
+Date: Thu, 31 Mar 2016 00:04:25 -0500
+Subject: [PATCH] list_bl: fixup bogus lockdep warning
+
+At first glance, the use of 'static inline' seems appropriate for
+INIT_HLIST_BL_HEAD().
+
+However, when a 'static inline' function invocation is inlined by gcc,
+all callers share any static local data declared within that inline
+function.
+
+This presents a problem for how lockdep classes are setup. raw_spinlocks, for
+example, when CONFIG_DEBUG_SPINLOCK,
+
+ # define raw_spin_lock_init(lock) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+ __raw_spin_lock_init((lock), #lock, &__key); \
+ } while (0)
+
+When this macro is expanded into a 'static inline' caller, like
+INIT_HLIST_BL_HEAD():
+
+ static inline INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+ {
+ h->first = NULL;
+ raw_spin_lock_init(&h->lock);
+ }
+
+...the static local lock_class_key object is made a function static.
+
+For compilation units which invoke INIT_HLIST_BL_HEAD() more
+than once, all of the invocations share this same static local
+object.
+
+This can lead to some very confusing lockdep splats (example below).
+Solve this problem by forcing the INIT_HLIST_BL_HEAD() to be a macro,
+which prevents the lockdep class object sharing.
+
+ =============================================
+ [ INFO: possible recursive locking detected ]
+ 4.4.4-rt11 #4 Not tainted
+ ---------------------------------------------
+ kswapd0/59 is trying to acquire lock:
+ (&h->lock#2){+.+.-.}, at: mb_cache_shrink_scan
+
+ but task is already holding lock:
+ (&h->lock#2){+.+.-.}, at: mb_cache_shrink_scan
+
+ other info that might help us debug this:
+ Possible unsafe locking scenario:
+
+ CPU0
+ ----
+ lock(&h->lock#2);
+ lock(&h->lock#2);
+
+ *** DEADLOCK ***
+
+ May be due to missing lock nesting notation
+
+ 2 locks held by kswapd0/59:
+ #0: (shrinker_rwsem){+.+...}, at: rt_down_read_trylock
+ #1: (&h->lock#2){+.+.-.}, at: mb_cache_shrink_scan
+
+Reported-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
+Tested-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
+Signed-off-by: Josh Cartwright <joshc@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/list_bl.h | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/include/linux/list_bl.h
++++ b/include/linux/list_bl.h
+@@ -42,13 +42,15 @@ struct hlist_bl_node {
+ struct hlist_bl_node *next, **pprev;
+ };
+
+-static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+-{
+- h->first = NULL;
+ #ifdef CONFIG_PREEMPT_RT_BASE
+- raw_spin_lock_init(&h->lock);
++#define INIT_HLIST_BL_HEAD(h) \
++do { \
++ (h)->first = NULL; \
++ raw_spin_lock_init(&(h)->lock); \
++} while (0)
++#else
++#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL
+ #endif
+-}
+
+ static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
+ {
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 8a0480968fdc25..523b2362c6114e 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -1,4 +1,4 @@
-Subject: v4.4.6-rt12
+Subject: v4.4.6-rt13
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt12
++-rt13
diff --git a/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch b/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
new file mode 100644
index 00000000000000..b827211e33ba6f
--- /dev/null
+++ b/patches/mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
@@ -0,0 +1,34 @@
+From 1fd1b32ad881496d3a3b4caac77965555cc021b0 Mon Sep 17 00:00:00 2001
+From: Mike Galbraith <umgwanakikbuti@gmail.com>
+Date: Tue, 22 Mar 2016 11:16:09 +0100
+Subject: [PATCH] mm/zsmalloc: Use get/put_cpu_light in
+ zs_map_object()/zs_unmap_object()
+
+Otherwise, we get a ___might_sleep() splat.
+
+Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/zsmalloc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -1289,7 +1289,7 @@ void *zs_map_object(struct zs_pool *pool
+ class = pool->size_class[class_idx];
+ off = obj_idx_to_offset(page, obj_idx, class->size);
+
+- area = &get_cpu_var(zs_map_area);
++ area = per_cpu_ptr(&zs_map_area, get_cpu_light());
+ area->vm_mm = mm;
+ if (off + class->size <= PAGE_SIZE) {
+ /* this object is contained entirely within a page */
+@@ -1342,7 +1342,7 @@ void zs_unmap_object(struct zs_pool *poo
+
+ __zs_unmap_object(area, pages, off, class->size);
+ }
+- put_cpu_var(zs_map_area);
++ put_cpu_light();
+ unpin_tag(handle);
+ }
+ EXPORT_SYMBOL_GPL(zs_unmap_object);
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
new file mode 100644
index 00000000000000..b915dc9490ed7e
--- /dev/null
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -0,0 +1,34 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 30 Mar 2016 13:36:29 +0200
+Subject: [PATCH] net: dev: always take qdisc's busylock in __dev_xmit_skb()
+
+The root-lock is dropped before dev_hard_start_xmit() is invoked and after
+setting the __QDISC___STATE_RUNNING bit. If this task is now pushed away
+by a task with a higher priority then the task with the higher priority
+won't be able to submit packets to the NIC directly instead they will be
+enqueued into the Qdisc. The NIC will remain idle until the task(s) with
+higher priority leave the CPU and the task with lower priority gets back
+and finishes the job.
+
+If we always take the busylock we ensure that the RT task can boost the
+low-prio task and submit the packet.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/dev.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2885,7 +2885,11 @@ static inline int __dev_xmit_skb(struct
+ * This permits __QDISC___STATE_RUNNING owner to get the lock more
+ * often and dequeue packets faster.
+ */
++#ifdef CONFIG_PREEMPT_RT_FULL
++ contended = true;
++#else
+ contended = qdisc_is_running(q);
++#endif
+ if (unlikely(contended))
+ spin_lock(&q->busylock);
+
diff --git a/patches/net-tx-action-avoid-livelock-on-rt.patch b/patches/net-tx-action-avoid-livelock-on-rt.patch
index 9313a85f01cd8f..d8a85721893f6d 100644
--- a/patches/net-tx-action-avoid-livelock-on-rt.patch
+++ b/patches/net-tx-action-avoid-livelock-on-rt.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3638,6 +3638,36 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3642,6 +3642,36 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -3679,7 +3709,7 @@ static void net_tx_action(struct softirq
+@@ -3683,7 +3713,7 @@ static void net_tx_action(struct softirq
head = head->next_sched;
root_lock = qdisc_lock(q);
diff --git a/patches/series b/patches/series
index 075616db0f1863..7d906bc56e567f 100644
--- a/patches/series
+++ b/patches/series
@@ -7,6 +7,9 @@
############################################################
rtmutex-Make-wait_lock-irq-safe.patch
arm64-replace-read_lock-to-rcu-lock-in-call_step_hoo.patch
+tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch
+kvm-rt-change-async-pagefault-code-locking-for-PREEM.patch
+
# AT91 queue in ARM-SOC
0001-clk-at91-make-use-of-syscon-to-share-PMC-registers-i.patch
0002-clk-at91-make-use-of-syscon-regmap-internally.patch
@@ -41,6 +44,8 @@ drivers-cpuidle-coupled-fix-warning-cpuidle_coupled_.patch
drivers-media-vsp1_video-fix-compile-error.patch
sc16is7xx_Drop_bogus_use_of_IRQF_ONESHOT.patch
f2fs_Mutex_cant_be_used_by_down_write_nest_lock().patch
+
+# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
############################################################
@@ -57,7 +62,6 @@ kernel-sched-fix-preempt_disable_ip-recodring-for-pr.patch
# Wants a different fix for upstream
iommu-amd--Use-WARN_ON_NORT.patch
-trace-writeback--Block-cgroup-path-tracing-on-RT.patch
############################################################
# Submitted on LKML
@@ -131,6 +135,7 @@ drivers-random-reduce-preempt-disabled-region.patch
# CLOCKSOURCE
arm-at91-pit-remove-irq-handler-when-clock-is-unused.patch
+clockevents-drivers-timer-atmel-pit-fix-double-free_.patch
clocksource-tclib-allow-higher-clockrates.patch
# DRIVERS NET
@@ -230,6 +235,7 @@ fs-jbd-replace-bh_state-lock.patch
# GENIRQ
list_bl.h-make-list-head-locking-RT-safe.patch
+list_bl-fixup-bogus-lockdep-warning.patch
genirq-disable-irqpoll-on-rt.patch
genirq-force-threading.patch
genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -265,6 +271,7 @@ mm-page-alloc-use-local-lock-on-target-cpu.patch
mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
mm-memcontrol-do_not_disable_irq.patch
mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
+mm-zsmalloc-Use-get-put_cpu_light-in-zs_map_object-z.patch
# RADIX TREE
radix-tree-rt-aware.patch
@@ -454,6 +461,7 @@ skbufhead-raw-lock.patch
net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
net-move-xmit_recursion-to-per-task-variable-on-RT.patch
net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
# NETWORK livelock fix
net-tx-action-avoid-livelock-on-rt.patch
@@ -588,6 +596,7 @@ leds-trigger-disable-CPU-trigger-on-RT.patch
i2c-omap-drop-the-lock-hard-irq-context.patch
mmci-remove-bogus-irq-save.patch
cpufreq-drop-K8-s-driver-from-beeing-selected.patch
+drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
# I915
i915_compile_fix.patch
diff --git a/patches/trace-writeback--Block-cgroup-path-tracing-on-RT.patch b/patches/trace-writeback--Block-cgroup-path-tracing-on-RT.patch
deleted file mode 100644
index d3c45f35b1dd4d..00000000000000
--- a/patches/trace-writeback--Block-cgroup-path-tracing-on-RT.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-Subject: trace/writeback: Block cgroup path tracing on RT
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun, 28 Feb 2016 15:14:43 +0100
-
-Yang reported that with CGROUP_WRITEBACK enabled the tracer triggers the
-following backtrace:
-
-BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:930
-in_atomic(): 1, irqs_disabled(): 0, pid: 625, name: kworker/u16:3
-INFO: lockdep is turned off.
-Preemption disabled at:[<ffffffc000374a5c>] wb_writeback+0xec/0x830
-
-CPU: 7 PID: 625 Comm: kworker/u16:3 Not tainted 4.4.1-rt5 #20
-Hardware name: Freescale Layerscape 2085a RDB Board (DT)
-Workqueue: writeback wb_workfn (flush-7:0)
-Call trace:
-[<ffffffc00008d708>] dump_backtrace+0x0/0x200
-[<ffffffc00008d92c>] show_stack+0x24/0x30
-[<ffffffc0007b0f40>] dump_stack+0x88/0xa8
-[<ffffffc000127d74>] ___might_sleep+0x2ec/0x300
-[<ffffffc000d5d550>] rt_spin_lock+0x38/0xb8
-[<ffffffc0003e0548>] kernfs_path_len+0x30/0x90
-[<ffffffc00036b360>] trace_event_raw_event_writeback_work_class+0xe8/0x2e8
-[<ffffffc000374f90>] wb_writeback+0x620/0x830
-[<ffffffc000376224>] wb_workfn+0x61c/0x950
-[<ffffffc000110adc>] process_one_work+0x3ac/0xb30
-[<ffffffc0001112fc>] worker_thread+0x9c/0x7a8
-[<ffffffc00011a9e8>] kthread+0x190/0x1b0
-[<ffffffc000086ca0>] ret_from_fork+0x10/0x30
-
-Yang proposed to provide an unlocked function for accessing the cgroup path,
-but that'd involve synchronize_sched() in a syscall, which is not desired
-either. There was some discussion about using the inode number or the cgroup
-id, but so far we have no working solution.
-
-Disable the CGROUP_WRITEBACK path tracing for now when RT is enabled.
-
-Reported-by: Yang Shi <yang.shi@linaro.org>
-Link: http://lkml.kernel.org/r/1456528481-15936-1-git-send-email-yang.shi@linaro.org
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/trace/events/writeback.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/include/trace/events/writeback.h
-+++ b/include/trace/events/writeback.h
-@@ -132,7 +132,7 @@ DEFINE_EVENT(writeback_dirty_inode_templ
- );
-
- #ifdef CREATE_TRACE_POINTS
--#ifdef CONFIG_CGROUP_WRITEBACK
-+#if defined(CONFIG_CGROUP_WRITEBACK) && !defined(CONFIG_PREEMPT_RT_FULL)
-
- static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
- {
diff --git a/patches/tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch b/patches/tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch
new file mode 100644
index 00000000000000..54c5bb9b7016bf
--- /dev/null
+++ b/patches/tracing-writeback-Replace-cgroup-path-to-cgroup-ino.patch
@@ -0,0 +1,392 @@
+From: Yang Shi <yang.shi@linaro.org>
+Date: Thu, 3 Mar 2016 01:08:57 -0800
+Subject: [PATCH] tracing, writeback: Replace cgroup path to cgroup ino
+
+commit 5634cc2aa9aebc77bc862992e7805469dcf83dac ("writeback: update writeback
+tracepoints to report cgroup") made writeback tracepoints print out cgroup
+path when CGROUP_WRITEBACK is enabled, but it may trigger the below bug on -rt
+kernel since kernfs_path and kernfs_path_len are called by tracepoints, which
+acquire spin lock that is sleepable on -rt kernel.
+
+BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:930
+in_atomic(): 1, irqs_disabled(): 0, pid: 625, name: kworker/u16:3
+INFO: lockdep is turned off.
+Preemption disabled at:[<ffffffc000374a5c>] wb_writeback+0xec/0x830
+
+CPU: 7 PID: 625 Comm: kworker/u16:3 Not tainted 4.4.1-rt5 #20
+Hardware name: Freescale Layerscape 2085a RDB Board (DT)
+Workqueue: writeback wb_workfn (flush-7:0)
+Call trace:
+[<ffffffc00008d708>] dump_backtrace+0x0/0x200
+[<ffffffc00008d92c>] show_stack+0x24/0x30
+[<ffffffc0007b0f40>] dump_stack+0x88/0xa8
+[<ffffffc000127d74>] ___might_sleep+0x2ec/0x300
+[<ffffffc000d5d550>] rt_spin_lock+0x38/0xb8
+[<ffffffc0003e0548>] kernfs_path_len+0x30/0x90
+[<ffffffc00036b360>] trace_event_raw_event_writeback_work_class+0xe8/0x2e8
+[<ffffffc000374f90>] wb_writeback+0x620/0x830
+[<ffffffc000376224>] wb_workfn+0x61c/0x950
+[<ffffffc000110adc>] process_one_work+0x3ac/0xb30
+[<ffffffc0001112fc>] worker_thread+0x9c/0x7a8
+[<ffffffc00011a9e8>] kthread+0x190/0x1b0
+[<ffffffc000086ca0>] ret_from_fork+0x10/0x30
+
+With unlocked kernfs_* functions, synchronize_sched() has to be called in
+kernfs_rename which could be called in syscall path, but it is problematic.
+So, print out cgroup ino instead of path name, which could be converted to
+path name by userland.
+
+Without CGROUP_WRITEBACK enabled, it just prints out root dir. But, root
+dir ino vary from different filesystems, so printing out -1U to indicate
+an invalid cgroup ino.
+
+Link: http://lkml.kernel.org/r/1456996137-8354-1-git-send-email-yang.shi@linaro.org
+
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Yang Shi <yang.shi@linaro.org>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+---
+ include/trace/events/writeback.h | 121 ++++++++++++++-------------------------
+ 1 file changed, 45 insertions(+), 76 deletions(-)
+
+--- a/include/trace/events/writeback.h
++++ b/include/trace/events/writeback.h
+@@ -134,58 +134,28 @@ DEFINE_EVENT(writeback_dirty_inode_templ
+ #ifdef CREATE_TRACE_POINTS
+ #ifdef CONFIG_CGROUP_WRITEBACK
+
+-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
++static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
+ {
+- return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
++ return wb->memcg_css->cgroup->kn->ino;
+ }
+
+-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
+-{
+- struct cgroup *cgrp = wb->memcg_css->cgroup;
+- char *path;
+-
+- path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
+- WARN_ON_ONCE(path != buf);
+-}
+-
+-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
+-{
+- if (wbc->wb)
+- return __trace_wb_cgroup_size(wbc->wb);
+- else
+- return 2;
+-}
+-
+-static inline void __trace_wbc_assign_cgroup(char *buf,
+- struct writeback_control *wbc)
++static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
+ {
+ if (wbc->wb)
+- __trace_wb_assign_cgroup(buf, wbc->wb);
++ return __trace_wb_assign_cgroup(wbc->wb);
+ else
+- strcpy(buf, "/");
++ return -1U;
+ }
+-
+ #else /* CONFIG_CGROUP_WRITEBACK */
+
+-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
+-{
+- return 2;
+-}
+-
+-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
+-{
+- strcpy(buf, "/");
+-}
+-
+-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
++static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
+ {
+- return 2;
++ return -1U;
+ }
+
+-static inline void __trace_wbc_assign_cgroup(char *buf,
+- struct writeback_control *wbc)
++static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
+ {
+- strcpy(buf, "/");
++ return -1U;
+ }
+
+ #endif /* CONFIG_CGROUP_WRITEBACK */
+@@ -201,7 +171,7 @@ DECLARE_EVENT_CLASS(writeback_write_inod
+ __array(char, name, 32)
+ __field(unsigned long, ino)
+ __field(int, sync_mode)
+- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
++ __field(unsigned int, cgroup_ino)
+ ),
+
+ TP_fast_assign(
+@@ -209,14 +179,14 @@ DECLARE_EVENT_CLASS(writeback_write_inod
+ dev_name(inode_to_bdi(inode)->dev), 32);
+ __entry->ino = inode->i_ino;
+ __entry->sync_mode = wbc->sync_mode;
+- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
++ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
+ ),
+
+- TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
++ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
+ __entry->name,
+ __entry->ino,
+ __entry->sync_mode,
+- __get_str(cgroup)
++ __entry->cgroup_ino
+ )
+ );
+
+@@ -246,7 +216,7 @@ DECLARE_EVENT_CLASS(writeback_work_class
+ __field(int, range_cyclic)
+ __field(int, for_background)
+ __field(int, reason)
+- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
++ __field(unsigned int, cgroup_ino)
+ ),
+ TP_fast_assign(
+ strncpy(__entry->name,
+@@ -258,10 +228,10 @@ DECLARE_EVENT_CLASS(writeback_work_class
+ __entry->range_cyclic = work->range_cyclic;
+ __entry->for_background = work->for_background;
+ __entry->reason = work->reason;
+- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
+ ),
+ TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
+- "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
++ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
+ __entry->name,
+ MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
+ __entry->nr_pages,
+@@ -270,7 +240,7 @@ DECLARE_EVENT_CLASS(writeback_work_class
+ __entry->range_cyclic,
+ __entry->for_background,
+ __print_symbolic(__entry->reason, WB_WORK_REASON),
+- __get_str(cgroup)
++ __entry->cgroup_ino
+ )
+ );
+ #define DEFINE_WRITEBACK_WORK_EVENT(name) \
+@@ -300,15 +270,15 @@ DECLARE_EVENT_CLASS(writeback_class,
+ TP_ARGS(wb),
+ TP_STRUCT__entry(
+ __array(char, name, 32)
+- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
++ __field(unsigned int, cgroup_ino)
+ ),
+ TP_fast_assign(
+ strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
+ ),
+- TP_printk("bdi %s: cgroup=%s",
++ TP_printk("bdi %s: cgroup_ino=%u",
+ __entry->name,
+- __get_str(cgroup)
++ __entry->cgroup_ino
+ )
+ );
+ #define DEFINE_WRITEBACK_EVENT(name) \
+@@ -347,7 +317,7 @@ DECLARE_EVENT_CLASS(wbc_class,
+ __field(int, range_cyclic)
+ __field(long, range_start)
+ __field(long, range_end)
+- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
++ __field(unsigned int, cgroup_ino)
+ ),
+
+ TP_fast_assign(
+@@ -361,12 +331,12 @@ DECLARE_EVENT_CLASS(wbc_class,
+ __entry->range_cyclic = wbc->range_cyclic;
+ __entry->range_start = (long)wbc->range_start;
+ __entry->range_end = (long)wbc->range_end;
+- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
++ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
+ ),
+
+ TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
+ "bgrd=%d reclm=%d cyclic=%d "
+- "start=0x%lx end=0x%lx cgroup=%s",
++ "start=0x%lx end=0x%lx cgroup_ino=%u",
+ __entry->name,
+ __entry->nr_to_write,
+ __entry->pages_skipped,
+@@ -377,7 +347,7 @@ DECLARE_EVENT_CLASS(wbc_class,
+ __entry->range_cyclic,
+ __entry->range_start,
+ __entry->range_end,
+- __get_str(cgroup)
++ __entry->cgroup_ino
+ )
+ )
+
+@@ -398,7 +368,7 @@ TRACE_EVENT(writeback_queue_io,
+ __field(long, age)
+ __field(int, moved)
+ __field(int, reason)
+- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
++ __field(unsigned int, cgroup_ino)
+ ),
+ TP_fast_assign(
+ unsigned long *older_than_this = work->older_than_this;
+@@ -408,15 +378,15 @@ TRACE_EVENT(writeback_queue_io,
+ (jiffies - *older_than_this) * 1000 / HZ : -1;
+ __entry->moved = moved;
+ __entry->reason = work->reason;
+- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
+ ),
+- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
++ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
+ __entry->name,
+ __entry->older, /* older_than_this in jiffies */
+ __entry->age, /* older_than_this in relative milliseconds */
+ __entry->moved,
+ __print_symbolic(__entry->reason, WB_WORK_REASON),
+- __get_str(cgroup)
++ __entry->cgroup_ino
+ )
+ );
+
+@@ -484,7 +454,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
+ __field(unsigned long, dirty_ratelimit)
+ __field(unsigned long, task_ratelimit)
+ __field(unsigned long, balanced_dirty_ratelimit)
+- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
++ __field(unsigned int, cgroup_ino)
+ ),
+
+ TP_fast_assign(
+@@ -496,13 +466,13 @@ TRACE_EVENT(bdi_dirty_ratelimit,
+ __entry->task_ratelimit = KBps(task_ratelimit);
+ __entry->balanced_dirty_ratelimit =
+ KBps(wb->balanced_dirty_ratelimit);
+- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
+ ),
+
+ TP_printk("bdi %s: "
+ "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
+ "dirty_ratelimit=%lu task_ratelimit=%lu "
+- "balanced_dirty_ratelimit=%lu cgroup=%s",
++ "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
+ __entry->bdi,
+ __entry->write_bw, /* write bandwidth */
+ __entry->avg_write_bw, /* avg write bandwidth */
+@@ -510,7 +480,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
+ __entry->dirty_ratelimit, /* base ratelimit */
+ __entry->task_ratelimit, /* ratelimit with position control */
+ __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
+- __get_str(cgroup)
++ __entry->cgroup_ino
+ )
+ );
+
+@@ -548,7 +518,7 @@ TRACE_EVENT(balance_dirty_pages,
+ __field( long, pause)
+ __field(unsigned long, period)
+ __field( long, think)
+- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
++ __field(unsigned int, cgroup_ino)
+ ),
+
+ TP_fast_assign(
+@@ -571,7 +541,7 @@ TRACE_EVENT(balance_dirty_pages,
+ __entry->period = period * 1000 / HZ;
+ __entry->pause = pause * 1000 / HZ;
+ __entry->paused = (jiffies - start_time) * 1000 / HZ;
+- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
+ ),
+
+
+@@ -580,7 +550,7 @@ TRACE_EVENT(balance_dirty_pages,
+ "bdi_setpoint=%lu bdi_dirty=%lu "
+ "dirty_ratelimit=%lu task_ratelimit=%lu "
+ "dirtied=%u dirtied_pause=%u "
+- "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
++ "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
+ __entry->bdi,
+ __entry->limit,
+ __entry->setpoint,
+@@ -595,7 +565,7 @@ TRACE_EVENT(balance_dirty_pages,
+ __entry->pause, /* ms */
+ __entry->period, /* ms */
+ __entry->think, /* ms */
+- __get_str(cgroup)
++ __entry->cgroup_ino
+ )
+ );
+
+@@ -609,8 +579,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
+ __field(unsigned long, ino)
+ __field(unsigned long, state)
+ __field(unsigned long, dirtied_when)
+- __dynamic_array(char, cgroup,
+- __trace_wb_cgroup_size(inode_to_wb(inode)))
++ __field(unsigned int, cgroup_ino)
+ ),
+
+ TP_fast_assign(
+@@ -619,16 +588,16 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
+ __entry->ino = inode->i_ino;
+ __entry->state = inode->i_state;
+ __entry->dirtied_when = inode->dirtied_when;
+- __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
++ __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
+ ),
+
+- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
++ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
+ __entry->name,
+ __entry->ino,
+ show_inode_state(__entry->state),
+ __entry->dirtied_when,
+ (jiffies - __entry->dirtied_when) / HZ,
+- __get_str(cgroup)
++ __entry->cgroup_ino
+ )
+ );
+
+@@ -684,7 +653,7 @@ DECLARE_EVENT_CLASS(writeback_single_ino
+ __field(unsigned long, writeback_index)
+ __field(long, nr_to_write)
+ __field(unsigned long, wrote)
+- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
++ __field(unsigned int, cgroup_ino)
+ ),
+
+ TP_fast_assign(
+@@ -696,11 +665,11 @@ DECLARE_EVENT_CLASS(writeback_single_ino
+ __entry->writeback_index = inode->i_mapping->writeback_index;
+ __entry->nr_to_write = nr_to_write;
+ __entry->wrote = nr_to_write - wbc->nr_to_write;
+- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
++ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
+ ),
+
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
+- "index=%lu to_write=%ld wrote=%lu cgroup=%s",
++ "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
+ __entry->name,
+ __entry->ino,
+ show_inode_state(__entry->state),
+@@ -709,7 +678,7 @@ DECLARE_EVENT_CLASS(writeback_single_ino
+ __entry->writeback_index,
+ __entry->nr_to_write,
+ __entry->wrote,
+- __get_str(cgroup)
++ __entry->cgroup_ino
+ )
+ );
+
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index 960e813d3d9a26..5ab91835904059 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3580,7 +3580,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3584,7 +3584,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3590,13 +3590,13 @@ static int netif_rx_internal(struct sk_b
+@@ -3594,13 +3594,13 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();