author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2016-08-22 14:10:49 +0200
committer	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2016-08-22 14:10:49 +0200
commit		37dfea85b9b2a0bb234a46e4459524d3f42aee3f (patch)
tree		58eb28bface2336e7640211352f5dfc3a90ba904
parent		dfa5ee5e044c4cfaef3e3f944df765e3625195f2 (diff)
download	4.9-rt-patches-37dfea85b9b2a0bb234a46e4459524d3f42aee3f.tar.gz
[ANNOUNCE] 4.6.7-rt11
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--	patches/cgroups-use-simple-wait-in-css_release.patch	6
-rw-r--r--	patches/drivers-random-reduce-preempt-disabled-region.patch	4
-rw-r--r--	patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch	27
-rw-r--r--	patches/localversion.patch	2
-rw-r--r--	patches/mm-convert-swap-to-percpu-locked.patch	23
-rw-r--r--	patches/mm-disable-sloub-rt.patch	4
-rw-r--r--	patches/mm-memcontrol-do_not_disable_irq.patch	15
-rw-r--r--	patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch	10
-rw-r--r--	patches/mm-page_alloc-reduce-lock-sections-further.patch	18
-rw-r--r--	patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch	28
-rw-r--r--	patches/mm-perform-lru_add_drain_all-remotely.patch	6
-rw-r--r--	patches/posix-timers-thread-posix-cpu-timers-on-rt.patch	6
-rw-r--r--	patches/preempt-lazy-support.patch	18
-rw-r--r--	patches/radix-tree-rt-aware.patch	4
-rw-r--r--	patches/random-make-it-work-on-rt.patch	2
-rw-r--r--	patches/sched-might-sleep-do-not-account-rcu-depth.patch	2
-rw-r--r--	patches/sched-mmdrop-delayed.patch	6
-rw-r--r--	patches/slub-disable-SLUB_CPU_PARTIAL.patch	2
-rw-r--r--	patches/timer-Remove-slack-leftovers.patch	2
19 files changed, 96 insertions(+), 89 deletions(-)
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index 3a27f541593f75..f0ddcbdfa4aed0 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
-@@ -5005,10 +5005,10 @@ static void css_free_rcu_fn(struct rcu_h
+@@ -5011,10 +5011,10 @@ static void css_free_rcu_fn(struct rcu_h
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -5049,8 +5049,8 @@ static void css_release(struct percpu_re
+@@ -5055,8 +5055,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5694,6 +5694,7 @@ static int __init cgroup_wq_init(void)
+@@ -5698,6 +5698,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
diff --git a/patches/drivers-random-reduce-preempt-disabled-region.patch b/patches/drivers-random-reduce-preempt-disabled-region.patch
index 1b1132096298e4..9181e5eed2255a 100644
--- a/patches/drivers-random-reduce-preempt-disabled-region.patch
+++ b/patches/drivers-random-reduce-preempt-disabled-region.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -796,8 +796,6 @@ static void add_timer_randomness(struct
+@@ -799,8 +799,6 @@ static void add_timer_randomness(struct
} sample;
long delta, delta2, delta3;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
-@@ -838,7 +836,6 @@ static void add_timer_randomness(struct
+@@ -841,7 +839,6 @@ static void add_timer_randomness(struct
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index 134a6140b2b797..94e96c6e9bb896 100644
--- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -11,9 +11,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/autofs4/autofs_i.h | 1 +
fs/autofs4/expire.c | 2 +-
- fs/dcache.c | 5 +++--
+ fs/dcache.c | 10 ++++++++--
fs/namespace.c | 3 ++-
- 4 files changed, 7 insertions(+), 4 deletions(-)
+ 4 files changed, 12 insertions(+), 4 deletions(-)
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -46,16 +46,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
-@@ -578,7 +579,7 @@ static struct dentry *dentry_kill(struct
-
- failed:
- spin_unlock(&dentry->d_lock);
-- cpu_relax();
-+ cpu_chill();
- return dentry; /* try again with same dentry */
+@@ -785,7 +786,12 @@ void dput(struct dentry *dentry)
+ kill_it:
+ dentry = dentry_kill(dentry);
+ if (dentry) {
+- cond_resched();
++ int r;
++
++ /* the task with the highest priority won't schedule */
++ r = cond_resched();
++ if (!r)
++ cpu_chill();
+ goto repeat;
+ }
}
-
-@@ -2316,7 +2317,7 @@ void d_delete(struct dentry * dentry)
+@@ -2319,7 +2325,7 @@ void d_delete(struct dentry * dentry)
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
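Note: the refresh above moves the cpu_chill() fallback out of dentry_kill()'s old failed: path and into dput()'s retry loop. A minimal sketch of the resulting pattern, assuming the -rt cpu_chill() helper (msleep(1) on PREEMPT_RT, so the d_lock holder gets CPU time); the wrapper function name is illustrative, not the literal fs/dcache.c code:

    static void dput_retry_sketch(struct dentry *dentry)
    {
    	while ((dentry = dentry_kill(dentry))) {
    		/* a task at the highest RT priority is never scheduled
    		 * away by cond_resched(), so it could spin forever
    		 * against whoever holds the contended d_lock */
    		if (!cond_resched())
    			cpu_chill();	/* sleep briefly; let the holder run */
    	}
    }

cond_resched() returns nonzero only if it actually rescheduled, which is exactly the condition the new hunk tests before falling back to cpu_chill().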
diff --git a/patches/localversion.patch b/patches/localversion.patch
index e16fb07c0a7d6f..58842b503a2712 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt10
++-rt11
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index c93537f60add37..70afad7a5d7b0d 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void lru_cache_add_file(struct page *page);
--- a/mm/compaction.c
+++ b/mm/compaction.c
-@@ -1414,10 +1414,12 @@ static int compact_zone(struct zone *zon
+@@ -1409,10 +1409,12 @@ static int compact_zone(struct zone *zon
cc->migrate_pfn & ~((1UL << cc->order) - 1);
if (cc->last_migrated_pfn < current_block_start) {
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -6274,7 +6274,9 @@ static int page_alloc_cpu_notify(struct
+@@ -6276,7 +6276,9 @@ static int page_alloc_cpu_notify(struct
int cpu = (unsigned long)hcpu;
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- local_irq_save(flags);
+ local_lock_irqsave(rotate_lock, flags);
pvec = this_cpu_ptr(&lru_rotate_pvecs);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_move_tail(pvec);
- local_irq_restore(flags);
+ local_unlock_irqrestore(rotate_lock, flags);
@@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ activate_page_pvecs);
get_page(page);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, __activate_page, NULL);
- put_cpu_var(activate_page_pvecs);
+ put_locked_var(swapvec_lock, activate_page_pvecs);
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -387,13 +392,13 @@ EXPORT_SYMBOL(mark_page_accessed);
+@@ -387,12 +392,12 @@ EXPORT_SYMBOL(mark_page_accessed);
static void __lru_cache_add(struct page *page)
{
@@ -130,15 +130,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
get_page(page);
- if (!pagevec_space(pvec))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
__pagevec_lru_add(pvec);
- pagevec_add(pvec, page);
- put_cpu_var(lru_add_pvec);
+ put_locked_var(swapvec_lock, lru_add_pvec);
}
/**
-@@ -591,9 +596,9 @@ void lru_add_drain_cpu(int cpu)
+@@ -590,9 +595,9 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -150,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -625,11 +630,12 @@ void deactivate_file_page(struct page *p
+@@ -624,11 +629,12 @@ void deactivate_file_page(struct page *p
return;
if (likely(get_page_unless_zero(page))) {
@@ -158,14 +157,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct pagevec *pvec = &get_locked_var(swapvec_lock,
+ lru_deactivate_file_pvecs);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
- put_cpu_var(lru_deactivate_file_pvecs);
+ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs);
}
}
-@@ -644,19 +650,20 @@ void deactivate_file_page(struct page *p
+@@ -643,19 +649,20 @@ void deactivate_file_page(struct page *p
void deactivate_page(struct page *page)
{
if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
@@ -174,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ lru_deactivate_pvecs);
get_page(page);
- if (!pagevec_add(pvec, page))
+ if (!pagevec_add(pvec, page) || PageCompound(page))
pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
- put_cpu_var(lru_deactivate_pvecs);
+ put_locked_var(swapvec_lock, lru_deactivate_pvecs);
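Every hunk in this file applies the same substitution: get_cpu_var()/put_cpu_var(), which only disable preemption, become get_locked_var()/put_locked_var() on a per-CPU local lock so the section stays preemptible on -rt. A condensed sketch of the pattern, assuming the locallock.h primitives this series carries (the function name is illustrative):

    #include <linux/locallock.h>

    static DEFINE_LOCAL_IRQ_LOCK(swapvec_lock);

    static void lru_cache_add_sketch(struct page *page)
    {
    	/* takes swapvec_lock instead of merely disabling preemption,
    	 * so the pagevec work can be preempted (and sleep) on -rt */
    	struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);

    	get_page(page);
    	if (!pagevec_add(pvec, page) || PageCompound(page))
    		__pagevec_lru_add(pvec);
    	put_locked_var(swapvec_lock, lru_add_pvec);
    }

The new "|| PageCompound(page)" in the context lines is the upstream stable change that forced this refresh: compound pages now flush the pagevec immediately instead of being batched.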
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index bd7edc658cadb0..9f68cc881fc0b8 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1717,6 +1717,7 @@ choice
+@@ -1718,6 +1718,7 @@ choice
config SLAB
bool "SLAB"
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
-@@ -1735,6 +1736,7 @@ config SLUB
+@@ -1736,6 +1737,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 44afca7012f65f..f5fff39dc6ea3c 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4487,12 +4490,12 @@ static int mem_cgroup_move_account(struc
+@@ -4579,12 +4582,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5342,10 +5345,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5436,10 +5439,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5397,14 +5400,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5491,14 +5494,14 @@ static void uncharge_batch(struct mem_cg
memcg_oom_recover(memcg);
}
@@ -74,15 +74,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5722,6 +5725,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5817,6 +5820,7 @@ void mem_cgroup_swapout(struct page *pag
{
- struct mem_cgroup *memcg;
+ struct mem_cgroup *memcg, *swap_memcg;
unsigned short oldid;
+ unsigned long flags;
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5750,9 +5754,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5857,12 +5861,16 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
@@ -92,6 +92,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
mem_cgroup_charge_statistics(memcg, page, false, -1);
memcg_check_events(memcg, page);
+
+ if (!mem_cgroup_is_root(memcg))
+ css_put(&memcg->css);
+ local_unlock_irqrestore(event_lock, flags);
}
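The memcontrol hunks trade local_irq_save()/restore() for a named local lock, so on -rt the statistics update is serialized without masking interrupts (on mainline the lock compiles down to plain IRQ disabling). Sketch of the shape, assuming event_lock is declared with DEFINE_LOCAL_IRQ_LOCK as in this patch (the function is illustrative):

    static DEFINE_LOCAL_IRQ_LOCK(event_lock);

    static void commit_stats_sketch(struct mem_cgroup *memcg, struct page *page,
    				bool compound, int nr_pages)
    {
    	unsigned long flags;

    	/* local_irq_save() on !RT; a per-CPU sleeping lock on -rt */
    	local_lock_irqsave(event_lock, flags);
    	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
    	memcg_check_events(memcg, page);
    	local_unlock_irqrestore(event_lock, flags);
    }

The refresh also picks up the new swap_memcg variable and the css_put() that stable added to mem_cgroup_swapout(), visible in the context changes above.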
diff --git a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
index 8668cc78e9cf24..c730252d654edf 100644
--- a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
+++ b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -14,16 +14,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -5557,10 +5557,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -5652,10 +5652,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
-- local_irq_disable();
-+ local_lock_irq(event_lock);
+- local_irq_save(flags);
++ local_lock_irqsave(event_lock, flags);
mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
memcg_check_events(memcg, newpage);
-- local_irq_enable();
-+ local_unlock_irq(event_lock);
+- local_irq_restore(flags);
++ local_unlock_irqrestore(event_lock, flags);
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
diff --git a/patches/mm-page_alloc-reduce-lock-sections-further.patch b/patches/mm-page_alloc-reduce-lock-sections-further.patch
index 3f9bffba1881e9..419cccfceb4d23 100644
--- a/patches/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/patches/mm-page_alloc-reduce-lock-sections-further.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -827,7 +827,7 @@ static inline int free_pages_check(struc
+@@ -829,7 +829,7 @@ static inline int free_pages_check(struc
}
/*
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -838,18 +838,53 @@ static inline int free_pages_check(struc
+@@ -840,18 +840,53 @@ static inline int free_pages_check(struc
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (to_free) {
struct page *page;
struct list_head *list;
-@@ -865,7 +900,7 @@ static void free_pcppages_bulk(struct zo
+@@ -867,7 +902,7 @@ static void free_pcppages_bulk(struct zo
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -873,24 +908,12 @@ static void free_pcppages_bulk(struct zo
+@@ -875,24 +910,12 @@ static void free_pcppages_bulk(struct zo
batch_free = to_free;
do {
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -899,7 +922,9 @@ static void free_one_page(struct zone *z
+@@ -901,7 +924,9 @@ static void free_one_page(struct zone *z
int migratetype)
{
unsigned long nr_scanned;
@@ -126,7 +126,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
if (nr_scanned)
__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
-@@ -909,7 +934,7 @@ static void free_one_page(struct zone *z
+@@ -911,7 +936,7 @@ static void free_one_page(struct zone *z
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype);
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int free_tail_pages_check(struct page *head_page, struct page *page)
-@@ -2028,16 +2053,18 @@ static int rmqueue_bulk(struct zone *zon
+@@ -2030,16 +2055,18 @@ static int rmqueue_bulk(struct zone *zon
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -155,7 +155,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -2053,16 +2080,21 @@ static void drain_pages_zone(unsigned in
+@@ -2055,16 +2082,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2240,8 +2272,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2242,8 +2274,13 @@ void free_hot_cold_page(struct page *pag
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
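The core idea of this patch is unchanged by the refresh: free_pcppages_bulk() gains a list parameter so callers can detach pages from the pcp lists under their own (local) lock and take zone->lock only for the buddy merging. A compressed two-phase sketch; the helper name and the single-list simplification are illustrative, not the patch's literal code:

    static void free_bulk_sketch(struct zone *zone, struct list_head *pcp_list,
    			     int count)
    {
    	LIST_HEAD(dst);
    	struct page *page, *next;

    	/* phase 1: unlink from the pcp list; only the pcp/local lock
    	 * is held here, keeping the zone->lock section short */
    	while (count--) {
    		page = list_first_entry(pcp_list, struct page, lru);
    		list_move(&page->lru, &dst);
    	}

    	/* phase 2: zone->lock covers just the actual freeing */
    	spin_lock(&zone->lock);
    	list_for_each_entry_safe(page, next, &dst, lru) {
    		list_del(&page->lru);
    		__free_one_page(page, page_to_pfn(page), zone, 0,
    				get_pcppage_migratetype(page));
    	}
    	spin_unlock(&zone->lock);
    }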
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 95e409cc62db8e..b1625d089b855f 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1070,10 +1083,10 @@ static void __free_pages_ok(struct page
+@@ -1072,10 +1085,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __init __free_pages_boot_core(struct page *page,
-@@ -2017,14 +2030,14 @@ void drain_zone_pages(struct zone *zone,
+@@ -2019,14 +2032,14 @@ void drain_zone_pages(struct zone *zone,
unsigned long flags;
int to_drain, batch;
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -2041,7 +2054,7 @@ static void drain_pages_zone(unsigned in
+@@ -2043,7 +2056,7 @@ static void drain_pages_zone(unsigned in
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2049,7 +2062,7 @@ static void drain_pages_zone(unsigned in
+@@ -2051,7 +2064,7 @@ static void drain_pages_zone(unsigned in
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2135,8 +2148,17 @@ void drain_all_pages(struct zone *zone)
+@@ -2137,8 +2150,17 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#ifdef CONFIG_HIBERNATION
-@@ -2192,7 +2214,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2194,7 +2216,7 @@ void free_hot_cold_page(struct page *pag
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__count_vm_event(PGFREE);
/*
-@@ -2223,7 +2245,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2225,7 +2247,7 @@ void free_hot_cold_page(struct page *pag
}
out:
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2358,7 +2380,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2360,7 +2382,7 @@ struct page *buffered_rmqueue(struct zon
struct per_cpu_pages *pcp;
struct list_head *list;
@@ -137,7 +137,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
if (list_empty(list)) {
-@@ -2382,7 +2404,7 @@ struct page *buffered_rmqueue(struct zon
+@@ -2384,7 +2406,7 @@ struct page *buffered_rmqueue(struct zon
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
page = NULL;
if (alloc_flags & ALLOC_HARDER) {
-@@ -2392,11 +2414,13 @@ struct page *buffered_rmqueue(struct zon
+@@ -2394,11 +2416,13 @@ struct page *buffered_rmqueue(struct zon
}
if (!page)
page = __rmqueue(zone, order, migratetype);
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
-@@ -2406,13 +2430,13 @@ struct page *buffered_rmqueue(struct zon
+@@ -2408,13 +2432,13 @@ struct page *buffered_rmqueue(struct zon
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
@@ -178,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -6239,6 +6263,7 @@ static int page_alloc_cpu_notify(struct
+@@ -6241,6 +6265,7 @@ static int page_alloc_cpu_notify(struct
void __init page_alloc_init(void)
{
hotcpu_notifier(page_alloc_cpu_notify, 0);
@@ -186,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -7163,7 +7188,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7165,7 +7190,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7172,7 +7197,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7174,7 +7199,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
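This companion patch puts pa_lock around all pcp-list access; once the lists are lock-protected rather than implicitly CPU-local with IRQs off, the drain_all_pages() hunk can drop the IPI broadcast on -rt and drain remote CPUs directly. Sketch of that branch, assuming the CONFIG_PREEMPT_RT_BASE symbol used throughout this series (the function name is illustrative):

    static void drain_all_sketch(struct zone *zone,
    			     const struct cpumask *cpus_with_pcps)
    {
    	int cpu;

    #ifndef CONFIG_PREEMPT_RT_BASE
    	/* mainline: interrupt every CPU that has pcp pages */
    	on_each_cpu_mask(cpus_with_pcps, (smp_call_func_t)drain_local_pages,
    			 zone, 1);
    #else
    	/* -rt: pa_lock makes remote access safe, so no IPIs */
    	for_each_cpu(cpu, cpus_with_pcps)
    		drain_pages(cpu);
    #endif
    }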
diff --git a/patches/mm-perform-lru_add_drain_all-remotely.patch b/patches/mm-perform-lru_add_drain_all-remotely.patch
index ac2befde1f6b1a..c7c3995a941771 100644
--- a/patches/mm-perform-lru_add_drain_all-remotely.patch
+++ b/patches/mm-perform-lru_add_drain_all-remotely.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -596,9 +596,15 @@ void lru_add_drain_cpu(int cpu)
+@@ -595,9 +595,15 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -666,12 +672,32 @@ void lru_add_drain(void)
+@@ -665,12 +671,32 @@ void lru_add_drain(void)
local_unlock_cpu(swapvec_lock);
}
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void lru_add_drain_all(void)
{
-@@ -684,21 +710,18 @@ void lru_add_drain_all(void)
+@@ -683,21 +709,18 @@ void lru_add_drain_all(void)
cpumask_clear(&has_work);
for_each_online_cpu(cpu) {
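This patch exists because queueing lru drain work on every CPU can stall on -rt when a pinned high-priority task starves the workqueue; instead, a remote CPU's pagevecs are drained from the calling CPU under that CPU's swapvec_lock. Sketch of the remote path, assuming the local_lock_on()/local_unlock_on() cross-CPU variants from locallock.h:

    static void remote_lru_add_drain_sketch(int cpu)
    {
    	/* take cpu's instance of swapvec_lock from this CPU ... */
    	local_lock_on(swapvec_lock, cpu);
    	/* ... so its pagevecs can be flushed without running there */
    	lru_add_drain_cpu(cpu);
    	local_unlock_on(swapvec_lock, cpu);
    }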
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 460ff2e2c41aa2..45800d7f64f05f 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = 0;
old_incr = timer->it.cpu.incr;
-@@ -1063,7 +1064,7 @@ void posix_cpu_timer_schedule(struct k_i
+@@ -1064,7 +1065,7 @@ void posix_cpu_timer_schedule(struct k_i
/*
* Now re-arm for the new expiry time.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arm_timer(timer);
unlock_task_sighand(p, &flags);
-@@ -1152,13 +1153,13 @@ static inline int fastpath_timer_check(s
+@@ -1153,13 +1154,13 @@ static inline int fastpath_timer_check(s
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The fast path checks that there are no expired thread or thread
-@@ -1212,6 +1213,190 @@ void run_posix_cpu_timers(struct task_st
+@@ -1213,6 +1214,190 @@ void run_posix_cpu_timers(struct task_st
}
}
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 6a003dc6665aeb..020e2379c4da1c 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -345,7 +345,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
preempt_disable_notrace();
/*
-@@ -5247,7 +5293,9 @@ void init_idle(struct task_struct *idle,
+@@ -5249,7 +5295,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -358,7 +358,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -3335,7 +3335,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3333,7 +3333,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -3359,7 +3359,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3357,7 +3357,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -376,7 +376,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -3504,7 +3504,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -3502,7 +3502,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -385,7 +385,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -3686,7 +3686,7 @@ static void __account_cfs_rq_runtime(str
+@@ -3684,7 +3684,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -394,7 +394,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -4298,7 +4298,7 @@ static void hrtick_start_fair(struct rq
+@@ -4296,7 +4296,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -403,7 +403,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -5438,7 +5438,7 @@ static void check_preempt_wakeup(struct
+@@ -5441,7 +5441,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -412,7 +412,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -8189,7 +8189,7 @@ static void task_fork_fair(struct task_s
+@@ -8192,7 +8192,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -421,7 +421,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -8214,7 +8214,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -8217,7 +8217,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
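All the kernel/sched/fair.c hunks in this file are one substitution: resched_curr() becomes resched_curr_lazy(). The lazy variant sets the separate TIF_NEED_RESCHED_LAZY flag this patch introduces, deferring SCHED_OTHER preemption to the next preemption point while RT tasks still preempt immediately. Schematic only; the patch's real implementation is more involved:

    static void resched_curr_lazy_sketch(struct rq *rq)
    {
    #ifdef CONFIG_PREEMPT_LAZY
    	/* defer: honored at the next preemption point instead of
    	 * forcing an immediate reschedule of a fair-class task */
    	set_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED_LAZY);
    #else
    	resched_curr(rq);
    #endif
    }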
diff --git a/patches/radix-tree-rt-aware.patch b/patches/radix-tree-rt-aware.patch
index ba964a24a90bfc..a91a5af3f3d493 100644
--- a/patches/radix-tree-rt-aware.patch
+++ b/patches/radix-tree-rt-aware.patch
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
-@@ -240,13 +240,14 @@ radix_tree_node_alloc(struct radix_tree_
+@@ -241,13 +241,14 @@ radix_tree_node_alloc(struct radix_tree_
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Load up this CPU's radix_tree_node buffer with sufficient objects to
* ensure that the addition of a single element in the tree cannot fail. On
-@@ -355,6 +357,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
+@@ -361,6 +363,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index f23fa40dfd13ac..e114aa02abfe6f 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -888,28 +888,27 @@ static __u32 get_reg(struct fast_pool *f
+@@ -891,28 +891,27 @@ static __u32 get_reg(struct fast_pool *f
return *(ptr + f->reg_idx++);
}
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 1380590e5008eb..aff8567e5a745b 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7535,7 +7535,7 @@ void __init sched_init(void)
+@@ -7537,7 +7537,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 265e408de1a55d..87d7c5027e73bc 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5300,6 +5304,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5302,6 +5306,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -107,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -5314,7 +5320,11 @@ void idle_task_exit(void)
+@@ -5316,7 +5322,11 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -120,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5510,6 +5520,10 @@ migration_call(struct notifier_block *nf
+@@ -5512,6 +5522,10 @@ migration_call(struct notifier_block *nf
case CPU_DEAD:
calc_load_migrate(rq);
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index 976fbfb5f1707a..9cae4816650c6a 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1746,7 +1746,7 @@ endchoice
+@@ -1747,7 +1747,7 @@ endchoice
config SLUB_CPU_PARTIAL
default y
diff --git a/patches/timer-Remove-slack-leftovers.patch b/patches/timer-Remove-slack-leftovers.patch
index 4ec5f4014086ee..24787332b7a2a3 100644
--- a/patches/timer-Remove-slack-leftovers.patch
+++ b/patches/timer-Remove-slack-leftovers.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/genhd.c
+++ b/block/genhd.c
-@@ -1523,12 +1523,7 @@ static void __disk_unblock_events(struct
+@@ -1524,12 +1524,7 @@ static void __disk_unblock_events(struct
if (--ev->block)
goto out_unlock;