diff options
author | Paul Gortmaker <paul.gortmaker@windriver.com> | 2016-08-29 15:59:17 -0400 |
---|---|---|
committer | Paul Gortmaker <paul.gortmaker@windriver.com> | 2016-08-29 15:59:17 -0400 |
commit | 71b7c1aaf6c550abed2d1835c6e0454d4ef7b43d (patch) | |
tree | fab4210938318e3e1935d1d2826990a84170aed6 | |
parent | 10305a7c2b77752fb67d8457a557ff044ebd9c44 (diff) | |
download | 4.8-rt-patches-71b7c1aaf6c550abed2d1835c6e0454d4ef7b43d.tar.gz |
mm: refresh percpu swap patch
-rw-r--r-- | patches/mm-convert-swap-to-percpu-locked.patch | 36 |
1 file changed, 18 insertions, 18 deletions
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch index d5ba1acb6b25a1..2512a65f8c175d 100644 --- a/patches/mm-convert-swap-to-percpu-locked.patch +++ b/patches/mm-convert-swap-to-percpu-locked.patch @@ -1,4 +1,4 @@ -From 6da3b53de601b70ad37f9073e939ce597cb7bb35 Mon Sep 17 00:00:00 2001 +From b3b3461e90b457914a35011167d2a737a1de7b60 Mon Sep 17 00:00:00 2001 From: Ingo Molnar <mingo@elte.hu> Date: Fri, 3 Jul 2009 08:29:51 -0500 Subject: [PATCH] mm/swap: Convert to percpu locked @@ -10,7 +10,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> diff --git a/include/linux/swap.h b/include/linux/swap.h -index a62bb13cbed9..2b7721949dd5 100644 +index 98d34df40058..6df838de7d78 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -297,6 +297,7 @@ extern unsigned long nr_free_pagecache_pages(void); @@ -22,10 +22,10 @@ index a62bb13cbed9..2b7721949dd5 100644 extern void lru_cache_add_anon(struct page *page); extern void lru_cache_add_file(struct page *page); diff --git a/mm/compaction.c b/mm/compaction.c -index eda3c2244f30..c90550d486ed 100644 +index 1427366ad673..5c60c2b42dd5 100644 --- a/mm/compaction.c +++ b/mm/compaction.c -@@ -1490,10 +1490,12 @@ check_drain: +@@ -1531,10 +1531,12 @@ check_drain: block_start_pfn(cc->migrate_pfn, cc->order); if (cc->last_migrated_pfn < current_block_start) { @@ -41,10 +41,10 @@ index eda3c2244f30..c90550d486ed 100644 cc->last_migrated_pfn = 0; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index a07c534cee60..ed17ea3e7fbe 100644 +index 2dcbdd6a8f11..4169b9c62cc5 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -6549,7 +6549,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self, +@@ -6741,7 +6741,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self, int cpu = (unsigned long)hcpu; if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { @@ -55,7 +55,7 @@ index a07c534cee60..ed17ea3e7fbe 
100644 /* diff --git a/mm/swap.c b/mm/swap.c -index 03aacbcb013f..892747266c7e 100644 +index 95916142fc46..fe6075dc1f36 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -32,6 +32,7 @@ @@ -66,9 +66,9 @@ index 03aacbcb013f..892747266c7e 100644 #include <linux/hugetlb.h> #include <linux/page_idle.h> -@@ -48,6 +49,9 @@ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); - static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs); - static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); +@@ -51,6 +52,9 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); + static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); + #endif +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); +DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); @@ -76,7 +76,7 @@ index 03aacbcb013f..892747266c7e 100644 /* * This path almost never happens for VM activity - pages are normally * freed via pagevecs. But it gets used by networking. -@@ -237,11 +241,11 @@ void rotate_reclaimable_page(struct page *page) +@@ -240,11 +244,11 @@ void rotate_reclaimable_page(struct page *page) unsigned long flags; get_page(page); @@ -90,7 +90,7 @@ index 03aacbcb013f..892747266c7e 100644 } } -@@ -292,12 +296,13 @@ static bool need_activate_page_drain(int cpu) +@@ -293,12 +297,13 @@ static bool need_activate_page_drain(int cpu) void activate_page(struct page *page) { if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { @@ -106,7 +106,7 @@ index 03aacbcb013f..892747266c7e 100644 } } -@@ -323,7 +328,7 @@ void activate_page(struct page *page) +@@ -324,7 +329,7 @@ void activate_page(struct page *page) static void __lru_cache_activate_page(struct page *page) { @@ -115,7 +115,7 @@ index 03aacbcb013f..892747266c7e 100644 int i; /* -@@ -345,7 +350,7 @@ static void __lru_cache_activate_page(struct page *page) +@@ -346,7 +351,7 @@ static void __lru_cache_activate_page(struct page *page) } } @@ -124,7 +124,7 @@ index 03aacbcb013f..892747266c7e 100644 } /* -@@ -387,13 +392,13 @@ EXPORT_SYMBOL(mark_page_accessed); 
+@@ -388,13 +393,13 @@ EXPORT_SYMBOL(mark_page_accessed); static void __lru_cache_add(struct page *page) { @@ -140,7 +140,7 @@ index 03aacbcb013f..892747266c7e 100644 } /** -@@ -591,9 +596,9 @@ void lru_add_drain_cpu(int cpu) +@@ -592,9 +597,9 @@ void lru_add_drain_cpu(int cpu) unsigned long flags; /* No harm done if a racing interrupt already did this */ @@ -152,7 +152,7 @@ index 03aacbcb013f..892747266c7e 100644 } pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); -@@ -625,11 +630,12 @@ void deactivate_file_page(struct page *page) +@@ -626,11 +631,12 @@ void deactivate_file_page(struct page *page) return; if (likely(get_page_unless_zero(page))) { @@ -167,7 +167,7 @@ index 03aacbcb013f..892747266c7e 100644 } } -@@ -644,19 +650,20 @@ void deactivate_file_page(struct page *page) +@@ -645,19 +651,20 @@ void deactivate_file_page(struct page *page) void deactivate_page(struct page *page) { if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { |