author    Greg Kroah-Hartman <gregkh@suse.de>  2011-09-28 11:31:19 -0700
committer Greg Kroah-Hartman <gregkh@suse.de>  2011-09-28 11:31:19 -0700
commit    a9020a0e40611957d95497a5354584b38509201b
tree      46f5b06797a0b7c7f93af81e31d3b2157a311fb2
parent    0bb5c509ebf09b6cd15813f9722e0f942f8bff8f
download  stable-queue-a9020a0e40611957d95497a5354584b38509201b.tar.gz
3.0 patches
-rw-r--r--  queue-3.0/memcg-fix-vmscan-count-in-small-memcgs.patch | 116
-rw-r--r--  queue-3.0/series                                       |   1
2 files changed, 117 insertions, 0 deletions
diff --git a/queue-3.0/memcg-fix-vmscan-count-in-small-memcgs.patch b/queue-3.0/memcg-fix-vmscan-count-in-small-memcgs.patch
new file mode 100644
index 0000000000..e4a8fed904
--- /dev/null
+++ b/queue-3.0/memcg-fix-vmscan-count-in-small-memcgs.patch
@@ -0,0 +1,116 @@
+From 4508378b9523e22a2a0175d8bf64d932fb10a67d Mon Sep 17 00:00:00 2001
+From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Date: Tue, 26 Jul 2011 16:08:24 -0700
+Subject: memcg: fix vmscan count in small memcgs
+
+From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+
+commit 4508378b9523e22a2a0175d8bf64d932fb10a67d upstream.
+
+Commit 246e87a93934 ("memcg: fix get_scan_count() for small targets")
+fixes the memcg/kswapd behavior against small targets and prevents the
+vmscan priority from climbing too high.
+
+But that implementation is too naive and adds another problem for small
+memcgs: it always forces a scan of 32 pages of both file and anon LRUs
+and ignores swappiness and the rotation statistics, so vmscan scans the
+anon LRU regardless of swappiness and reclaim suffers. This patch fixes
+it by weighting the forced scan count by swappiness and reclaim_stat.
+
+In a test ("cat" of a 1G file under a 300M limit, swappiness=20):
+ before patch
+ scanned_pages_by_limit 360919
+ scanned_anon_pages_by_limit 180469
+ scanned_file_pages_by_limit 180450
+ rotated_pages_by_limit 31
+ rotated_anon_pages_by_limit 25
+ rotated_file_pages_by_limit 6
+ freed_pages_by_limit 180458
+ freed_anon_pages_by_limit 19
+ freed_file_pages_by_limit 180439
+ elapsed_ns_by_limit 429758872
+ after patch
+ scanned_pages_by_limit 180674
+ scanned_anon_pages_by_limit 24
+ scanned_file_pages_by_limit 180650
+ rotated_pages_by_limit 35
+ rotated_anon_pages_by_limit 24
+ rotated_file_pages_by_limit 11
+ freed_pages_by_limit 180634
+ freed_anon_pages_by_limit 0
+ freed_file_pages_by_limit 180634
+ elapsed_ns_by_limit 367119089
+ scanned_pages_by_system 0
+
+The number of anon pages scanned decreases (as expected), and the elapsed
+time is reduced. With this patch, small memcgs will work better.
+(*) Because the amount of file cache is much bigger than anon,
+    reclaim_stat's rotate/scan counters bias scanning toward file pages.
+
+Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Ying Han <yinghan@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/vmscan.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1748,6 +1748,7 @@ static void get_scan_count(struct zone *
+ enum lru_list l;
+ int noswap = 0;
+ int force_scan = 0;
++ unsigned long nr_force_scan[2];
+
+
+ anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+@@ -1770,6 +1771,8 @@ static void get_scan_count(struct zone *
+ fraction[0] = 0;
+ fraction[1] = 1;
+ denominator = 1;
++ nr_force_scan[0] = 0;
++ nr_force_scan[1] = SWAP_CLUSTER_MAX;
+ goto out;
+ }
+
+@@ -1781,6 +1784,8 @@ static void get_scan_count(struct zone *
+ fraction[0] = 1;
+ fraction[1] = 0;
+ denominator = 1;
++ nr_force_scan[0] = SWAP_CLUSTER_MAX;
++ nr_force_scan[1] = 0;
+ goto out;
+ }
+ }
+@@ -1829,6 +1834,11 @@ static void get_scan_count(struct zone *
+ fraction[0] = ap;
+ fraction[1] = fp;
+ denominator = ap + fp + 1;
++ if (force_scan) {
++ unsigned long scan = SWAP_CLUSTER_MAX;
++ nr_force_scan[0] = div64_u64(scan * ap, denominator);
++ nr_force_scan[1] = div64_u64(scan * fp, denominator);
++ }
+ out:
+ for_each_evictable_lru(l) {
+ int file = is_file_lru(l);
+@@ -1849,12 +1859,8 @@ out:
+ * memcg, priority drop can cause big latency. So, it's better
+ * to scan small amount. See may_noscan above.
+ */
+- if (!scan && force_scan) {
+- if (file)
+- scan = SWAP_CLUSTER_MAX;
+- else if (!noswap)
+- scan = SWAP_CLUSTER_MAX;
+- }
++ if (!scan && force_scan)
++ scan = nr_force_scan[file];
+ nr[l] = scan;
+ }
+ }
diff --git a/queue-3.0/series b/queue-3.0/series
index 21713ef525..d7e3cde1d4 100644
--- a/queue-3.0/series
+++ b/queue-3.0/series
@@ -227,3 +227,4 @@ asoc-ssm2602-re-enable-oscillator-after-suspend.patch
alsa-hda-realtek-avoid-bogus-hp-pin-assignment.patch
alsa-hda-no-power-nids-on-92hd93.patch
alsa-usb-audio-check-for-possible-chip-null-pointer-before-clearing-probing-flag.patch
+memcg-fix-vmscan-count-in-small-memcgs.patch
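
The net effect of the mm/vmscan.c hunks above is that a forced scan no
longer pushes SWAP_CLUSTER_MAX (32) pages onto both LRU lists regardless
of swappiness; the budget is split using the same ap/fp fractions that
drive the normal scan count. Below is a minimal userspace sketch of that
arithmetic, not the kernel code itself: div64_u64() is re-implemented as
a plain-division stand-in, and the ap/fp weights are hypothetical example
values (the kernel derives them from swappiness and reclaim_stat).

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

/* Stand-in for the kernel's div64_u64(); plain 64-bit division here. */
static unsigned long long div64_u64(unsigned long long dividend,
                                    unsigned long long divisor)
{
        return dividend / divisor;
}

int main(void)
{
        /* Hypothetical weights; the kernel computes ap and fp from
         * swappiness and the per-LRU recent_rotated/recent_scanned stats. */
        unsigned long long ap = 20;   /* anon pressure (low swappiness) */
        unsigned long long fp = 180;  /* file pressure */
        unsigned long long denominator = ap + fp + 1;
        unsigned long long nr_force_scan[2];

        /* Split the 32-page forced-scan budget proportionally, as the
         * force_scan branch in the patch does. */
        nr_force_scan[0] = div64_u64(SWAP_CLUSTER_MAX * ap, denominator);
        nr_force_scan[1] = div64_u64(SWAP_CLUSTER_MAX * fp, denominator);

        printf("forced anon scan: %llu pages\n", nr_force_scan[0]); /* 3  */
        printf("forced file scan: %llu pages\n", nr_force_scan[1]); /* 28 */
        return 0;
}

With these example weights the forced scan is roughly 3 anon pages and 28
file pages instead of 32 of each, which is why the "after patch" numbers
above show scanned_anon_pages_by_limit dropping from 180469 to 24.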