aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorAndrew Morton <akpm@osdl.org>2004-06-23 18:53:52 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2004-06-23 18:53:52 -0700
commit42b8d9947462d751f1a4cd6c7e842d95c3249f35 (patch)
tree4c0ffad7a9fed200b8995f128213a62b261e79fd /mm
parent2332dc7870b6f40eff03df88cbb03f4ffddbd086 (diff)
downloadhistory-42b8d9947462d751f1a4cd6c7e842d95c3249f35.tar.gz
[PATCH] vmscan.c: don't reclaim too many pages
The shrink_zone() logic can, under some circumstances, cause far too many pages to be reclaimed. Say we're scanning at high priority and suddenly hit a large number of reclaimable pages on the LRU. Change things so we bail out when SWAP_CLUSTER_MAX pages have been reclaimed. Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmscan.c8
1 file changed, 8 insertions, 0 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3fe27c1d23817d..a744497f0d49aa 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -62,6 +62,9 @@ struct scan_control {
unsigned long nr_mapped; /* From page_state */
+ /* How many pages shrink_cache() should reclaim */
+ int nr_to_reclaim;
+
/* Ask shrink_caches, or shrink_zone to scan at this priority */
unsigned int priority;
@@ -586,6 +589,7 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
if (current_is_kswapd())
mod_page_state(kswapd_steal, nr_freed);
mod_page_state_zone(zone, pgsteal, nr_freed);
+ sc->nr_to_reclaim -= nr_freed;
spin_lock_irq(&zone->lru_lock);
/*
@@ -815,6 +819,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
else
nr_inactive = 0;
+ sc->nr_to_reclaim = SWAP_CLUSTER_MAX;
+
while (nr_active || nr_inactive) {
if (nr_active) {
sc->nr_to_scan = min(nr_active,
@@ -828,6 +834,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
(unsigned long)SWAP_CLUSTER_MAX);
nr_inactive -= sc->nr_to_scan;
shrink_cache(zone, sc);
+ if (sc->nr_to_reclaim <= 0)
+ break;
}
}
}