From: Nick Piggin

The logic which calculates the number of pages which were scanned is mucked
up.  Fix.

---

 mm/vmscan.c |   25 +++++++++++--------------
 1 files changed, 11 insertions(+), 14 deletions(-)

diff -puN mm/vmscan.c~vmscan-total_scanned-fix mm/vmscan.c
--- 25/mm/vmscan.c~vmscan-total_scanned-fix	2004-02-28 23:38:00.000000000 -0800
+++ 25-akpm/mm/vmscan.c	2004-02-28 23:38:00.000000000 -0800
@@ -246,8 +246,7 @@ static void handle_write_error(struct ad
  * shrink_list returns the number of reclaimed pages
  */
 static int
-shrink_list(struct list_head *page_list, unsigned int gfp_mask,
-		int *max_scan, int *nr_mapped)
+shrink_list(struct list_head *page_list, unsigned int gfp_mask, int *nr_scanned)
 {
 	struct address_space *mapping;
 	LIST_HEAD(ret_pages);
@@ -271,7 +270,7 @@ shrink_list(struct list_head *page_list,
 
 		/* Double the slab pressure for mapped and swapcache pages */
 		if (page_mapped(page) || PageSwapCache(page))
-			(*nr_mapped)++;
+			(*nr_scanned)++;
 
 		BUG_ON(PageActive(page));
 
@@ -479,7 +478,7 @@ keep:
  */
 static int
 shrink_cache(const int nr_pages, struct zone *zone,
-		unsigned int gfp_mask, int max_scan, int *nr_mapped)
+		unsigned int gfp_mask, int max_scan, int *total_scanned)
 {
 	LIST_HEAD(page_list);
 	struct pagevec pvec;
@@ -536,8 +535,8 @@ shrink_cache(const int nr_pages, struct
 			mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
 		else
 			mod_page_state_zone(zone, pgscan_direct, nr_scan);
-		nr_freed = shrink_list(&page_list, gfp_mask,
-					&max_scan, nr_mapped);
+		nr_freed = shrink_list(&page_list, gfp_mask, total_scanned);
+		*total_scanned += nr_taken;
 		if (current_is_kswapd())
 			mod_page_state(kswapd_steal, nr_freed);
 		mod_page_state_zone(zone, pgsteal, nr_freed);
@@ -751,7 +750,7 @@ refill_inactive_zone(struct zone *zone,
  */
 static int
 shrink_zone(struct zone *zone, int max_scan, unsigned int gfp_mask,
-	const int nr_pages, int *nr_mapped, struct page_state *ps)
+	const int nr_pages, int *total_scanned, struct page_state *ps)
 {
 	unsigned long ratio;
 
@@ -784,7 +783,7 @@ shrink_zone(struct zone *zone, int max_s
 		refill_inactive_zone(zone, count, ps);
 	}
 	return shrink_cache(nr_pages, zone, gfp_mask,
-				max_scan, nr_mapped);
+				max_scan, total_scanned);
 }
 
 /*
@@ -813,7 +812,6 @@ shrink_caches(struct zone **zones, int p
 	for (i = 0; zones[i] != NULL; i++) {
 		int to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX);
 		struct zone *zone = zones[i];
-		int nr_mapped = 0;
 		int max_scan;
 
 		if (zone->free_pages < zone->pages_high)
@@ -830,8 +828,7 @@ shrink_caches(struct zone **zones, int p
 		if (max_scan < to_reclaim * 2)
 			max_scan = to_reclaim * 2;
 		ret += shrink_zone(zone, max_scan, gfp_mask,
-				to_reclaim, &nr_mapped, ps);
-		*total_scanned += max_scan + nr_mapped;
+				to_reclaim, total_scanned, ps);
 		if (ret >= nr_pages)
 			break;
 	}
@@ -946,7 +943,7 @@ static int balance_pgdat(pg_data_t *pgda
 
 		for (i = 0; i < pgdat->nr_zones; i++) {
 			struct zone *zone = pgdat->node_zones + i;
-			int nr_mapped = 0;
+			int total_scanned = 0;
 			int max_scan;
 			int to_reclaim;
 			int reclaimed;
@@ -968,10 +965,10 @@ static int balance_pgdat(pg_data_t *pgda
 			if (max_scan < SWAP_CLUSTER_MAX)
 				max_scan = SWAP_CLUSTER_MAX;
 			reclaimed = shrink_zone(zone, max_scan, GFP_KERNEL,
-					to_reclaim, &nr_mapped, ps);
+					to_reclaim, &total_scanned, ps);
 			if (i < ZONE_HIGHMEM) {
 				reclaim_state->reclaimed_slab = 0;
-				shrink_slab(max_scan + nr_mapped, GFP_KERNEL);
+				shrink_slab(total_scanned, GFP_KERNEL);
 				reclaimed += reclaim_state->reclaimed_slab;
 			}
 			to_free -= reclaimed;
_
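For readers following along, below is a minimal, self-contained sketch of the
accounting this patch switches to.  It is not kernel code: struct page,
shrink_list() and shrink_cache() here are stripped-down stand-ins for the
mm/vmscan.c functions of the same name, and the real LRU handling, page
reclaim and shrink_slab() internals are stubbed out.  What it models is the
patched behaviour: total_scanned accumulates the pages actually taken off
the LRU each batch, plus one extra increment per mapped or swapcache page
(doubling the slab pressure those pages generate), instead of the old
max_scan + nr_mapped estimate computed after the fact in shrink_caches().

/*
 * Simplified model of the total_scanned accounting after this patch.
 * Userspace C, not kernel code; compile with any C compiler.
 */
#include <stdio.h>

struct page {
	int mapped;	/* stand-in for page_mapped(page) */
	int swapcache;	/* stand-in for PageSwapCache(page) */
};

/*
 * Models the patched shrink_list(): bumps *nr_scanned once more for
 * every mapped or swapcache page, doubling their slab pressure.
 */
static int shrink_list(struct page *pages, int nr_taken, int *nr_scanned)
{
	int i, nr_freed = 0;

	for (i = 0; i < nr_taken; i++) {
		if (pages[i].mapped || pages[i].swapcache)
			(*nr_scanned)++;
		nr_freed++;	/* pretend every page is reclaimed */
	}
	return nr_freed;
}

/*
 * Models the patched shrink_cache(): each batch of nr_taken pages
 * pulled off the LRU is added to *total_scanned, on top of whatever
 * shrink_list() added for mapped/swapcache pages.
 */
static int shrink_cache(struct page *pages, int nr_taken, int *total_scanned)
{
	int nr_freed = shrink_list(pages, nr_taken, total_scanned);

	*total_scanned += nr_taken;
	return nr_freed;
}

int main(void)
{
	/* Three pages: one mapped, one in swapcache, one clean. */
	struct page pages[] = { {1, 0}, {0, 1}, {0, 0} };
	int total_scanned = 0;

	shrink_cache(pages, 3, &total_scanned);

	/*
	 * 3 pages taken + 2 mapped/swapcache bonuses = 5.  This is the
	 * value balance_pgdat() now feeds to shrink_slab(), rather than
	 * the old max_scan + nr_mapped guess.
	 */
	printf("total_scanned = %d\n", total_scanned);
	return 0;
}

Run as-is this prints "total_scanned = 5": three pages taken off the LRU
plus two bonus increments, which is exactly the quantity the patched
balance_pgdat() passes to shrink_slab() in place of max_scan + nr_mapped.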