- prevent nr_scan_inactive from going negative
- compare `count' with SWAP_CLUSTER_MAX, not `max_scan'
- Use ">= SWAP_CLUSTER_MAX", not "> SWAP_CLUSTER_MAX".

---

 mm/vmscan.c |    8 ++++----
 1 files changed, 4 insertions(+), 4 deletions(-)

diff -puN mm/vmscan.c~vm-batch-inactive-scanning-fix mm/vmscan.c
--- 25/mm/vmscan.c~vm-batch-inactive-scanning-fix	2004-02-29 18:31:57.000000000 -0800
+++ 25-akpm/mm/vmscan.c	2004-02-29 18:33:05.000000000 -0800
@@ -759,14 +759,14 @@ shrink_zone(struct zone *zone, int max_s
 	ratio = (unsigned long)SWAP_CLUSTER_MAX * zone->nr_active /
 				((zone->nr_inactive | 1) * 2);
 	atomic_add(ratio+1, &zone->nr_scan_active);
-	if (atomic_read(&zone->nr_scan_active) > SWAP_CLUSTER_MAX) {
+	count = atomic_read(&zone->nr_scan_active);
+	if (count >= SWAP_CLUSTER_MAX) {
 		/*
 		 * Don't try to bring down too many pages in one attempt.
 		 * If this fails, the caller will increase `priority' and
 		 * we'll try again, with an increased chance of reclaiming
 		 * mapped memory.
 		 */
-		count = atomic_read(&zone->nr_scan_active);
 		if (count > SWAP_CLUSTER_MAX * 4)
 			count = SWAP_CLUSTER_MAX * 4;
 		atomic_set(&zone->nr_scan_active, 0);
@@ -775,8 +775,8 @@ shrink_zone(struct zone *zone, int max_s
 
 	atomic_add(max_scan, &zone->nr_scan_inactive);
 	count = atomic_read(&zone->nr_scan_inactive);
-	if (max_scan > SWAP_CLUSTER_MAX) {
-		atomic_sub(count, &zone->nr_scan_inactive);
+	if (count >= SWAP_CLUSTER_MAX) {
+		atomic_set(&zone->nr_scan_inactive, 0);
 		return shrink_cache(zone, gfp_mask, count, total_scanned);
 	}
 	return 0;
_
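
For illustration only (not part of the patch), here is a minimal userspace
sketch of the batched-scanning accounting as it behaves after this fix.
SWAP_CLUSTER_MAX matches the kernel constant's usual value, but fake_zone
and fake_shrink_cache() are made-up stand-ins for the real zone and
shrink_cache(); the point is that testing the accumulated `count' and
resetting the counter to zero (instead of subtracting) keeps the scan
credit from ever going negative.

/* Standalone userspace sketch of the fixed batching logic. */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32

struct fake_zone {
	int nr_scan_inactive;	/* accumulated scan credit */
};

/* Stand-in for shrink_cache(): just report the batch size. */
static int fake_shrink_cache(int count)
{
	printf("scanning %d inactive pages\n", count);
	return count;
}

/*
 * Mirrors the fixed shrink_zone() tail: accumulate max_scan, and only
 * scan once at least SWAP_CLUSTER_MAX pages' worth of credit has built
 * up.  Resetting to zero means the counter cannot go negative.
 */
static int fake_shrink_zone(struct fake_zone *zone, int max_scan)
{
	int count;

	zone->nr_scan_inactive += max_scan;
	count = zone->nr_scan_inactive;
	if (count >= SWAP_CLUSTER_MAX) {
		zone->nr_scan_inactive = 0;
		return fake_shrink_cache(count);
	}
	return 0;	/* not enough credit yet; keep batching */
}

int main(void)
{
	struct fake_zone zone = { 0 };
	int i;

	/* Small requests accumulate until a full batch is released. */
	for (i = 0; i < 10; i++)
		fake_shrink_zone(&zone, 8);
	return 0;
}

Ten requests of 8 pages each release two batches of 32, with 16 pages of
credit carried over, rather than scanning in tiny increments each call.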