This is just a random unsubstantiated tuning tweak: don't immediately throttle page allocators and kswapd when the going is getting heavier: scan a bit more of the LRU before throttling. --- mm/vmscan.c | 4 ++-- 1 files changed, 2 insertions(+), 2 deletions(-) diff -puN mm/vmscan.c~vmscan-throttle-later mm/vmscan.c --- 25/mm/vmscan.c~vmscan-throttle-later 2004-02-28 23:38:22.000000000 -0800 +++ 25-akpm/mm/vmscan.c 2004-02-28 23:38:22.000000000 -0800 @@ -872,7 +872,7 @@ int try_to_free_pages(struct zone **zone wakeup_bdflush(total_scanned); /* Take a nap, wait for some writeback to complete */ - if (total_scanned) + if (total_scanned && priority < DEF_PRIORITY - 2) blk_congestion_wait(WRITE, HZ/10); } if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) @@ -964,7 +964,7 @@ static int balance_pgdat(pg_data_t *pgda * OK, kswapd is getting into trouble. Take a nap, then take * another pass across the zones. */ - if (pages_scanned) + if (pages_scanned && priority < DEF_PRIORITY - 2) blk_congestion_wait(WRITE, HZ/10); } _