diff -urNp x-ref/include/linux/swap.h x/include/linux/swap.h
--- x-ref/include/linux/swap.h	2003-01-17 21:31:59.000000000 +0100
+++ x/include/linux/swap.h	2003-01-17 21:32:00.000000000 +0100
@@ -115,7 +115,7 @@ extern void swap_setup(void);
 extern wait_queue_head_t kswapd_wait;
 extern int FASTCALL(try_to_free_pages_zone(zone_t *, unsigned int));
 extern int FASTCALL(try_to_free_pages(unsigned int));
-extern int vm_vfs_scan_ratio, vm_cache_scan_ratio, vm_lru_balance_ratio, vm_passes, vm_gfp_debug, vm_mapped_ratio;
+extern int vm_vfs_scan_ratio, vm_cache_scan_ratio, vm_lru_balance_ratio, vm_passes, vm_gfp_debug, vm_mapped_ratio, vm_anon_lru;
 
 /* linux/mm/page_io.c */
 extern void rw_swap_page(int, struct page *);
diff -urNp x-ref/include/linux/sysctl.h x/include/linux/sysctl.h
--- x-ref/include/linux/sysctl.h	2003-01-17 21:31:59.000000000 +0100
+++ x/include/linux/sysctl.h	2003-01-17 21:32:34.000000000 +0100
@@ -151,6 +151,7 @@ enum
 	VM_GFP_DEBUG=18,	/* debug GFP failures */
 	VM_CACHE_SCAN_RATIO=19,	/* part of the inactive cache list to scan */
 	VM_MAPPED_RATIO=20,	/* amount of unfreeable pages that triggers swapout */
+	VM_ANON_LRU=21,		/* immediately insert anon pages in the vm page lru */
 };
 
diff -urNp x-ref/kernel/sysctl.c x/kernel/sysctl.c
--- x-ref/kernel/sysctl.c	2003-01-17 21:31:59.000000000 +0100
+++ x/kernel/sysctl.c	2003-01-17 21:32:00.000000000 +0100
@@ -272,6 +272,8 @@ static ctl_table vm_table[] = {
 	 &vm_cache_scan_ratio, sizeof(int), 0644, NULL, &proc_dointvec},
 	{VM_MAPPED_RATIO, "vm_mapped_ratio",
 	 &vm_mapped_ratio, sizeof(int), 0644, NULL, &proc_dointvec},
+	{VM_ANON_LRU, "vm_anon_lru",
+	 &vm_anon_lru, sizeof(int), 0644, NULL, &proc_dointvec},
 	{VM_LRU_BALANCE_RATIO, "vm_lru_balance_ratio",
 	 &vm_lru_balance_ratio, sizeof(int), 0644, NULL, &proc_dointvec},
 	{VM_PASSES, "vm_passes",
diff -urNp x-ref/mm/memory.c x/mm/memory.c
--- x-ref/mm/memory.c	2003-01-17 21:31:59.000000000 +0100
+++ x/mm/memory.c	2003-01-17 21:32:00.000000000 +0100
@@ -1006,7 +1006,8 @@ static int do_wp_page(struct mm_struct *
 		if (PageReserved(old_page))
 			++mm->rss;
 		break_cow(vma, new_page, address, page_table);
-		lru_cache_add(new_page);
+		if (vm_anon_lru)
+			lru_cache_add(new_page);
 
 		/* Free the old page.. */
 		new_page = old_page;
@@ -1235,7 +1236,8 @@ static int do_anonymous_page(struct mm_s
 		mm->rss++;
 		flush_page_to_ram(page);
 		entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
-		lru_cache_add(page);
+		if (vm_anon_lru)
+			lru_cache_add(page);
 	}
 
 	set_pte(page_table, entry);
@@ -1289,7 +1291,8 @@ static int do_no_page(struct mm_struct *
 		}
 		copy_user_highpage(page, new_page, address);
 		page_cache_release(new_page);
-		lru_cache_add(page);
+		if (vm_anon_lru)
+			lru_cache_add(page);
 		new_page = page;
 	}
 
diff -urNp x-ref/mm/vmscan.c x/mm/vmscan.c
--- x-ref/mm/vmscan.c	2003-01-17 21:31:59.000000000 +0100
+++ x/mm/vmscan.c	2003-01-17 21:32:00.000000000 +0100
@@ -65,6 +65,27 @@ int vm_lru_balance_ratio = 2;
 int vm_vfs_scan_ratio = 6;
 
 /*
+ * "vm_anon_lru" selects whether to immediately insert anon pages
+ * in the lru. Immediately means as soon as they're allocated
+ * during the page faults.
+ *
+ * If this is set to 0, they're inserted only after the first
+ * swapout.
+ *
+ * Having anon pages immediately inserted in the lru allows the
+ * VM to know better when it's worthwhile to start swapping
+ * anonymous ram; it will start to swap earlier and it should
+ * swap smoother and faster, but it will decrease scalability
+ * on >16-way systems by an order of magnitude. Big SMP/NUMA
+ * definitely can't take a hit on a global spinlock at
+ * every anon page allocation. So this is off by default.
+ *
+ * Low-ram machines that swap all the time want to turn
+ * this on (i.e. set to 1).
+ */
+int vm_anon_lru = 0;
+
+/*
  * The swap-out function returns 1 if it successfully
  * scanned all the pages it was asked to (`count').
  * It returns zero if it couldn't do anything,
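
Usage note (not part of the patch): since the knob is registered in vm_table with the other VM tunables, it can be flipped at runtime, e.g. with echo 1 > /proc/sys/vm/vm_anon_lru. The sketch below is a minimal user-space illustration of the same thing in C; it assumes the patch above is applied, procfs is mounted at /proc, and the sysctl therefore shows up as /proc/sys/vm/vm_anon_lru, which follows from the vm_table entry above.

/*
 * Minimal sketch, illustration only: write 0 or 1 to the new sysctl.
 *   1 - add anon pages to the LRU at fault time (low-ram boxes that
 *       swap all the time)
 *   0 - defer LRU insertion until the first swapout (the default;
 *       avoids the global LRU spinlock hit on big SMP/NUMA)
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *val = (argc > 1) ? argv[1] : "1";
	FILE *f = fopen("/proc/sys/vm/vm_anon_lru", "w");

	if (!f) {
		perror("/proc/sys/vm/vm_anon_lru");
		return EXIT_FAILURE;
	}
	fprintf(f, "%s\n", val);
	fclose(f);
	return EXIT_SUCCESS;
}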