diff -aurpN -X /home/fletch/.diff.exclude 345-schedstat_arches/kernel/sysctl.c 350-autoswap/kernel/sysctl.c
--- 345-schedstat_arches/kernel/sysctl.c	Fri Jan 9 22:25:24 2004
+++ 350-autoswap/kernel/sysctl.c	Fri Jan 9 22:57:58 2004
@@ -682,11 +682,8 @@ static ctl_table vm_table[] = {
 		.procname	= "swappiness",
 		.data		= &vm_swappiness,
 		.maxlen		= sizeof(vm_swappiness),
-		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
-		.strategy	= &sysctl_intvec,
-		.extra1		= &zero,
-		.extra2		= &one_hundred,
+		.mode		= 0444 /* read-only*/,
+		.proc_handler	= &proc_dointvec,
 	},
 #ifdef CONFIG_HUGETLB_PAGE
 	{
diff -aurpN -X /home/fletch/.diff.exclude 345-schedstat_arches/mm/vmscan.c 350-autoswap/mm/vmscan.c
--- 345-schedstat_arches/mm/vmscan.c	Fri Jan 9 17:40:10 2004
+++ 350-autoswap/mm/vmscan.c	Fri Jan 9 22:57:58 2004
@@ -47,7 +47,7 @@
 /*
  * From 0 .. 100. Higher means more swappy.
  */
-int vm_swappiness = 60;
+int vm_swappiness = 0;
 static long total_memory;
 
 #ifdef ARCH_HAS_PREFETCH
@@ -600,6 +600,7 @@ refill_inactive_zone(struct zone *zone,
 	LIST_HEAD(l_active);	/* Pages to go onto the active_list */
 	struct page *page;
 	struct pagevec pvec;
+	struct sysinfo i;
 	int reclaim_mapped = 0;
 	long mapped_ratio;
 	long distress;
@@ -640,6 +641,14 @@ refill_inactive_zone(struct zone *zone,
 	 * is mapped.
 	 */
 	mapped_ratio = (ps->nr_mapped * 100) / total_memory;
+
+	/*
+	 * Autoregulate vm_swappiness to be equal to the percentage of
+	 * pages in physical ram that are application pages. -ck
+	 */
+	si_meminfo(&i);
+	vm_swappiness = 100 - (((i.freeram + get_page_cache_size() -
+		swapper_space.nrpages) * 100) / i.totalram);
 
 	/*
 	 * Now decide how much we really want to unmap some pages. The mapped
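
The arithmetic in the added vmscan.c hunk can be checked in isolation. The sketch below is not part of the patch: it restates the same formula in a standalone C program, with hypothetical page counts standing in for what si_meminfo() and get_page_cache_size() would report, to show that vm_swappiness ends up equal to the percentage of RAM occupied by application (non-free, non-page-cache) pages.

#include <stdio.h>

int main(void)
{
	/* Hypothetical figures, in 4 KB pages, for a 1 GB machine. */
	unsigned long totalram   = 262144;	/* 1 GB of RAM            */
	unsigned long freeram    = 65536;	/* 256 MB free            */
	unsigned long page_cache = 65536;	/* 256 MB of page cache   */
	unsigned long swap_cache = 0;		/* nothing in swap cache  */

	/*
	 * Same expression as the patch: subtract free and cache pages,
	 * then take what remains as a percentage of total RAM.
	 */
	long swappiness = 100 - (((freeram + page_cache - swap_cache)
				  * 100) / totalram);

	/* Half of RAM is application pages here, so this prints 50. */
	printf("autoregulated vm_swappiness = %ld\n", swappiness);
	return 0;
}

With more RAM taken by mapped application pages the computed value rises toward 100 and reclaim leans harder on swapping them out; with mostly free or cache pages it falls toward 0, which is why the patch also makes /proc/sys/vm/swappiness read-only.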