Binary files vm-ref/ID and vm/ID differ
diff -urN vm-ref/fs/buffer.c vm/fs/buffer.c
--- vm-ref/fs/buffer.c	Sat Oct 13 17:09:42 2001
+++ vm/fs/buffer.c	Sat Oct 13 17:11:06 2001
@@ -2338,11 +2338,8 @@
 			ll_rw_block(WRITE, 1, &p);
 			tryagain = 0;
 		} else if (buffer_locked(p)) {
-			if (gfp_mask & __GFP_WAIT) {
-				wait_on_buffer(p);
-				tryagain = 1;
-			} else
-				tryagain = 0;
+			wait_on_buffer(p);
+			tryagain = 1;
 		}
 	} else
 		tryagain = 0;
diff -urN vm-ref/include/linux/mm.h vm/include/linux/mm.h
--- vm-ref/include/linux/mm.h	Sat Oct 13 17:09:42 2001
+++ vm/include/linux/mm.h	Sat Oct 13 17:14:11 2001
@@ -279,7 +279,6 @@
 #define PG_checked		12	/* kill me in 2.5.. */
 #define PG_arch_1		13
 #define PG_reserved		14
-#define PG_wait_for_IO		15
 
 /* Make it prettier to test the above... */
 #define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
@@ -310,7 +309,6 @@
  */
 #define UnlockPage(page)	do { \
 					smp_mb__before_clear_bit(); \
-					clear_bit(PG_wait_for_IO, &(page)->flags); \
 					if (!test_and_clear_bit(PG_locked, &(page)->flags)) BUG(); \
 					smp_mb__after_clear_bit(); \
 					if (waitqueue_active(&(page)->wait)) \
diff -urN vm-ref/include/linux/swap.h vm/include/linux/swap.h
--- vm-ref/include/linux/swap.h	Sat Oct 13 17:09:42 2001
+++ vm/include/linux/swap.h	Sat Oct 13 17:11:04 2001
@@ -102,9 +102,7 @@
 extern void FASTCALL(lru_cache_del(struct page *));
 
 extern void FASTCALL(deactivate_page(struct page *));
-extern void FASTCALL(deactivate_page_nolock(struct page *));
 extern void FASTCALL(activate_page(struct page *));
-extern void FASTCALL(activate_page_nolock(struct page *));
 
 extern void swap_setup(void);
 
diff -urN vm-ref/mm/filemap.c vm/mm/filemap.c
--- vm-ref/mm/filemap.c	Sat Oct 13 17:09:42 2001
+++ vm/mm/filemap.c	Sat Oct 13 17:13:58 2001
@@ -624,7 +624,7 @@
 {
 	unsigned long flags;
 
-	flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_dirty | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_checked | 1 << PG_wait_for_IO);
+	flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_dirty | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_checked);
 	page->flags = flags | (1 << PG_locked);
 	page_cache_get(page);
 	page->index = offset;
diff -urN vm-ref/mm/highmem.c vm/mm/highmem.c
--- vm-ref/mm/highmem.c	Sun Sep 23 21:11:43 2001
+++ vm/mm/highmem.c	Sat Oct 13 17:11:06 2001
@@ -327,7 +327,6 @@
 	struct list_head *tmp;
 	struct page *page;
 
-repeat_alloc:
 	page = alloc_page(GFP_NOHIGHIO);
 	if (page)
 		return page;
@@ -337,6 +336,7 @@
 	 */
 	wakeup_bdflush();
 
+repeat_alloc:
 	/*
 	 * Try to allocate from the emergency pool.
 	 */
@@ -365,7 +365,6 @@
 	struct list_head *tmp;
 	struct buffer_head *bh;
 
-repeat_alloc:
 	bh = kmem_cache_alloc(bh_cachep, SLAB_NOHIGHIO);
 	if (bh)
 		return bh;
@@ -375,6 +374,7 @@
 	 */
 	wakeup_bdflush();
 
+repeat_alloc:
 	/*
 	 * Try to allocate from the emergency pool.
 	 */
diff -urN vm-ref/mm/page_alloc.c vm/mm/page_alloc.c
--- vm-ref/mm/page_alloc.c	Sat Oct 13 17:09:42 2001
+++ vm/mm/page_alloc.c	Sat Oct 13 17:11:06 2001
@@ -390,8 +390,7 @@
 					return page;
 				}
 			}
-			if (!order)
-				goto rebalance;
+			goto rebalance;
 		} else {
 			/*
 			 * Check that no other task is been killed meanwhile,
diff -urN vm-ref/mm/shmem.c vm/mm/shmem.c
--- vm-ref/mm/shmem.c	Sat Oct 13 17:09:42 2001
+++ vm/mm/shmem.c	Sat Oct 13 17:14:03 2001
@@ -559,7 +559,7 @@
 	swap_free(*entry);
 	*entry = (swp_entry_t) {0};
 	delete_from_swap_cache(page);
-	flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_wait_for_IO);
+	flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_referenced | 1 << PG_arch_1);
 	page->flags = flags | (1 << PG_dirty);
 	add_to_page_cache_locked(page, mapping, idx);
 	info->swapped--;
diff -urN vm-ref/mm/swap.c vm/mm/swap.c
--- vm-ref/mm/swap.c	Sat Oct 13 17:09:42 2001
+++ vm/mm/swap.c	Sat Oct 13 17:11:06 2001
@@ -48,13 +48,12 @@
  * called on a page which is not on any of the lists, the
  * page is left alone.
  */
-void deactivate_page_nolock(struct page * page)
+static inline void deactivate_page_nolock(struct page * page)
 {
 	if (PageActive(page)) {
 		del_page_from_active_list(page);
 		add_page_to_inactive_list(page);
 	}
-	ClearPageReferenced(page);
 }
 
 void deactivate_page(struct page * page)
@@ -67,7 +66,7 @@
 /*
  * Move an inactive page to the active list.
  */
-void activate_page_nolock(struct page * page)
+static inline void activate_page_nolock(struct page * page)
 {
 	if (PageInactive(page)) {
 		del_page_from_inactive_list(page);
@@ -80,7 +79,6 @@
 	spin_lock(&pagemap_lru_lock);
 	activate_page_nolock(page);
 	spin_unlock(&pagemap_lru_lock);
-	SetPageReferenced(page);
 }
 
 /**
diff -urN vm-ref/mm/vmscan.c vm/mm/vmscan.c
--- vm-ref/mm/vmscan.c	Sat Oct 13 17:10:28 2001
+++ vm/mm/vmscan.c	Sat Oct 13 17:25:04 2001
@@ -51,6 +51,7 @@
 	/* Don't look at this pte if it's been accessed recently. */
 	if (ptep_test_and_clear_young(page_table)) {
 		flush_tlb_page(vma, address);
+		mark_page_accessed(page);
 		return 0;
 	}
 
@@ -284,10 +285,10 @@
 	return count;
 }
 
-static int FASTCALL(swap_out(zone_t * classzone, unsigned int gfp_mask, int nr_pages));
-static int swap_out(zone_t * classzone, unsigned int gfp_mask, int nr_pages)
+static int FASTCALL(swap_out(zone_t * classzone, unsigned int gfp_mask));
+static int swap_out(zone_t * classzone, unsigned int gfp_mask)
 {
-	int counter;
+	int counter, nr_pages = SWAP_CLUSTER_MAX << 2;
 	struct mm_struct *mm;
 
 	/* Then, look at the other mm's */
@@ -327,10 +328,12 @@
 	return 0;
 }
 
-static int FASTCALL(shrink_cache(int nr_pages, int max_scan, zone_t * classzone, unsigned int gfp_mask));
-static int shrink_cache(int nr_pages, int max_scan, zone_t * classzone, unsigned int gfp_mask)
+/* NOTE: this is not only a shrink but also a probe, see the &faliures param */
+static int FASTCALL(shrink_cache(int nr_pages, int max_scan, int * faliures, zone_t * classzone, unsigned int gfp_mask));
+static int shrink_cache(int nr_pages, int max_scan, int * faliures, zone_t * classzone, unsigned int gfp_mask)
 {
 	struct list_head * entry;
+	int __faliures = 0;
 
 	while (max_scan && classzone->nr_inactive_pages && (entry = inactive_list.prev) != &inactive_list) {
 		struct page * page;
@@ -354,24 +357,20 @@
 		if (!memclass(page->zone, classzone))
 			continue;
 
-		ClearPageReferenced(page);
-
 		max_scan--;
 
+		/* Racy check to avoid trylocking when not worthwhile */
+		if (!page->buffers && page_count(page) != 1) {
+			__faliures++;
+			continue;
+		}
+
 		/*
 		 * The page is locked. IO in progress?
 		 * Move it to the back of the list.
 		 */
 		if (unlikely(TryLockPage(page))) {
-			if (gfp_mask & __GFP_FS) {
-				if (test_and_set_bit(PG_wait_for_IO, &page->flags)) {
-					page_cache_get(page);
-					spin_unlock(&pagemap_lru_lock);
-					wait_on_page(page);
-					page_cache_release(page);
-					spin_lock(&pagemap_lru_lock);
-				}
-			}
+			__faliures++;
 			continue;
 		}
 
@@ -395,6 +394,7 @@
 				writepage(page);
 				page_cache_release(page);
 
+				__faliures++;
 				spin_lock(&pagemap_lru_lock);
 				continue;
 			}
@@ -444,6 +444,7 @@
 				UnlockPage(page);
 				page_cache_release(page);
 
+				__faliures++;
 				spin_lock(&pagemap_lru_lock);
 				continue;
 			}
@@ -468,6 +469,7 @@
 		if (!is_page_cache_freeable(page) || PageDirty(page)) {
 			spin_unlock(&pagecache_lock);
 			UnlockPage(page);
+			__faliures++;
 			continue;
 		}
 
@@ -495,6 +497,7 @@
 	}
 	spin_unlock(&pagemap_lru_lock);
 
+	*faliures = __faliures;
 	return nr_pages;
 }
 
@@ -538,11 +541,11 @@
 	}
 }
 
-static int FASTCALL(shrink_caches(zone_t * classzone, unsigned int gfp_mask, int nr_pages));
-static int shrink_caches(zone_t * classzone, unsigned int gfp_mask, int nr_pages)
+
+static int FASTCALL(shrink_caches(zone_t * classzone, unsigned int gfp_mask, int nr_pages, int * force_paging));
+static int shrink_caches(zone_t * classzone, unsigned int gfp_mask, int nr_pages, int * force_paging)
 {
-	int max_scan;
-	int chunk_size = nr_pages;
+	int max_scan, orig_nr_pages = nr_pages, faliures;
 	unsigned long ratio;
 
 	nr_pages -= kmem_cache_reap(gfp_mask);
@@ -550,21 +553,20 @@
 		return 0;
 
 	spin_lock(&pagemap_lru_lock);
-	nr_pages = chunk_size;
 	/* try to keep the active list 2/3 of the size of the cache */
-	ratio = (unsigned long) nr_pages * classzone->nr_active_pages / ((classzone->nr_inactive_pages * 2) + 1);
+	ratio = (unsigned long) orig_nr_pages * classzone->nr_active_pages / ((classzone->nr_inactive_pages * 2) + 1);
 	/* allow the active cache to grow */
-	if (ratio > nr_pages)
-		ratio = nr_pages;
+	if (ratio > orig_nr_pages)
+		ratio = orig_nr_pages;
 	refill_inactive(ratio, classzone);
 
 	max_scan = classzone->nr_inactive_pages / DEF_PRIORITY;
-	nr_pages = shrink_cache(nr_pages, max_scan, classzone, gfp_mask);
-	if (nr_pages <= 0)
-		return 0;
+	nr_pages = shrink_cache(orig_nr_pages, max_scan, &faliures, classzone, gfp_mask);
 
-	shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
-	shrink_icache_memory(DEF_PRIORITY, gfp_mask);
+	/* Here we find when it's time to do paging */
+	*force_paging = 0;
+	if (faliures > max_scan / DEF_PRIORITY)
+		*force_paging = 1;
 
 	return nr_pages;
 }
@@ -576,15 +578,19 @@
 	int ret = 0;
 
 	for (;;) {
-		int tries = DEF_PRIORITY << 1;
+		int tries = DEF_PRIORITY << 2;
 		int nr_pages = SWAP_CLUSTER_MAX;
 
 		do {
-			nr_pages = shrink_caches(classzone, gfp_mask, nr_pages);
+			int force_paging;
+
+			nr_pages = shrink_caches(classzone, gfp_mask, nr_pages, &force_paging);
+			if (force_paging || nr_pages > 0)
+				ret |= swap_out(classzone, gfp_mask);
 			if (nr_pages <= 0)
 				return 1;
-
-			ret |= swap_out(classzone, gfp_mask, SWAP_CLUSTER_MAX << 2);
+			shrink_dcache_memory(DEF_PRIORITY, gfp_mask);
+			shrink_icache_memory(DEF_PRIORITY, gfp_mask);
 		} while (--tries);
 
 		if (likely(ret))