From: Martin Hicks

I noticed that the loop to pull pages out of the LRU lists for processing
occurred twice.  This just sticks that code into a separate function to
improve readability.

Signed-off-by: Martin Hicks
Signed-off-by: Andrew Morton
---

 25-akpm/mm/vmscan.c |  111 ++++++++++++++++++++++++++--------------
 1 files changed, 57 insertions(+), 54 deletions(-)

diff -puN mm/vmscan.c~vmscan-move-code-to-isolate-lru-pages-into-separate-function mm/vmscan.c
--- 25/mm/vmscan.c~vmscan-move-code-to-isolate-lru-pages-into-separate-function	2005-03-15 22:35:46.000000000 -0800
+++ 25-akpm/mm/vmscan.c	2005-03-15 22:35:46.000000000 -0800
@@ -549,14 +549,57 @@ keep:
 }
 
 /*
- * zone->lru_lock is heavily contented.  We relieve it by quickly privatising
- * a batch of pages and working on them outside the lock.  Any pages which were
- * not freed will be added back to the LRU.
+ * zone->lru_lock is heavily contended.  Some of the functions that
+ * shrink the lists perform better by taking out a batch of pages
+ * and working on them outside the LRU lock.
  *
- * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
+ * For pagecache intensive workloads, this function is the hottest
+ * spot in the kernel (apart from copy_*_user functions).
+ *
+ * Appropriate locks must be held before calling this function.
+ *
+ * @nr_to_scan: The number of pages to look through on the list.
+ * @src:        The LRU list to pull pages off.
+ * @dst:        The temp list to put pages on to.
+ * @scanned:    The number of pages that were scanned.
  *
- * For pagecache intensive workloads, the first loop here is the hottest spot
- * in the kernel (apart from the copy_*_user functions).
+ * returns how many pages were moved onto *@dst.
  */
+static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
+                             struct list_head *dst, int *scanned)
+{
+        int nr_taken = 0;
+        struct page *page;
+        int scan = 0;
+
+        while (scan++ < nr_to_scan && !list_empty(src)) {
+                page = lru_to_page(src);
+                prefetchw_prev_lru_page(page, src, flags);
+
+                if (!TestClearPageLRU(page))
+                        BUG();
+                list_del(&page->lru);
+                if (get_page_testone(page)) {
+                        /*
+                         * It is being freed elsewhere
+                         */
+                        __put_page(page);
+                        SetPageLRU(page);
+                        list_add(&page->lru, src);
+                        continue;
+                } else {
+                        list_add(&page->lru, dst);
+                        nr_taken++;
+                }
+        }
+
+        if (scanned)
+                *scanned = scan;
+        return nr_taken;
+}
+
+/*
+ * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
+ */
 static void shrink_cache(struct zone *zone, struct scan_control *sc)
 {
@@ -570,32 +613,13 @@ static void shrink_cache(struct zone *zo
 	spin_lock_irq(&zone->lru_lock);
 	while (max_scan > 0) {
 		struct page *page;
-		int nr_taken = 0;
-		int nr_scan = 0;
+		int nr_taken;
+		int nr_scan;
 		int nr_freed;
 
-		while (nr_scan++ < sc->swap_cluster_max &&
-				!list_empty(&zone->inactive_list)) {
-			page = lru_to_page(&zone->inactive_list);
-
-			prefetchw_prev_lru_page(page,
-						&zone->inactive_list, flags);
-
-			if (!TestClearPageLRU(page))
-				BUG();
-			list_del(&page->lru);
-			if (get_page_testone(page)) {
-				/*
-				 * It is being freed elsewhere
-				 */
-				__put_page(page);
-				SetPageLRU(page);
-				list_add(&page->lru, &zone->inactive_list);
-				continue;
-			}
-			list_add(&page->lru, &page_list);
-			nr_taken++;
-		}
+		nr_taken = isolate_lru_pages(sc->swap_cluster_max,
+					     &zone->inactive_list,
+					     &page_list, &nr_scan);
 		zone->nr_inactive -= nr_taken;
 		zone->pages_scanned += nr_scan;
 		spin_unlock_irq(&zone->lru_lock);
@@ -661,7 +685,7 @@ refill_inactive_zone(struct zone *zone,
 {
 	int pgmoved;
 	int pgdeactivate = 0;
-	int pgscanned = 0;
+	int pgscanned;
 	int nr_pages = sc->nr_to_scan;
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_inactive);	/* Pages to go onto the inactive_list */
@@ -674,30 +698,9 @@ refill_inactive_zone(struct zone *zone,
 	long swap_tendency;
 
 	lru_add_drain();
-	pgmoved = 0;
 	spin_lock_irq(&zone->lru_lock);
-	while (pgscanned < nr_pages && !list_empty(&zone->active_list)) {
-		page = lru_to_page(&zone->active_list);
-		prefetchw_prev_lru_page(page, &zone->active_list, flags);
-		if (!TestClearPageLRU(page))
-			BUG();
-		list_del(&page->lru);
-		if (get_page_testone(page)) {
-			/*
-			 * It was already free!  release_pages() or put_page()
-			 * are about to remove it from the LRU and free it. So
-			 * put the refcount back and put the page back on the
-			 * LRU
-			 */
-			__put_page(page);
-			SetPageLRU(page);
-			list_add(&page->lru, &zone->active_list);
-		} else {
-			list_add(&page->lru, &l_hold);
-			pgmoved++;
-		}
-		pgscanned++;
-	}
+	pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
+				    &l_hold, &pgscanned);
 	zone->pages_scanned += pgscanned;
 	zone->nr_active -= pgmoved;
 	spin_unlock_irq(&zone->lru_lock);
_
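
The pattern this patch factors out is worth spelling out for readers outside
mm/: hold the heavily contended zone->lru_lock only long enough to privatise
a batch of pages onto a local list, then do the expensive per-page work with
the lock dropped.  Below is a minimal stand-alone sketch of that shape in
user-space C.  The names (struct node, batch_isolate(), and so on) are
hypothetical stand-ins rather than kernel APIs, and the kernel's per-page
refcount dance (TestClearPageLRU(), get_page_testone()) is deliberately
omitted.

/*
 * Minimal user-space sketch of the isolate-then-process pattern.
 * All names here are made up for illustration; only the shape of
 * the locking matches the kernel code above.
 */
#include <pthread.h>
#include <stdio.h>

struct node {
        int value;
        struct node *prev, *next;
};

/* Circular doubly-linked list with a dummy head, like struct list_head. */
static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static int list_empty(const struct node *head)
{
        return head->next == head;
}

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *head)
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

/*
 * Move up to nr_to_scan nodes from *src onto *dst, reporting how many
 * were scanned through *scanned.  Mirrors the calling convention of
 * isolate_lru_pages() minus the page refcounting.
 */
static int batch_isolate(int nr_to_scan, struct node *src,
                         struct node *dst, int *scanned)
{
        int nr_taken = 0;
        int scan = 0;

        while (scan < nr_to_scan && !list_empty(src)) {
                struct node *n = src->next;

                list_del(n);
                list_add_tail(n, dst);
                nr_taken++;
                scan++;
        }
        if (scanned)
                *scanned = scan;
        return nr_taken;
}

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        struct node shared, batch, nodes[8];
        int scanned, taken, i;

        list_init(&shared);
        list_init(&batch);
        for (i = 0; i < 8; i++) {
                nodes[i].value = i;
                list_add_tail(&nodes[i], &shared);
        }

        /* Hold the lock only long enough to grab a private batch. */
        pthread_mutex_lock(&lock);
        taken = batch_isolate(4, &shared, &batch, &scanned);
        pthread_mutex_unlock(&lock);

        /* The expensive per-node work happens outside the lock. */
        while (!list_empty(&batch)) {
                struct node *n = batch.next;

                list_del(n);
                printf("processing node %d\n", n->value);
        }
        printf("taken=%d scanned=%d\n", taken, scanned);
        return 0;
}

The payoff of this shape is that lock hold time is bounded by the batch size
(sc->swap_cluster_max in shrink_cache(), sc->nr_to_scan in
refill_inactive_zone()) rather than by the cost of processing each page,
which is what relieves the contention the old comment described.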