Switch the page allocator over to using page.lru for the buddy lists.

---

 25-akpm/mm/page_alloc.c |   28 ++++++++++++++--------------
 1 files changed, 14 insertions(+), 14 deletions(-)

diff -puN mm/page_alloc.c~page_alloc-stop-using-page-list mm/page_alloc.c
--- 25/mm/page_alloc.c~page_alloc-stop-using-page-list	2004-03-13 18:34:21.558729472 -0800
+++ 25-akpm/mm/page_alloc.c	2004-03-13 18:34:21.561729016 -0800
@@ -199,13 +199,13 @@ static inline void __free_pages_bulk (st
 		buddy2 = base + page_idx;
 		BUG_ON(bad_range(zone, buddy1));
 		BUG_ON(bad_range(zone, buddy2));
-		list_del(&buddy1->list);
+		list_del(&buddy1->lru);
 		mask <<= 1;
 		area++;
 		index >>= 1;
 		page_idx &= mask;
 	}
-	list_add(&(base + page_idx)->list, &area->free_list);
+	list_add(&(base + page_idx)->lru, &area->free_list);
 }
 
 static inline void free_pages_check(const char *function, struct page *page)
@@ -253,9 +253,9 @@ free_pages_bulk(struct zone *zone, int c
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 	while (!list_empty(list) && count--) {
-		page = list_entry(list->prev, struct page, list);
+		page = list_entry(list->prev, struct page, lru);
 		/* have to delete it as __free_pages_bulk list manipulates */
-		list_del(&page->list);
+		list_del(&page->lru);
 		__free_pages_bulk(page, base, zone, area, mask, order);
 		ret++;
 	}
@@ -271,7 +271,7 @@ void __free_pages_ok(struct page *page, 
 	mod_page_state(pgfree, 1 << order);
 	for (i = 0 ; i < (1 << order) ; ++i)
 		free_pages_check(__FUNCTION__, page + i);
-	list_add(&page->list, &list);
+	list_add(&page->lru, &list);
 	kernel_map_pages(page, 1<<order, 0);
 	free_pages_bulk(page_zone(page), 1, &list, order);
 }
@@ -290,7 +290,7 @@ expand(struct zone *zone, struct page *p
 		area--;
 		high--;
 		size >>= 1;
-		list_add(&page->list, &area->free_list);
+		list_add(&page->lru, &area->free_list);
 		MARK_USED(index, high, area);
 		index += size;
 		page += size;
@@ -353,8 +353,8 @@ static struct page *__rmqueue(struct zon
 		if (list_empty(&area->free_list))
 			continue;
 
-		page = list_entry(area->free_list.next, struct page, list);
-		list_del(&page->list);
+		page = list_entry(area->free_list.next, struct page, lru);
+		list_del(&page->lru);
 		index = page - zone->zone_mem_map;
 		if (current_order != MAX_ORDER-1)
 			MARK_USED(index, current_order, area);
@@ -384,7 +384,7 @@ static int rmqueue_bulk(struct zone *zon
 		if (page == NULL)
 			break;
 		allocated++;
-		list_add_tail(&page->list, list);
+		list_add_tail(&page->lru, list);
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
 	return allocated;
@@ -426,7 +426,7 @@ int is_head_of_free_region(struct page *
 	spin_lock_irqsave(&zone->lock, flags);
 	for (order = MAX_ORDER - 1; order >= 0; --order)
 		list_for_each(curr, &zone->free_area[order].free_list)
-			if (page == list_entry(curr, struct page, list)) {
+			if (page == list_entry(curr, struct page, lru)) {
 				spin_unlock_irqrestore(&zone->lock, flags);
 				return 1 << order;
 			}
@@ -464,7 +464,7 @@ static void fastcall free_hot_cold_page(
 	local_irq_save(flags);
 	if (pcp->count >= pcp->high)
 		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
-	list_add(&page->list, &pcp->list);
+	list_add(&page->lru, &pcp->list);
 	pcp->count++;
 	local_irq_restore(flags);
 	put_cpu();
@@ -500,8 +500,8 @@ static struct page *buffered_rmqueue(str
 			pcp->count += rmqueue_bulk(zone, 0,
 						pcp->batch, &pcp->list);
 		if (pcp->count) {
-			page = list_entry(pcp->list.next, struct page, list);
-			list_del(&page->list);
+			page = list_entry(pcp->list.next, struct page, lru);
+			list_del(&page->lru);
 			pcp->count--;
 		}
 		local_irq_restore(flags);
@@ -1362,7 +1362,7 @@ void __init memmap_init_zone(struct page
 		set_page_zone(page, NODEZONE(nid, zone));
 		set_page_count(page, 0);
 		SetPageReserved(page);
-		INIT_LIST_HEAD(&page->list);
+		INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
 		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
 		if (zone != ZONE_HIGHMEM)
_
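
For anyone reading the patch cold: it leans on the kernel's intrusive-list
idiom, where any struct list_head embedded in struct page can thread the page
onto a list, and list_entry() (container_of() underneath) recovers the
containing page from a pointer to that member.  A page sitting on a buddy
free list is never simultaneously on the LRU, so page.lru can do double duty
and every hunk above is a pure member substitution, presumably clearing the
way for page.list to be dropped from struct page later.  Below is a minimal
standalone sketch of the idiom; toy_page, its flags value, and free_list are
illustrative stand-ins, not kernel code.

#include <stddef.h>
#include <stdio.h>

/* Minimal doubly-linked list, same shape as the kernel's <linux/list.h>. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *entry, struct list_head *head)
{
	entry->next = head->next;
	entry->prev = head;
	head->next->prev = entry;
	head->next = entry;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;	/* the kernel poisons these */
}

/* list_entry(): recover the enclosing structure from a member pointer. */
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-in for struct page.  The lru member is the only linkage the
 * page needs: it threads the free list while the page is free and the LRU
 * while the page holds data -- never both at once. */
struct toy_page {
	unsigned long flags;
	struct list_head lru;
};

int main(void)
{
	struct list_head free_list = LIST_HEAD_INIT(free_list);
	struct toy_page page = { .flags = 42 };

	/* As in __free_pages_ok(): queue the page by its lru member. */
	list_add(&page.lru, &free_list);

	/* As in __rmqueue(): pop the head page back off via list_entry(). */
	struct toy_page *p = list_entry(free_list.next, struct toy_page, lru);
	list_del(&p->lru);

	printf("recovered page, flags=%lu\n", p->flags);
	return 0;
}

The sketch compiles as-is with any C99 compiler; the only piece of real
kernel machinery it reproduces is the offsetof() arithmetic behind
list_entry(), which is exactly what makes swapping &page->list for
&page->lru safe throughout the allocator.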