diff -urpN lb4-2.5.44/mm/page_alloc.c lb5-2.5.44/mm/page_alloc.c
--- lb4-2.5.44/mm/page_alloc.c	2002-10-28 18:44:31.000000000 -0800
+++ lb5-2.5.44/mm/page_alloc.c	2002-10-28 19:08:03.000000000 -0800
@@ -294,9 +294,58 @@ static struct page *buddy_alloc(struct z
 	return NULL;
 }
 
+static void split_pages(struct zone *zone, struct page *page, int page_order, int deferred_order)
+{
+	int split_order = deferred_order - 1;
+	unsigned long split_offset = 1UL << split_order;
+	struct page *split_page;
+
+	while (split_order >= page_order) {
+		split_page = &page[split_offset];
+		list_add(&split_page->list, &zone->free_area[split_order].deferred_pages);
+		zone->free_area[split_order].locally_free++;
+		--split_order;
+		split_offset >>= 1;
+	}
+}
+
+#define COALESCE_BATCH 256
 static inline struct page *steal_deferred_page(struct zone *zone, int order)
 {
-	return NULL;
+	struct page *page;
+	struct list_head *elem;
+	struct free_area *area = zone->free_area;
+	int found_order, k;
+
+	if (zone->free_pages < (1 << order))
+		return NULL;
+
+	for (found_order = order + 1; found_order < MAX_ORDER; ++found_order)
+		if (!list_empty(&area[found_order].deferred_pages)) {
+			elem = area[found_order].deferred_pages.next;
+			page = list_entry(elem, struct page, list);
+			list_del(elem);
+			area[found_order].locally_free--;
+			split_pages(zone, page, order, found_order);
+			return page;
+		}
+
+	for (found_order = order - 1; found_order >= 0; --found_order) {
+		for (k = 0; k < COALESCE_BATCH; ++k) {
+			unsigned long mask = (~0UL) << found_order;
+			if (list_empty(&area[found_order].deferred_pages))
+				break;
+			elem = area[found_order].deferred_pages.next;
+			page = list_entry(elem, struct page, list);
+			list_del(elem);
+			area[found_order].locally_free--;
+			buddy_free(page, zone->zone_mem_map, zone, &area[found_order], mask, found_order);
+		}
+		page = buddy_alloc(zone, order);
+		if (page)
+			return page;
+	}
+	return buddy_alloc(zone, order);
 }
 
 static inline struct page *__rmqueue(struct zone *zone, unsigned int order)
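
For reference, below is a small user-space sketch (not part of the patch, and show_split() is only an illustrative helper name) of the offset arithmetic split_pages() relies on: when a deferred block of order d is split down to a requested order p, the fragments pushed onto the deferred lists have orders d-1, d-2, ..., p at offsets 2^(d-1), 2^(d-2), ..., 2^p, and together with the 2^p head block kept by the caller they account for all 2^d pages.

/*
 * Standalone sketch of the split_pages() offset arithmetic.
 * Build with: cc -Wall split_sketch.c && ./a.out
 */
#include <stdio.h>

static void show_split(int page_order, int deferred_order)
{
	unsigned long covered = 1UL << page_order;	/* head block kept by the caller */
	int split_order = deferred_order - 1;
	unsigned long split_offset = 1UL << split_order;

	printf("order-%d block split for an order-%d request:\n",
	       deferred_order, page_order);
	printf("  keep  offset 0, order %d (%lu pages)\n",
	       page_order, 1UL << page_order);

	/* Same loop structure as split_pages(): walk down from d-1 to p. */
	while (split_order >= page_order) {
		printf("  defer offset %lu, order %d (%lu pages)\n",
		       split_offset, split_order, 1UL << split_order);
		covered += 1UL << split_order;
		--split_order;
		split_offset >>= 1;
	}
	printf("  total %lu of %lu pages accounted for\n\n",
	       covered, 1UL << deferred_order);
}

int main(void)
{
	show_split(0, 3);	/* single page carved from an order-3 deferred block */
	show_split(2, 5);	/* order-2 request satisfied from an order-5 deferred block */
	return 0;
}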