diff -urpN lb2-2.5.44/fs/proc/proc_misc.c lb3-2.5.44/fs/proc/proc_misc.c
--- lb2-2.5.44/fs/proc/proc_misc.c	2002-10-28 16:34:42.000000000 -0800
+++ lb3-2.5.44/fs/proc/proc_misc.c	2002-10-28 17:54:46.000000000 -0800
@@ -131,7 +131,7 @@ static int uptime_read_proc(char *page,
 }
 
 extern atomic_t vm_committed_space;
-
+extern unsigned long nr_deferred_pages(void);
 static int meminfo_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
 {
@@ -172,6 +172,7 @@ static int meminfo_read_proc(char *page,
		"SwapFree:     %8lu kB\n"
		"Dirty:        %8lu kB\n"
		"Writeback:    %8lu kB\n"
+		"Deferred:     %8lu kB\n"
		"Mapped:       %8lu kB\n"
		"Slab:         %8lu kB\n"
		"Committed_AS: %8u kB\n"
@@ -193,6 +194,7 @@ static int meminfo_read_proc(char *page,
		K(i.freeswap),
		K(ps.nr_dirty),
		K(ps.nr_writeback),
+		K(nr_deferred_pages()),
		K(ps.nr_mapped),
		K(ps.nr_slab),
		K(committed),
diff -urpN lb2-2.5.44/mm/page_alloc.c lb3-2.5.44/mm/page_alloc.c
--- lb2-2.5.44/mm/page_alloc.c	2002-10-28 17:30:56.000000000 -0800
+++ lb3-2.5.44/mm/page_alloc.c	2002-10-28 18:19:46.000000000 -0800
@@ -129,11 +129,13 @@ static inline void buddy_free(struct pag
		BUG_ON(bad_range(zone, buddy2));
		list_del(&buddy1->list);
		mask <<= 1;
+		area->globally_free--;
		area++;
		index >>= 1;
		page_idx &= mask;
	}
	list_add(&(base + page_idx)->list, &area->free_list);
+	area->globally_free++;
 }
 
 static inline void __free_pages_bulk (struct page *page, struct page *base,
@@ -141,6 +143,7 @@ static inline void __free_pages_bulk (st
		unsigned int order)
 {
	buddy_free(page, base, zone, area, mask, order);
+	area->active--;
 }
 
 static inline void free_pages_check(const char *function, struct page *page)
@@ -212,6 +215,7 @@ expand(struct zone *zone, struct page *p
		area--;
		high--;
		size >>= 1;
+		area->globally_free++;
		list_add(&page->list, &area->free_list);
		MARK_USED(index, high, area);
		index += size;
@@ -265,6 +269,7 @@ static struct page *buddy_alloc(struct z
 
		page = list_entry(curr, struct page, list);
		list_del(curr);
+		area->globally_free--;
		index = page - zone->zone_mem_map;
		if (current_order != MAX_ORDER-1)
			MARK_USED(index, current_order, area);
@@ -277,7 +282,10 @@ static struct page *buddy_alloc(struct z
 
 static inline struct page *__rmqueue(struct zone *zone, unsigned int order)
 {
-	buddy_alloc(zone, order);
+	struct page *page = buddy_alloc(zone, order);
+	if (page)
+		zone->free_area[order].active++;
+	return page;
 }
 
 /*
@@ -786,6 +794,17 @@ unsigned int nr_free_highpages (void)
 }
 #endif
 
+unsigned long nr_deferred_pages(void)
+{
+	struct zone *zone;
+	unsigned long order, pages = 0;
+
+	for_each_zone(zone)
+		for (order = 0; order < MAX_ORDER; ++order)
+			pages += zone->free_area[order].locally_free << order;
+	return pages;
+}
+
 /*
  * Accumulate the page_state information across all CPUs.
  * The result is unavoidably approximate - it can change
@@ -1391,17 +1410,16 @@ static int frag_show(struct seq_file *m,
		if (!zone->present_pages)
			continue;
 
-		spin_lock_irqsave(&zone->lock, flags);
-		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
-		for (order = 0; order < MAX_ORDER; ++order) {
-			unsigned long nr_bufs = 0;
-			struct list_head *elem;
-
-			list_for_each(elem, &(zone->free_area[order].free_list))
-				++nr_bufs;
-			seq_printf(m, "%6lu ", nr_bufs);
-		}
-		spin_unlock_irqrestore(&zone->lock, flags);
+		seq_printf(m, "Node %d, zone %8s\n", pgdat->node_id, zone->name);
+		seq_puts(m, "buddy:  ");
+		for (order = 0; order < MAX_ORDER; ++order)
+			seq_printf(m, "%6lu ", zone->free_area[order].globally_free);
+		seq_puts(m, "\ndefer:  ");
+		for (order = 0; order < MAX_ORDER; ++order)
+			seq_printf(m, "%6lu ", zone->free_area[order].locally_free);
+		seq_puts(m, "\nactive: ");
+		for (order = 0; order < MAX_ORDER; ++order)
+			seq_printf(m, "%6lu ", zone->free_area[order].active);
		seq_putc(m, '\n');
	}
	return 0;
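
Note: the per-order counters manipulated above (globally_free, locally_free,
active) live in struct free_area, and the include/linux/mmzone.h hunk adding
them is not part of this excerpt. A minimal sketch of the assumed layout,
inferred only from the accesses in the code above:

/*
 * Assumed struct free_area extension; the real header change is in a
 * hunk not shown here.
 */
struct free_area {
	struct list_head free_list;	/* blocks sitting in the buddy lists */
	unsigned long	*map;		/* buddy bitmap */
	unsigned long	globally_free;	/* order-sized blocks on free_list */
	unsigned long	locally_free;	/* deferred, not-yet-coalesced blocks,
					 * presumably maintained by the deferred
					 * freeing path elsewhere in the series */
	unsigned long	active;		/* blocks handed out by __rmqueue() */
};

Each counter is in units of order-sized blocks, which is why
nr_deferred_pages() scales locally_free by 1 << order to report pages: for
example, 3 deferred order-0 blocks plus 1 deferred order-1 block add up to
3*1 + 1*2 = 5 pages, reported as 20 kB under "Deferred:" in /proc/meminfo
with 4 KB pages. The reworked frag_show() likewise prints the counters
directly instead of walking every free list under zone->lock, so each zone's
/proc/buddyinfo entry becomes a header line plus buddy/defer/active rows
rather than a single row of counted list lengths.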