--- 2.4.10pre12aa2/include/linux/mm.h.~1~	Fri Sep 21 06:58:47 2001
+++ 2.4.10pre12aa2/include/linux/mm.h	Fri Sep 21 07:18:23 2001
@@ -419,6 +419,8 @@
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr),0)
 
+extern int start_aggressive_readahead(void);
+
 extern void show_free_areas(void);
 extern void show_free_areas_node(pg_data_t *pgdat);
 
--- 2.4.10pre12aa2/kernel/ksyms.c.~1~	Fri Sep 21 06:57:27 2001
+++ 2.4.10pre12aa2/kernel/ksyms.c	Fri Sep 21 07:14:18 2001
@@ -92,6 +92,7 @@
 EXPORT_SYMBOL(exit_sighand);
 
 /* internal kernel memory management */
+EXPORT_SYMBOL(start_aggressive_readahead);
 EXPORT_SYMBOL(_alloc_pages);
 EXPORT_SYMBOL(__alloc_pages);
 EXPORT_SYMBOL(alloc_pages_node);
--- 2.4.10pre12aa2/mm/page_alloc.c.~1~	Fri Sep 21 06:57:27 2001
+++ 2.4.10pre12aa2/mm/page_alloc.c	Fri Sep 21 07:13:25 2001
@@ -555,6 +555,37 @@
 #endif
 
 /*
+ * If it returns non zero it means there's lots of ram "free"
+ * (note: not in cache!) so any caller will know that
+ * he can allocate some memory to do some more aggressive
+ * (possibly wasteful) readahead. The state of the memory
+ * should be rechecked after every few pages allocated for
+ * doing this aggressive readahead.
+ *
+ * NOTE: it assumes that the caller will be ok to use HIGHMEM!
+ */
+int start_aggressive_readahead(void)
+{
+	pg_data_t *pgdat = pgdat_list;
+	zonelist_t *zonelist;
+	zone_t **zonep, *zone;
+	int ret = 0;
+
+	do {
+		zonelist = pgdat->node_zonelists + __GFP_HIGHMEM;
+		zonep = zonelist->zones;
+
+		for (zone = *zonep++; zone; zone = *zonep++)
+			if (zone->free_pages > zone->pages_high * 2)
+				ret = 1;
+
+		pgdat = pgdat->node_next;
+	} while (pgdat);
+
+	return ret;
+}
+
+/*
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
  * memory on each free list with the exception of the first item on the list.