diff -urNp ref/fs/buffer.c 2.4.20pre5aa1/fs/buffer.c --- ref/fs/buffer.c Fri Aug 30 02:03:12 2002 +++ 2.4.20pre5aa1/fs/buffer.c Fri Aug 30 02:03:58 2002 @@ -2939,16 +2939,6 @@ int bdflush(void *startup) complete((struct completion *)startup); - /* - * FIXME: The ndirty logic here is wrong. It's supposed to - * send bdflush back to sleep after writing ndirty buffers. - * In fact, the test is wrong so bdflush will in fact - * sleep when bdflush_stop() returns true. - * - * FIXME: If it proves useful to implement ndirty properly, - * then perhaps the value of ndirty should be scaled by the - * amount of memory in the machine. - */ for (;;) { int ndirty = bdf_prm.b_un.ndirty; diff -urNp ref/include/linux/mm.h 2.4.20pre5aa1/include/linux/mm.h --- ref/include/linux/mm.h Fri Aug 30 02:03:12 2002 +++ 2.4.20pre5aa1/include/linux/mm.h Fri Aug 30 02:03:13 2002 @@ -168,9 +168,8 @@ typedef struct page { * we can simply calculate the virtual address. On machines with * highmem some memory is mapped into kernel virtual memory * dynamically, so we need a place to store that address. - * Note that this field could be 16 bits on x86 ... ;) * - * Architectures with slow multiplication can define + * Architectures with slow ALU can define * WANT_PAGE_VIRTUAL in asm/page.h */ #if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL) @@ -311,6 +310,7 @@ typedef struct page { #define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags) #define PageChecked(page) test_bit(PG_checked, &(page)->flags) #define SetPageChecked(page) set_bit(PG_checked, &(page)->flags) + #define PageLaunder(page) test_bit(PG_launder, &(page)->flags) #define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags) #define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags) @@ -348,24 +348,18 @@ static inline void set_page_zone(struct do { \ (page)->virtual = (address); \ } while(0) - -#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */ -#define set_page_address(page, address) do { } while(0) -#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */ - -/* - * Permanent address of a page. Obviously must never be - * called on a highmem page. 
-     */
-#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
-
 #define page_address(page) ((page)->virtual)
 
 #else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#define set_page_address(page, address)  do { } while(0)
 
+#ifdef CONFIG_DISCONTIGMEM
 #define page_address(page)						\
 	__va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT)	\
 			+ page_zone(page)->zone_start_paddr)
+#else
+#define page_address(page)	__va(((page) - mem_map) << PAGE_SHIFT)
+#endif
 
 #endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
@@ -463,6 +457,8 @@ extern void FASTCALL(free_exact(void * a
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr),0)
 
+extern int start_aggressive_readahead(unsigned int);
+
 extern void show_free_areas(void);
 extern void show_free_areas_node(pg_data_t *pgdat);
 
@@ -527,8 +523,8 @@ static inline int is_page_cache_freeable
 	return page_count(page) - !!page->buffers == 1;
 }
 
-extern int can_share_swap_page(struct page *);
-extern int remove_exclusive_swap_page(struct page *);
+extern int FASTCALL(make_exclusive_page(struct page *, int));
+extern int FASTCALL(remove_exclusive_swap_page(struct page *));
 
 extern void __free_pte(pte_t);
 
diff -urNp ref/include/linux/mmzone.h 2.4.20pre5aa1/include/linux/mmzone.h
--- ref/include/linux/mmzone.h	Fri Aug 30 02:03:12 2002
+++ 2.4.20pre5aa1/include/linux/mmzone.h	Fri Aug 30 02:03:13 2002
@@ -19,6 +19,11 @@
 #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
 #endif
 
+#define ZONE_DMA		0
+#define ZONE_NORMAL		1
+#define ZONE_HIGHMEM		2
+#define MAX_NR_ZONES		3
+
 typedef struct free_area_struct {
 	struct list_head	free_list;
 	unsigned long		*map;
@@ -26,6 +31,10 @@ typedef struct free_area_struct {
 
 struct pglist_data;
 
+typedef struct zone_watermarks_s {
+	unsigned long min, low, high;
+} zone_watermarks_t;
+
 /*
  * On machines where it is needed (eg PCs) we divide physical memory
  * into multiple physical zones. On a PC we have 3 zones:
@@ -40,7 +49,15 @@ typedef struct zone_struct {
 	 */
 	spinlock_t		lock;
 	unsigned long		free_pages;
-	unsigned long		pages_min, pages_low, pages_high;
+
+	/*
+	 * We don't know whether the memory we're going to allocate will be freeable
+	 * and/or released eventually, so to avoid totally wasting several
+	 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
+	 * running OOM on the lower zones even though there's tons of freeable ram
+	 * on the higher zones).
+	 */
+	zone_watermarks_t	watermarks[MAX_NR_ZONES];
 
 	/*
 	 * The below fields are protected by different locks (or by
@@ -60,35 +77,6 @@ typedef struct zone_struct {
 	free_area_t		free_area[MAX_ORDER];
 
 	/*
-	 * wait_table		-- the array holding the hash table
-	 * wait_table_size	-- the size of the hash table array
-	 * wait_table_shift	-- wait_table_size
-	 * 				== BITS_PER_LONG (1 << wait_table_bits)
-	 *
-	 * The purpose of all these is to keep track of the people
-	 * waiting for a page to become available and make them
-	 * runnable again when possible. The trouble is that this
-	 * consumes a lot of space, especially when so few things
-	 * wait on pages at a given time. So instead of using
-	 * per-page waitqueues, we use a waitqueue hash table.
-	 *
-	 * The bucket discipline is to sleep on the same queue when
-	 * colliding and wake all in that wait queue when removing.
-	 * When something wakes, it must check to be sure its page is
-	 * truly available, a la thundering herd. The cost of a
-	 * collision is great, but given the expected load of the
-	 * table, they should be so rare as to be outweighed by the
-	 * benefits from the saved space.
- * - * __wait_on_page() and unlock_page() in mm/filemap.c, are the - * primary users of these fields, and in mm/page_alloc.c - * free_area_init_core() performs the initialization of them. - */ - wait_queue_head_t * wait_table; - unsigned long wait_table_size; - unsigned long wait_table_shift; - - /* * Discontig memory support fields. */ struct pglist_data *zone_pgdat; @@ -101,13 +89,9 @@ typedef struct zone_struct { */ char *name; unsigned long size; + unsigned long realsize; } zone_t; -#define ZONE_DMA 0 -#define ZONE_NORMAL 1 -#define ZONE_HIGHMEM 2 -#define MAX_NR_ZONES 3 - /* * One allocation request operates on a zonelist. A zonelist * is a list of zones, the first one is the 'goal' of the @@ -125,6 +109,32 @@ typedef struct zonelist_struct { #define GFP_ZONEMASK 0x0f +typedef struct wait_table_s { + /* + * The purpose of all these is to keep track of the people + * waiting for a page to become available and make them + * runnable again when possible. The trouble is that this + * consumes a lot of space, especially when so few things + * wait on pages at a given time. So instead of using + * per-page waitqueues, we use a waitqueue hash table. + * + * The bucket discipline is to sleep on the same queue when + * colliding and wake all in that wait queue when removing. + * When something wakes, it must check to be sure its page is + * truly available, a la thundering herd. The cost of a + * collision is great, but given the expected load of the + * table, they should be so rare as to be outweighed by the + * benefits from the saved space. + * + * __wait_on_page() and unlock_page() in mm/filemap.c, are the + * primary users of these fields, and in mm/page_alloc.c + * free_area_init_core() performs the initialization of them. + */ + wait_queue_head_t * head; + unsigned long shift; + unsigned long size; +} wait_table_t; + /* * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM * (mostly NUMA machines?) to denote a higher-level memory zone than the @@ -148,14 +158,15 @@ typedef struct pglist_data { unsigned long node_start_mapnr; unsigned long node_size; int node_id; + wait_table_t wait_table; struct pglist_data *node_next; } pg_data_t; extern int numnodes; extern pg_data_t *pgdat_list; -#define memclass(pgzone, classzone) (((pgzone)->zone_pgdat == (classzone)->zone_pgdat) \ - && ((pgzone) <= (classzone))) +#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) +#define memclass(pgzone, classzone) (zone_idx(pgzone) <= zone_idx(classzone)) /* * The following two are not meant for general usage. 
They are here as diff -urNp ref/include/linux/sysctl.h 2.4.20pre5aa1/include/linux/sysctl.h --- ref/include/linux/sysctl.h Fri Aug 30 02:03:12 2002 +++ 2.4.20pre5aa1/include/linux/sysctl.h Fri Aug 30 02:03:13 2002 @@ -143,12 +143,13 @@ enum VM_MAX_MAP_COUNT=11, /* int: Maximum number of active map areas */ VM_MIN_READAHEAD=12, /* Min file readahead */ VM_MAX_READAHEAD=13, /* Max file readahead */ - VM_VFS_SCAN_RATIO=14, /* part of the inactive vfs lists to scan */ - VM_LRU_BALANCE_RATIO=15,/* balance active and inactive caches */ - VM_PASSES=16, /* number of vm passes before failing */ - VM_GFP_DEBUG=17, /* debug GFP failures */ - VM_CACHE_SCAN_RATIO=18, /* part of the inactive cache list to scan */ - VM_MAPPED_RATIO=19, /* amount of unfreeable pages that triggers swapout */ + VM_HEAP_STACK_GAP=14, /* int: page gap between heap and stack */ + VM_VFS_SCAN_RATIO=15, /* part of the inactive vfs lists to scan */ + VM_LRU_BALANCE_RATIO=16,/* balance active and inactive caches */ + VM_PASSES=17, /* number of vm passes before failing */ + VM_GFP_DEBUG=18, /* debug GFP failures */ + VM_CACHE_SCAN_RATIO=19, /* part of the inactive cache list to scan */ + VM_MAPPED_RATIO=20, /* amount of unfreeable pages that triggers swapout */ }; diff -urNp ref/kernel/ksyms.c 2.4.20pre5aa1/kernel/ksyms.c --- ref/kernel/ksyms.c Fri Aug 30 02:03:12 2002 +++ 2.4.20pre5aa1/kernel/ksyms.c Fri Aug 30 02:03:13 2002 @@ -90,6 +90,7 @@ EXPORT_SYMBOL(exit_fs); EXPORT_SYMBOL(exit_sighand); /* internal kernel memory management */ +EXPORT_SYMBOL(start_aggressive_readahead); EXPORT_SYMBOL(_alloc_pages); EXPORT_SYMBOL(__alloc_pages); EXPORT_SYMBOL(alloc_pages_node); diff -urNp ref/mm/filemap.c 2.4.20pre5aa1/mm/filemap.c --- ref/mm/filemap.c Fri Aug 30 02:03:12 2002 +++ 2.4.20pre5aa1/mm/filemap.c Fri Aug 30 02:04:56 2002 @@ -744,25 +744,14 @@ static int read_cluster_nonblocking(stru return 0; } -/* - * Knuth recommends primes in approximately golden ratio to the maximum - * integer representable by a machine word for multiplicative hashing. - * Chuck Lever verified the effectiveness of this technique: - * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf - * - * These primes are chosen to be bit-sparse, that is operations on - * them can use shifts and additions instead of multiplications for - * machines where multiplications are slow. - */ -#if BITS_PER_LONG == 32 -/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ -#define GOLDEN_RATIO_PRIME 0x9e370001UL -#elif BITS_PER_LONG == 64 -/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */ -#define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL -#else -#error Define GOLDEN_RATIO_PRIME for your wordsize. -#endif +static inline wait_queue_head_t * wait_table_hashfn(struct page * page, wait_table_t * wait_table) +{ +#define i (((unsigned long) page)/(sizeof(struct page) & ~ (sizeof(struct page) - 1))) +#define s(x) ((x)+((x)>>wait_table->shift)) + return wait_table->head + (s(i) & (wait_table->size-1)); +#undef i +#undef s +} /* * In order to wait for pages to become available there must be @@ -774,34 +763,10 @@ static int read_cluster_nonblocking(stru * at a cost of "thundering herd" phenomena during rare hash * collisions. */ -static inline wait_queue_head_t *page_waitqueue(struct page *page) +static inline wait_queue_head_t * page_waitqueue(struct page *page) { - const zone_t *zone = page_zone(page); - wait_queue_head_t *wait = zone->wait_table; - unsigned long hash = (unsigned long)page; - -#if BITS_PER_LONG == 64 - /* Sigh, gcc can't optimise this alone like it does for 32 bits. 
*/
-	unsigned long n = hash;
-	n <<= 18;
-	hash -= n;
-	n <<= 33;
-	hash -= n;
-	n <<= 3;
-	hash += n;
-	n <<= 3;
-	hash -= n;
-	n <<= 4;
-	hash += n;
-	n <<= 2;
-	hash += n;
-#else
-	/* On some cpus multiply is faster, on others gcc will do shifts */
-	hash *= GOLDEN_RATIO_PRIME;
-#endif
-	hash >>= zone->wait_table_shift;
-
-	return &wait[hash];
+	pg_data_t * pgdat = page_zone(page)->zone_pgdat;
+	return wait_table_hashfn(page, &pgdat->wait_table);
 }
 
 /*
@@ -867,7 +832,7 @@ void unlock_page(struct page *page)
 	 * pages are being waited on here.
 	 */
 	if (waitqueue_active(waitqueue))
-		wake_up_all(waitqueue);
+		wake_up(waitqueue);
 }
 
 /*
@@ -880,7 +845,7 @@ static void __lock_page(struct page *pag
 	struct task_struct *tsk = current;
 	DECLARE_WAITQUEUE(wait, tsk);
 
-	add_wait_queue_exclusive(waitqueue, &wait);
+	add_wait_queue(waitqueue, &wait);
 	for (;;) {
 		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 		if (PageLocked(page)) {
diff -urNp ref/mm/memory.c 2.4.20pre5aa1/mm/memory.c
--- ref/mm/memory.c	Fri Aug 30 02:03:11 2002
+++ 2.4.20pre5aa1/mm/memory.c	Fri Aug 30 02:03:13 2002
@@ -968,15 +968,11 @@ static int do_wp_page(struct mm_struct *
 	if (!VALID_PAGE(old_page))
 		goto bad_wp_page;
 
-	if (!TryLockPage(old_page)) {
-		int reuse = can_share_swap_page(old_page);
-		unlock_page(old_page);
-		if (reuse) {
-			flush_cache_page(vma, address);
-			establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
-			spin_unlock(&mm->page_table_lock);
-			return 1;	/* Minor fault */
-		}
+	if (make_exclusive_page(old_page, 1)) {
+		flush_cache_page(vma, address);
+		establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
+		spin_unlock(&mm->page_table_lock);
+		return 1;	/* Minor fault */
 	}
 
 	/*
@@ -994,6 +990,19 @@ static int do_wp_page(struct mm_struct *
 	 * Re-check the pte - we dropped the lock
 	 */
 	spin_lock(&mm->page_table_lock);
+	/*
+	 * Keep the page pinned until we return runnable, to prevent
+	 * another thread from skipping the break_cow path, so we are
+	 * sure the pte_same check below also implies that the
+	 * _contents_ of the old_page didn't change under us (not
+	 * only that the pagetable is the same).
+	 *
+	 * Since we have the page_table_lock acquired here, if the
+	 * pte is the same it means we're still holding an additional
+	 * reference on the old_page, so we can safely
+	 * page_cache_release(old_page) before the "pte_same == true" path.
+	 */
+	page_cache_release(old_page);
 	if (pte_same(*page_table, pte)) {
 		if (PageReserved(old_page))
 			++mm->rss;
@@ -1005,7 +1014,6 @@ static int do_wp_page(struct mm_struct *
 	}
 	spin_unlock(&mm->page_table_lock);
 	page_cache_release(new_page);
-	page_cache_release(old_page);
 	return 1;	/* Minor fault */
 
 bad_wp_page:
@@ -1158,9 +1166,8 @@ static int do_swap_page(struct mm_struct
 		ret = 2;
 	}
 
-	mark_page_accessed(page);
-
-	lock_page(page);
+	if (!Page_Uptodate(page))
+		wait_on_page(page);
 
 	/*
 	 * Back out if somebody else faulted in this pte while we
@@ -1169,7 +1176,6 @@ static int do_swap_page(struct mm_struct
 	spin_lock(&mm->page_table_lock);
 	if (!pte_same(*page_table, orig_pte)) {
 		spin_unlock(&mm->page_table_lock);
-		unlock_page(page);
 		page_cache_release(page);
 		return 1;
 	}
@@ -1177,14 +1183,15 @@ static int do_swap_page(struct mm_struct
 	/* The page isn't present yet, go ahead with the fault.
*/ swap_free(entry); - if (vm_swap_full()) - remove_exclusive_swap_page(page); - mm->rss++; pte = mk_pte(page, vma->vm_page_prot); - if (write_access && can_share_swap_page(page)) - pte = pte_mkdirty(pte_mkwrite(pte)); - unlock_page(page); + if (make_exclusive_page(page, write_access)) { + if (write_access) + pte = pte_mkdirty(pte); + if (vma->vm_flags & VM_WRITE) + pte = pte_mkwrite(pte); + } + mark_page_accessed(page); flush_page_to_ram(page); flush_icache_page(vma, page); @@ -1222,15 +1229,14 @@ static int do_anonymous_page(struct mm_s spin_lock(&mm->page_table_lock); if (!pte_none(*page_table)) { - page_cache_release(page); spin_unlock(&mm->page_table_lock); + page_cache_release(page); return 1; } mm->rss++; flush_page_to_ram(page); entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); lru_cache_add(page); - mark_page_accessed(page); } set_pte(page_table, entry); @@ -1309,9 +1315,9 @@ static int do_no_page(struct mm_struct * entry = pte_mkwrite(pte_mkdirty(entry)); set_pte(page_table, entry); } else { + spin_unlock(&mm->page_table_lock); /* One of our sibling threads was faster, back out. */ page_cache_release(new_page); - spin_unlock(&mm->page_table_lock); return 1; } diff -urNp ref/mm/page_alloc.c 2.4.20pre5aa1/mm/page_alloc.c --- ref/mm/page_alloc.c Fri Aug 30 02:03:12 2002 +++ 2.4.20pre5aa1/mm/page_alloc.c Fri Aug 30 02:10:22 2002 @@ -43,6 +43,7 @@ static char *zone_names[MAX_NR_ZONES] = static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, }; static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, }; static int zone_balance_max[MAX_NR_ZONES] __initdata = { 255 , 255, 255, }; +static int lower_zone_reserve_ratio[MAX_NR_ZONES-1] = { 256, 32 }; int vm_gfp_debug = 0; @@ -254,7 +255,7 @@ static struct page * FASTCALL(balance_cl static struct page * balance_classzone(zone_t * classzone, unsigned int gfp_mask, unsigned int order, int * freed) { struct page * page = NULL; - int __freed = 0; + int __freed; if (in_interrupt()) BUG(); @@ -297,28 +298,31 @@ static struct page * balance_classzone(z return page; } +static inline unsigned long zone_free_pages(zone_t * zone, unsigned int order) +{ + long free = zone->free_pages - (1UL << order); + return free >= 0 ? 
free : 0;
+}
+
 /*
  * This is the 'heart' of the zoned buddy allocator:
  */
 struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
 {
-	unsigned long min;
 	zone_t **zone, * classzone;
 	struct page * page;
-	int freed;
+	int freed, class_idx;
 
 	zone = zonelist->zones;
 	classzone = *zone;
-	if (classzone == NULL)
-		return NULL;
-	min = 1UL << order;
+	class_idx = zone_idx(classzone);
+
 	for (;;) {
 		zone_t *z = *(zone++);
 		if (!z)
 			break;
 
-		min += z->pages_low;
-		if (z->free_pages > min) {
+		if (zone_free_pages(z, order) > z->watermarks[class_idx].low) {
 			page = rmqueue(z, order);
 			if (page)
 				return page;
@@ -331,18 +335,16 @@ struct page * __alloc_pages(unsigned int
 	wake_up_interruptible(&kswapd_wait);
 
 	zone = zonelist->zones;
-	min = 1UL << order;
 	for (;;) {
-		unsigned long local_min;
+		unsigned long min;
 		zone_t *z = *(zone++);
 		if (!z)
 			break;
 
-		local_min = z->pages_min;
+		min = z->watermarks[class_idx].min;
 		if (!(gfp_mask & __GFP_WAIT))
-			local_min >>= 2;
-		min += local_min;
-		if (z->free_pages > min) {
+			min >>= 2;
+		if (zone_free_pages(z, order) > min) {
 			page = rmqueue(z, order);
 			if (page)
 				return page;
@@ -351,8 +353,7 @@ struct page * __alloc_pages(unsigned int
 
 	/* here we're in the low on memory slow path */
 
-rebalance:
-	if (current->flags & PF_MEMALLOC) {
+	if (current->flags & PF_MEMALLOC && !in_interrupt()) {
 		zone = zonelist->zones;
 		for (;;) {
 			zone_t *z = *(zone++);
@@ -368,34 +369,51 @@ rebalance:
 
 	/* Atomic allocations - we can't balance anything */
 	if (!(gfp_mask & __GFP_WAIT))
-		return NULL;
+		goto out;
 
+ rebalance:
 	page = balance_classzone(classzone, gfp_mask, order, &freed);
 	if (page)
 		return page;
 
 	zone = zonelist->zones;
-	min = 1UL << order;
-	for (;;) {
-		zone_t *z = *(zone++);
-		if (!z)
-			break;
+	if (likely(freed)) {
+		for (;;) {
+			zone_t *z = *(zone++);
+			if (!z)
+				break;
 
-		min += z->pages_min;
-		if (z->free_pages > min) {
-			page = rmqueue(z, order);
-			if (page)
-				return page;
+			if (zone_free_pages(z, order) > z->watermarks[class_idx].min) {
+				page = rmqueue(z, order);
+				if (page)
+					return page;
+			}
 		}
-	}
+		goto rebalance;
+	} else {
+		/*
+		 * Check whether some other task has been killed meanwhile;
+		 * in such a case we can succeed the allocation.
+		 */
+		for (;;) {
+			zone_t *z = *(zone++);
+			if (!z)
+				break;
 
-	/* Don't let big-order allocations loop */
-	if (order > 3)
-		return NULL;
+			if (zone_free_pages(z, order) > z->watermarks[class_idx].high) {
+				page = rmqueue(z, order);
+				if (page)
+					return page;
+			}
+		}
+	}
 
-	/* Yield for kswapd, and try again */
-	yield();
-	goto rebalance;
+ out:
+	printk(KERN_NOTICE "__alloc_pages: %u-order allocation failed (gfp=0x%x/%i)\n",
+	       order, gfp_mask, !!(current->flags & PF_MEMALLOC));
+	if (unlikely(vm_gfp_debug))
+		dump_stack();
+	return NULL;
 }
 
 /*
@@ -505,18 +523,25 @@ unsigned int nr_free_buffer_pages (void)
 {
 	pg_data_t *pgdat;
 	unsigned int sum = 0;
+	zonelist_t *zonelist;
+	zone_t **zonep, *zone;
 
 	for_each_pgdat(pgdat) {
-		zonelist_t *zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
-		zone_t **zonep = zonelist->zones;
-		zone_t *zone;
-
-		for (zone = *zonep++; zone; zone = *zonep++) {
-			unsigned long size = zone->size;
-			unsigned long high = zone->pages_high;
-			if (size > high)
-				sum += size - high;
-		}
+		int class_idx;
+		zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
+		zonep = zonelist->zones;
+		zone = *zonep;
+		class_idx = zone_idx(zone);
+
+		sum += zone->nr_cache_pages;
+		do {
+			unsigned int free = zone->free_pages - zone->watermarks[class_idx].high;
+			zonep++;
+			zone = *zonep;
+			if (free <= 0)
+				continue;
+			sum += free;
+		} while (zone);
 	}
 
 	return sum;
@@ -535,6 +560,41 @@ unsigned int nr_free_highpages (void)
 }
 #endif
 
+/*
+ * If it returns non-zero it means there's lots of ram "free"
+ * (note: not in cache!), so any caller will know that
+ * it can allocate some memory to do some more aggressive
+ * (possibly wasteful) readahead. The state of the memory
+ * should be rechecked after every few pages allocated while
+ * doing this aggressive readahead.
+ *
+ * The gfp_mask parameter specifies which kind of memory
+ * the readahead will be applied to.
+ */
+int start_aggressive_readahead(unsigned int gfp_mask)
+{
+	pg_data_t *pgdat = pgdat_list;
+	zonelist_t *zonelist;
+	zone_t **zonep, *zone;
+	int ret = 0;
+
+	do {
+		int class_idx;
+		zonelist = pgdat->node_zonelists + (gfp_mask & GFP_ZONEMASK);
+		zonep = zonelist->zones;
+		zone = *(zonep++);
+		class_idx = zone_idx(zone);
+
+		for (; zone; zone = *(zonep++))
+			if (zone->free_pages > zone->watermarks[class_idx].high * 2)
+				ret = 1;
+
+		pgdat = pgdat->node_next;
+	} while (pgdat);
+
+	return ret;
+}
+
 int try_to_free_pages_nozone(unsigned int gfp_mask)
 {
 	pg_data_t *pgdat;
@@ -575,13 +635,9 @@ void show_free_areas_core(pg_data_t *pgd
 		zone_t *zone;
 		for (zone = tmpdat->node_zones; zone < tmpdat->node_zones + MAX_NR_ZONES; zone++)
-			printk("Zone:%s freepages:%6lukB min:%6lukB low:%6lukB "
-			       "high:%6lukB\n",
+			printk("Zone:%s freepages:%6lukB\n",
 			       zone->name,
-			       K(zone->free_pages),
-			       K(zone->pages_min),
-			       K(zone->pages_low),
-			       K(zone->pages_high));
+			       K(zone->free_pages));
 		tmpdat = tmpdat->node_next;
 	}
 
@@ -687,33 +743,45 @@ static inline void build_zonelists(pg_da
  */
 #define PAGES_PER_WAITQUEUE	256
 
-static inline unsigned long wait_table_size(unsigned long pages)
+static inline unsigned long wait_table_size(unsigned long pages, unsigned long * shift)
 {
 	unsigned long size = 1;
+	unsigned long __shift = 0;
 
 	pages /= PAGES_PER_WAITQUEUE;
 
-	while (size < pages)
+	while (size < pages) {
 		size <<= 1;
+		__shift++;
+	}
 
 	/*
-	 * Once we have dozens or even hundreds of threads sleeping
-	 * on IO we've got bigger problems than wait queue collision.
-	 * Limit the size of the wait table to a reasonable size.
+ * The usage pattern of the queues depends mostly on the I/O, + * not much of the ram size of the machine, so make sure the + * array is large enough on lowmem nodes too. */ - size = min(size, 4096UL); + size = max(size, 256UL); + *shift = max(__shift, 8UL); return size; } /* - * This is an integer logarithm so that shifts can be used later - * to extract the more random high bits from the multiplicative - * hash function before the remainder is taken. + * The per-node waitqueue mechanism uses hashed waitqueues + * per zone. */ -static inline unsigned long wait_table_bits(unsigned long size) +static inline void wait_table_init(pg_data_t *pgdat) { - return ffz(~size); + unsigned long shift, size, i; + + size = wait_table_size(pgdat->node_size, &shift); + + pgdat->wait_table.size = size; + pgdat->wait_table.shift = shift; + pgdat->wait_table.head = (wait_queue_head_t *) alloc_bootmem_node(pgdat, size * sizeof(wait_queue_head_t)); + + for(i = 0; i < size; i++) + init_waitqueue_head(pgdat->wait_table.head + i); } #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) @@ -767,11 +835,14 @@ void __init free_area_init_core(int nid, pgdat->node_start_mapnr = (lmem_map - mem_map); pgdat->nr_zones = 0; + wait_table_init(pgdat); + offset = lmem_map - mem_map; for (j = 0; j < MAX_NR_ZONES; j++) { zone_t *zone = pgdat->node_zones + j; unsigned long mask; unsigned long size, realsize; + int idx; zone_table[nid * MAX_NR_ZONES + j] = zone; realsize = size = zones_size[j]; @@ -780,28 +851,16 @@ void __init free_area_init_core(int nid, printk("zone(%lu): %lu pages.\n", j, size); zone->size = size; + zone->realsize = realsize; zone->name = zone_names[j]; zone->lock = SPIN_LOCK_UNLOCKED; zone->zone_pgdat = pgdat; zone->free_pages = 0; zone->need_balance = 0; + zone->nr_active_pages = zone->nr_inactive_pages = 0; if (!size) continue; - /* - * The per-page waitqueue mechanism uses hashed waitqueues - * per zone. 
- */ - zone->wait_table_size = wait_table_size(size); - zone->wait_table_shift = - BITS_PER_LONG - wait_table_bits(zone->wait_table_size); - zone->wait_table = (wait_queue_head_t *) - alloc_bootmem_node(pgdat, zone->wait_table_size - * sizeof(wait_queue_head_t)); - - for(i = 0; i < zone->wait_table_size; ++i) - init_waitqueue_head(zone->wait_table + i); - pgdat->nr_zones = j+1; mask = (realsize / zone_balance_ratio[j]); @@ -809,9 +868,29 @@ void __init free_area_init_core(int nid, mask = zone_balance_min[j]; else if (mask > zone_balance_max[j]) mask = zone_balance_max[j]; - zone->pages_min = mask; - zone->pages_low = mask*2; - zone->pages_high = mask*3; + zone->watermarks[j].min = mask; + zone->watermarks[j].low = mask*2; + zone->watermarks[j].high = mask*3; + /* now set the watermarks of the lower zones in the "j" classzone */ + for (idx = j-1; idx >= 0; idx--) { + zone_t * lower_zone = pgdat->node_zones + idx; + unsigned long lower_zone_reserve; + if (!lower_zone->size) + continue; + + mask = lower_zone->watermarks[idx].min; + lower_zone->watermarks[j].min = mask; + lower_zone->watermarks[j].low = mask*2; + lower_zone->watermarks[j].high = mask*3; + + /* now the brainer part */ + lower_zone_reserve = realsize / lower_zone_reserve_ratio[idx]; + lower_zone->watermarks[j].min += lower_zone_reserve; + lower_zone->watermarks[j].low += lower_zone_reserve; + lower_zone->watermarks[j].high += lower_zone_reserve; + + realsize += lower_zone->realsize; + } zone->zone_mem_map = mem_map + offset; zone->zone_start_mapnr = offset; @@ -895,3 +974,16 @@ static int __init setup_mem_frac(char *s } __setup("memfrac=", setup_mem_frac); + +static int __init setup_lower_zone_reserve(char *str) +{ + int j = 0; + + while (get_option(&str, &lower_zone_reserve_ratio[j++]) == 2); + printk("setup_lower_zone_reserve: "); + for (j = 0; j < MAX_NR_ZONES-1; j++) printk("%d ", lower_zone_reserve_ratio[j]); + printk("\n"); + return 1; +} + +__setup("lower_zone_reserve=", setup_lower_zone_reserve); diff -urNp ref/mm/swapfile.c 2.4.20pre5aa1/mm/swapfile.c --- ref/mm/swapfile.c Fri Aug 30 02:03:12 2002 +++ 2.4.20pre5aa1/mm/swapfile.c Fri Aug 30 02:03:13 2002 @@ -226,6 +226,7 @@ void swap_free(swp_entry_t entry) * Check if we're the only user of a swap page, * when the page is locked. */ +static int FASTCALL(exclusive_swap_page(struct page *page)); static int exclusive_swap_page(struct page *page) { int retval = 0; @@ -239,12 +240,13 @@ static int exclusive_swap_page(struct pa if (p->swap_map[SWP_OFFSET(entry)] == 1) { /* Recheck the page count with the pagecache lock held.. */ spin_lock(&pagecache_lock); - if (page_count(page) - !!page->buffers == 2) + if (PageSwapCache(page) && page_count(page) - !!page->buffers == 2) retval = 1; spin_unlock(&pagecache_lock); } swap_info_put(p); } + return retval; } @@ -256,21 +258,42 @@ static int exclusive_swap_page(struct pa * work, but we opportunistically check whether * we need to get all the locks first.. */ -int can_share_swap_page(struct page *page) +int make_exclusive_page(struct page *page, int write) { int retval = 0; - if (!PageLocked(page)) - BUG(); switch (page_count(page)) { case 3: if (!page->buffers) break; /* Fallthrough */ case 2: + /* racy fastpath check */ if (!PageSwapCache(page)) break; - retval = exclusive_swap_page(page); + + if ((!write && !vm_swap_full()) || TryLockPage(page)) { + /* + * Don't remove the page from the swapcache if: + * - it was a read fault and... 
+			 *  - the swap isn't full
+			 * or if
+			 *  - we failed acquiring the page lock
+			 *
+			 * NOTE: if we failed to acquire the lock we cannot remove
+			 * the page from the swapcache, but we can still safely
+			 * take over the page if it's exclusive; see the swapcache
+			 * check in the innermost critical section of exclusive_swap_page().
+			 */
+			retval = exclusive_swap_page(page);
+		} else {
+			/*
+			 * Here we have the page lock acquired and we're asked
+			 * to try to drop this page from the swapcache.
+			 */
+			retval = remove_exclusive_swap_page(page);
+			unlock_page(page);
+		}
 		break;
 	case 1:
 		if (PageReserved(page))
@@ -299,7 +322,7 @@ int remove_exclusive_swap_page(struct pa
 	entry.val = page->index;
 
 	p = swap_info_get(entry);
-	if (!p)
+	if (unlikely(!p))
 		return 0;
 
 	/* Is the only swap cache user the cache itself? */
@@ -308,18 +331,19 @@ int remove_exclusive_swap_page(struct pa
 		/* Recheck the page count with the pagecache lock held.. */
 		spin_lock(&pagecache_lock);
 		if (page_count(page) - !!page->buffers == 2) {
+			if (page->buffers && !try_to_free_buffers(page, 0))
+				/* an anonymous page cannot have page->buffers set */
+				BUG();
 			__delete_from_swap_cache(page);
+			swap_entry_free(p, SWP_OFFSET(entry));
 			retval = 1;
 		}
 		spin_unlock(&pagecache_lock);
 	}
 	swap_info_put(p);
 
-	if (retval) {
-		block_flushpage(page, 0);
-		swap_free(entry);
+	if (retval)
 		page_cache_release(page);
-	}
 
 	return retval;
 }
@@ -341,9 +365,7 @@ void free_swap_and_cache(swp_entry_t ent
 	}
 	if (page) {
 		page_cache_get(page);
-		/* Only cache user (+us), or swap space full? Free it! */
-		if (page_count(page) - !!page->buffers == 2 || vm_swap_full())
-			delete_from_swap_cache(page);
+		remove_exclusive_swap_page(page);
 		UnlockPage(page);
 		page_cache_release(page);
 	}
diff -urNp ref/mm/vmscan.c 2.4.20pre5aa1/mm/vmscan.c
--- ref/mm/vmscan.c	Fri Aug 30 02:03:13 2002
+++ 2.4.20pre5aa1/mm/vmscan.c	Fri Aug 30 02:03:13 2002
@@ -283,6 +283,7 @@ static inline int swap_out_mm(struct mm_
 {
 	unsigned long address;
 	struct vm_area_struct* vma;
+	int tlb_flush = 0;
 
 	/*
 	 * Find the proper vm-area after freezing the vma chain
@@ -297,6 +298,7 @@ static inline int swap_out_mm(struct mm_
 	}
 	vma = find_vma(mm, address);
 	if (vma) {
+		tlb_flush = 1;
 		if (address < vma->vm_start)
 			address = vma->vm_start;
 
@@ -315,6 +317,8 @@ static inline int swap_out_mm(struct mm_
 
 out_unlock:
 	spin_unlock(&mm->page_table_lock);
+	if (tlb_flush)
+		flush_tlb_mm(mm);
 	return count;
 }
 
@@ -731,11 +735,12 @@ DECLARE_WAIT_QUEUE_HEAD(kswapd_wait);
 
 static int check_classzone_need_balance(zone_t * classzone)
 {
-	zone_t * first_classzone;
+	zone_t * first_zone;
+	int class_idx = zone_idx(classzone);
 
-	first_classzone = classzone->zone_pgdat->node_zones;
-	while (classzone >= first_classzone) {
-		if (classzone->free_pages > classzone->pages_high)
+	first_zone = classzone->zone_pgdat->node_zones;
+	while (classzone >= first_zone) {
+		if (classzone->free_pages > classzone->watermarks[class_idx].high)
 			return 0;
 		classzone--;
 	}
@@ -751,12 +756,12 @@ static int kswapd_balance_pgdat(pg_data_
 		zone = pgdat->node_zones + i;
 		if (unlikely(current->need_resched))
 			schedule();
-		if (!zone->need_balance)
+		if (!zone->need_balance || !zone->size)
 			continue;
 		if (!try_to_free_pages(zone, GFP_KSWAPD, 0)) {
 			zone->need_balance = 0;
 			__set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(HZ);
+			schedule_timeout(HZ*5);
 			continue;
 		}
 		if (check_classzone_need_balance(zone))
@@ -788,7 +793,7 @@ static int kswapd_can_sleep_pgdat(pg_dat
 
 	for (i = pgdat->nr_zones-1; i >= 0; i--) {
 		zone = pgdat->node_zones + i;
-		if (!zone->need_balance)
+		if (!zone->need_balance || !zone->size)
 			continue;
 		return 0;
 	}
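
For readers who want to see the per-classzone watermark idea in isolation: the stand-alone sketch below is NOT kernel code. It is plain userspace C with mocked-up zone structures and made-up watermark numbers, and it only mirrors the fast-path decision the patched __alloc_pages() makes, namely zone_free_pages(z, order) > z->watermarks[class_idx].low. Every zone in the fallback list is tested against the watermark set it keeps for the classzone the request was aimed at, so an allocation whose classzone is ZONE_HIGHMEM must leave more of ZONE_NORMAL and ZONE_DMA free than an allocation whose classzone is ZONE_NORMAL or ZONE_DMA would.

/*
 * Stand-alone illustration (not kernel code): mocked-up zones and
 * numbers, mirroring only the decision logic of the patched
 * __alloc_pages() fast path:
 *	zone_free_pages(z, order) > z->watermarks[class_idx].low
 */
#include <stdio.h>

#define ZONE_DMA	0
#define ZONE_NORMAL	1
#define ZONE_HIGHMEM	2
#define MAX_NR_ZONES	3

typedef struct {
	unsigned long min, low, high;
} zone_watermarks_t;

typedef struct {
	const char *name;
	long free_pages;
	/* one watermark triple per possible classzone */
	zone_watermarks_t watermarks[MAX_NR_ZONES];
} zone_t;

/* free pages that would remain in "zone" after an order-"order" allocation */
static long zone_free_pages(const zone_t *zone, unsigned int order)
{
	long free = zone->free_pages - (1L << order);
	return free >= 0 ? free : 0;
}

/* fast-path test: may "zone" serve a request whose classzone index is class_idx? */
static int zone_can_allocate(const zone_t *zone, unsigned int order, int class_idx)
{
	return zone_free_pages(zone, order) > (long) zone->watermarks[class_idx].low;
}

int main(void)
{
	/*
	 * Made-up example: the DMA zone keeps a much larger "low" watermark
	 * for HIGHMEM-classzone requests than for DMA-classzone requests,
	 * which is the effect the lower_zone_reserve_ratio setup has at boot.
	 */
	zone_t zones[MAX_NR_ZONES] = {
		{ "DMA",      200, { { 20,  40,  60 }, { 80, 160, 240 }, { 120, 240,  360 } } },
		{ "Normal",  1000, { {  0,   0,   0 }, { 60, 120, 180 }, { 400, 800, 1200 } } },
		{ "HighMem",  500, { {  0,   0,   0 }, {  0,   0,   0 }, {  50, 100,  150 } } },
	};
	int class_idx = ZONE_HIGHMEM;	/* a GFP_HIGHUSER-style request */
	unsigned int order = 0;
	int i;

	for (i = ZONE_HIGHMEM; i >= ZONE_DMA; i--)
		printf("%-8s free=%4ld low[classzone]=%4lu -> %s\n",
		       zones[i].name, zones[i].free_pages,
		       zones[i].watermarks[class_idx].low,
		       zone_can_allocate(&zones[i], order, class_idx) ?
				"ok to allocate" : "skip, reserved for lower classzones");
	return 0;
}

With the numbers above the request is satisfied from HighMem or Normal, while DMA is skipped because its free memory sits below the watermark it reserves against HIGHMEM-classzone allocations; that is exactly the "don't waste the lower zones" behaviour the zone_struct comment in this patch describes.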