author     davem <davem>  2001-10-07 06:41:36 +0000
committer  davem <davem>  2001-10-07 06:41:36 +0000
commit     546cef5f8b31332d3a3afe1fdbbb616099010762
tree       2b89b7b000f53b1a5151098cf3aef7d00c5a70db /mm
parent     d4ede3da8a0f235f0d2c236ff2085737cf302305
Linus's current fix to 2.4.11-pre4 paging issues.
Diffstat (limited to 'mm')
-rw-r--r--   mm/oom_kill.c    |  3
-rw-r--r--   mm/page_alloc.c  | 48
-rw-r--r--   mm/vmscan.c      | 12
3 files changed, 22 insertions, 41 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index d7f035c48..2d6355055 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -241,13 +241,12 @@ int out_of_memory(void)
 		return 0;
 
 	/*
-	 * If the buffer and page cache (excluding swap cache) are over
+	 * If the buffer and page cache (including swap cache) are over
 	 * their (/proc tunable) minimum, we're still not OOM. We test
 	 * this to make sure we don't return OOM when the system simply
 	 * has a hard time with the cache.
 	 */
 	cache_mem = atomic_read(&page_cache_size);
-	cache_mem -= swapper_space.nrpages;
 	limit = 2;
 	limit *= num_physpages / 100;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a6c56b290..cfbf6d14f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -357,6 +357,7 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_
 
 	/* here we're in the low on memory slow path */
 
+rebalance:
 	if (current->flags & PF_MEMALLOC) {
 		zone = zonelist->zones;
 		for (;;) {
@@ -371,50 +372,29 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_
 		return NULL;
 	}
 
- rebalance:
 	page = balance_classzone(classzone, gfp_mask, order, &freed);
 	if (page)
 		return page;
 
 	zone = zonelist->zones;
-	if (likely(freed)) {
-		for (;;) {
-			zone_t *z = *(zone++);
-			if (!z)
-				break;
-
-			if (zone_free_pages(z, order) > z->pages_min) {
-				page = rmqueue(z, order);
-				if (page)
-					return page;
-			}
-		}
-		goto rebalance;
-	} else {
-		/*
-		 * Check that no other task is been killed meanwhile,
-		 * in such a case we can succeed the allocation.
-		 */
-		for (;;) {
-			zone_t *z = *(zone++);
-			if (!z)
-				break;
+	for (;;) {
+		zone_t *z = *(zone++);
+		if (!z)
+			break;
 
-			if (zone_free_pages(z, order) > z->pages_min) {
-				page = rmqueue(z, order);
-				if (page)
-					return page;
-			}
+		if (zone_free_pages(z, order) > z->pages_min) {
+			page = rmqueue(z, order);
+			if (page)
+				return page;
 		}
-
-		goto rebalance;
 	}
 
-	printk(KERN_NOTICE "__alloc_pages: %u-order allocation failed (gfp=0x%x/%i) from %p\n",
-		order, gfp_mask, !!(current->flags & PF_MEMALLOC), __builtin_return_address(0));
-	return NULL;
+	/* Yield for kswapd, and try again */
+	current->policy |= SCHED_YIELD;
+	__set_current_state(TASK_RUNNING);
+	schedule();
+	goto rebalance;
 }
-
 /*
  * Common helper functions.
  */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6e5644b6a..3e0a5b1f6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -553,14 +553,16 @@ static int shrink_caches(int priority, zone_t * classzone, unsigned int gfp_mask
 int try_to_free_pages(zone_t * classzone, unsigned int gfp_mask, unsigned int order)
 {
 	int ret = 0;
+	int priority = DEF_PRIORITY;
 	int nr_pages = SWAP_CLUSTER_MAX;
 
-	nr_pages = shrink_caches(DEF_PRIORITY, classzone, gfp_mask, nr_pages);
-
-	if (nr_pages < SWAP_CLUSTER_MAX)
-		ret |= 1;
+	do {
+		nr_pages = shrink_caches(priority, classzone, gfp_mask, nr_pages);
+		if (nr_pages <= 0)
+			return 1;
 
-	ret |= swap_out(DEF_PRIORITY, classzone, gfp_mask, SWAP_CLUSTER_MAX << 2);
+		ret |= swap_out(priority, classzone, gfp_mask, SWAP_CLUSTER_MAX << 2);
+	} while (--priority);
 
 	return ret;
 }
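A note on what the one-line oom_kill.c deletion buys: with swap-cache pages no longer subtracted, a machine whose page cache is dominated by swap cache stays above the 2%-of-RAM floor and is not declared out of memory. Below is a minimal userspace sketch of the heuristic as it stands after the patch; the sample numbers and standalone variables are illustrative assumptions, not kernel code.

#include <stdio.h>

int main(void)
{
	long num_physpages   = 32768;	/* e.g. 128 MB of 4 KB pages (assumed) */
	long page_cache_size = 1200;	/* buffer + page cache, now *including*
					 * swap-cache pages */
	long cache_mem, limit;

	cache_mem = page_cache_size;	/* the "-= swapper_space.nrpages" is gone */
	limit = 2;
	limit *= num_physpages / 100;	/* /proc-tunable floor: 2% of physical pages */

	if (cache_mem > limit)
		printf("cache %ld > floor %ld: not OOM\n", cache_mem, limit);
	else
		printf("cache %ld <= floor %ld: continue OOM checks\n", cache_mem, limit);
	return 0;
}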
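The page_alloc.c and vmscan.c changes work as a pair: __alloc_pages no longer prints a failure and returns NULL when every zone is below its watermark, it yields the CPU (so kswapd can run) and loops back to rebalance, while try_to_free_pages escalates from DEF_PRIORITY toward more aggressive scans until it meets its target. Here is a minimal userspace sketch of that control flow; the zones, numbers, and fake_shrink_caches() stand-in are assumptions, with only the loop shapes taken from the patch.

#include <sched.h>
#include <stdio.h>

#define NZONES       3
#define DEF_PRIORITY 6	/* the 2.4 kernel's default scan priority */

static int zone_free[NZONES] = { 0, 0, 0 };	/* free pages per fake zone */
static int zone_min[NZONES]  = { 4, 4, 4 };	/* pages_min watermarks     */

/* Stand-in for shrink_caches(): pretend a lower priority number
 * (a more aggressive scan) frees more pages into zone 0. */
static int fake_shrink_caches(int priority, int nr_pages)
{
	int freed = (DEF_PRIORITY - priority + 1) * 2;

	zone_free[0] += freed;
	return nr_pages - freed;
}

/* Mirrors the new try_to_free_pages(): start at DEF_PRIORITY and
 * escalate until the target is met or priority reaches zero. */
static int try_to_free_pages(int nr_pages)
{
	int priority = DEF_PRIORITY;

	do {
		nr_pages = fake_shrink_caches(priority, nr_pages);
		if (nr_pages <= 0)
			return 1;
	} while (--priority);

	return 0;
}

/* Mirrors the new __alloc_pages() slow path: scan the zones, and if
 * none is above its watermark, reclaim, yield, and retry. */
static int alloc_page(void)
{
	for (;;) {				/* rebalance: */
		for (int z = 0; z < NZONES; z++) {
			if (zone_free[z] > zone_min[z]) {
				zone_free[z]--;	/* rmqueue() stand-in */
				return z;
			}
		}
		try_to_free_pages(8);		/* SWAP_CLUSTER_MAX stand-in */
		sched_yield();			/* SCHED_YIELD + schedule() analogue */
	}
}

int main(void)
{
	printf("allocated a page from zone %d\n", alloc_page());
	return 0;
}

The trade-off this illustrates: an allocation in the slow path now blocks until reclaim makes progress rather than failing outright, which is why the old printk and NULL return disappear from that path.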