author     Nick Piggin <nickpiggin@yahoo.com.au>          2005-01-03 04:14:29 -0800
committer  Linus Torvalds <torvalds@ppc970.osdl.org>      2005-01-03 04:14:29 -0800
commit     206ca74edc648af67a6ea386f917f30c7cf1b108 (patch)
tree       9b5bf0d911cd61e2b1bb9b00e6b8839d3442c635 /mm
parent     f86789bcbbcba553d5f7d489224c1e8878e7d209 (diff)
[PATCH] mm: higher order watermarks
Move the watermark checking code into a single function. Extend it to
account for the order of the allocation and the number of free pages
that could satisfy such a request.

From: Marcelo Tosatti <marcelo.tosatti@cyclades.com>

Fix typo in Nick's kswapd-high-order awareness patch.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
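The core of the change is the per-order loop in zone_watermark_ok(). As a hedged illustration of that arithmetic, here is a standalone userspace sketch; struct mock_zone, watermark_ok() and the sample numbers are hypothetical stand-ins, and the __GFP_HIGH, can_try_harder and protection[] adjustments of the real function are omitted for brevity.

    /*
     * Hedged sketch of the watermark math this patch introduces,
     * against a mock zone rather than the kernel's struct zone.
     */
    #include <stdio.h>

    #define SKETCH_MAX_ORDER 4  /* hypothetical, not the kernel's MAX_ORDER */

    struct mock_zone {
            long free_pages;                   /* total free pages */
            long nr_free[SKETCH_MAX_ORDER];    /* free blocks per order */
    };

    static int watermark_ok(const struct mock_zone *z, int order, long mark)
    {
            /* free_pages may go negative - that's OK */
            long min = mark, free_pages = z->free_pages - (1L << order) + 1;
            int o;

            if (free_pages <= min)
                    return 0;
            for (o = 0; o < order; o++) {
                    /* At the next order, this order's pages become unavailable */
                    free_pages -= z->nr_free[o] << o;

                    /* Require fewer higher order pages to be free */
                    min >>= 1;

                    if (free_pages <= min)
                            return 0;
            }
            return 1;
    }

    int main(void)
    {
            /* 100 free pages: 40 order-0, 14 order-1 and 8 order-2 blocks */
            struct mock_zone healthy = { 100, { 40, 14, 8, 0 } };
            /* 100 free pages, all fragmented into order-0 blocks */
            struct mock_zone fragmented = { 100, { 100, 0, 0, 0 } };

            printf("healthy:    %d\n", watermark_ok(&healthy, 2, 32));    /* 1 */
            printf("fragmented: %d\n", watermark_ok(&fragmented, 2, 32)); /* 0 */
            return 0;
    }

Both mock zones report the same total free_pages, so a bare free_pages-versus-min test treats them identically; the per-order loop rejects the fragmented one because none of its free pages can back an order-2 block.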
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 58
1 file changed, 39 insertions(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7425f0d9e020a2..bb11a6dc3ebb45 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -586,6 +586,37 @@ buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
}
/*
+ * Return 1 if free pages are above 'mark'. This takes into account the order
+ * of the allocation.
+ */
+int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+ int alloc_type, int can_try_harder, int gfp_high)
+{
+ /* free_pages may go negative - that's OK */
+ long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+ int o;
+
+ if (gfp_high)
+ min -= min / 2;
+ if (can_try_harder)
+ min -= min / 4;
+
+ if (free_pages <= min + z->protection[alloc_type])
+ return 0;
+ for (o = 0; o < order; o++) {
+ /* At the next order, this order's pages become unavailable */
+ free_pages -= z->free_area[o].nr_free << o;
+
+ /* Require fewer higher order pages to be free */
+ min >>= 1;
+
+ if (free_pages <= min)
+ return 0;
+ }
+ return 1;
+}
+
+/*
* This is the 'heart' of the zoned buddy allocator.
*
* Herein lies the mysterious "incremental min". That's the
@@ -606,7 +637,6 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
struct zonelist *zonelist)
{
const int wait = gfp_mask & __GFP_WAIT;
- unsigned long min;
struct zone **zones, *z;
struct page *page;
struct reclaim_state reclaim_state;
@@ -636,9 +666,9 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
/* Go through the zonelist once, looking for a zone with enough free */
for (i = 0; (z = zones[i]) != NULL; i++) {
- min = z->pages_low + (1<<order) + z->protection[alloc_type];
- if (z->free_pages < min)
+ if (!zone_watermark_ok(z, order, z->pages_low,
+ alloc_type, 0, 0))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
@@ -654,14 +684,9 @@ __alloc_pages(unsigned int gfp_mask, unsigned int order,
* coming from realtime tasks to go deeper into reserves
*/
for (i = 0; (z = zones[i]) != NULL; i++) {
- min = z->pages_min;
- if (gfp_mask & __GFP_HIGH)
- min /= 2;
- if (can_try_harder)
- min -= min / 4;
- min += (1<<order) + z->protection[alloc_type];
-
- if (z->free_pages < min)
+ if (!zone_watermark_ok(z, order, z->pages_min,
+ alloc_type, can_try_harder,
+ gfp_mask & __GFP_HIGH))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
@@ -697,14 +722,9 @@ rebalance:
/* go through the zonelist yet one more time */
for (i = 0; (z = zones[i]) != NULL; i++) {
- min = z->pages_min;
- if (gfp_mask & __GFP_HIGH)
- min /= 2;
- if (can_try_harder)
- min -= min / 4;
- min += (1<<order) + z->protection[alloc_type];
-
- if (z->free_pages < min)
+ if (!zone_watermark_ok(z, order, z->pages_min,
+ alloc_type, can_try_harder,
+ gfp_mask & __GFP_HIGH))
continue;
page = buffered_rmqueue(z, order, gfp_mask);
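All three call sites converge on the same pattern. Each pass previously built min by hand: start from the zone watermark, halve it for __GFP_HIGH, take another quarter off for can_try_harder, then add (1 << order) + z->protection[alloc_type] and compare against the raw z->free_pages total. zone_watermark_ok() keeps those adjustments (min -= min / 2 rounds up on odd values where the old min /= 2 rounded down, but is the same halving otherwise) and folds the (1 << order) term into its free_pages starting point, so the base check is unchanged. The genuinely new behavior is the per-order loop: a fragmented zone whose total free page count would have passed the old test can now fail, as in the sketch after the commit message.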