diff options
author     Nick Piggin <piggin@cyberone.com.au>        2004-11-15 03:53:53 -0800
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2004-11-15 03:53:53 -0800
commit     c07161d8c071dd633255443607fd30de9ed65f60 (patch)
tree       ce6c68c2d4f2d42cc60658fbb487c2d33f65a297 /mm
parent     ce57094bd9472a2752d471c8dc7aadb5cbdf11e8 (diff)
download   history-c07161d8c071dd633255443607fd30de9ed65f60.tar.gz
[PATCH] mm: tune the page allocator thresholds
without patch:
pages_min pages_low pages_high
dma 4 8 12
normal 234 468 702
high 128 256 384
with patch:
pages_min pages_low pages_high
dma 17 21 25
normal 939 1173 1408
high 128 160 192
without patch:
| GFP_KERNEL | GFP_ATOMIC
allocate immediately | 9 dma, 469 norm | 9 dma, 469 norm
allocate after waking kswapd | 5 dma, 234 norm | 3 dma, 88 norm
allocate after synch reclaim | 5 dma, 234 norm | n/a
with patch:
| GFP_KERNEL | GFP_ATOMIC
allocate immediately | 22 dma, 1174 norm | 22 dma, 1174 norm
allocate after waking kswapd | 18 dma, 940 norm | 6 dma, 440 norm
allocate after synch reclaim | 18 dma, 940 norm | n/a
So the buffer between GFP_KERNEL and GFP_ATOMIC allocations is:
2.6.8 | 465 dma, 117 norm, 582 tot = 2328K
2.6.10-rc | 2 dma, 146 norm, 148 tot = 592K
patch | 12 dma, 500 norm, 512 tot = 2048K
Which is getting pretty good.
kswapd starts at:
2.6.8 477 dma, 496 norm, 973 total
2.6.10-rc 8 dma, 468 norm, 476 total
patched 17 dma, 939 norm, 956 total
So in terms of total pages, that's looking similar to 2.6.8.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 41 |
1 files changed, 23 insertions, 18 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 16c0787bb57a34..00cc1d0aaba3d0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1945,8 +1945,12 @@ static void setup_per_zone_pages_min(void)
 					lowmem_pages;
 		}
 
-		zone->pages_low = zone->pages_min * 2;
-		zone->pages_high = zone->pages_min * 3;
+		/*
+		 * When interpreting these watermarks, just keep in mind that:
+		 * zone->pages_min == (zone->pages_min * 4) / 4;
+		 */
+		zone->pages_low = (zone->pages_min * 5) / 4;
+		zone->pages_high = (zone->pages_min * 6) / 4;
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 	}
 }
@@ -1955,24 +1959,25 @@ static void setup_per_zone_pages_min(void)
  * Initialise min_free_kbytes.
  *
  * For small machines we want it small (128k min).  For large machines
- * we want it large (16MB max).  But it is not linear, because network
+ * we want it large (64MB max).  But it is not linear, because network
  * bandwidth does not increase linearly with machine size.  We use
  *
- *	min_free_kbytes = sqrt(lowmem_kbytes)
+ *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
+ *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
  *
  * which yields
  *
- * 16MB:	128k
- * 32MB:	181k
- * 64MB:	256k
- * 128MB:	362k
- * 256MB:	512k
- * 512MB:	724k
- * 1024MB:	1024k
- * 2048MB:	1448k
- * 4096MB:	2048k
- * 8192MB:	2896k
- * 16384MB:	4096k
+ * 16MB:	512k
+ * 32MB:	724k
+ * 64MB:	1024k
+ * 128MB:	1448k
+ * 256MB:	2048k
+ * 512MB:	2896k
+ * 1024MB:	4096k
+ * 2048MB:	5792k
+ * 4096MB:	8192k
+ * 8192MB:	11584k
+ * 16384MB:	16384k
  */
 static int __init init_per_zone_pages_min(void)
 {
@@ -1980,11 +1985,11 @@
 	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
 
-	min_free_kbytes = int_sqrt(lowmem_kbytes);
+	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
 	if (min_free_kbytes < 128)
 		min_free_kbytes = 128;
-	if (min_free_kbytes > 16384)
-		min_free_kbytes = 16384;
+	if (min_free_kbytes > 65536)
+		min_free_kbytes = 65536;
 	setup_per_zone_pages_min();
 	setup_per_zone_protection();
 	return 0;