author		Dave Hansen <haveblue@us.ibm.com>	2004-08-22 22:49:13 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2004-08-22 22:49:13 -0700
commit		fde9e1da4c3abf6e445aa56325c75c25ffe89a8b (patch)
tree		5efaa947f2b154e16c3d0c83d49ed98f862e08b7 /mm
parent		47ad9591d188f1ff0c13a52d37a2eb4cc510f364 (diff)
download	history-fde9e1da4c3abf6e445aa56325c75c25ffe89a8b.tar.gz
[PATCH] break out zone free list initialization
The following patch removes the individual free area initialization from free_area_init_core() and puts it in a new function, zone_init_free_lists(). It also creates pages_to_bitmap_size(), which is then used in zone_init_free_lists() as well as several times in my free area bitmap resizing patch.

First of all, I think it looks nicer this way, but it is also necessary if you want to initialize a zone after system boot, for instance when a NUMA node is hot-added. In any case, it should be functionally equivalent to the old code.

Compiles and boots on x86. I've been running with this for a few weeks and haven't seen any problems with it yet.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
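To make the sizing arithmetic of pages_to_bitmap_size() concrete (the derivation is in the comment within the diff below), here is a minimal user-space sketch of the same computation. The LONG_ALIGN definition mirrors the kernel macro, and the 4096-page zone is just a sample value, not anything taken from the patch:

#include <stdio.h>

/* Round up to a multiple of sizeof(long); mirrors the kernel's LONG_ALIGN. */
#define LONG_ALIGN(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))

/* User-space copy of the arithmetic in pages_to_bitmap_size() below. */
static unsigned long pages_to_bitmap_size(unsigned long order, unsigned long nr_pages)
{
	unsigned long bitmap_size;

	bitmap_size = (nr_pages - 1) >> (order + 4);	/* index of last byte used */
	bitmap_size = LONG_ALIGN(bitmap_size + 1);	/* byte count, rounded to longs */

	return bitmap_size;
}

int main(void)
{
	unsigned long order;

	/* Sample zone: 4096 pages (16 MB of 4 KB pages). */
	for (order = 0; order < 4; order++)
		printf("order %lu: %lu bytes\n", order, pages_to_bitmap_size(order, 4096));
	return 0;
}

For a 4096-page zone this prints 256, 128, 64 and 32 bytes for orders 0 through 3: each higher order tracks half as many buddy pairs, and each of these results is already a multiple of sizeof(long).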
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	84
1 file changed, 47 insertions(+), 37 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3f2a7407ac77b..a1e8501808a554 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1398,6 +1398,52 @@ void __init memmap_init_zone(struct page *start, unsigned long size, int nid,
}
}
+/*
+ * Page buddy system uses "index >> (i+1)", where "index" is
+ * at most "size-1".
+ *
+ * The extra "+3" is to round down to byte size (8 bits per byte
+ * assumption). Thus we get "(size-1) >> (i+4)" as the last byte
+ * we can access.
+ *
+ * The "+1" is because we want to round the byte allocation up
+ * rather than down. So we should have had a "+7" before we shifted
+ * down by three. Also, we have to add one as we actually _use_ the
+ * last bit (it's [0,n] inclusive, not [0,n[).
+ *
+ * So we actually had +7+1 before we shift down by 3. But
+ * (n+8) >> 3 == (n >> 3) + 1 (modulo overflows, which we do not have).
+ *
+ * Finally, we LONG_ALIGN because all bitmap operations are on longs.
+ */
+unsigned long pages_to_bitmap_size(unsigned long order, unsigned long nr_pages)
+{
+ unsigned long bitmap_size;
+
+ bitmap_size = (nr_pages-1) >> (order+4);
+ bitmap_size = LONG_ALIGN(bitmap_size+1);
+
+ return bitmap_size;
+}
+
+void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, unsigned long size)
+{
+ int order;
+ for (order = 0; ; order++) {
+ unsigned long bitmap_size;
+
+ INIT_LIST_HEAD(&zone->free_area[order].free_list);
+ if (order == MAX_ORDER-1) {
+ zone->free_area[order].map = NULL;
+ break;
+ }
+
+ bitmap_size = pages_to_bitmap_size(order, size);
+ zone->free_area[order].map =
+ (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
+ }
+}
+
#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(start, size, nid, zone, start_pfn) \
memmap_init_zone((start), (size), (nid), (zone), (start_pfn))
@@ -1514,43 +1560,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
zone_start_pfn += size;
lmem_map += size;
- for (i = 0; ; i++) {
- unsigned long bitmap_size;
-
- INIT_LIST_HEAD(&zone->free_area[i].free_list);
- if (i == MAX_ORDER-1) {
- zone->free_area[i].map = NULL;
- break;
- }
-
- /*
- * Page buddy system uses "index >> (i+1)",
- * where "index" is at most "size-1".
- *
- * The extra "+3" is to round down to byte
- * size (8 bits per byte assumption). Thus
- * we get "(size-1) >> (i+4)" as the last byte
- * we can access.
- *
- * The "+1" is because we want to round the
- * byte allocation up rather than down. So
- * we should have had a "+7" before we shifted
- * down by three. Also, we have to add one as
- * we actually _use_ the last bit (it's [0,n]
- * inclusive, not [0,n[).
- *
- * So we actually had +7+1 before we shift
- * down by 3. But (n+8) >> 3 == (n >> 3) + 1
- * (modulo overflows, which we do not have).
- *
- * Finally, we LONG_ALIGN because all bitmap
- * operations are on longs.
- */
- bitmap_size = (size-1) >> (i+4);
- bitmap_size = LONG_ALIGN(bitmap_size+1);
- zone->free_area[i].map =
- (unsigned long *) alloc_bootmem_node(pgdat, bitmap_size);
- }
+ zone_init_free_lists(pgdat, zone, zone->spanned_pages);
}
}
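To see the resulting structure without a kernel build, the shape of the new loop can be exercised with stub types. Note the assumptions: struct zone and struct free_area here are simplified stand-ins, calloc() takes the place of alloc_bootmem_node(), and MAX_ORDER is assumed to be 11, the usual default of that era. This is an illustrative sketch, not the kernel code itself:

#include <stdio.h>
#include <stdlib.h>

#define MAX_ORDER 11
#define LONG_ALIGN(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))

/* Stand-in for the kernel's struct free_area (free_list omitted). */
struct free_area {
	unsigned long *map;
};

struct zone {
	struct free_area free_area[MAX_ORDER];
};

static unsigned long pages_to_bitmap_size(unsigned long order, unsigned long nr_pages)
{
	return LONG_ALIGN(((nr_pages - 1) >> (order + 4)) + 1);
}

/*
 * Mirrors the loop in zone_init_free_lists(): every order gets a free list
 * (omitted here); all but the highest order also get a buddy bitmap. A
 * MAX_ORDER-1 block has no buddy to coalesce with, so no bitmap is needed.
 */
static void init_free_lists(struct zone *zone, unsigned long size)
{
	int order;

	for (order = 0; ; order++) {
		if (order == MAX_ORDER - 1) {
			zone->free_area[order].map = NULL;
			break;
		}
		zone->free_area[order].map =
			calloc(1, pages_to_bitmap_size(order, size));
	}
}

int main(void)
{
	static struct zone z;
	int order;

	init_free_lists(&z, 4096);
	for (order = 0; order < MAX_ORDER; order++)
		printf("order %2d: map=%p\n", order, (void *)z.free_area[order].map);
	return 0;
}

Running it shows a non-NULL map for orders 0 through MAX_ORDER-2 and NULL for the top order, matching the special case the patch carries over from the old open-coded loop.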