author     Dave Hansen <haveblue@us.ibm.com>          2004-08-23 21:20:45 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2004-08-23 21:20:45 -0700
commit     01af8988b3a146015f5d188741c7d2a23f1d7e1a
tree       fda92cbaf84305f8571873547a274462c2e986cd /mm
parent     14297505ffdc628bd9dcf5801c16dbc5435e08b0
[PATCH] don't pass mem_map into init functions
When using CONFIG_NONLINEAR, a zone's mem_map isn't contiguous, and isn't allocated in the same place. This means that nonlinear doesn't really have a mem_map[] to pass into free_area_init_node() or memmap_init_zone() which makes any sense.

So, this patch removes the 'struct page *mem_map' argument to both of those functions. All non-NUMA architectures just pass a NULL in there, which is ignored. The solution on the NUMA arches is to pass the mem_map in via the pgdat, which works just fine.

To replace the removed arguments, a call to pfn_to_page(node_start_pfn) is made. This is valid because all of the pfn_to_page() implementations rely only on the pgdats, which are already set up at this time. Plus, the pfn_to_page() method should work for any future nonlinear-type code.

Finally, the patch creates a function: node_alloc_mem_map(), which I plan to effectively #ifdef out for nonlinear at some future date.

Compile tested and booted on SMP x86, NUMAQ, and ppc64.

From: Jesse Barnes <jbarnes@engr.sgi.com>

Fix up ia64 specific memory map init function in light of Dave's memmap_init cleanups.

Signed-off-by: Jesse Barnes <jbarnes@sgi.com>

From: Dave Hansen <haveblue@us.ibm.com>

Looks like I missed a couple of architectures. This patch, on top of my previous one and Jesse's, should clean up the rest.

From: William Lee Irwin III <wli@holomorphy.com>

x86-64 wouldn't compile with NUMA support on, as node_alloc_mem_map() references mem_map outside #ifdefs on CONFIG_NUMA/CONFIG_DISCONTIGMEM. This patch wraps that reference in such an #ifdef.

From: William Lee Irwin III <wli@holomorphy.com>

Initializing NODE_DATA(nid)->node_mem_map prior to calling it should do.

From: Dave Hansen <haveblue@us.ibm.com>

Rick, I bet you didn't think your nerf weapons would be so effective in getting that compile error fixed, did you? Applying the attached patch and commenting out the line behind this error:

  arch/i386/kernel/nmi.c: In function `proc_unknown_nmi_panic':
  arch/i386/kernel/nmi.c:558: too few arguments to function `proc_dointvec'

will let it compile.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
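For the common flat-memory case, the only visible change at a call site is one fewer argument: the mem_map pointer that used to be passed in (as NULL) is now derived internally via pfn_to_page() and node_alloc_mem_map(). A minimal before/after sketch of the generic path, mirroring the free_area_init() hunk in the diff below:

	/* Before: callers supplied a mem_map pointer (NULL on flat memory). */
	free_area_init_node(0, &contig_page_data, NULL, zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
	mem_map = contig_page_data.node_mem_map;

	/* After: the argument is gone; node_alloc_mem_map() allocates the map
	 * with alloc_bootmem_node() and, when !CONFIG_DISCONTIGMEM, sets the
	 * global mem_map from contig_page_data itself. */
	free_area_init_node(0, &contig_page_data, zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);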
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 41
1 file changed, 23 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 62be1c9e9c85ba..aa5cfa69e584a5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1383,9 +1383,10 @@ static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
* up by free_all_bootmem() once the early boot process is
* done. Non-atomic initialization, single-pass.
*/
-void __init memmap_init_zone(struct page *start, unsigned long size, int nid,
- unsigned long zone, unsigned long start_pfn)
+void __init memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+ unsigned long start_pfn)
{
+ struct page *start = pfn_to_page(start_pfn);
struct page *page;
for (page = start; page < (start + size); page++) {
@@ -1449,8 +1450,8 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, unsigned
}
#ifndef __HAVE_ARCH_MEMMAP_INIT
-#define memmap_init(start, size, nid, zone, start_pfn) \
- memmap_init_zone((start), (size), (nid), (zone), (start_pfn))
+#define memmap_init(size, nid, zone, start_pfn) \
+ memmap_init_zone((size), (nid), (zone), (start_pfn))
#endif
/*
@@ -1465,7 +1466,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
unsigned long i, j;
const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1);
int cpu, nid = pgdat->node_id;
- struct page *lmem_map = pgdat->node_mem_map;
unsigned long zone_start_pfn = pgdat->node_start_pfn;
pgdat->nr_zones = 0;
@@ -1553,35 +1553,41 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
pgdat->nr_zones = j+1;
- zone->zone_mem_map = lmem_map;
+ zone->zone_mem_map = pfn_to_page(zone_start_pfn);
zone->zone_start_pfn = zone_start_pfn;
if ((zone_start_pfn) & (zone_required_alignment-1))
printk("BUG: wrong zone alignment, it will crash\n");
- memmap_init(lmem_map, size, nid, j, zone_start_pfn);
+ memmap_init(size, nid, j, zone_start_pfn);
zone_start_pfn += size;
- lmem_map += size;
zone_init_free_lists(pgdat, zone, zone->spanned_pages);
}
}
-void __init free_area_init_node(int nid, struct pglist_data *pgdat,
- struct page *node_mem_map, unsigned long *zones_size,
- unsigned long node_start_pfn, unsigned long *zholes_size)
+void __init node_alloc_mem_map(struct pglist_data *pgdat)
{
unsigned long size;
+ size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+ pgdat->node_mem_map = alloc_bootmem_node(pgdat, size);
+#ifndef CONFIG_DISCONTIGMEM
+ mem_map = contig_page_data.node_mem_map;
+#endif
+}
+
+void __init free_area_init_node(int nid, struct pglist_data *pgdat,
+ unsigned long *zones_size, unsigned long node_start_pfn,
+ unsigned long *zholes_size)
+{
pgdat->node_id = nid;
pgdat->node_start_pfn = node_start_pfn;
calculate_zone_totalpages(pgdat, zones_size, zholes_size);
- if (!node_mem_map) {
- size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
- node_mem_map = alloc_bootmem_node(pgdat, size);
- }
- pgdat->node_mem_map = node_mem_map;
+
+ if (!pfn_to_page(node_start_pfn))
+ node_alloc_mem_map(pgdat);
free_area_init_core(pgdat, zones_size, zholes_size);
}
@@ -1594,9 +1600,8 @@ EXPORT_SYMBOL(contig_page_data);
void __init free_area_init(unsigned long *zones_size)
{
- free_area_init_node(0, &contig_page_data, NULL, zones_size,
+ free_area_init_node(0, &contig_page_data, zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
- mem_map = contig_page_data.node_mem_map;
}
#endif
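
On NUMA/DISCONTIGMEM architectures the mem_map now travels through the pgdat instead of an explicit argument: per-node setup code initializes NODE_DATA(nid)->node_mem_map before calling free_area_init_node(), and because pfn_to_page() on those configurations resolves through the pgdat, node_alloc_mem_map() is then skipped. A rough sketch of the arch side; the function name and parameters are illustrative, not taken from this patch:

	/* Illustrative only: arch_node_setup() and its parameters are hypothetical.
	 * Assumes <linux/mm.h>, <linux/mmzone.h>, and <linux/bootmem.h>. */
	void __init arch_node_setup(int nid, unsigned long *zones_size,
			unsigned long *zholes_size, unsigned long start_pfn,
			unsigned long spanned_pages)
	{
		struct pglist_data *pgdat = NODE_DATA(nid);
		unsigned long map_size = (spanned_pages + 1) * sizeof(struct page);

		/* Hand the per-node mem_map to the core via the pgdat; pfn_to_page()
		 * then returns a valid pointer for this node, so free_area_init_node()
		 * will not fall back to node_alloc_mem_map(). */
		pgdat->node_mem_map = alloc_bootmem_node(pgdat, map_size);

		free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);
	}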