author    Andi Kleen <ak@suse.de>    2004-06-20 04:57:59 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2004-06-20 04:57:59 -0700
commit    2150328b658b717b12069335e3d8288295eb5180 (patch)
tree      5876a0771dacfac7e6b634752d441148724b6ee7 /mm
parent    0d8e8a6c1e943e883d950802d49bb06dc84677f9 (diff)
download  history-2150328b658b717b12069335e3d8288295eb5180.tar.gz
[PATCH] NUMA API updates
This patch fixes three issues in the NUMA API:

- When 1 was passed to set_mempolicy or mbind as the maxnode argument,
  get_nodes could corrupt the stack and cause a crash. Fix that.

- Remove the restriction to do interleaving only for order-0 allocations.
  Together with the earlier patch that uses the interleave policy at boot
  time, this should restore the original behaviour of distributing the
  big hash tables.

- Fix some bad white space in comments.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
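
For reference, a minimal user-space sketch of the first bug (a hypothetical
reproducer, not part of the patch; it assumes libnuma's <numaif.h> wrapper
for the set_mempolicy syscall, so compile with -lnuma). Before the fix,
maxnode == 1 was decremented to 0, get_nodes() computed nlongs == 0, and the
final masking step wrote through nodes[-1] on the kernel stack:

/* Hypothetical reproducer sketch; not from the original patch. */
#include <stdio.h>
#include <numaif.h>	/* set_mempolicy(), MPOL_DEFAULT; link with -lnuma */

int main(void)
{
	unsigned long mask = 0;

	/* maxnode == 1 describes a one-bit (here empty) nodemask.  The
	 * unpatched get_nodes() decremented it to 0, took nlongs == 0,
	 * and then executed nodes[nlongs - 1] &= endmask, i.e. a write
	 * to nodes[-1].  With the fix it returns an empty mask early. */
	if (set_mempolicy(MPOL_DEFAULT, &mask, 1) != 0)
		perror("set_mempolicy");
	else
		printf("set_mempolicy with maxnode == 1 succeeded\n");
	return 0;
}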
Diffstat (limited to 'mm')
-rw-r--r--  mm/mempolicy.c  23
1 file changed, 13 insertions, 10 deletions
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 096a2490471924..4f1fc840550a72 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -26,7 +26,7 @@
* process policy.
* default Allocate on the local node first, or when on a VMA
* use the process policy. This is what Linux always did
- * in a NUMA aware kernel and still does by, ahem, default.
+ * in a NUMA aware kernel and still does by, ahem, default.
*
* The process policy is applied for most non interrupt memory allocations
* in that process' context. Interrupts ignore the policies and always
@@ -135,6 +135,10 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
unsigned long endmask;
--maxnode;
+ bitmap_zero(nodes, MAX_NUMNODES);
+ if (maxnode == 0 || !nmask)
+ return 0;
+
nlongs = BITS_TO_LONGS(maxnode);
if ((maxnode % BITS_PER_LONG) == 0)
endmask = ~0UL;
@@ -143,7 +147,7 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
/* When the user specified more nodes than supported just check
if the non supported part is all zero. */
- if (nmask && nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
+ if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
unsigned long t;
if (get_user(t, nmask + k))
@@ -158,8 +162,7 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
endmask = ~0UL;
}
- bitmap_zero(nodes, MAX_NUMNODES);
- if (nmask && copy_from_user(nodes, nmask, nlongs*sizeof(unsigned long)))
+ if (copy_from_user(nodes, nmask, nlongs*sizeof(unsigned long)))
return -EFAULT;
nodes[nlongs-1] &= endmask;
return mpol_check_policy(mode, nodes);
@@ -622,14 +625,14 @@ static unsigned offset_il_node(struct mempolicy *pol,
/* Allocate a page in interleaved policy.
Own path because it needs to do special accounting. */
-static struct page *alloc_page_interleave(unsigned gfp, unsigned nid)
+static struct page *alloc_page_interleave(unsigned gfp, unsigned order, unsigned nid)
{
struct zonelist *zl;
struct page *page;
BUG_ON(!test_bit(nid, node_online_map));
zl = NODE_DATA(nid)->node_zonelists + (gfp & GFP_ZONEMASK);
- page = __alloc_pages(gfp, 0, zl);
+ page = __alloc_pages(gfp, order, zl);
if (page && page_zone(page) == zl->zones[0]) {
zl->zones[0]->pageset[get_cpu()].interleave_hit++;
put_cpu();
@@ -677,7 +680,7 @@ alloc_page_vma(unsigned gfp, struct vm_area_struct *vma, unsigned long addr)
/* fall back to process interleaving */
nid = interleave_nodes(pol);
}
- return alloc_page_interleave(gfp, nid);
+ return alloc_page_interleave(gfp, 0, nid);
}
return __alloc_pages(gfp, 0, zonelist_policy(gfp, pol));
}
@@ -686,7 +689,7 @@ alloc_page_vma(unsigned gfp, struct vm_area_struct *vma, unsigned long addr)
* alloc_pages_current - Allocate pages.
*
* @gfp:
- * %GFP_USER user allocation,
+ * %GFP_USER user allocation,
* %GFP_KERNEL kernel allocation,
* %GFP_HIGHMEM highmem allocation,
* %GFP_FS don't call back into a file system.
@@ -703,8 +706,8 @@ struct page *alloc_pages_current(unsigned gfp, unsigned order)
if (!pol || in_interrupt())
pol = &default_policy;
- if (pol->policy == MPOL_INTERLEAVE && order == 0)
- return alloc_page_interleave(gfp, interleave_nodes(pol));
+ if (pol->policy == MPOL_INTERLEAVE)
+ return alloc_page_interleave(gfp, order, interleave_nodes(pol));
return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
}
EXPORT_SYMBOL(alloc_pages_current);
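
As a usage note, here is a sketch of how the interleave policy is driven from
user space (illustrative only; it assumes a machine where nodes 0 and 1 are
online and allowed, and libnuma's <numaif.h> wrappers, compiled with -lnuma).
With this patch the same MPOL_INTERLEAVE policy also spreads kernel
allocations of order > 0, such as the boot-time hash tables, instead of only
single pages:

/* Illustrative sketch; assumes nodes 0 and 1 exist and are allowed. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <numaif.h>	/* mbind(), get_mempolicy(); link with -lnuma */

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	size_t len = 8 * psz;
	unsigned long mask = 0x3;	/* interleave over nodes 0 and 1 */
	size_t i;
	char *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Round-robin this VMA's pages over the nodes in mask. */
	if (mbind(buf, len, MPOL_INTERLEAVE, &mask, 8 * sizeof(mask), 0) != 0) {
		perror("mbind");
		return 1;
	}

	memset(buf, 0, len);	/* fault the pages in */

	for (i = 0; i < 8; i++) {
		int node;
		/* MPOL_F_NODE | MPOL_F_ADDR reports the node backing addr. */
		if (get_mempolicy(&node, NULL, 0, buf + i * psz,
				  MPOL_F_NODE | MPOL_F_ADDR) == 0)
			printf("page %zu is on node %d\n", i, node);
	}
	return 0;
}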