diff -upN reference/include/linux/gfp.h current/include/linux/gfp.h
--- reference/include/linux/gfp.h	2003-10-01 11:41:17.000000000 -0700
+++ current/include/linux/gfp.h	2004-04-09 11:53:01.000000000 -0700
@@ -32,6 +32,7 @@
 #define __GFP_NOFAIL	0x800	/* Retry for ever. Cannot fail */
 #define __GFP_NORETRY	0x1000	/* Do not retry. Might fail */
 #define __GFP_NO_GROW	0x2000	/* Slab internal usage */
+#define __GFP_NODE_STRICT 0x4000 /* Do not fall back to other nodes */
 
 #define __GFP_BITS_SHIFT 16	/* Room for 16 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
diff -upN reference/mm/page_alloc.c current/mm/page_alloc.c
--- reference/mm/page_alloc.c	2004-04-08 15:10:25.000000000 -0700
+++ current/mm/page_alloc.c	2004-04-09 11:53:01.000000000 -0700
@@ -579,6 +579,10 @@ __alloc_pages(unsigned int gfp_mask, uns
 		struct zone *z = zones[i];
 		unsigned long local_low;
 
+		if ((__GFP_NODE_STRICT & gfp_mask) &&
+		    (pfn_to_nid(z->zone_start_pfn) != numa_node_id()))
+			continue;
+
 		/*
 		 * This is the fabled 'incremental min'. We let real-time tasks
 		 * dip their real-time paws a little deeper into reserves.