diff -aurpN -X /home/fletch/.diff.exclude 280-gcov/include/linux/gfp.h 290-gfp_node_strict/include/linux/gfp.h
--- 280-gcov/include/linux/gfp.h	Tue Sep  2 09:55:55 2003
+++ 290-gfp_node_strict/include/linux/gfp.h	Thu Jan  1 23:22:32 2004
@@ -32,6 +32,7 @@
 #define __GFP_NOFAIL	0x800	/* Retry for ever.  Cannot fail */
 #define __GFP_NORETRY	0x1000	/* Do not retry.  Might fail */
 #define __GFP_NO_GROW	0x2000	/* Slab internal usage */
+#define __GFP_NODE_STRICT 0x4000 /* Do not fall back to other nodes */
 
 #define __GFP_BITS_SHIFT 16	/* Room for 16 __GFP_FOO bits */
 #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)

diff -aurpN -X /home/fletch/.diff.exclude 280-gcov/mm/page_alloc.c 290-gfp_node_strict/mm/page_alloc.c
--- 280-gcov/mm/page_alloc.c	Thu Jan  1 23:21:56 2004
+++ 290-gfp_node_strict/mm/page_alloc.c	Thu Jan  1 23:22:32 2004
@@ -566,6 +566,10 @@ __alloc_pages(unsigned int gfp_mask, uns
 		struct zone *z = zones[i];
 		unsigned long local_low;
 
+		if ((__GFP_NODE_STRICT & gfp_mask) &&
+		    (pfn_to_nid(z->zone_start_pfn) != numa_node_id()))
+			continue;
+
 		/*
 		 * This is the fabled 'incremental min'.  We let real-time tasks
 		 * dip their real-time paws a little deeper into reserves.
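
For illustration, a minimal caller-side sketch (not part of the patch): a
caller that must have memory on the local NUMA node ORs the new flag into
its gfp mask and handles failure itself instead of silently getting remote
pages.  __GFP_NODE_STRICT and the zone-skip test come from the patch above;
alloc_pages(), GFP_KERNEL, and numa_node_id() are the stock 2.6-era kernel
API; the helper function itself is hypothetical.

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Hypothetical helper: allocate one page strictly on the local node. */
	static struct page *alloc_local_page(void)
	{
		/*
		 * With __GFP_NODE_STRICT set, __alloc_pages() skips any zone
		 * whose start pfn maps to a node other than numa_node_id(),
		 * so this returns NULL rather than falling back to a remote
		 * node's memory.
		 */
		return alloc_pages(GFP_KERNEL | __GFP_NODE_STRICT, 0);
	}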