diff -urpN -X /home/fletch/.diff.exclude 675-pidmaps_nodepages/arch/i386/Kconfig 696-config_numasched/arch/i386/Kconfig
--- 675-pidmaps_nodepages/arch/i386/Kconfig	Sat May 10 19:21:30 2003
+++ 696-config_numasched/arch/i386/Kconfig	Sat May 10 19:46:39 2003
@@ -713,6 +713,11 @@ config NUMA
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT)
 
+config NUMA_SCHED
+	bool "Numa Scheduling Support"
+	depends on NUMA
+	default y
+
 # Need comments to help the hapless user trying to turn on NUMA support
 comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
 	depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
diff -urpN -X /home/fletch/.diff.exclude 675-pidmaps_nodepages/include/linux/sched.h 696-config_numasched/include/linux/sched.h
--- 675-pidmaps_nodepages/include/linux/sched.h	Sat May 10 19:17:24 2003
+++ 696-config_numasched/include/linux/sched.h	Sat May 10 19:46:39 2003
@@ -496,7 +496,7 @@ extern void set_cpus_allowed(task_t *p,
 # define set_cpus_allowed(p, new_mask) do { } while (0)
 #endif
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 extern void sched_balance_exec(void);
 extern void node_nr_running_init(void);
 #else
diff -urpN -X /home/fletch/.diff.exclude 675-pidmaps_nodepages/kernel/sched.c 696-config_numasched/kernel/sched.c
--- 675-pidmaps_nodepages/kernel/sched.c	Sat May 10 19:17:24 2003
+++ 696-config_numasched/kernel/sched.c	Sat May 10 19:46:39 2003
@@ -33,7 +33,7 @@
 #include
 #include
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
 #else
 #define cpu_to_node_mask(cpu) (cpu_online_map)
@@ -182,7 +182,7 @@ struct runqueue {
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
 	int prev_cpu_load[NR_CPUS];
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 	atomic_t *node_nr_running;
 	int prev_node_load[MAX_NUMNODES];
 #endif
@@ -211,7 +211,7 @@ static struct runqueue runqueues[NR_CPUS
 # define task_running(rq, p)	((rq)->curr == (p))
 #endif
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 
 /*
  * Keep track of running tasks.
@@ -245,13 +245,13 @@ __init void node_nr_running_init(void)
 		cpu_rq(i)->node_nr_running = &node_nr_running[cpu_to_node(i)];
 }
 
-#else /* !CONFIG_NUMA */
+#else /* !CONFIG_NUMA_SCHED */
 
 # define nr_running_init(rq)	do { } while (0)
 # define nr_running_inc(rq)	do { (rq)->nr_running++; } while (0)
 # define nr_running_dec(rq)	do { (rq)->nr_running--; } while (0)
 
-#endif /* CONFIG_NUMA */
+#endif /* CONFIG_NUMA_SCHED */
 
 struct schedstat {
@@ -976,7 +976,7 @@ static inline void double_rq_unlock(runq
 	spin_unlock(&rq2->lock);
 }
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -1104,7 +1104,7 @@ static int find_busiest_node(int this_no
 	return node;
 }
 
-#endif /* CONFIG_NUMA */
+#endif /* CONFIG_NUMA_SCHED */
 
 int idle_node_rebalance_ratio = 10;
 int busy_node_rebalance_ratio = 2;
@@ -1349,7 +1349,7 @@ out:
 #define IDLE_NODE_REBALANCE_TICK (IDLE_REBALANCE_TICK * idle_node_rebalance_ratio)
 #define BUSY_NODE_REBALANCE_TICK (BUSY_REBALANCE_TICK * busy_node_rebalance_ratio)
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
 {
 	int node = find_busiest_node(cpu_to_node(this_cpu));
@@ -1382,7 +1382,7 @@ static void rebalance_tick(runqueue_t *t
 	 * are not balanced.)
 	 */
 	if (idle) {
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 		if (!(j % IDLE_NODE_REBALANCE_TICK))
 			balance_node(this_rq, idle, this_cpu);
 #endif
@@ -1394,7 +1394,7 @@ static void rebalance_tick(runqueue_t *t
 		}
 		return;
 	}
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 	if (!(j % BUSY_NODE_REBALANCE_TICK))
 		balance_node(this_rq, idle, this_cpu);
 #endif