diff -urpN -X /home/fletch/.diff.exclude 640-per_node_idt/arch/i386/Kconfig 696-config_numasched/arch/i386/Kconfig
--- 640-per_node_idt/arch/i386/Kconfig	Wed Aug 13 20:47:30 2003
+++ 696-config_numasched/arch/i386/Kconfig	Wed Aug 13 20:48:52 2003
@@ -735,6 +735,11 @@ config NUMA
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT)
 
+config NUMA_SCHED
+	bool "Numa Scheduling Support"
+	depends on NUMA
+	default y
+
 # Need comments to help the hapless user trying to turn on NUMA support
 comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
 	depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
diff -urpN -X /home/fletch/.diff.exclude 640-per_node_idt/include/linux/sched.h 696-config_numasched/include/linux/sched.h
--- 640-per_node_idt/include/linux/sched.h	Wed Aug 13 20:47:27 2003
+++ 696-config_numasched/include/linux/sched.h	Wed Aug 13 20:48:52 2003
@@ -515,7 +515,7 @@ static inline int set_cpus_allowed(task_
 }
 #endif
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 extern void sched_balance_exec(void);
 extern void node_nr_running_init(void);
 #else
diff -urpN -X /home/fletch/.diff.exclude 640-per_node_idt/kernel/sched.c 696-config_numasched/kernel/sched.c
--- 640-per_node_idt/kernel/sched.c	Wed Aug 13 20:47:27 2003
+++ 696-config_numasched/kernel/sched.c	Wed Aug 13 20:48:52 2003
@@ -35,7 +35,7 @@
 #include
 #include
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
 #else
 #define cpu_to_node_mask(cpu) (cpu_online_map)
@@ -181,7 +181,7 @@ struct runqueue {
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
 	int prev_cpu_load[NR_CPUS];
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 	atomic_t *node_nr_running;
 	int prev_node_load[MAX_NUMNODES];
 #endif
@@ -210,7 +210,7 @@ static DEFINE_PER_CPU(struct runqueue, r
 # define task_running(rq, p)		((rq)->curr == (p))
 #endif
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 
 /*
  * Keep track of running tasks.
@@ -247,13 +247,13 @@ __init void node_nr_running_init(void)
 	}
 }
 
-#else /* !CONFIG_NUMA */
+#else /* !CONFIG_NUMA_SCHED */
 
 # define nr_running_init(rq)	do { } while (0)
 # define nr_running_inc(rq)	do { (rq)->nr_running++; } while (0)
 # define nr_running_dec(rq)	do { (rq)->nr_running--; } while (0)
 
-#endif /* CONFIG_NUMA */
+#endif /* CONFIG_NUMA_SCHED */
 
 /*
  * task_rq_lock - lock the runqueue a given task resides on and disable
@@ -846,7 +846,7 @@ static inline void double_rq_unlock(runq
 	spin_unlock(&rq2->lock);
 }
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -990,7 +990,7 @@ static int find_busiest_node(int this_no
 	return node;
 }
 
-#endif /* CONFIG_NUMA */
+#endif /* CONFIG_NUMA_SCHED */
 
 int idle_node_rebalance_ratio = 10;
 int busy_node_rebalance_ratio = 2;
@@ -1225,7 +1225,7 @@ out:
#define IDLE_NODE_REBALANCE_TICK (IDLE_REBALANCE_TICK * idle_node_rebalance_ratio)
#define BUSY_NODE_REBALANCE_TICK (BUSY_REBALANCE_TICK * busy_node_rebalance_ratio)
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
 {
 	int node = find_busiest_node(cpu_to_node(this_cpu));
@@ -1256,7 +1256,7 @@ static void rebalance_tick(runqueue_t *t
 	 * are not balanced.)
 	 */
 	if (idle) {
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 		if (!(j % IDLE_NODE_REBALANCE_TICK))
 			balance_node(this_rq, idle, this_cpu);
 #endif
@@ -1267,7 +1267,7 @@ static void rebalance_tick(runqueue_t *t
 		}
 		return;
 	}
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 	if (!(j % BUSY_NODE_REBALANCE_TICK))
 		balance_node(this_rq, idle, this_cpu);
 #endif
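
For reference, the CONFIG_NUMA_SCHED side of the nr_running_* helpers that the
#else branch above stubs out looks roughly like the sketch below. This is not
part of the patch; it is reconstructed from the context the diff exposes (the
atomic_t *node_nr_running runqueue field, the node_nr_running_init() hunk, and
the non-NUMA fallback macros), so treat the exact declarations as approximate:

	/*
	 * One running-task counter per node; each counter sits on its own
	 * cacheline so updates on one node do not bounce lines to others.
	 */
	static atomic_t node_nr_running[MAX_NUMNODES] ____cacheline_maxaligned_in_smp =
		{[0 ... MAX_NUMNODES-1] = ATOMIC_INIT(0)};

	static inline void nr_running_inc(runqueue_t *rq)
	{
		/* account the task both per-node and per-runqueue */
		atomic_inc(rq->node_nr_running);
		rq->nr_running++;
	}

	static inline void nr_running_dec(runqueue_t *rq)
	{
		atomic_dec(rq->node_nr_running);
		rq->nr_running--;
	}

find_busiest_node() can then weigh these per-node counts (against the
prev_node_load[] history in struct runqueue) to pick a node to steal from,
and balance_node() runs that search every IDLE_NODE_REBALANCE_TICK or
BUSY_NODE_REBALANCE_TICK ticks. The apparent point of splitting
CONFIG_NUMA_SCHED out from CONFIG_NUMA, with depends on NUMA and default y,
is to let a NUMA kernel be built with the node-aware balancing compiled out,
so the two scheduler behaviours can be compared on the same hardware.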