diff -urpN -X /home/fletch/.diff.exclude 640-per_node_idt/arch/i386/Kconfig 696-config_numasched/arch/i386/Kconfig
--- 640-per_node_idt/arch/i386/Kconfig	Wed Jul  2 22:24:47 2003
+++ 696-config_numasched/arch/i386/Kconfig	Wed Jul  2 22:25:12 2003
@@ -740,6 +740,11 @@ config NUMA
 	default n if X86_PC
 	default y if (X86_NUMAQ || X86_SUMMIT)
 
+config NUMA_SCHED
+	bool "Numa Scheduling Support"
+	depends on NUMA
+	default y
+
 # Need comments to help the hapless user trying to turn on NUMA support
 comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
 	depends on X86_NUMAQ && (!HIGHMEM64G || !SMP)
diff -urpN -X /home/fletch/.diff.exclude 640-per_node_idt/include/linux/sched.h 696-config_numasched/include/linux/sched.h
--- 640-per_node_idt/include/linux/sched.h	Wed Jul  2 22:23:25 2003
+++ 696-config_numasched/include/linux/sched.h	Wed Jul  2 22:25:12 2003
@@ -509,7 +509,7 @@ static inline int set_cpus_allowed(task_
 }
 #endif
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 extern void sched_balance_exec(void);
 extern void node_nr_running_init(void);
 #else
diff -urpN -X /home/fletch/.diff.exclude 640-per_node_idt/kernel/sched.c 696-config_numasched/kernel/sched.c
--- 640-per_node_idt/kernel/sched.c	Wed Jul  2 22:24:31 2003
+++ 696-config_numasched/kernel/sched.c	Wed Jul  2 22:25:12 2003
@@ -35,7 +35,7 @@
 #include
 #include
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
 #else
 #define cpu_to_node_mask(cpu) (cpu_online_map)
@@ -181,7 +181,7 @@ struct runqueue {
 	struct mm_struct *prev_mm;
 	prio_array_t *active, *expired, arrays[2];
 	int prev_cpu_load[NR_CPUS];
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 	atomic_t *node_nr_running;
 	int prev_node_load[MAX_NUMNODES];
 #endif
@@ -210,7 +210,7 @@ static DEFINE_PER_CPU(struct runqueue, r
 # define task_running(rq, p)		((rq)->curr == (p))
 #endif
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 
 /*
  * Keep track of running tasks.
@@ -247,13 +247,13 @@ __init void node_nr_running_init(void)
 	}
 }
 
-#else /* !CONFIG_NUMA */
+#else /* !CONFIG_NUMA_SCHED */
 
 # define nr_running_init(rq)	do { } while (0)
 # define nr_running_inc(rq)	do { (rq)->nr_running++; } while (0)
 # define nr_running_dec(rq)	do { (rq)->nr_running--; } while (0)
 
-#endif /* CONFIG_NUMA */
+#endif /* CONFIG_NUMA_SCHED */
 
 struct schedstat {
@@ -950,7 +950,7 @@ static inline void double_rq_unlock(runq
 	spin_unlock(&rq2->lock);
 }
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -1094,7 +1094,7 @@ static int find_busiest_node(int this_no
 	return node;
 }
 
-#endif /* CONFIG_NUMA */
+#endif /* CONFIG_NUMA_SCHED */
 
 int idle_node_rebalance_ratio = 10;
 int busy_node_rebalance_ratio = 2;
@@ -1339,7 +1339,7 @@ out:
 #define IDLE_NODE_REBALANCE_TICK (IDLE_REBALANCE_TICK * idle_node_rebalance_ratio)
 #define BUSY_NODE_REBALANCE_TICK (BUSY_REBALANCE_TICK * busy_node_rebalance_ratio)
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
 {
 	int node = find_busiest_node(cpu_to_node(this_cpu));
@@ -1372,7 +1372,7 @@ static void rebalance_tick(runqueue_t *t
 	 * are not balanced.)
 	 */
 	if (idle) {
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 		if (!(j % IDLE_NODE_REBALANCE_TICK))
 			balance_node(this_rq, idle, this_cpu);
 #endif
@@ -1384,7 +1384,7 @@ static void rebalance_tick(runqueue_t *t
 		}
 		return;
 	}
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_NUMA_SCHED
 	if (!(j % BUSY_NODE_REBALANCE_TICK))
 		balance_node(this_rq, idle, this_cpu);
 #endif
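
For context, below is a rough userspace sketch (not the kernel code) of the accounting pattern the patch puts behind CONFIG_NUMA_SCHED: each runqueue keeps its own nr_running count and, when the option is enabled, also updates a shared per-node atomic counter; with the option off, the helpers collapse to plain per-runqueue counters exactly like the #else branch above. The extra node argument to nr_running_init, the fixed MAX_NUMNODES value, and the use of C11 atomics instead of the kernel's atomic_t are simplifications for illustration only.

/*
 * Illustrative sketch only -- NOT the kernel implementation.
 * Build with -DCONFIG_NUMA_SCHED for the per-node variant.
 */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_NUMNODES	4

struct runqueue {
	unsigned long nr_running;
#ifdef CONFIG_NUMA_SCHED
	atomic_int *node_nr_running;	/* counter shared by all CPUs on this node */
#endif
};

#ifdef CONFIG_NUMA_SCHED
static atomic_int node_nr_running[MAX_NUMNODES];

/* Point the runqueue at its node's shared counter. */
static void nr_running_init(struct runqueue *rq, int node)
{
	rq->node_nr_running = &node_nr_running[node];
}

static void nr_running_inc(struct runqueue *rq)
{
	atomic_fetch_add(rq->node_nr_running, 1);
	rq->nr_running++;
}

static void nr_running_dec(struct runqueue *rq)
{
	atomic_fetch_sub(rq->node_nr_running, 1);
	rq->nr_running--;
}
#else /* !CONFIG_NUMA_SCHED */
# define nr_running_init(rq, node)	do { } while (0)
# define nr_running_inc(rq)		do { (rq)->nr_running++; } while (0)
# define nr_running_dec(rq)		do { (rq)->nr_running--; } while (0)
#endif /* CONFIG_NUMA_SCHED */

int main(void)
{
	struct runqueue rq = { 0 };

	nr_running_init(&rq, 0);	/* pretend this CPU sits on node 0 */
	nr_running_inc(&rq);
	printf("rq.nr_running = %lu\n", rq.nr_running);
	nr_running_dec(&rq);
	return 0;
}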