From: Darren Hart The current default implementation of arch_init_sched_domains constructs either a flat or two level topology. The two level topology is built if CONFIG_NUMA is set. It seems that CONFIG_NUMA is not the appropriate flag to use for constructing a two level topology since some architectures which define CONFIG_NUMA would be better served with a flat topology. x86_64 for example will construct a two level topology with one CPU per node, causing performance problems because balancing within nodes is pointless and balancing across nodes doesn't occur as often. This patch introduces a new CONFIG_SCHED_NUMA flag and uses it to decide between a flat or two level topology of sched_domains. The patch is minimally invasive as it primarily modifies Kconfig files and sets the appropriate default (off for x86_64, on for everything that used to export CONFIG_NUMA) and should only change the sched_domains topology constructed on x86_64 systems. I have verified this on a 4 node x86 NUMAQ, but need someone to test x86_64. This patch is intended as a quick fix for the x86_64 problem, and doesn't solve the problem of how to build generic sched domain topologies. We can certainly conceive of various topologies for x86 systems, so even arch specific topologies may not be sufficient. Would sub-arch (i.e. NUMAQ) be the right way to handle different topologies, or will we be able to autodiscover the appropriate topology? I will be looking into this more, but thought some might benefit from an immediate x86_64 fix. I am very interested in hearing your ideas on this. diff -upN reference/arch/alpha/Kconfig current/arch/alpha/Kconfig --- reference/arch/alpha/Kconfig 2004-04-07 14:53:52.000000000 -0700 +++ current/arch/alpha/Kconfig 2004-04-09 21:53:37.000000000 -0700 @@ -519,6 +519,14 @@ config NUMA Access). This option is for configuring high-end multiprocessor server machines. If in doubt, say N. 
+config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. + # LARGE_VMALLOC is racy, if you *really* need it then fix it first config ALPHA_LARGE_VMALLOC bool diff -upN reference/arch/i386/Kconfig current/arch/i386/Kconfig --- reference/arch/i386/Kconfig 2004-04-09 21:53:32.000000000 -0700 +++ current/arch/i386/Kconfig 2004-04-09 21:53:37.000000000 -0700 @@ -772,6 +772,14 @@ config NUMA default n if X86_PC default y if (X86_NUMAQ || X86_SUMMIT) +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. + # Need comments to help the hapless user trying to turn on NUMA support comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support" depends on X86_NUMAQ && (!HIGHMEM64G || !SMP) diff -upN reference/arch/ia64/Kconfig current/arch/ia64/Kconfig --- reference/arch/ia64/Kconfig 2004-04-09 21:53:23.000000000 -0700 +++ current/arch/ia64/Kconfig 2004-04-09 21:53:37.000000000 -0700 @@ -172,6 +172,14 @@ config NUMA Access). This option is for configuring high-end multiprocessor server systems. If in doubt, say N. +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. + config VIRTUAL_MEM_MAP bool "Virtual mem map" default y if !IA64_HP_SIM diff -upN reference/arch/mips/Kconfig current/arch/mips/Kconfig --- reference/arch/mips/Kconfig 2004-04-07 14:53:58.000000000 -0700 +++ current/arch/mips/Kconfig 2004-04-09 21:53:37.000000000 -0700 @@ -337,6 +337,14 @@ config NUMA Access). This option is for configuring high-end multiprocessor server machines. If in doubt, say N. +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. 
+ config MAPPED_KERNEL bool "Mapped kernel support" depends on SGI_IP27 diff -upN reference/arch/ppc64/Kconfig current/arch/ppc64/Kconfig --- reference/arch/ppc64/Kconfig 2004-04-09 21:53:32.000000000 -0700 +++ current/arch/ppc64/Kconfig 2004-04-09 21:53:37.000000000 -0700 @@ -173,6 +173,14 @@ config NUMA bool "NUMA support" depends on DISCONTIGMEM +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default y + help + Enable two level sched domains hierarchy. + Say Y if unsure. + config SCHED_SMT bool "SMT (Hyperthreading) scheduler support" depends on SMP diff -upN reference/arch/x86_64/Kconfig current/arch/x86_64/Kconfig --- reference/arch/x86_64/Kconfig 2004-04-09 21:53:32.000000000 -0700 +++ current/arch/x86_64/Kconfig 2004-04-09 21:53:37.000000000 -0700 @@ -261,6 +261,14 @@ config NUMA depends on K8_NUMA default y +config SCHED_NUMA + bool "Two level sched domains" + depends on NUMA + default n + help + Enable two level sched domains hierarchy. + Say N if unsure. 
+ config HAVE_DEC_LOCK bool depends on SMP diff -upN reference/include/linux/sched.h current/include/linux/sched.h --- reference/include/linux/sched.h 2004-04-09 21:53:32.000000000 -0700 +++ current/include/linux/sched.h 2004-04-09 21:53:37.000000000 -0700 @@ -660,7 +660,7 @@ struct sched_domain { .nr_balance_failed = 0, \ } -#ifdef CONFIG_NUMA +#ifdef CONFIG_SCHED_NUMA /* Common values for NUMA nodes */ #define SD_NODE_INIT (struct sched_domain) { \ .span = CPU_MASK_NONE, \ diff -upN reference/kernel/sched.c current/kernel/sched.c --- reference/kernel/sched.c 2004-04-09 21:53:32.000000000 -0700 +++ current/kernel/sched.c 2004-04-09 21:53:37.000000000 -0700 @@ -44,7 +44,7 @@ #include #include -#ifdef CONFIG_NUMA +#ifdef CONFIG_SCHED_NUMA #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu)) #else #define cpu_to_node_mask(cpu) (cpu_online_map) @@ -3758,7 +3758,7 @@ extern void __init arch_init_sched_domai #else static struct sched_group sched_group_cpus[NR_CPUS]; static DEFINE_PER_CPU(struct sched_domain, cpu_domains); -#ifdef CONFIG_NUMA +#ifdef CONFIG_SCHED_NUMA static struct sched_group sched_group_nodes[MAX_NUMNODES]; static DEFINE_PER_CPU(struct sched_domain, node_domains); static void __init arch_init_sched_domains(void) @@ -3829,7 +3829,7 @@ static void __init arch_init_sched_domai } } -#else /* !CONFIG_NUMA */ +#else /* !CONFIG_SCHED_NUMA */ static void __init arch_init_sched_domains(void) { int i; @@ -3867,7 +3867,7 @@ static void __init arch_init_sched_domai } } -#endif /* CONFIG_NUMA */ +#endif /* CONFIG_SCHED_NUMA */ #endif /* ARCH_HAS_SCHED_DOMAIN */ #define SCHED_DOMAIN_DEBUG