diff options
author | Andrew Morton <akpm@osdl.org> | 2004-05-09 23:24:38 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-05-09 23:24:38 -0700 |
commit | e18e19ade9a0c0334cf8a2bc1945d97ec1697061 (patch) | |
tree | 57afc4fa73702ed5d66fb8197f8668e65de28e8a /kernel | |
parent | 7a1dc0ea9e84be7175d007b19ae9d8caab13c2e5 (diff) | |
download | history-e18e19ade9a0c0334cf8a2bc1945d97ec1697061.tar.gz |
[PATCH] sched: implement domains for i386 HT
From: Nick Piggin <piggin@cyberone.com.au>
The following patch builds a scheduling description for the i386
architecture using cpu_sibling_map to set up SMT if CONFIG_SCHED_SMT is
set.
It could be made fancier and collapse degenerate domains at runtime (i.e.
1 sibling per CPU, or 1 NUMA node in the computer).
From: Zwane Mwaikambo <zwane@arm.linux.org.uk>
This fixes an oops due to cpu_sibling_map being uninitialised when a
system with no MP table (most UP boxen) boots a CONFIG_SMT kernel. What
also happens is that the cpu_group lists end up not being terminated
properly, but this oops kills it first. Patch tested on UP w/o MP table,
2x P2 and UP Xeon w/ no siblings.
From: "Martin J. Bligh" <mbligh@aracnet.com>,
Nick Piggin <piggin@cyberone.com.au>
Change arch_init_sched_domains to use cpu_online_map
From: Anton Blanchard <anton@samba.org>
Fix build with NR_CPUS > BITS_PER_LONG
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched.c | 35 |
1 file changed, 10 insertions(+), 25 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4b9db0bff5bf9..7293c40707ec8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3295,28 +3295,20 @@ DEFINE_PER_CPU(struct sched_domain, node_domains);
 static void __init arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_node = NULL, *last_node = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		int node = cpu_to_node(i);
 		cpumask_t nodemask = node_to_cpumask(node);
 		struct sched_domain *node_domain = &per_cpu(node_domains, i);
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 
 		*node_domain = SD_NODE_INIT;
-		node_domain->span = all_cpus;
+		node_domain->span = cpu_online_map;
 
 		*cpu_domain = SD_CPU_INIT;
-		cpus_and(cpu_domain->span, nodemask, all_cpus);
+		cpus_and(cpu_domain->span, nodemask, cpu_online_map);
 		cpu_domain->parent = node_domain;
 	}
 
@@ -3326,8 +3318,9 @@ static void __init arch_init_sched_domains(void)
 		int j;
 		cpumask_t nodemask;
 		struct sched_group *node = &sched_group_nodes[i];
+		cpumask_t tmp = node_to_cpumask(i);
 
-		cpus_and(nodemask, node_to_cpumask(i), all_cpus);
+		cpus_and(nodemask, tmp, cpu_online_map);
 		if (cpus_empty(nodemask))
 			continue;
 
@@ -3357,7 +3350,7 @@ static void __init arch_init_sched_domains(void)
 	last_node->next = first_node;
 
 	mb();
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *node_domain = &per_cpu(node_domains, i);
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		node_domain->groups = &sched_group_nodes[cpu_to_node(i)];
@@ -3369,26 +3362,18 @@ static void __init arch_init_sched_domains(void)
 static void __init arch_init_sched_domains(void)
 {
 	int i;
-	cpumask_t all_cpus = CPU_MASK_NONE;
 	struct sched_group *first_cpu = NULL, *last_cpu = NULL;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
-
-		cpu_set(i, all_cpus);
-	}
-
 	/* Set up domains */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		*cpu_domain = SD_CPU_INIT;
-		cpu_domain->span = all_cpus;
+		cpu_domain->span = cpu_online_map;
 	}
 
 	/* Set up CPU groups */
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_group *cpu = &sched_group_cpus[i];
 
 		cpus_clear(cpu->cpumask);
@@ -3403,7 +3388,7 @@ static void __init arch_init_sched_domains(void)
 	last_cpu->next = first_cpu;
 
 	mb();
-	for_each_cpu_mask(i, all_cpus) {
+	for_each_cpu_mask(i, cpu_online_map) {
 		struct sched_domain *cpu_domain = cpu_sched_domain(i);
 		cpu_domain->groups = &sched_group_cpus[i];
 	}