From: "Martin J. Bligh" , Nick Piggin arch_init_sched_domains is using cpu_callout_map (via cpu_possible) instead of cpu_online_map. That's really only intended for cpu bootstrap, and won't work properly if we called out to a cpu, but it failed to respond. The normal way is to use cpu_online_map for this stuff, and it even cleans up the existing code a bit. (it's just a case of s/all_cpus/cpu_online_map/ and removing the loop that builds all_cpus). I tested this out on the NUMA-Q, and it works fine. --- arch/i386/kernel/smpboot.c | 38 +++++++++++--------------------------- kernel/sched.c | 34 +++++++++------------------------- 2 files changed, 20 insertions(+), 52 deletions(-) diff -puN arch/i386/kernel/smpboot.c~sched-arch_init_sched_domains-fix arch/i386/kernel/smpboot.c --- 25/arch/i386/kernel/smpboot.c~sched-arch_init_sched_domains-fix 2004-01-24 16:21:42.000000000 -0800 +++ 25-akpm/arch/i386/kernel/smpboot.c 2004-01-24 16:21:42.000000000 -0800 @@ -1132,18 +1132,10 @@ static DEFINE_PER_CPU(struct sched_domai __init void arch_init_sched_domains(void) { int i; - cpumask_t all_cpus = CPU_MASK_NONE; struct sched_group *first_cpu = NULL, *last_cpu = NULL; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - - cpu_set(i, all_cpus); - } - /* Set up domains */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); struct sched_domain *phys_domain = &per_cpu(phys_domains, i); struct sched_domain *node_domain = &per_cpu(node_domains, i); @@ -1158,11 +1150,11 @@ __init void arch_init_sched_domains(void phys_domain->flags |= SD_FLAG_IDLE; *node_domain = SD_NODE_INIT; - node_domain->span = all_cpus; + node_domain->span = cpu_online_map; } /* Set up CPU (sibling) groups */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); int j; first_cpu = last_cpu = NULL; @@ -1188,7 +1180,7 @@ __init void arch_init_sched_domains(void for (i = 0; i < MAX_NUMNODES; i++) { int j; cpumask_t nodemask; - cpus_and(nodemask, node_to_cpumask(i), all_cpus); + cpus_and(nodemask, node_to_cpumask(i), cpu_online_map); first_cpu = last_cpu = NULL; /* Set up physical groups */ @@ -1215,7 +1207,7 @@ __init void arch_init_sched_domains(void for (i = 0; i < MAX_NUMNODES; i++) { struct sched_group *cpu = &sched_group_nodes[i]; cpumask_t nodemask; - cpus_and(nodemask, node_to_cpumask(i), all_cpus); + cpus_and(nodemask, node_to_cpumask(i), cpu_online_map); if (cpus_empty(nodemask)) continue; @@ -1232,7 +1224,7 @@ __init void arch_init_sched_domains(void mb(); - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { int node = cpu_to_node(i); struct sched_domain *cpu_domain = cpu_sched_domain(i); struct sched_domain *phys_domain = &per_cpu(phys_domains, i); @@ -1256,18 +1248,10 @@ static DEFINE_PER_CPU(struct sched_domai __init void arch_init_sched_domains(void) { int i; - cpumask_t all_cpus = CPU_MASK_NONE; struct sched_group *first_cpu = NULL, *last_cpu = NULL; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - - cpu_set(i, all_cpus); - } - /* Set up domains */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); struct sched_domain *phys_domain = &per_cpu(phys_domains, i); @@ -1275,12 +1259,12 @@ __init void arch_init_sched_domains(void cpu_domain->span = cpu_sibling_map[i]; *phys_domain = SD_CPU_INIT; - phys_domain->span = all_cpus; + 
phys_domain->span = cpu_online_map; phys_domain->flags |= SD_FLAG_IDLE; } /* Set up CPU (sibling) groups */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); int j; first_cpu = last_cpu = NULL; @@ -1305,7 +1289,7 @@ __init void arch_init_sched_domains(void first_cpu = last_cpu = NULL; /* Set up physical groups */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); struct sched_group *cpu = &sched_group_phys[i]; @@ -1323,7 +1307,7 @@ __init void arch_init_sched_domains(void last_cpu->next = first_cpu; mb(); - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); struct sched_domain *phys_domain = &per_cpu(phys_domains, i); struct sched_group *cpu_group = &sched_group_cpus[i]; diff -puN kernel/sched.c~sched-arch_init_sched_domains-fix kernel/sched.c --- 25/kernel/sched.c~sched-arch_init_sched_domains-fix 2004-01-24 16:21:42.000000000 -0800 +++ 25-akpm/kernel/sched.c 2004-01-24 16:21:42.000000000 -0800 @@ -3235,28 +3235,20 @@ DEFINE_PER_CPU(struct sched_domain, node static void __init arch_init_sched_domains(void) { int i; - cpumask_t all_cpus = CPU_MASK_NONE; struct sched_group *first_node = NULL, *last_node = NULL; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - - cpu_set(i, all_cpus); - } - /* Set up domains */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { int node = cpu_to_node(i); cpumask_t nodemask = node_to_cpumask(node); struct sched_domain *node_domain = &per_cpu(node_domains, i); struct sched_domain *cpu_domain = cpu_sched_domain(i); *node_domain = SD_NODE_INIT; - node_domain->span = all_cpus; + node_domain->span = cpu_online_map; *cpu_domain = SD_CPU_INIT; - cpus_and(cpu_domain->span, nodemask, all_cpus); + cpus_and(cpu_domain->span, nodemask, cpu_online_map); cpu_domain->parent = node_domain; } @@ -3267,7 +3259,7 @@ static void __init arch_init_sched_domai cpumask_t nodemask; struct sched_group *node = &sched_group_nodes[i]; - cpus_and(nodemask, node_to_cpumask(i), all_cpus); + cpus_and(nodemask, node_to_cpumask(i), cpu_online_map); if (cpus_empty(nodemask)) continue; @@ -3301,7 +3293,7 @@ static void __init arch_init_sched_domai last_node->next = first_node; mb(); - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *node_domain = &per_cpu(node_domains, i); struct sched_domain *cpu_domain = cpu_sched_domain(i); node_domain->groups = &sched_group_nodes[cpu_to_node(i)]; @@ -3313,26 +3305,18 @@ static void __init arch_init_sched_domai static void __init arch_init_sched_domains(void) { int i; - cpumask_t all_cpus = CPU_MASK_NONE; struct sched_group *first_cpu = NULL, *last_cpu = NULL; - for (i = 0; i < NR_CPUS; i++) { - if (!cpu_possible(i)) - continue; - - cpu_set(i, all_cpus); - } - /* Set up domains */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); *cpu_domain = SD_CPU_INIT; - cpu_domain->span = all_cpus; + cpu_domain->span = cpu_online_map; } /* Set up CPU groups */ - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, cpu_online_map) { struct sched_group *cpu = &sched_group_cpus[i]; cpus_clear(cpu->cpumask); @@ -3347,7 +3331,7 @@ static void __init arch_init_sched_domai last_cpu->next = first_cpu; mb(); - for_each_cpu_mask(i, all_cpus) { + for_each_cpu_mask(i, 
cpu_online_map) { struct sched_domain *cpu_domain = cpu_sched_domain(i); cpu_domain->groups = &sched_group_cpus[i]; } _
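
For anyone skimming, here is a rough standalone sketch of the pattern change
(illustration only, not part of the patch and not kernel code: NR_CPUS is a
local define, and possible_mask, online_mask and for_each_set_cpu() are
made-up userspace stand-ins for the kernel's cpumask API).  The old code
copied the possible map into a local all_cpus mask and walked that; the new
code just walks the online map directly.

#include <stdio.h>

#define NR_CPUS 8

/* Made-up stand-ins for cpu_possible()/cpu_online_map. */
static unsigned long possible_mask = 0xff;	/* CPUs that may ever exist   */
static unsigned long online_mask   = 0x0f;	/* CPUs that actually came up */

/* Made-up stand-in for for_each_cpu_mask(). */
#define for_each_set_cpu(i, mask) \
	for ((i) = 0; (i) < NR_CPUS; (i)++) \
		if ((mask) & (1UL << (i)))

int main(void)
{
	int i;
	unsigned long all_cpus = 0;

	/*
	 * Old pattern: build a local copy of the possible map and walk it.
	 * A CPU that was called out to but never answered is still
	 * "possible", so domains wrongly get set up for it.
	 */
	for (i = 0; i < NR_CPUS; i++)
		if (possible_mask & (1UL << i))
			all_cpus |= 1UL << i;
	for_each_set_cpu(i, all_cpus)
		printf("old: setting up domain for cpu %d\n", i);

	/* New pattern: walk the online map directly, no local copy needed. */
	for_each_set_cpu(i, online_mask)
		printf("new: setting up domain for cpu %d\n", i);

	return 0;
}

The point of the change is the same in both arch and generic code: only CPUs
that actually came online should get scheduler domains, so there is no reason
to keep a separate all_cpus copy around.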