From: Zwane Mwaikambo

for_each_cpu walks through all processors in cpu_possible_map, which is
defined as cpu_callout_map on i386 and isn't initialised until all
processors have been booted.  This breaks anything that does for_each_cpu
iterations early during boot.  So, define cpu_possible_map as a real bitmap
with NR_CPUS bits and populate it explicitly.  This was triggered by a patch
I'm working on which does alloc_percpu before bringing up secondary
processors.

Signed-off-by: Zwane Mwaikambo
Signed-off-by: Andrew Morton
---

 arch/i386/kernel/mpparse.c           |    8 +++++++-
 arch/i386/kernel/smpboot.c           |    1 +
 arch/i386/mach-voyager/voyager_smp.c |    1 +
 include/asm-i386/smp.h               |    2 +-
 4 files changed, 10 insertions(+), 2 deletions(-)

diff -puN arch/i386/kernel/mpparse.c~i386-boottime-for_each_cpu-broken arch/i386/kernel/mpparse.c
--- 25/arch/i386/kernel/mpparse.c~i386-boottime-for_each_cpu-broken	Wed Aug 17 13:34:52 2005
+++ 25-akpm/arch/i386/kernel/mpparse.c	Wed Aug 17 13:34:52 2005
@@ -122,7 +122,7 @@ static int MP_valid_apicid(int apicid, i
 
 static void __init MP_processor_info (struct mpc_config_processor *m)
 {
-	int ver, apicid;
+	int ver, apicid, cpu, found_bsp = 0;
 	physid_mask_t tmp;
 
 	if (!(m->mpc_cpuflag & CPU_ENABLED))
@@ -181,6 +181,7 @@ static void __init MP_processor_info (st
 	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
 		Dprintk("    Bootup CPU\n");
 		boot_cpu_physical_apicid = m->mpc_apicid;
+		found_bsp = 1;
 	}
 
 	if (num_processors >= NR_CPUS) {
@@ -204,6 +205,11 @@ static void __init MP_processor_info (st
 		return;
 	}
 
+	if (found_bsp)
+		cpu = 0;
+	else
+		cpu = num_processors - 1;
+	cpu_set(cpu, cpu_possible_map);
 	tmp = apicid_to_cpu_present(apicid);
 	physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
 
diff -puN arch/i386/kernel/smpboot.c~i386-boottime-for_each_cpu-broken arch/i386/kernel/smpboot.c
--- 25/arch/i386/kernel/smpboot.c~i386-boottime-for_each_cpu-broken	Wed Aug 17 13:34:52 2005
+++ 25-akpm/arch/i386/kernel/smpboot.c	Wed Aug 17 13:34:52 2005
@@ -87,6 +87,7 @@ EXPORT_SYMBOL(cpu_online_map);
 
 cpumask_t cpu_callin_map;
 cpumask_t cpu_callout_map;
+cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_callout_map);
 static cpumask_t smp_commenced_mask;
 
diff -puN arch/i386/mach-voyager/voyager_smp.c~i386-boottime-for_each_cpu-broken arch/i386/mach-voyager/voyager_smp.c
--- 25/arch/i386/mach-voyager/voyager_smp.c~i386-boottime-for_each_cpu-broken	Wed Aug 17 13:34:52 2005
+++ 25-akpm/arch/i386/mach-voyager/voyager_smp.c	Wed Aug 17 13:34:52 2005
@@ -241,6 +241,7 @@ static cpumask_t smp_commenced_mask = CP
 /* This is for the new dynamic CPU boot code */
 cpumask_t cpu_callin_map = CPU_MASK_NONE;
 cpumask_t cpu_callout_map = CPU_MASK_NONE;
+cpumask_t cpu_possible_map = CPU_MASK_ALL;
 EXPORT_SYMBOL(cpu_callout_map);
 
 /* The per processor IRQ masks (these are usually kept in sync) */
diff -puN include/asm-i386/smp.h~i386-boottime-for_each_cpu-broken include/asm-i386/smp.h
--- 25/include/asm-i386/smp.h~i386-boottime-for_each_cpu-broken	Wed Aug 17 13:34:52 2005
+++ 25-akpm/include/asm-i386/smp.h	Wed Aug 17 13:34:52 2005
@@ -59,7 +59,7 @@ extern void cpu_uninit(void);
 
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_callin_map;
-#define cpu_possible_map cpu_callout_map
+extern cpumask_t cpu_possible_map;
 
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
_
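
For illustration only (not part of the patch), here is a minimal sketch of the
kind of early-boot user the changelog describes: an alloc_percpu call made
before the secondary processors are brought up.  The function name
early_percpu_setup and the counters variable are hypothetical; the point is
that with the old #define, the for_each_cpu() loop walks an effectively empty
cpu_possible_map at this stage and silently skips every CPU.

/*
 * Sketch only -- illustrates the failure mode, not code from the patch.
 * In this era, for_each_cpu(cpu) iterates cpu_possible_map, so if that
 * map is still empty (because it merely aliases cpu_callout_map), the
 * loop body never runs and the per-CPU data is left uninitialised.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

static long *counters;			/* hypothetical per-CPU object */

static int __init early_percpu_setup(void)
{
	int cpu;

	counters = alloc_percpu(long);	/* sized for all possible CPUs */
	if (!counters)
		return -ENOMEM;

	for_each_cpu(cpu)		/* walks cpu_possible_map */
		*per_cpu_ptr(counters, cpu) = 0;

	return 0;
}

With cpu_possible_map populated from the MP table (or set to CPU_MASK_ALL on
Voyager), the loop above touches every possible CPU even when it runs before
smp_boot_cpus().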