From: Paul Jackson

With a hotplug-capable kernel, we need to distinguish a possible CPU from
one that is actually present.  The set of possible CPU numbers does not
change during a single system boot, but the set of present CPUs changes as
CPUs are physically inserted into or removed from the system.  Accordingly,
cpu_possible_map does not change once initialized at boot, while the new
cpu_present_map changes dynamically as CPUs are inserted or removed.

---

 25-akpm/arch/ia64/kernel/smpboot.c |   22 +++++++++++++---------
 25-akpm/fs/buffer.c                |    2 +-
 25-akpm/fs/proc/proc_misc.c        |    4 ++--
 25-akpm/include/asm-ia64/smp.h     |    3 ---
 25-akpm/include/linux/cpumask.h    |   11 +++++++++++
 25-akpm/init/main.c                |   21 +++++++++++++++++++--
 25-akpm/kernel/cpu.c               |   10 +++++++++-
 25-akpm/kernel/fork.c              |    2 +-
 25-akpm/kernel/sched.c             |    6 +++---
 25-akpm/kernel/timer.c             |    2 +-
 10 files changed, 60 insertions(+), 23 deletions(-)

diff -puN arch/ia64/kernel/smpboot.c~ia64-cpu-hotplug-cpu_present-2 arch/ia64/kernel/smpboot.c
--- 25/arch/ia64/kernel/smpboot.c~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.354639992 -0700
+++ 25-akpm/arch/ia64/kernel/smpboot.c	2004-05-08 13:18:55.374636952 -0700
@@ -75,11 +75,11 @@ extern unsigned long ia64_iobase;
 
 task_t *task_for_booting_cpu;
 
-/* Bitmask of currently online CPUs */
+/* Bitmasks of currently online, and possible CPUs */
 cpumask_t cpu_online_map;
 EXPORT_SYMBOL(cpu_online_map);
-cpumask_t phys_cpu_present_map;
-EXPORT_SYMBOL(phys_cpu_present_map);
+cpumask_t cpu_possible_map;
+EXPORT_SYMBOL(cpu_possible_map);
 
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
@@ -99,6 +99,7 @@ static int __init
 nointroute (char *str)
 {
 	no_int_routing = 1;
+	printk ("no_int_routing on\n");
 	return 1;
 }
 
@@ -441,14 +442,15 @@ smp_build_cpu_map (void)
 		ia64_cpu_to_sapicid[cpu] = -1;
 
 	ia64_cpu_to_sapicid[0] = boot_cpu_id;
-	cpus_clear(phys_cpu_present_map);
-	cpu_set(0, phys_cpu_present_map);
-
+	cpus_clear(cpu_present_map);
+	cpu_set(0, cpu_present_map);
+	cpu_set(0, cpu_possible_map);
 	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
 		sapicid = smp_boot_data.cpu_phys_id[i];
 		if (sapicid == boot_cpu_id)
 			continue;
-		cpu_set(cpu, phys_cpu_present_map);
+		cpu_set(cpu, cpu_present_map);
+		cpu_set(cpu, cpu_possible_map);
 		ia64_cpu_to_sapicid[cpu] = sapicid;
 		cpu++;
 	}
@@ -529,9 +531,11 @@ smp_prepare_cpus (unsigned int max_cpus)
 	if (!max_cpus) {
 		printk(KERN_INFO "SMP mode deactivated.\n");
 		cpus_clear(cpu_online_map);
-		cpus_clear(phys_cpu_present_map);
+		cpus_clear(cpu_present_map);
+		cpus_clear(cpu_possible_map);
 		cpu_set(0, cpu_online_map);
-		cpu_set(0, phys_cpu_present_map);
+		cpu_set(0, cpu_present_map);
+		cpu_set(0, cpu_possible_map);
 		return;
 	}
 }
diff -puN fs/buffer.c~ia64-cpu-hotplug-cpu_present-2 fs/buffer.c
--- 25/fs/buffer.c~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.356639688 -0700
+++ 25-akpm/fs/buffer.c	2004-05-08 13:18:55.376636648 -0700
@@ -2966,7 +2966,7 @@ static void recalc_bh_state(void)
 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
 		return;
 	__get_cpu_var(bh_accounting).ratelimit = 0;
-	for_each_cpu(i)
+	for_each_online_cpu(i)
 		tot += per_cpu(bh_accounting, i).nr;
 	buffer_heads_over_limit = (tot > max_buffer_heads);
 }
diff -puN fs/proc/proc_misc.c~ia64-cpu-hotplug-cpu_present-2 fs/proc/proc_misc.c
--- 25/fs/proc/proc_misc.c~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.359639232 -0700
+++ 25-akpm/fs/proc/proc_misc.c	2004-05-08 13:18:55.377636496 -0700
@@ -371,7 +371,7 @@ int show_stat(struct seq_file *p, void *
 	if (wall_to_monotonic.tv_nsec)
 		--jif;
 
-	for_each_cpu(i) {
+	for_each_online_cpu(i) {
 		int j;
 
 		user += kstat_cpu(i).cpustat.user;
@@ -393,7 +393,7 @@ int show_stat(struct seq_file *p, void *
 		(unsigned long long)jiffies_64_to_clock_t(iowait),
 		(unsigned long long)jiffies_64_to_clock_t(irq),
 		(unsigned long long)jiffies_64_to_clock_t(softirq));
-	for_each_cpu(i) {
+	for_each_online_cpu(i) {
 
 		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
 		user = kstat_cpu(i).cpustat.user;
diff -puN include/asm-ia64/smp.h~ia64-cpu-hotplug-cpu_present-2 include/asm-ia64/smp.h
--- 25/include/asm-ia64/smp.h~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.361638928 -0700
+++ 25-akpm/include/asm-ia64/smp.h	2004-05-08 13:18:55.378636344 -0700
@@ -38,7 +38,6 @@ extern struct smp_boot_data {
 
 extern char no_int_routing __devinitdata;
 
-extern cpumask_t phys_cpu_present_map;
 extern cpumask_t cpu_online_map;
 extern unsigned long ipi_base_addr;
 extern unsigned char smp_int_redirect;
@@ -48,8 +47,6 @@ extern volatile int ia64_cpu_to_sapicid[
 
 extern unsigned long ap_wakeup_vector;
 
-#define cpu_possible_map phys_cpu_present_map
-
 /*
  * Function to map hard smp processor id to logical id.  Slow, so don't use this in
  * performance-critical code.
diff -puN include/linux/cpumask.h~ia64-cpu-hotplug-cpu_present-2 include/linux/cpumask.h
--- 25/include/linux/cpumask.h~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.362638776 -0700
+++ 25-akpm/include/linux/cpumask.h	2004-05-08 13:18:55.378636344 -0700
@@ -10,11 +10,15 @@
 
 extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_possible_map;
+extern cpumask_t cpu_present_map;
 
 #define num_online_cpus()	cpus_weight(cpu_online_map)
 #define num_possible_cpus()	cpus_weight(cpu_possible_map)
+#define num_present_cpus()	cpus_weight(cpu_present_map)
+
 #define cpu_online(cpu)		cpu_isset(cpu, cpu_online_map)
 #define cpu_possible(cpu)	cpu_isset(cpu, cpu_possible_map)
+#define cpu_present(cpu)	cpu_isset(cpu, cpu_present_map)
 
 #define for_each_cpu_mask(cpu, mask)					\
 	for (cpu = first_cpu_const(mk_cpumask_const(mask));		\
@@ -23,16 +27,23 @@ extern cpumask_t cpu_possible_map;
 
 #define for_each_cpu(cpu) for_each_cpu_mask(cpu, cpu_possible_map)
 #define for_each_online_cpu(cpu) for_each_cpu_mask(cpu, cpu_online_map)
+#define for_each_present_cpu(cpu) for_each_cpu_mask(cpu, cpu_present_map)
 
 #else
 #define cpu_online_map		cpumask_of_cpu(0)
 #define cpu_possible_map	cpumask_of_cpu(0)
+#define cpu_present_map		cpumask_of_cpu(0)
+
 #define num_online_cpus()	1
 #define num_possible_cpus()	1
+#define num_present_cpus()	1
+
 #define cpu_online(cpu)		({ BUG_ON((cpu) != 0); 1; })
 #define cpu_possible(cpu)	({ BUG_ON((cpu) != 0); 1; })
+#define cpu_present(cpu)	({ BUG_ON((cpu) != 0); 1; })
 #define for_each_cpu(cpu) for (cpu = 0; cpu < 1; cpu++)
 #define for_each_online_cpu(cpu) for (cpu = 0; cpu < 1; cpu++)
+#define for_each_present_cpu(cpu) for (cpu = 0; cpu < 1; cpu++)
 #endif
 
 #define cpumask_scnprintf(buf, buflen, map)				\
diff -puN init/main.c~ia64-cpu-hotplug-cpu_present-2 init/main.c
--- 25/init/main.c~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.364638472 -0700
+++ 25-akpm/init/main.c	2004-05-08 13:18:55.379636192 -0700
@@ -360,10 +360,10 @@ static void __init smp_init(void)
 	unsigned j = 1;
 
 	/* FIXME: This should be done in userspace --RR */
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_present_cpu(i) {
 		if (num_online_cpus() >= max_cpus)
 			break;
-		if (cpu_possible(i) && !cpu_online(i)) {
+		if (!cpu_online(i)) {
 			cpu_up(i);
 			j++;
 		}
@@ -621,6 +621,22 @@ static void run_init_process(char *init_
 	execve(init_filename, argv_init, envp_init);
 }
 
+static void fixup_cpu_present_map(void)
+{
+	int i;
+
+	/*
+	 * If arch is not hotplug ready and did not populate
+	 * cpu_present_map, just make cpu_present_map same as cpu_possible_map
+	 * for other cpu bringup code to function as normal. e.g smp_init() etc.
+	 */
+	if (cpus_empty(cpu_present_map)) {
+		for_each_cpu(i) {
+			cpu_set(i, cpu_present_map);
+		}
+	}
+}
+
 static int init(void * unused)
 {
 	lock_kernel();
@@ -639,6 +655,7 @@ static int init(void * unused)
 
 	do_pre_smp_initcalls();
 
+	fixup_cpu_present_map();
 	smp_init();
 	sched_init_smp();
 
diff -puN kernel/cpu.c~ia64-cpu-hotplug-cpu_present-2 kernel/cpu.c
--- 25/kernel/cpu.c~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.366638168 -0700
+++ 25-akpm/kernel/cpu.c	2004-05-08 13:18:55.380636040 -0700
@@ -20,6 +20,14 @@ DECLARE_MUTEX(cpucontrol);
 
 static struct notifier_block *cpu_chain;
 
+/*
+ * Represents all cpu's present in the system
+ * In systems capable of hotplug, this map could dynamically grow
+ * as new cpu's are detected in the system via any platform specific
+ * method, such as ACPI for e.g.
+ */
+cpumask_t cpu_present_map;
+EXPORT_SYMBOL(cpu_present_map);
 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
@@ -180,7 +188,7 @@ int __devinit cpu_up(unsigned int cpu)
 	if ((ret = down_interruptible(&cpucontrol)) != 0)
 		return ret;
 
-	if (cpu_online(cpu)) {
+	if (cpu_online(cpu) || !cpu_present(cpu)) {
 		ret = -EINVAL;
 		goto out;
 	}
diff -puN kernel/fork.c~ia64-cpu-hotplug-cpu_present-2 kernel/fork.c
--- 25/kernel/fork.c~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.367638016 -0700
+++ 25-akpm/kernel/fork.c	2004-05-08 13:18:55.381635888 -0700
@@ -63,7 +63,7 @@ int nr_processes(void)
 	int cpu;
 	int total = 0;
 
-	for_each_cpu(cpu)
+	for_each_online_cpu(cpu)
 		total += per_cpu(process_counts, cpu);
 
 	return total;
diff -puN kernel/sched.c~ia64-cpu-hotplug-cpu_present-2 kernel/sched.c
--- 25/kernel/sched.c~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.369637712 -0700
+++ 25-akpm/kernel/sched.c	2004-05-08 13:18:55.384635432 -0700
@@ -1219,7 +1219,7 @@ unsigned long nr_uninterruptible(void)
 {
 	unsigned long i, sum = 0;
 
-	for_each_cpu(i)
+	for_each_online_cpu(i)
 		sum += cpu_rq(i)->nr_uninterruptible;
 
 	return sum;
@@ -1229,7 +1229,7 @@ unsigned long long nr_context_switches(v
 {
 	unsigned long long i, sum = 0;
 
-	for_each_cpu(i)
+	for_each_online_cpu(i)
 		sum += cpu_rq(i)->nr_switches;
 
 	return sum;
@@ -1239,7 +1239,7 @@ unsigned long nr_iowait(void)
 {
 	unsigned long i, sum = 0;
 
-	for_each_cpu(i)
+	for_each_online_cpu(i)
 		sum += atomic_read(&cpu_rq(i)->nr_iowait);
 
 	return sum;
diff -puN kernel/timer.c~ia64-cpu-hotplug-cpu_present-2 kernel/timer.c
--- 25/kernel/timer.c~ia64-cpu-hotplug-cpu_present-2	2004-05-08 13:18:55.370637560 -0700
+++ 25-akpm/kernel/timer.c	2004-05-08 13:18:55.386635128 -0700
@@ -332,7 +332,7 @@ int del_timer_sync(struct timer_list *ti
 del_again:
 	ret += del_timer(timer);
 
-	for_each_cpu(i) {
+	for_each_online_cpu(i) {
 		base = &per_cpu(tvec_bases, i);
 		if (base->running_timer == timer) {
 			while (base->running_timer == timer) {
_
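
For readers new to the three masks, below is an illustrative sketch (not part
of the patch) of how callers are expected to choose among them after this
change.  The function show_cpu_map_usage() and the demo_counter per-cpu
variable are made up for the example; only the cpumask macros themselves come
from include/linux/cpumask.h as modified above.

	#include <linux/kernel.h>
	#include <linux/cpumask.h>
	#include <linux/percpu.h>

	/* hypothetical per-cpu data, for illustration only */
	static DEFINE_PER_CPU(unsigned long, demo_counter);

	static void show_cpu_map_usage(void)
	{
		unsigned long total = 0;
		int cpu;

		/*
		 * cpu_possible_map is fixed at boot: initialize per-cpu
		 * state for every cpu that could ever exist in this boot.
		 */
		for_each_cpu(cpu)
			per_cpu(demo_counter, cpu) = 0;

		/*
		 * cpu_present_map tracks cpus physically in the system;
		 * bringup code such as smp_init() walks this and calls
		 * cpu_up() on each cpu that is not yet online.
		 */
		for_each_present_cpu(cpu)
			if (!cpu_online(cpu))
				printk(KERN_INFO "cpu %d present but not online\n", cpu);

		/*
		 * cpu_online_map covers cpus actually running: only these
		 * have live per-cpu state worth aggregating.
		 */
		for_each_online_cpu(cpu)
			total += per_cpu(demo_counter, cpu);

		printk(KERN_INFO "%d possible, %d present, %d online, total %lu\n",
		       num_possible_cpus(), num_present_cpus(), num_online_cpus(), total);
	}

The distinction matters on hotplug systems: a cpu can be present but offline
(about to be brought up, or just taken down), and statistics loops that used
for_each_cpu() would otherwise walk per-cpu areas of cpus that may never come
online, which is why the hunks above convert them to for_each_online_cpu().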