From: Rusty Russell

Some places use cpu_online() where they should be using cpu_possible(),
most commonly for tallying statistics.  This makes no difference without
hotplug CPU support.  Use the for_each_cpu() macro in those places,
providing good examples (and making the external hotplug CPU patch
smaller).
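For reference, the pattern the patch converts to looks roughly like the
sketch below.  The per-CPU variable and the function are hypothetical
names for illustration only; for_each_cpu() and per_cpu() are the
interfaces used in the hunks that follow (relevant percpu/cpumask
headers assumed).

/* Illustrative sketch only: a made-up per-CPU counter. */
static DEFINE_PER_CPU(unsigned long, hypothetical_count);

static unsigned long tally_hypothetical_count(void)
{
	unsigned long total = 0;
	int cpu;

	/*
	 * Walk every possible CPU, not just the online ones, so counts
	 * accumulated on a CPU which has since gone offline are not lost.
	 */
	for_each_cpu(cpu)
		total += per_cpu(hypothetical_count, cpu);

	return total;
}

Display paths are different: the second show_stat() hunk below keeps
walking online CPUs only (now via for_each_online_cpu()), since the
per-CPU lines in /proc/stat should list only the CPUs that are actually
running; it is the totals that must cover every possible CPU.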
 fs/buffer.c         |    6 ++----
 fs/proc/proc_misc.c |    6 ++----
 kernel/fork.c       |    7 +++----
 kernel/sched.c      |   18 ++++++------------
 kernel/timer.c      |    5 +----
 kernel/workqueue.c  |    4 +---
 mm/page_alloc.c     |    4 ++--
 net/ipv4/route.c    |    5 +----
 8 files changed, 18 insertions(+), 37 deletions(-)

diff -puN fs/buffer.c~use-for_each_cpu-in-right-places fs/buffer.c
--- 25/fs/buffer.c~use-for_each_cpu-in-right-places	2004-01-03 14:17:00.000000000 -0800
+++ 25-akpm/fs/buffer.c	2004-01-03 14:17:00.000000000 -0800
@@ -2941,10 +2941,8 @@ static void recalc_bh_state(void)
 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
 		return;
 	__get_cpu_var(bh_accounting).ratelimit = 0;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			tot += per_cpu(bh_accounting, i).nr;
-	}
+	for_each_cpu(i)
+		tot += per_cpu(bh_accounting, i).nr;
 	buffer_heads_over_limit = (tot > max_buffer_heads);
 }
 
diff -puN fs/proc/proc_misc.c~use-for_each_cpu-in-right-places fs/proc/proc_misc.c
--- 25/fs/proc/proc_misc.c~use-for_each_cpu-in-right-places	2004-01-03 14:17:00.000000000 -0800
+++ 25-akpm/fs/proc/proc_misc.c	2004-01-03 14:17:00.000000000 -0800
@@ -378,10 +378,9 @@ int show_stat(struct seq_file *p, void *
 	jif = ((u64)now.tv_sec * HZ) + (now.tv_usec/(1000000/HZ)) - jif;
 	do_div(jif, HZ);
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int j;
 
-		if (!cpu_online(i)) continue;
 		user += kstat_cpu(i).cpustat.user;
 		nice += kstat_cpu(i).cpustat.nice;
 		system += kstat_cpu(i).cpustat.system;
@@ -401,8 +400,7 @@ int show_stat(struct seq_file *p, void *
 		jiffies_to_clock_t(iowait),
 		jiffies_to_clock_t(irq),
 		jiffies_to_clock_t(softirq));
-	for (i = 0; i < NR_CPUS; i++){
-		if (!cpu_online(i)) continue;
+	for_each_online_cpu(i) {
 		seq_printf(p, "cpu%d %u %u %u %u %u %u %u\n",
 			i,
 			jiffies_to_clock_t(kstat_cpu(i).cpustat.user),
diff -puN kernel/fork.c~use-for_each_cpu-in-right-places kernel/fork.c
--- 25/kernel/fork.c~use-for_each_cpu-in-right-places	2004-01-03 14:17:00.000000000 -0800
+++ 25-akpm/kernel/fork.c	2004-01-03 14:17:00.000000000 -0800
@@ -60,10 +60,9 @@ int nr_processes(void)
 	int cpu;
 	int total = 0;
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (cpu_online(cpu))
-			total += per_cpu(process_counts, cpu);
-	}
+	for_each_cpu(cpu)
+		total += per_cpu(process_counts, cpu);
+
 	return total;
 }
 
diff -puN kernel/sched.c~use-for_each_cpu-in-right-places kernel/sched.c
--- 25/kernel/sched.c~use-for_each_cpu-in-right-places	2004-01-03 14:17:00.000000000 -0800
+++ 25-akpm/kernel/sched.c	2004-01-03 14:17:00.000000000 -0800
@@ -895,11 +895,9 @@ unsigned long nr_uninterruptible(void)
 {
 	unsigned long i, sum = 0;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_cpu(i)
 		sum += cpu_rq(i)->nr_uninterruptible;
-	}
+
 	return sum;
 }
 
@@ -907,11 +905,9 @@ unsigned long nr_context_switches(void)
 {
 	unsigned long i, sum = 0;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_cpu(i)
 		sum += cpu_rq(i)->nr_switches;
-	}
+
 	return sum;
 }
 
@@ -919,11 +915,9 @@ unsigned long nr_iowait(void)
 {
 	unsigned long i, sum = 0;
 
-	for (i = 0; i < NR_CPUS; ++i) {
-		if (!cpu_online(i))
-			continue;
+	for_each_cpu(i)
 		sum += atomic_read(&cpu_rq(i)->nr_iowait);
-	}
+
 	return sum;
 }
 
diff -puN kernel/timer.c~use-for_each_cpu-in-right-places kernel/timer.c
--- 25/kernel/timer.c~use-for_each_cpu-in-right-places	2004-01-03 14:17:00.000000000 -0800
+++ 25-akpm/kernel/timer.c	2004-01-03 14:17:00.000000000 -0800
@@ -332,10 +332,7 @@ int del_timer_sync(struct timer_list *ti
 del_again:
 	ret += del_timer(timer);
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
-
+	for_each_cpu(i) {
 		base = &per_cpu(tvec_bases, i);
 		if (base->running_timer == timer) {
 			while (base->running_timer == timer) {
diff -puN kernel/workqueue.c~use-for_each_cpu-in-right-places kernel/workqueue.c
--- 25/kernel/workqueue.c~use-for_each_cpu-in-right-places	2004-01-03 14:17:00.000000000 -0800
+++ 25-akpm/kernel/workqueue.c	2004-01-03 14:17:00.000000000 -0800
@@ -366,9 +366,7 @@ int current_is_keventd(void)
 
 	BUG_ON(!keventd_wq);
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (!cpu_online(cpu))
-			continue;
+	for_each_cpu(cpu) {
 		cwq = keventd_wq->cpu_wq + cpu;
 		if (current == cwq->thread)
 			return 1;
diff -puN mm/page_alloc.c~use-for_each_cpu-in-right-places mm/page_alloc.c
--- 25/mm/page_alloc.c~use-for_each_cpu-in-right-places	2004-01-03 14:17:00.000000000 -0800
+++ 25-akpm/mm/page_alloc.c	2004-01-03 14:17:00.000000000 -0800
@@ -868,14 +868,14 @@ void __get_page_state(struct page_state
 	while (cpu < NR_CPUS) {
 		unsigned long *in, *out, off;
 
-		if (!cpu_online(cpu)) {
+		if (!cpu_possible(cpu)) {
 			cpu++;
 			continue;
 		}
 
 		in = (unsigned long *)&per_cpu(page_states, cpu);
 		cpu++;
-		if (cpu < NR_CPUS && cpu_online(cpu))
+		if (cpu < NR_CPUS && cpu_possible(cpu))
 			prefetch(&per_cpu(page_states, cpu));
 		out = (unsigned long *)ret;
 		for (off = 0; off < nr; off++)
diff -puN net/ipv4/route.c~use-for_each_cpu-in-right-places net/ipv4/route.c
--- 25/net/ipv4/route.c~use-for_each_cpu-in-right-places	2004-01-03 14:17:00.000000000 -0800
+++ 25-akpm/net/ipv4/route.c	2004-01-03 14:17:00.000000000 -0800
@@ -2703,12 +2703,9 @@ static int ip_rt_acct_read(char *buffer,
 		memcpy(dst, src, length);
 
 		/* Add the other cpus in, one int at a time */
-		for (i = 1; i < NR_CPUS; i++) {
+		for_each_cpu(i) {
 			unsigned int j;
 
-			if (!cpu_online(i))
-				continue;
-
 			src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
 			for (j = 0; j < length/4; j++)