Patch from Thomas Schlichter

Based on a patch from Dave Jones.  It converts a large number of instances
of:

	smp_call_function(foo);
	foo();

into:

	on_each_cpu(foo);

and in doing so fixes up the preempt-unsafeness of the first version: with
preemption enabled, the calling task can migrate to another CPU between the
smp_call_function() (which IPIs all *other* CPUs) and the local foo() call,
so foo() runs twice on the new CPU and never on the CPU the task left.
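For review purposes, a minimal sketch of the transformation and why it
matters.  The helper body mirrors the include/linux/smp.h hunk at the bottom
of this patch; `func' and `info' are illustrative placeholders:

	/*
	 * Open-coded pattern (preempt-unsafe): smp_call_function() IPIs
	 * every *other* CPU, then the caller runs func locally.  With
	 * preemption on, the task can migrate between the two calls, so
	 * the new CPU runs func twice and the CPU we migrated away from
	 * never runs it at all.
	 */
	smp_call_function(func, info, 1, 1);
	func(info);

	/*
	 * Converted pattern: preemption is held off across both steps,
	 * so the local call really does run on the one CPU the IPI
	 * skipped.
	 */
	static inline int on_each_cpu(void (*func) (void *info), void *info,
					int retry, int wait)
	{
		int ret = 0;

		preempt_disable();
		ret = smp_call_function(func, info, retry, wait);
		func(info);
		preempt_enable();
		return ret;
	}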
 arch/alpha/kernel/process.c          |    5 +----
 arch/alpha/kernel/smp.c              |   23 +++++++++++++++++------
 arch/i386/kernel/io_apic.c           |    6 ++----
 arch/i386/kernel/ldt.c               |    4 ++--
 arch/i386/kernel/microcode.c         |    3 +--
 arch/i386/kernel/smp.c               |   13 ++-----------
 arch/i386/kernel/sysenter.c          |    3 +--
 arch/i386/mach-voyager/voyager_smp.c |   15 +++------------
 arch/i386/mm/pageattr.c              |    7 ++-----
 arch/i386/oprofile/nmi_int.c         |   12 ++++--------
 arch/ia64/kernel/smp.c               |   10 +++++-----
 arch/mips64/kernel/smp.c             |   15 +++++++++++++--
 arch/parisc/kernel/cache.c           |    3 +--
 arch/parisc/kernel/irq.c             |   11 +++--------
 arch/parisc/kernel/smp.c             |    5 ++---
 arch/parisc/mm/init.c                |    3 +--
 arch/ppc/kernel/temp.c               |   10 ++--------
 arch/s390/kernel/smp.c               |   16 ++++++++--------
 arch/s390x/kernel/smp.c              |   13 +++++++------
 arch/x86_64/kernel/bluesmoke.c       |   14 +-------------
 arch/x86_64/kernel/io_apic.c         |    3 +--
 arch/x86_64/kernel/ldt.c             |    4 ++--
 arch/x86_64/kernel/smp.c             |   13 ++-----------
 arch/x86_64/mm/pageattr.c            |    7 +------
 drivers/char/agp/agp.h               |    3 +--
 drivers/s390/char/sclp.c             |    3 +--
 fs/buffer.c                          |    5 +----
 include/asm-parisc/cacheflush.h      |    8 +-------
 include/linux/smp.h                  |   19 ++++++++++++++++++-
 mm/slab.c                            |    4 ++++
 30 files changed, 110 insertions(+), 150 deletions(-)

diff -puN arch/alpha/kernel/process.c~on_each_cpu arch/alpha/kernel/process.c
--- 25/arch/alpha/kernel/process.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/alpha/kernel/process.c	2003-02-26 03:28:09.000000000 -0800
@@ -155,10 +155,7 @@ common_shutdown(int mode, char *restart_
 	struct halt_info args;
 	args.mode = mode;
 	args.restart_cmd = restart_cmd;
-#ifdef CONFIG_SMP
-	smp_call_function(common_shutdown_1, &args, 1, 0);
-#endif
-	common_shutdown_1(&args);
+	on_each_cpu(common_shutdown_1, &args, 1, 0);
 }
 
 void
diff -puN arch/alpha/kernel/smp.c~on_each_cpu arch/alpha/kernel/smp.c
--- 25/arch/alpha/kernel/smp.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/alpha/kernel/smp.c	2003-02-26 03:28:09.000000000 -0800
@@ -899,10 +899,8 @@ void
 smp_imb(void)
 {
 	/* Must wait other processors to flush their icache before continue. */
-	if (smp_call_function(ipi_imb, NULL, 1, 1))
+	if (on_each_cpu(ipi_imb, NULL, 1, 1))
 		printk(KERN_CRIT "smp_imb: timed out\n");
-
-	imb();
 }
 
 static void
@@ -916,11 +914,9 @@ flush_tlb_all(void)
 {
 	/* Although we don't have any data to pass, we do want to
 	   synchronize with the other processors.  */
-	if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
+	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
 		printk(KERN_CRIT "flush_tlb_all: timed out\n");
 	}
-
-	tbia();
 }
 
 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
@@ -938,6 +934,8 @@ ipi_flush_tlb_mm(void *x)
 void
 flush_tlb_mm(struct mm_struct *mm)
 {
+	preempt_disable();
+
 	if (mm == current->active_mm) {
 		flush_tlb_current(mm);
 		if (atomic_read(&mm->mm_users) <= 1) {
@@ -948,6 +946,7 @@ flush_tlb_mm(struct mm_struct *mm)
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
+			preempt_enable();
 			return;
 		}
 	}
@@ -955,6 +954,8 @@ flush_tlb_mm(struct mm_struct *mm)
 	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
 	}
+
+	preempt_enable();
 }
 
 struct flush_tlb_page_struct {
@@ -981,6 +982,8 @@ flush_tlb_page(struct vm_area_struct *vm
 	struct flush_tlb_page_struct data;
 	struct mm_struct *mm = vma->vm_mm;
 
+	preempt_disable();
+
 	if (mm == current->active_mm) {
 		flush_tlb_current_page(mm, vma, addr);
 		if (atomic_read(&mm->mm_users) <= 1) {
@@ -991,6 +994,7 @@ flush_tlb_page(struct vm_area_struct *vm
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
+			preempt_enable();
 			return;
 		}
 	}
@@ -1002,6 +1006,8 @@ flush_tlb_page(struct vm_area_struct *vm
 	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
 	}
+
+	preempt_enable();
 }
 
 void
@@ -1030,6 +1036,8 @@ flush_icache_user_range(struct vm_area_s
 	if ((vma->vm_flags & VM_EXEC) == 0)
 		return;
 
+	preempt_disable();
+
 	if (mm == current->active_mm) {
 		__load_new_mm_context(mm);
 		if (atomic_read(&mm->mm_users) <= 1) {
@@ -1040,6 +1048,7 @@ flush_icache_user_range(struct vm_area_s
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
+			preempt_enable();
 			return;
 		}
 	}
@@ -1047,6 +1056,8 @@ flush_icache_user_range(struct vm_area_s
 	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
 		printk(KERN_CRIT "flush_icache_page: timed out\n");
 	}
+
+	preempt_enable();
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK
diff -puN arch/i386/kernel/io_apic.c~on_each_cpu arch/i386/kernel/io_apic.c
--- 25/arch/i386/kernel/io_apic.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/i386/kernel/io_apic.c	2003-02-26 03:28:09.000000000 -0800
@@ -1376,8 +1376,7 @@ void /*__init*/ print_local_APIC(void *
 
 void print_all_local_APICs (void)
 {
-	smp_call_function(print_local_APIC, NULL, 1, 1);
-	print_local_APIC(NULL);
+	on_each_cpu(print_local_APIC, NULL, 1, 1);
 }
 
 void /*__init*/ print_PIC(void)
@@ -1843,8 +1842,7 @@ static void setup_nmi (void)
 	 */
 	printk(KERN_INFO "activating NMI Watchdog ...");
 
-	smp_call_function(enable_NMI_through_LVT0, NULL, 1, 1);
-	enable_NMI_through_LVT0(NULL);
+	on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
 
 	printk(" done.\n");
 }
diff -puN arch/i386/kernel/ldt.c~on_each_cpu arch/i386/kernel/ldt.c
--- 25/arch/i386/kernel/ldt.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/i386/kernel/ldt.c	2003-02-26 03:28:09.000000000 -0800
@@ -55,13 +55,13 @@ static int alloc_ldt(mm_context_t *pc, i
 	wmb();
 	if (reload) {
+		preempt_disable();
 		load_LDT(pc);
 #ifdef CONFIG_SMP
-		preempt_disable();
 		if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
 			smp_call_function(flush_ldt, 0, 1, 1);
-		preempt_enable();
 #endif
+		preempt_enable();
 	}
 	if (oldsize) {
 		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
diff -puN arch/i386/kernel/microcode.c~on_each_cpu arch/i386/kernel/microcode.c
--- 25/arch/i386/kernel/microcode.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/i386/kernel/microcode.c	2003-02-26 03:28:09.000000000 -0800
@@ -183,11 +183,10 @@ static int do_microcode_update(void)
 	int i, error = 0, err;
 	struct microcode *m;
 
-	if (smp_call_function(do_update_one, NULL, 1, 1) != 0) {
+	if (on_each_cpu(do_update_one, NULL, 1, 1) != 0) {
 		printk(KERN_ERR "microcode: IPI timeout, giving up\n");
 		return -EIO;
 	}
-	do_update_one(NULL);
 
diff -puN arch/ia64/kernel/smp.c~on_each_cpu arch/ia64/kernel/smp.c
--- 25/arch/ia64/kernel/smp.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/ia64/kernel/smp.c	2003-02-26 03:28:09.000000000 -0800
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
+	{
+		local_finish_flush_tlb_mm(mm);
+		return;
+	}
 
 	/*
 	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
@@ -226,7 +226,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
 	 * rather trivial.
 	 */
-	smp_call_function((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
 }
 
 /*
diff -puN arch/mips64/kernel/smp.c~on_each_cpu arch/mips64/kernel/smp.c
--- 25/arch/mips64/kernel/smp.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/mips64/kernel/smp.c	2003-02-26 03:28:09.000000000 -0800
@@ -195,8 +195,7 @@ static void flush_tlb_all_ipi(void *info
 
 void flush_tlb_all(void)
 {
-	smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
-	_flush_tlb_all();
+	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -219,6 +218,8 @@ static void flush_tlb_mm_ipi(void *mm)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
+	preempt_disable();
+
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
 	} else {
@@ -228,6 +229,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 			CPU_CONTEXT(i, mm) = 0;
 	}
 	_flush_tlb_mm(mm);
+
+	preempt_enable();
 }
 
 struct flush_tlb_data {
@@ -246,6 +249,8 @@ static void flush_tlb_range_ipi(void *in
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
+	preempt_disable();
+
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		struct flush_tlb_data fd;
 
@@ -260,6 +265,8 @@ void flush_tlb_range(struct vm_area_stru
 			CPU_CONTEXT(i, mm) = 0;
 	}
 	_flush_tlb_range(mm, start, end);
+
+	preempt_enable();
 }
 
 static void flush_tlb_page_ipi(void *info)
@@ -271,6 +278,8 @@ static void flush_tlb_page_ipi(void *inf
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	preempt_disable();
+
 	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
 		struct flush_tlb_data fd;
 
@@ -284,5 +293,7 @@ void flush_tlb_page(struct vm_area_struc
 			CPU_CONTEXT(i, vma->vm_mm) = 0;
 	}
 	_flush_tlb_page(vma, page);
+
+	preempt_enable();
 }
diff -puN arch/parisc/kernel/cache.c~on_each_cpu arch/parisc/kernel/cache.c
--- 25/arch/parisc/kernel/cache.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/parisc/kernel/cache.c	2003-02-26 03:28:09.000000000 -0800
@@ -39,8 +39,7 @@ static struct pdc_btlb_info btlb_info;
 
 void
 flush_data_cache(void)
 {
-	smp_call_function((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
-	flush_data_cache_local();
+	on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
 }
 #endif
diff -puN arch/parisc/kernel/irq.c~on_each_cpu arch/parisc/kernel/irq.c
--- 25/arch/parisc/kernel/irq.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/parisc/kernel/irq.c	2003-02-26 03:28:09.000000000 -0800
@@ -61,20 +61,17 @@ static volatile unsigned long cpu_eiem =
 
 static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED;  /* protect IRQ regions */
 
-#ifdef CONFIG_SMP
 static void cpu_set_eiem(void *info)
 {
 	set_eiem((unsigned long) info);
 }
-#endif
 
 static inline void disable_cpu_irq(void *unused, int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
 	cpu_eiem &= ~eirr_bit;
-	set_eiem(cpu_eiem);
-	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
 }
 
 static void enable_cpu_irq(void *unused, int irq)
@@ -83,8 +80,7 @@ static void enable_cpu_irq(void *unused,
 
 	mtctl(eirr_bit, 23);	/* clear EIRR bit before unmasking */
 	cpu_eiem |= eirr_bit;
-	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
-	set_eiem(cpu_eiem);
+	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
 }
 
 /* mask and disable are the same at the CPU level
@@ -100,8 +96,7 @@ static inline void unmask_cpu_irq(void *
 	** handle *any* unmasked pending interrupts.
 	** ie We don't need to check for pending interrupts here.
 	*/
-	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
-	set_eiem(cpu_eiem);
+	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
 }
 
 /*
diff -puN arch/parisc/kernel/smp.c~on_each_cpu arch/parisc/kernel/smp.c
--- 25/arch/parisc/kernel/smp.c~on_each_cpu	2003-02-26 03:28:08.000000000 -0800
+++ 25-akpm/arch/parisc/kernel/smp.c	2003-02-26 03:28:09.000000000 -0800
@@ -401,7 +401,7 @@ static int __init maxcpus(char *str)
 __setup("maxcpus=", maxcpus);
 
 /*
- * Flush all other CPU's tlb and then mine.  Do this with smp_call_function()
+ * Flush all other CPU's tlb and then mine.  Do this with on_each_cpu()
 * as we want to ensure all TLB's flushed before proceeding.
 */
 
@@ -410,8 +410,7 @@ extern void flush_tlb_all_local(void);
 
 void
 smp_flush_tlb_all(void)
 {
-	smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
-	flush_tlb_all_local();
+	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
 }
diff -puN arch/parisc/mm/init.c~on_each_cpu arch/parisc/mm/init.c
--- 25/arch/parisc/mm/init.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/arch/parisc/mm/init.c	2003-02-26 03:28:09.000000000 -0800
@@ -974,8 +974,7 @@ void flush_tlb_all(void)
 		do_recycle++;
 	}
 	spin_unlock(&sid_lock);
-	smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
-	flush_tlb_all_local();
+	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
 	if (do_recycle) {
 		spin_lock(&sid_lock);
 		recycle_sids(recycle_ndirty,recycle_dirty_array);
diff -puN arch/ppc/kernel/temp.c~on_each_cpu arch/ppc/kernel/temp.c
--- 25/arch/ppc/kernel/temp.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/arch/ppc/kernel/temp.c	2003-02-26 03:28:09.000000000 -0800
@@ -194,10 +194,7 @@ static void tau_timeout_smp(unsigned lon
 	/* schedule ourselves to be run again */
 	mod_timer(&tau_timer, jiffies + shrink_timer) ;
 
-#ifdef CONFIG_SMP
-	smp_call_function(tau_timeout, NULL, 1, 0);
-#endif
-	tau_timeout(NULL);
+	on_each_cpu(tau_timeout, NULL, 1, 0);
 }
 
 /*
@@ -239,10 +236,7 @@ int __init TAU_init(void)
 	tau_timer.expires = jiffies + shrink_timer;
 	add_timer(&tau_timer);
 
-#ifdef CONFIG_SMP
-	smp_call_function(TAU_init_smp, NULL, 1, 0);
-#endif
-	TAU_init_smp(NULL);
+	on_each_cpu(TAU_init_smp, NULL, 1, 0);
 
 	printk("Thermal assist unit ");
 #ifdef CONFIG_TAU_INT
diff -puN arch/s390/kernel/smp.c~on_each_cpu arch/s390/kernel/smp.c
--- 25/arch/s390/kernel/smp.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/arch/s390/kernel/smp.c	2003-02-26 03:28:09.000000000 -0800
@@ -228,8 +228,7 @@ static void do_machine_restart(void * __
 void machine_restart_smp(char * __unused)
 {
 	cpu_restart_map = cpu_online_map;
-	smp_call_function(do_machine_restart, NULL, 0, 0);
-	do_machine_restart(NULL);
+	on_each_cpu(do_machine_restart, NULL, 0, 0);
 }
 
 static void do_machine_halt(void * __unused)
@@ -247,8 +246,7 @@ static void do_machine_halt(void * __unu
 
 void machine_halt_smp(void)
 {
-	smp_call_function(do_machine_halt, NULL, 0, 0);
-	do_machine_halt(NULL);
+	on_each_cpu(do_machine_halt, NULL, 0, 0);
 }
 
 static void do_machine_power_off(void * __unused)
@@ -266,8 +264,7 @@ static void do_machine_power_off(void *
 
 void machine_power_off_smp(void)
 {
-	smp_call_function(do_machine_power_off, NULL, 0, 0);
-	do_machine_power_off(NULL);
+	on_each_cpu(do_machine_power_off, NULL, 0, 0);
 }
 
 /*
@@ -339,8 +336,7 @@ void smp_ptlb_callback(void *info)
 
 void smp_ptlb_all(void)
 {
-	smp_call_function(smp_ptlb_callback, NULL, 0, 1);
-	local_flush_tlb();
+	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
 }
 
 /*
@@ -400,8 +396,10 @@ void smp_ctl_set_bit(int cr, int bit) {
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 1 << bit;
 	parms.andvals[cr] = 0xFFFFFFFF;
+	preempt_disable();
 	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
 	__ctl_set_bit(cr, bit);
+	preempt_enable();
 }
 
 /*
@@ -414,8 +412,10 @@ void smp_ctl_clear_bit(int cr, int bit)
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 0x00000000;
 	parms.andvals[cr] = ~(1 << bit);
+	preempt_disable();
 	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
 	__ctl_clear_bit(cr, bit);
+	preempt_enable();
 }
 
 /*
diff -puN arch/s390x/kernel/smp.c~on_each_cpu arch/s390x/kernel/smp.c
--- 25/arch/s390x/kernel/smp.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/arch/s390x/kernel/smp.c	2003-02-26 03:28:09.000000000 -0800
@@ -227,8 +227,7 @@ static void do_machine_restart(void * __
 void machine_restart_smp(char * __unused)
 {
 	cpu_restart_map = cpu_online_map;
-	smp_call_function(do_machine_restart, NULL, 0, 0);
-	do_machine_restart(NULL);
+	on_each_cpu(do_machine_restart, NULL, 0, 0);
 }
 
 static void do_machine_halt(void * __unused)
@@ -246,8 +245,7 @@ static void do_machine_halt(void * __unu
 
 void machine_halt_smp(void)
 {
-	smp_call_function(do_machine_halt, NULL, 0, 0);
-	do_machine_halt(NULL);
+	on_each_cpu(do_machine_halt, NULL, 0, 0);
 }
 
 static void do_machine_power_off(void * __unused)
@@ -265,8 +263,7 @@ static void do_machine_power_off(void *
 
 void machine_power_off_smp(void)
 {
-	smp_call_function(do_machine_power_off, NULL, 0, 0);
-	do_machine_power_off(NULL);
+	on_each_cpu(do_machine_power_off, NULL, 0, 0);
 }
 
 /*
@@ -383,8 +380,10 @@ void smp_ctl_set_bit(int cr, int bit) {
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 1 << bit;
 	parms.andvals[cr] = -1L;
+	preempt_disable();
 	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
 	__ctl_set_bit(cr, bit);
+	preempt_enable();
 }
 
 /*
@@ -397,8 +396,10 @@ void smp_ctl_clear_bit(int cr, int bit)
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 0;
 	parms.andvals[cr] = ~(1L << bit);
+	preempt_disable();
 	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
 	__ctl_clear_bit(cr, bit);
+	preempt_enable();
 }
diff -puN arch/x86_64/kernel/bluesmoke.c~on_each_cpu arch/x86_64/kernel/bluesmoke.c
--- 25/arch/x86_64/kernel/bluesmoke.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/arch/x86_64/kernel/bluesmoke.c	2003-02-26 03:28:09.000000000 -0800
@@ -111,11 +111,7 @@ static void mce_checkregs (void *info)
 {
 	u32 low, high;
 	int i;
-	unsigned int *cpu = info;
-	BUG_ON (*cpu != smp_processor_id());
-
-	preempt_disable();
diff -puN arch/x86_64/kernel/ldt.c~on_each_cpu arch/x86_64/kernel/ldt.c
--- 25/arch/x86_64/kernel/ldt.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/arch/x86_64/kernel/ldt.c	2003-02-26 03:28:09.000000000 -0800
 	pc->size = mincount;
 	wmb();
 	if (reload) {
+		preempt_disable();
 		load_LDT(pc);
 #ifdef CONFIG_SMP
-		preempt_disable();
 		if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
 			smp_call_function(flush_ldt, 0, 1, 1);
-		preempt_enable();
 #endif
+		preempt_enable();
 	}
 	if (oldsize) {
 		if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
diff -puN arch/x86_64/kernel/smp.c~on_each_cpu arch/x86_64/kernel/smp.c
--- 25/arch/x86_64/kernel/smp.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/arch/x86_64/kernel/smp.c	2003-02-26 03:28:09.000000000 -0800
@@ -328,7 +328,7 @@ void flush_tlb_page(struct vm_area_struc
 	preempt_enable();
 }
 
-static inline void do_flush_tlb_all_local(void)
+static void do_flush_tlb_all(void* info)
 {
 	unsigned long cpu = smp_processor_id();
 
@@ -337,18 +337,9 @@ static inline void do_flush_tlb_all_loca
 		leave_mm(cpu);
 }
 
-static void flush_tlb_all_ipi(void* info)
-{
-	do_flush_tlb_all_local();
-}
-
 void flush_tlb_all(void)
 {
-	preempt_disable();
-	smp_call_function (flush_tlb_all_ipi,0,1,1);
-
-	do_flush_tlb_all_local();
-	preempt_enable();
+	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
 }
 
 void smp_kdb_stop(void)
diff -puN arch/x86_64/mm/pageattr.c~on_each_cpu arch/x86_64/mm/pageattr.c
--- 25/arch/x86_64/mm/pageattr.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/arch/x86_64/mm/pageattr.c	2003-02-26 03:28:09.000000000 -0800
@@ -123,12 +123,7 @@ __change_page_attr(unsigned long address
 
 static inline void flush_map(unsigned long address)
 {
-	preempt_disable();
-#ifdef CONFIG_SMP
-	smp_call_function(flush_kernel_map, (void *)address, 1, 1);
-#endif
-	flush_kernel_map((void *)address);
-	preempt_enable();
+	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
 }
 
 struct deferred_page {
diff -puN drivers/char/agp/agp.h~on_each_cpu drivers/char/agp/agp.h
--- 25/drivers/char/agp/agp.h~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/drivers/char/agp/agp.h	2003-02-26 03:28:09.000000000 -0800
@@ -42,9 +42,8 @@ static void ipi_handler(void *null)
 
 static void __attribute__((unused)) global_cache_flush(void)
 {
-	if (smp_call_function(ipi_handler, NULL, 1, 1) != 0)
+	if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
 		panic(PFX "timed out waiting for the other CPUs!\n");
-	flush_agp_cache();
 }
 #else
 static inline void global_cache_flush(void)
diff -puN drivers/s390/char/sclp.c~on_each_cpu drivers/s390/char/sclp.c
--- 25/drivers/s390/char/sclp.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/drivers/s390/char/sclp.c	2003-02-26 03:28:09.000000000 -0800
@@ -481,8 +481,7 @@ static void
 do_machine_quiesce(void)
 {
 	cpu_quiesce_map = cpu_online_map;
-	smp_call_function(do_load_quiesce_psw, NULL, 0, 0);
-	do_load_quiesce_psw(NULL);
+	on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
 }
 #else
 static void
diff -puN fs/buffer.c~on_each_cpu fs/buffer.c
--- 25/fs/buffer.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/fs/buffer.c	2003-02-26 03:28:09.000000000 -0800
@@ -1406,10 +1406,7 @@ static void invalidate_bh_lru(void *arg)
 
 static void invalidate_bh_lrus(void)
 {
-	preempt_disable();
-	invalidate_bh_lru(NULL);
-	smp_call_function(invalidate_bh_lru, NULL, 1, 1);
-	preempt_enable();
+	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
 
 void set_bh_page(struct buffer_head *bh,
diff -puN include/asm-parisc/cacheflush.h~on_each_cpu include/asm-parisc/cacheflush.h
--- 25/include/asm-parisc/cacheflush.h~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/include/asm-parisc/cacheflush.h	2003-02-26 03:28:09.000000000 -0800
@@ -25,16 +25,10 @@ flush_page_to_ram(struct page *page)
 
 extern void flush_cache_all_local(void);
 
-#ifdef CONFIG_SMP
 static inline void flush_cache_all(void)
 {
-	smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
-	flush_cache_all_local();
+	on_each_cpu((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
 }
-#else
-#define flush_cache_all flush_cache_all_local
-#endif
-
 /* The following value needs to be tuned and probably scaled with the
  * cache size.
diff -puN include/linux/smp.h~on_each_cpu include/linux/smp.h
--- 25/include/linux/smp.h~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/include/linux/smp.h	2003-02-26 03:28:34.000000000 -0800
@@ -10,9 +10,10 @@
 
 #ifdef CONFIG_SMP
 
+#include <linux/preempt.h>
 #include <linux/kernel.h>
 #include <linux/compiler.h>
-#include <linux/thread_info.h>
+#include <linux/smp_lock.h>
 #include <asm/smp.h>
 #include <asm/bug.h>
 
@@ -54,6 +55,21 @@ extern int smp_call_function (void (*fun
 			      int retry, int wait);
 
 /*
+ * Call a function on all processors
+ */
+static inline int on_each_cpu(void (*func) (void *info), void *info,
+				int retry, int wait)
+{
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, retry, wait);
+	func(info);
+	preempt_enable();
+	return ret;
+}
+
+/*
 * True once the per process idle is forked
 */
 extern int smp_threads_ready;
@@ -96,6 +112,7 @@ void smp_prepare_boot_cpu(void);
 #define hard_smp_processor_id()			0
 #define smp_threads_ready			1
 #define smp_call_function(func,info,retry,wait)	({ 0; })
+#define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
 static inline void smp_send_reschedule_all(void) { }
 #define cpu_online_map				1
diff -puN mm/slab.c~on_each_cpu mm/slab.c
--- 25/mm/slab.c~on_each_cpu	2003-02-26 03:28:09.000000000 -0800
+++ 25-akpm/mm/slab.c	2003-02-26 03:28:09.000000000 -0800
@@ -1116,12 +1116,16 @@ static inline void check_spinlock_acquir
 static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
 {
 	check_irq_on();
+	preempt_disable();
+
 	local_irq_disable();
 	func(arg);
 	local_irq_enable();
 
 	if (smp_call_function(func, arg, 1, 1))
 		BUG();
+
+	preempt_enable();
 }
 
 static void free_block (kmem_cache_t* cachep, void** objpp, int len);
_
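
A usage note, drawn from the hunks above: on_each_cpu() returns whatever
smp_call_function() returned (and a constant 0 on UP, where it expands to
({ func(info); 0; })), so existing timeout handling carries over unchanged.
A minimal sketch of a converted caller; the function names here are
illustrative only, modeled on the alpha and agp hunks:

	static void ipi_flush(void *unused)
	{
		/* per-CPU work: runs on every online CPU, including this one */
	}

	void flush_everything(void)
	{
		/* nonzero only if the cross-CPU call timed out (never on UP) */
		if (on_each_cpu(ipi_flush, NULL, 1, 1) != 0)
			printk(KERN_CRIT "flush_everything: timed out\n");
	}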