From: Christoph Hellwig currently only x86_64 and ia64 don't use the generic irq_cpustat code and both have to work around its brokenness for the non-default case. x86_64 defines an empty irq_cpustat_t even if it doesn't need one and ia64 adds CONFIG_IA64 ifdefs around all users. What about this patch instead to make __ARCH_IRQ_STAT usable? 25-akpm/include/asm-ia64/hardirq.h | 3 +++ 25-akpm/include/asm-x86_64/hardirq.h | 4 ---- 25-akpm/include/linux/irq_cpustat.h | 10 ++++++---- 25-akpm/kernel/ksyms.c | 1 - 25-akpm/kernel/softirq.c | 5 ++++- 5 files changed, 13 insertions(+), 10 deletions(-) diff -puN include/asm-ia64/hardirq.h~irq_cpustat-cleanup include/asm-ia64/hardirq.h --- 25/include/asm-ia64/hardirq.h~irq_cpustat-cleanup Mon May 19 16:30:49 2003 +++ 25-akpm/include/asm-ia64/hardirq.h Mon May 19 16:30:49 2003 @@ -16,6 +16,9 @@ /* * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure. */ + +#define __ARCH_IRQ_STAT 1 + #define softirq_pending(cpu) (cpu_data(cpu)->softirq_pending) #define syscall_count(cpu) /* unused on IA-64 */ #define ksoftirqd_task(cpu) (cpu_data(cpu)->ksoftirqd) diff -puN include/asm-x86_64/hardirq.h~irq_cpustat-cleanup include/asm-x86_64/hardirq.h --- 25/include/asm-x86_64/hardirq.h~irq_cpustat-cleanup Mon May 19 16:30:49 2003 +++ 25-akpm/include/asm-x86_64/hardirq.h Mon May 19 16:30:49 2003 @@ -12,10 +12,6 @@ special access macros. This would generate better code. */ #define __IRQ_STAT(cpu,member) (read_pda(me)->member) -typedef struct { - /* Empty. All the fields have moved to the PDA. */ -} irq_cpustat_t; - #include /* Standard mappings for irq_cpustat_t above */ /* diff -puN include/linux/irq_cpustat.h~irq_cpustat-cleanup include/linux/irq_cpustat.h --- 25/include/linux/irq_cpustat.h~irq_cpustat-cleanup Mon May 19 16:30:49 2003 +++ 25-akpm/include/linux/irq_cpustat.h Mon May 19 16:30:49 2003 @@ -17,9 +17,8 @@ * definitions instead of differing sets for each arch. 
*/ -extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */ - -#ifndef __ARCH_IRQ_STAT /* Some architectures can do this more efficiently */ +#ifndef __ARCH_IRQ_STAT +extern irq_cpustat_t irq_stat[]; /* defined in asm/hardirq.h */ #ifdef CONFIG_SMP #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #else @@ -31,8 +30,11 @@ extern irq_cpustat_t irq_stat[]; /* de #define softirq_pending(cpu) __IRQ_STAT((cpu), __softirq_pending) #define local_softirq_pending() softirq_pending(smp_processor_id()) #define syscall_count(cpu) __IRQ_STAT((cpu), __syscall_count) +#define local_syscall_count() syscall_count(smp_processor_id()) #define ksoftirqd_task(cpu) __IRQ_STAT((cpu), __ksoftirqd_task) +#define local_ksoftirqd_task() ksoftirqd_task(smp_processor_id()) + /* arch dependent irq_stat fields */ -#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386, ia64 */ +#define nmi_count(cpu) __IRQ_STAT((cpu), __nmi_count) /* i386 */ #endif /* __irq_cpustat_h */ diff -puN kernel/ksyms.c~irq_cpustat-cleanup kernel/ksyms.c --- 25/kernel/ksyms.c~irq_cpustat-cleanup Mon May 19 16:30:49 2003 +++ 25-akpm/kernel/ksyms.c Mon May 19 16:30:49 2003 @@ -402,7 +402,6 @@ EXPORT_SYMBOL(add_timer); EXPORT_SYMBOL(del_timer); EXPORT_SYMBOL(request_irq); EXPORT_SYMBOL(free_irq); -EXPORT_SYMBOL(irq_stat); /* waitqueue handling */ EXPORT_SYMBOL(add_wait_queue); diff -puN kernel/softirq.c~irq_cpustat-cleanup kernel/softirq.c --- 25/kernel/softirq.c~irq_cpustat-cleanup Mon May 19 16:30:49 2003 +++ 25-akpm/kernel/softirq.c Mon May 19 16:30:49 2003 @@ -33,7 +33,10 @@ - Tasklets: serialized wrt itself. 
*/ +#ifndef __ARCH_IRQ_STAT irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; +EXPORT_SYMBOL(irq_stat); +#endif static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp; @@ -321,7 +324,7 @@ static int ksoftirqd(void * __bind_cpu) __set_current_state(TASK_INTERRUPTIBLE); mb(); - ksoftirqd_task(cpu) = current; + local_ksoftirqd_task() = current; for (;;) { if (!local_softirq_pending()) _