diff -urN ksoftirqd-ref/arch/ia64/kernel/entry.S ksoftirqd/arch/ia64/kernel/entry.S --- ksoftirqd-ref/arch/ia64/kernel/entry.S Tue May 1 19:35:18 2001 +++ ksoftirqd/arch/ia64/kernel/entry.S Wed Jun 20 02:33:57 2001 @@ -531,13 +531,12 @@ GLOBAL_ENTRY(ia64_leave_kernel) PT_REGS_UNWIND_INFO(0) cmp.eq p16,p0=r0,r0 // set the "first_time" flag - movl r15=PERCPU_ADDR+IA64_CPU_SOFTIRQ_ACTIVE_OFFSET // r15 = &cpu_data.softirq.active + movl r15=PERCPU_ADDR+IA64_CPU_SOFTIRQ_PENDING_OFFSET // r15 = &cpu_data.softirq.pending ;; ld8 r2=[r15] movl r14=.restart ;; lfetch.fault [sp] - shr.u r3=r2,32 // r3 = cpu_data.softirq.mask MOVBR(.ret.sptk,rp,r14,.restart) .restart: adds r17=IA64_TASK_NEED_RESCHED_OFFSET,r13 @@ -548,12 +547,11 @@ ;; ld8 r17=[r17] // load current->need_resched ld4 r18=[r18] // load current->sigpending -(p16) and r2=r2,r3 // r2 <- (softirq.active & softirq.mask) ;; #ifdef CONFIG_PERFMON ld8 r19=[r19] // load current->task.pfm_notify #endif -(p16) cmp4.ne.unc p6,p0=r2,r0 // p6 <- (softirq.active & softirq.mask) != 0 +(p16) cmp4.ne.unc p6,p0=r2,r0 // p6 <- softirq.pending != 0 (pUser) cmp.ne.unc p7,p0=r17,r0 // current->need_resched != 0? ;; (pUser) cmp.ne.unc p8,p0=r18,r0 // current->sigpending != 0? 
diff -urN ksoftirqd-ref/arch/ia64/tools/print_offsets.c ksoftirqd/arch/ia64/tools/print_offsets.c --- ksoftirqd-ref/arch/ia64/tools/print_offsets.c Tue May 1 19:35:19 2001 +++ ksoftirqd/arch/ia64/tools/print_offsets.c Wed Jun 20 02:47:58 2001 @@ -161,8 +161,7 @@ { "IA64_CLONE_VM", CLONE_VM }, { "IA64_CPU_IRQ_COUNT_OFFSET", offsetof (struct cpuinfo_ia64, irq_stat.f.irq_count) }, { "IA64_CPU_BH_COUNT_OFFSET", offsetof (struct cpuinfo_ia64, irq_stat.f.bh_count) }, - { "IA64_CPU_SOFTIRQ_ACTIVE_OFFSET", offsetof (struct cpuinfo_ia64, softirq.active) }, - { "IA64_CPU_SOFTIRQ_MASK_OFFSET", offsetof (struct cpuinfo_ia64, softirq.mask) }, + { "IA64_CPU_SOFTIRQ_PENDING_OFFSET", offsetof (struct cpuinfo_ia64, softirq.pending) }, { "IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET", offsetof (struct cpuinfo_ia64, phys_stacked_size_p8) }, }; diff -urN ksoftirqd-ref/include/asm-ia64/hardirq.h ksoftirqd/include/asm-ia64/hardirq.h --- ksoftirqd-ref/include/asm-ia64/hardirq.h Tue May 1 19:35:30 2001 +++ ksoftirqd/include/asm-ia64/hardirq.h Wed Jun 20 03:53:28 2001 @@ -16,15 +16,15 @@ /* * No irq_cpustat_t for IA-64. The data is held in the per-CPU data structure. 
*/ -#define softirq_active(cpu) (cpu_data[cpu].softirq.active) -#define softirq_mask(cpu) (cpu_data[cpu].softirq.mask) +#define softirq_pending(cpu) (cpu_data[cpu].softirq.pending) +#define ksoftirqd_task(cpu) (cpu_data[cpu].softirq.ksoftirqd_task) #define irq_count(cpu) (cpu_data[cpu].irq_stat.f.irq_count) #define bh_count(cpu) (cpu_data[cpu].irq_stat.f.bh_count) #define syscall_count(cpu) /* unused on IA-64 */ #define nmi_count(cpu) 0 -#define local_softirq_active() (local_cpu_data->softirq.active) -#define local_softirq_mask() (local_cpu_data->softirq.mask) +#define local_softirq_pending() (local_cpu_data->softirq.pending) +#define local_ksoftirqd_task() (local_cpu_data->softirq.ksoftirqd_task) #define local_irq_count() (local_cpu_data->irq_stat.f.irq_count) #define local_bh_count() (local_cpu_data->irq_stat.f.bh_count) #define local_syscall_count() /* unused on IA-64 */ diff -urN ksoftirqd-ref/include/asm-ia64/processor.h ksoftirqd/include/asm-ia64/processor.h --- ksoftirqd-ref/include/asm-ia64/processor.h Tue May 1 19:35:30 2001 +++ ksoftirqd/include/asm-ia64/processor.h Wed Jun 20 03:18:58 2001 @@ -237,8 +237,8 @@ struct cpuinfo_ia64 { /* irq_stat and softirq should be 64-bit aligned */ struct { - __u32 active; - __u32 mask; + __u64 pending; + struct task_struct * ksoftirqd_task; } softirq; union { struct { diff -urN ksoftirqd-ref/include/asm-ia64/softirq.h ksoftirqd/include/asm-ia64/softirq.h --- ksoftirqd-ref/include/asm-ia64/softirq.h Tue May 1 19:35:31 2001 +++ ksoftirqd/include/asm-ia64/softirq.h Wed Jun 20 02:54:59 2001 @@ -8,7 +8,17 @@ #include #define local_bh_disable() do { local_bh_count()++; barrier(); } while (0) -#define local_bh_enable() do { barrier(); local_bh_count()--; } while (0) +#define __local_bh_enable() do { barrier(); local_bh_count()--; } while (0) + +#define local_bh_enable() \ +do { \ + int cpu; \ + \ + barrier(); \ + cpu = smp_processor_id(); \ + if (!--local_bh_count() && softirq_pending(cpu)) \ + do_softirq(); \ +} while (0) 
#define in_softirq() (local_bh_count() != 0)