diff -urN 2.4.5pre2/include/asm-alpha/hardirq.h ksoftirqd/include/asm-alpha/hardirq.h
--- 2.4.5pre2/include/asm-alpha/hardirq.h	Sun Apr  1 20:11:14 2001
+++ ksoftirqd/include/asm-alpha/hardirq.h	Tue May 15 22:29:17 2001
@@ -11,6 +11,7 @@
 	unsigned int __local_irq_count;
 	unsigned int __local_bh_count;
 	unsigned int __syscall_count;
+	struct task_struct * __ksoftirqd_task;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
diff -urN 2.4.5pre2/include/asm-i386/hardirq.h ksoftirqd/include/asm-i386/hardirq.h
--- 2.4.5pre2/include/asm-i386/hardirq.h	Fri May 11 04:56:08 2001
+++ ksoftirqd/include/asm-i386/hardirq.h	Tue May 15 22:29:17 2001
@@ -12,6 +12,7 @@
 	unsigned int __local_irq_count;
 	unsigned int __local_bh_count;
 	unsigned int __syscall_count;
+	struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
 	unsigned int __nmi_count;	/* arch dependent */
 } ____cacheline_aligned irq_cpustat_t;
 
diff -urN 2.4.5pre2/include/asm-sparc/hardirq.h ksoftirqd/include/asm-sparc/hardirq.h
--- 2.4.5pre2/include/asm-sparc/hardirq.h	Tue Aug 29 06:20:03 2000
+++ ksoftirqd/include/asm-sparc/hardirq.h	Tue May 15 22:29:29 2001
@@ -23,6 +23,7 @@
 #endif
 	unsigned int __local_bh_count;
 	unsigned int __syscall_count;
+	struct task_struct * __ksoftirqd_task;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
diff -urN 2.4.5pre2/include/asm-sparc64/hardirq.h ksoftirqd/include/asm-sparc64/hardirq.h
--- 2.4.5pre2/include/asm-sparc64/hardirq.h	Wed Aug 23 18:30:13 2000
+++ ksoftirqd/include/asm-sparc64/hardirq.h	Tue May 15 22:29:29 2001
@@ -22,6 +22,7 @@
 #endif
 	unsigned int __local_bh_count;
 	unsigned int __syscall_count;
+	struct task_struct * __ksoftirqd_task;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
diff -urN 2.4.5pre2/include/linux/irq_cpustat.h ksoftirqd/include/linux/irq_cpustat.h
--- 2.4.5pre2/include/linux/irq_cpustat.h	Fri May 11 04:56:08 2001
+++ ksoftirqd/include/linux/irq_cpustat.h	Tue May 15 22:29:17 2001
@@ -31,6 +31,7 @@
 #define local_irq_count(cpu)	__IRQ_STAT((cpu), __local_irq_count)
 #define local_bh_count(cpu)	__IRQ_STAT((cpu), __local_bh_count)
 #define syscall_count(cpu)	__IRQ_STAT((cpu), __syscall_count)
+#define ksoftirqd_task(cpu)	__IRQ_STAT((cpu), __ksoftirqd_task)
 
 	/* arch dependent irq_stat fields */
 #define nmi_count(cpu)		__IRQ_STAT((cpu), __nmi_count)	/* i386, ia64 */
diff -urN 2.4.5pre2/kernel/softirq.c ksoftirqd/kernel/softirq.c
--- 2.4.5pre2/kernel/softirq.c	Tue Jan  2 17:41:22 2001
+++ ksoftirqd/kernel/softirq.c	Tue May 15 22:29:17 2001
@@ -86,6 +86,17 @@
 		if ((active &= mask) != 0)
 			goto retry;
 	}
 
+	if (softirq_active(cpu) & softirq_mask(cpu)) {
+		/*
+		 * we cannot loop indefinitely here to avoid userspace starvation,
+		 * but we also don't want to introduce a worst case 1/HZ latency
+		 * to the pending events, so let the scheduler balance
+		 * the irq load for us.
+		 */
+		struct task_struct * tsk = ksoftirqd_task(cpu);
+		if (tsk && tsk->state != TASK_RUNNING)
+			wake_up_process(tsk);
+	}
 
 	local_bh_enable();
@@ -315,3 +326,61 @@
 		f(data);
 	}
 }
+
+static int ksoftirqd(void * __bind_cpu)
+{
+	int bind_cpu = *(int *) __bind_cpu;
+	int cpu = cpu_logical_map(bind_cpu);
+
+	daemonize();
+	current->nice = 19;
+	sigfillset(&current->blocked);
+
+	/* Migrate to the right CPU */
+	current->cpus_allowed = 1UL << cpu;
+	while (smp_processor_id() != cpu)
+		schedule();
+
+	sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);
+
+	__set_current_state(TASK_INTERRUPTIBLE);
+	mb();
+
+	ksoftirqd_task(cpu) = current;
+
+	for (;;) {
+		if (!(softirq_active(cpu) & softirq_mask(cpu)))
+			schedule();
+
+		__set_current_state(TASK_RUNNING);
+
+		while (softirq_active(cpu) & softirq_mask(cpu)) {
+			do_softirq();
+			if (current->need_resched)
+				schedule();
+		}
+
+		__set_current_state(TASK_INTERRUPTIBLE);
+	}
+}
+
+static __init int spawn_ksoftirqd(void)
+{
+	int cpu;
+
+	for (cpu = 0; cpu < smp_num_cpus; cpu++) {
+		if (kernel_thread(ksoftirqd, (void *) &cpu,
+				  CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
+			printk("spawn_ksoftirqd() failed for cpu %d\n", cpu);
+		else {
+			while (!ksoftirqd_task(cpu_logical_map(cpu))) {
+				current->policy |= SCHED_YIELD;
+				schedule();
+			}
+		}
+	}
+
+	return 0;
+}
+
+__initcall(spawn_ksoftirqd);
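A note for reviewers, not part of the patch itself: the reason ksoftirqd() can assign to ksoftirqd_task(cpu) is that the accessor expands to a struct-member lvalue, not a function call. A minimal sketch of that expansion, assuming the stock 2.4 definitions of irq_stat[] and __IRQ_STAT in <linux/irq_cpustat.h>; only the ksoftirqd_task() line below is added by this patch:

	/* Per-CPU statistics array; the patch adds __ksoftirqd_task to its
	 * element type irq_cpustat_t (see the hardirq.h hunks above). */
	extern irq_cpustat_t irq_stat[];

	#ifdef CONFIG_SMP
	#define __IRQ_STAT(cpu, member)	(irq_stat[cpu].member)
	#else
	/* UP: evaluate cpu for side effects, always use slot 0 */
	#define __IRQ_STAT(cpu, member)	((void)(cpu), irq_stat[0].member)
	#endif

	/* The accessor added by this patch ... */
	#define ksoftirqd_task(cpu)	__IRQ_STAT((cpu), __ksoftirqd_task)

	/* ... so on SMP the registration in ksoftirqd() and the wakeup
	 * check in do_softirq() are plain accesses to the CPU-local
	 * cacheline:
	 *
	 *	ksoftirqd_task(cpu) = current;
	 *	struct task_struct * tsk = ksoftirqd_task(cpu);
	 */

One design note on spawn_ksoftirqd(): each child is handed the address of the parent's loop variable, so the parent must not advance cpu until the child has copied it. The SCHED_YIELD wait for ksoftirqd_task(cpu_logical_map(cpu)) to become non-NULL provides that ordering, because the child only publishes itself after it has read bind_cpu and migrated to its target CPU.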