diff -urNp --exclude CVS --exclude BitKeeper xx-ref/kernel/softirq.c xx/kernel/softirq.c
--- xx-ref/kernel/softirq.c	2002-11-29 02:23:18.000000000 +0100
+++ xx/kernel/softirq.c	2003-05-27 04:56:57.000000000 +0200
@@ -58,12 +58,32 @@ static inline void wakeup_softirqd(unsig
 	wake_up_process(tsk);
 }
 
+static inline int softirqd_is_waken(unsigned cpu)
+{
+	struct task_struct * tsk = ksoftirqd_task(cpu);
+
+	return tsk && tsk->state == TASK_RUNNING;
+}
+
+/*
+ * The higher this number, the less likely ksoftirqd will be woken by
+ * a short irq flood peak, but the more unfairness the softirq load
+ * will generate against the regular scheduler tasks.
+ * Each loop will allow one more block to pass through to the
+ * higher layer. If further blocks keep arriving we give up and
+ * offload the work in a scheduler friendly way. After ksoftirqd
+ * is started we will stop wasting time here, so under attack
+ * we're still completely fair.
+ */
+#define MAX_SOFTIRQ_LOOPS 8
+
 asmlinkage void do_softirq()
 {
 	int cpu = smp_processor_id();
 	__u32 pending;
 	unsigned long flags;
 	__u32 mask;
+	int loops;
 
 	if (in_interrupt())
 		return;
@@ -72,6 +92,7 @@ asmlinkage void do_softirq()
 
 	pending = softirq_pending(cpu);
 
+	loops = 0;
 	if (pending) {
 		struct softirq_action *h;
 
@@ -101,8 +122,16 @@ restart:
 		}
 		__local_bh_enable();
 
-		if (pending)
-			wakeup_softirqd(cpu);
+		if (!softirqd_is_waken(cpu)) {
+			if (unlikely(++loops >= MAX_SOFTIRQ_LOOPS)) {
+				if (pending)
+					wakeup_softirqd(cpu);
+			} else {
+				mask = ~pending;
+				local_bh_disable();
+				goto restart;
+			}
+		}
 	}
 
 	local_irq_restore(flags);
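
For anyone reading the patch without the rest of kernel/softirq.c at hand, here is a minimal user-space sketch of the policy the hunks implement: keep retrying the softirq loop inline while the load looks like a short peak, and only wake ksoftirqd once MAX_SOFTIRQ_LOOPS passes have gone by with the daemon still asleep. The helper names below (process_pending, more_work_arrived, wake_worker, worker_awake) are hypothetical stand-ins for the kernel's handler loop, softirq_pending(), wakeup_softirqd() and softirqd_is_waken(); nothing in the sketch is kernel code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_SOFTIRQ_LOOPS 8			/* same bound as in the patch */

static bool worker_awake;			/* stands in for softirqd_is_waken() */

static void process_pending(void)		/* stands in for running the softirq handlers */
{
}

static bool more_work_arrived(int round)	/* stands in for re-reading softirq_pending() */
{
	return round < 12;			/* pretend new work keeps showing up */
}

static void wake_worker(void)			/* stands in for wakeup_softirqd() */
{
	worker_awake = true;
}

static void handle_burst(void)
{
	int loops = 0;

	for (;;) {
		process_pending();

		if (!more_work_arrived(loops))
			break;			/* the burst died down, nothing to offload */

		if (worker_awake)
			break;			/* the worker already owns the backlog */

		if (++loops >= MAX_SOFTIRQ_LOOPS) {
			wake_worker();		/* sustained load: hand off to the scheduler-friendly thread */
			break;
		}
		/* short peak: loop once more inline instead of waking the worker */
	}

	printf("looped %d time(s) inline, worker %s\n",
	       loops, worker_awake ? "woken" : "not needed");
}

int main(void)
{
	handle_burst();
	return 0;
}

With the fake load above the sketch gives up after 8 inline passes and wakes the worker, which is exactly the trade-off the comment in the patch describes: short bursts never touch ksoftirqd, sustained floods get pushed to it so the regular scheduler tasks stay fairly treated.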