From: "David S. Miller" Stick this into -mm and see if people bark. :-) It's from Ingo Molnar, softirq kicks in way too early. We can probably sysctl this thing, that way everyone gets what they want probably... 25-akpm/kernel/softirq.c | 18 +++++++++++++----- 1 files changed, 13 insertions(+), 5 deletions(-) diff -puN kernel/softirq.c~delay-ksoftirqd-fallback kernel/softirq.c --- 25/kernel/softirq.c~delay-ksoftirqd-fallback Mon Jun 9 16:57:05 2003 +++ 25-akpm/kernel/softirq.c Mon Jun 9 16:57:05 2003 @@ -55,11 +55,22 @@ static inline void wakeup_softirqd(unsig wake_up_process(tsk); } +/* + * We restart softirq processing MAX_SOFTIRQ_RESTART times, + * and we fall back to softirqd after that. + * + * This number has been established via experimentation. + * The two things to balance are latency and fairness - + * we want to handle softirqs as soon as possible, but they + * should not be able to lock up the box. + */ +#define MAX_SOFTIRQ_RESTART 10 + asmlinkage void do_softirq(void) { + int max_restart = MAX_SOFTIRQ_RESTART; __u32 pending; unsigned long flags; - __u32 mask; if (in_interrupt()) return; @@ -71,7 +82,6 @@ asmlinkage void do_softirq(void) if (pending) { struct softirq_action *h; - mask = ~pending; local_bh_disable(); restart: /* Reset the pending bitmask before enabling irqs */ @@ -91,10 +101,8 @@ restart: local_irq_disable(); pending = local_softirq_pending(); - if (pending & mask) { - mask &= ~pending; + if (pending && --max_restart) goto restart; - } if (pending) wakeup_softirqd(smp_processor_id()); __local_bh_enable(); _