diff -urNp ref/include/linux/interrupt.h net-softirq/include/linux/interrupt.h
--- ref/include/linux/interrupt.h	Mon Jul 22 17:59:10 2002
+++ net-softirq/include/linux/interrupt.h	Sat Sep 7 03:44:21 2002
@@ -77,6 +77,11 @@ extern void softirq_init(void);
 #define __cpu_raise_softirq(cpu, nr) do { softirq_pending(cpu) |= 1UL << (nr); } while (0)
 extern void FASTCALL(cpu_raise_softirq(unsigned int cpu, unsigned int nr));
 extern void FASTCALL(raise_softirq(unsigned int nr));
+#define rerun_softirqs(cpu) \
+do { \
+	if (!(local_irq_count(cpu) | local_bh_count(cpu))) \
+		do_softirq(); \
+} while (0);
 
 
 
@@ -186,12 +191,14 @@ static inline void tasklet_enable(struct
 {
 	smp_mb__before_atomic_dec();
 	atomic_dec(&t->count);
+	rerun_softirqs(smp_processor_id());
 }
 
 static inline void tasklet_hi_enable(struct tasklet_struct *t)
 {
 	smp_mb__before_atomic_dec();
 	atomic_dec(&t->count);
+	rerun_softirqs(smp_processor_id());
 }
 
 extern void tasklet_kill(struct tasklet_struct *t);
diff -urNp ref/include/linux/netdevice.h net-softirq/include/linux/netdevice.h
--- ref/include/linux/netdevice.h	Thu Aug 29 02:13:20 2002
+++ net-softirq/include/linux/netdevice.h	Sat Sep 7 03:44:21 2002
@@ -520,8 +520,9 @@ static inline void __netif_schedule(stru
 		local_irq_save(flags);
 		dev->next_sched = softnet_data[cpu].output_queue;
 		softnet_data[cpu].output_queue = dev;
-		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+		__cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
 		local_irq_restore(flags);
+		rerun_softirqs(cpu);
 	}
 }
 
@@ -570,8 +571,9 @@ static inline void dev_kfree_skb_irq(str
 		local_irq_save(flags);
 		skb->next = softnet_data[cpu].completion_queue;
 		softnet_data[cpu].completion_queue = skb;
-		cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
+		__cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
 		local_irq_restore(flags);
+		rerun_softirqs(cpu);
 	}
 }
 
@@ -738,6 +740,7 @@ static inline void __netif_rx_schedule(s
 		dev->quota = dev->weight;
 	__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
 	local_irq_restore(flags);
+	rerun_softirqs(cpu);
 }
 
 /* Try to reschedule poll. Called by irq handler. */
@@ -763,6 +766,7 @@ static inline int netif_rx_reschedule(st
 		list_add_tail(&dev->poll_list, &softnet_data[cpu].poll_list);
 		__cpu_raise_softirq(cpu, NET_RX_SOFTIRQ);
 		local_irq_restore(flags);
+		rerun_softirqs(cpu);
 		return 1;
 	}
 	return 0;
diff -urNp ref/kernel/softirq.c net-softirq/kernel/softirq.c
--- ref/kernel/softirq.c	Thu Aug 29 02:13:21 2002
+++ net-softirq/kernel/softirq.c	Sat Sep 7 03:44:24 2002
@@ -121,11 +121,11 @@ inline void cpu_raise_softirq(unsigned i
 	 * actually run the softirq once we return from
 	 * the irq or bh.
 	 *
-	 * Otherwise we wake up ksoftirqd to make sure we
-	 * schedule the softirq soon.
+	 * Otherwise run the softirq now. If it will need polling,
+	 * ksoftirqd will kick in automatically.
 	 */
 	if (!(local_irq_count(cpu) | local_bh_count(cpu)))
-		wakeup_softirqd(cpu);
+		rerun_softirqs(cpu);
 }
 
 void raise_softirq(unsigned int nr)
@@ -157,8 +157,9 @@ void __tasklet_schedule(struct tasklet_s
 	local_irq_save(flags);
 	t->next = tasklet_vec[cpu].list;
 	tasklet_vec[cpu].list = t;
-	cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
+	__cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
 	local_irq_restore(flags);
+	rerun_softirqs(cpu);
 }
 
 void __tasklet_hi_schedule(struct tasklet_struct *t)
@@ -169,8 +170,9 @@ void __tasklet_hi_schedule(struct taskle
 	local_irq_save(flags);
 	t->next = tasklet_hi_vec[cpu].list;
 	tasklet_hi_vec[cpu].list = t;
-	cpu_raise_softirq(cpu, HI_SOFTIRQ);
+	__cpu_raise_softirq(cpu, HI_SOFTIRQ);
 	local_irq_restore(flags);
+	rerun_softirqs(cpu);
 }
 
 static void tasklet_action(struct softirq_action *a)
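
Caller-side pattern, for reference. Under the old scheme cpu_raise_softirq()
marked the softirq pending and woke ksoftirqd; with this patch a caller marks
it pending with __cpu_raise_softirq() inside the irq-disabled region, restores
interrupts, and then calls rerun_softirqs(), which runs do_softirq() on the
spot unless the CPU is already in irq or bh context. A minimal sketch of that
pattern follows; example_raise_tx() is a made-up helper for illustration only,
the remaining symbols are the 2.4 APIs the patch touches.

/* Illustrative sketch only -- not part of the patch. */
static void example_raise_tx(void)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);
	/* Just set the pending bit; no wakeup, no immediate run. */
	__cpu_raise_softirq(cpu, NET_TX_SOFTIRQ);
	local_irq_restore(flags);
	/*
	 * With interrupts enabled again, run pending softirqs
	 * synchronously. If we are nested inside an irq or a
	 * bh-atomic section, rerun_softirqs() does nothing and
	 * the softirqs run on irq/bh exit instead.
	 */
	rerun_softirqs(cpu);
}

Note that rerun_softirqs() sits after local_irq_restore(): in the 2.4 tree
do_softirq() re-enables interrupts while running the handlers, so it must not
be entered from a region that relies on interrupts staying off.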