--- 2.2.18pre21aa2/arch/alpha/kernel/irq.c.~1~	Tue Jun 13 03:48:12 2000
+++ 2.2.18pre21aa2/arch/alpha/kernel/irq.c	Mon Nov 20 03:52:29 2000
@@ -650,6 +650,7 @@
 
 void synchronize_bh(void)
 {
+	mb();
 	if (atomic_read(&global_bh_count) && !in_interrupt())
 		wait_on_bh();
 }
--- 2.2.18pre21aa2/arch/alpha/kernel/irq.c.~1~	Mon Nov 20 03:52:29 2000
+++ 2.2.18pre21aa2/arch/alpha/kernel/irq.c	Thu Nov 23 03:52:47 2000
@@ -385,6 +385,9 @@
 
 /* This protects BH software state (masks, things like that). */
 atomic_t global_bh_lock = ATOMIC_INIT(0);
 atomic_t global_bh_count = ATOMIC_INIT(0);
+#ifdef CONFIG_SMP
+spinlock_t alpha_bh_lock = SPIN_LOCK_UNLOCKED;
+#endif
 
 static void *previous_irqholder = NULL;
--- 2.2.18pre21aa2/include/asm-alpha/softirq.h.~1~	Thu Nov 23 03:26:00 2000
+++ 2.2.18pre21aa2/include/asm-alpha/softirq.h	Thu Nov 23 05:55:20 2000
@@ -9,6 +9,7 @@
 extern unsigned long local_bh_count;
 #else
 #define local_bh_count	(cpu_data[smp_processor_id()].bh_count)
+extern spinlock_t alpha_bh_lock;
 #endif
 
 #define get_active_bhs()	(bh_mask & bh_active)
@@ -28,24 +29,6 @@
 	:"Ir" (x), "m" (bh_active));
 }
 
-extern inline void init_bh(int nr, void (*routine)(void))
-{
-	bh_base[nr] = routine;
-	atomic_set(&bh_mask_count[nr], 0);
-	bh_mask |= 1 << nr;
-}
-
-extern inline void remove_bh(int nr)
-{
-	bh_base[nr] = NULL;
-	bh_mask &= ~(1 << nr);
-}
-
-extern inline void mark_bh(int nr)
-{
-	set_bit(nr, &bh_active);
-}
-
 #ifdef __SMP__
 
 /*
@@ -113,21 +96,58 @@
 
 #endif /* SMP */
 
+extern inline void init_bh(int nr, void (*routine)(void))
+{
+	unsigned long flags;
+
+	bh_base[nr] = routine;
+	atomic_set(&bh_mask_count[nr], 0);
+
+	spin_lock_irqsave(&alpha_bh_lock, flags);
+	bh_mask |= 1 << nr;
+	spin_unlock_irqrestore(&alpha_bh_lock, flags);
+}
+
+extern inline void remove_bh(int nr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_bh_lock, flags);
+	bh_mask &= ~(1 << nr);
+	spin_unlock_irqrestore(&alpha_bh_lock, flags);
+
+	synchronize_bh();
+	bh_base[nr] = NULL;
+}
+
+extern inline void mark_bh(int nr)
+{
+	set_bit(nr, &bh_active);
+}
+
 /*
  * These use a mask count to correctly handle
  * nested disable/enable calls
  */
 extern inline void disable_bh(int nr)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_bh_lock, flags);
 	bh_mask &= ~(1 << nr);
 	atomic_inc(&bh_mask_count[nr]);
+	spin_unlock_irqrestore(&alpha_bh_lock, flags);
 	synchronize_bh();
 }
 
 extern inline void enable_bh(int nr)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&alpha_bh_lock, flags);
 	if (atomic_dec_and_test(&bh_mask_count[nr]))
 		bh_mask |= 1 << nr;
+	spin_unlock_irqrestore(&alpha_bh_lock, flags);
 }
 
 #endif /* _ALPHA_SOFTIRQ_H */