diff -urpN -X /home/fletch/.diff.exclude 696-config_numasched/arch/i386/Kconfig 697-lockmeter_tytso/arch/i386/Kconfig
--- 696-config_numasched/arch/i386/Kconfig	Wed Aug 13 20:48:52 2003
+++ 697-lockmeter_tytso/arch/i386/Kconfig	Wed Aug 13 20:48:54 2003
@@ -1440,6 +1440,14 @@ config DEBUG_SPINLOCK_SLEEP
 	  If you say Y here, various routines which may sleep will become very
 	  noisy if they are called with a spinlock held.
 
+config LOCKMETER
+	bool "Kernel lock metering"
+	depends on SMP
+	help
+	  Say Y to enable kernel lock metering, which adds overhead to SMP
+	  locks, but allows you to see various statistics using the lockstat
+	  command
+
 config FRAME_POINTER
 	bool
 	default y if X86_REMOTE_DEBUG
diff -urpN -X /home/fletch/.diff.exclude 696-config_numasched/include/linux/spinlock.h 697-lockmeter_tytso/include/linux/spinlock.h
--- 696-config_numasched/include/linux/spinlock.h	Wed Aug 13 20:29:36 2003
+++ 697-lockmeter_tytso/include/linux/spinlock.h	Wed Aug 13 20:48:54 2003
@@ -401,6 +401,27 @@ do { \
 	({preempt_enable(); local_bh_enable(); 0;});})
 
 #ifdef CONFIG_LOCKMETER
+#undef spin_lock
+#undef spin_trylock
+#undef spin_unlock
+#undef spin_lock_irqsave
+#undef spin_lock_irq
+#undef spin_lock_bh
+#undef read_lock
+#undef read_unlock
+#undef write_lock
+#undef write_unlock
+#undef write_trylock
+#undef spin_unlock_bh
+#undef read_lock_irqsave
+#undef read_lock_irq
+#undef read_lock_bh
+#undef read_unlock_bh
+#undef write_lock_irqsave
+#undef write_lock_irq
+#undef write_lock_bh
+#undef write_unlock_bh
+
 #define spin_lock(lock) \
 do { \
 	preempt_disable(); \
@@ -415,6 +436,35 @@ do { \
 	preempt_enable(); \
 } while (0)
 
+#define spin_lock_irqsave(lock, flags) \
+do { \
+	local_irq_save(flags); \
+	preempt_disable(); \
+	_metered_spin_lock(lock); \
+} while (0)
+
+#define spin_lock_irq(lock) \
+do { \
+	local_irq_disable(); \
+	preempt_disable(); \
+	_metered_spin_lock(lock); \
+} while (0)
+
+#define spin_lock_bh(lock) \
+do { \
+	local_bh_disable(); \
+	preempt_disable(); \
+	_metered_spin_lock(lock); \
+} while (0)
+
+#define spin_unlock_bh(lock) \
+do { \
+	_metered_spin_unlock(lock); \
+	preempt_enable(); \
+	local_bh_enable(); \
+} while (0)
+
+
 #define read_lock(lock)		({preempt_disable(); _metered_read_lock(lock);})
 #define read_unlock(lock)	({_metered_read_unlock(lock); preempt_enable();})
 #define write_lock(lock)	({preempt_disable(); _metered_write_lock(lock);})
@@ -425,6 +475,62 @@ do { \
 do { \
 	_metered_spin_unlock(lock); \
 	preempt_enable_no_resched(); \
+} while (0)
+
+#define read_lock_irqsave(lock, flags) \
+do { \
+	local_irq_save(flags); \
+	preempt_disable(); \
+	_metered_read_lock(lock); \
+} while (0)
+
+#define read_lock_irq(lock) \
+do { \
+	local_irq_disable(); \
+	preempt_disable(); \
+	_metered_read_lock(lock); \
+} while (0)
+
+#define read_lock_bh(lock) \
+do { \
+	local_bh_disable(); \
+	preempt_disable(); \
+	_metered_read_lock(lock); \
+} while (0)
+
+#define read_unlock_bh(lock) \
+do { \
+	_metered_read_unlock(lock); \
+	preempt_enable(); \
+	local_bh_enable(); \
+} while (0)
+
+#define write_lock_irqsave(lock, flags) \
+do { \
+	local_irq_save(flags); \
+	preempt_disable(); \
+	_metered_write_lock(lock); \
+} while (0)
+
+#define write_lock_irq(lock) \
+do { \
+	local_irq_disable(); \
+	preempt_disable(); \
+	_metered_write_lock(lock); \
+} while (0)
+
+#define write_lock_bh(lock) \
+do { \
+	local_bh_disable(); \
+	preempt_disable(); \
+	_metered_write_lock(lock); \
+} while (0)
+
+#define write_unlock_bh(lock) \
+do { \
+	_metered_write_unlock(lock); \
+	preempt_enable(); \
+	local_bh_enable(); \
 } while (0)
 
 #endif /* !CONFIG_LOCKMETER */
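Note on the wrappers above: the redefined macros only arrange the preempt/irq/bh bookkeeping around calls to _metered_spin_lock(), _metered_read_lock() and friends (disable interrupts or bottom halves, then preemption, then take the lock; release in the reverse order). The metering routines themselves are defined elsewhere in the lockmeter patch and are not part of this hunk. The following userspace sketch is only an illustration of the general pattern such routines follow -- time the wait and hold phases of each lock and accumulate per-lock counters for a reporting tool such as lockstat. The structure and field names are invented for the example, and pthread spinlocks stand in for kernel spinlocks.

/*
 * Illustrative userspace sketch only -- NOT the lockmeter implementation.
 * Shows the wrapper pattern: measure how long the caller waits for the
 * lock and how long it is held, and accumulate per-lock statistics.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct metered_spinlock {
	pthread_spinlock_t lock;	/* the underlying lock */
	uint64_t wait_ns;		/* total time spent waiting to acquire */
	uint64_t hold_ns;		/* total time the lock was held */
	uint64_t acquisitions;		/* number of successful acquisitions */
	uint64_t acquired_at;		/* timestamp of the current acquisition */
};

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void metered_spin_lock(struct metered_spinlock *m)
{
	uint64_t start = now_ns();

	pthread_spin_lock(&m->lock);		/* acquire the real lock */
	m->acquired_at = now_ns();
	m->wait_ns += m->acquired_at - start;	/* contention (wait) time */
	m->acquisitions++;
}

static void metered_spin_unlock(struct metered_spinlock *m)
{
	m->hold_ns += now_ns() - m->acquired_at; /* hold time */
	pthread_spin_unlock(&m->lock);		/* release the real lock */
}

int main(void)
{
	struct metered_spinlock m = { 0 };

	pthread_spin_init(&m.lock, PTHREAD_PROCESS_PRIVATE);

	metered_spin_lock(&m);
	metered_spin_unlock(&m);

	printf("acquisitions=%llu wait=%lluns hold=%lluns\n",
	       (unsigned long long)m.acquisitions,
	       (unsigned long long)m.wait_ns,
	       (unsigned long long)m.hold_ns);

	pthread_spin_destroy(&m.lock);
	return 0;
}

The real lockmeter code keeps its counters inside the kernel and reports them through the lockstat command mentioned in the Kconfig help text above; the sketch merely shows why the macros must call a _metered_*() variant instead of the raw lock primitive.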