From: Linus Torvalds

Verifies that all "atomic_dec_and_test()" users never see a negative
value (which would be bad).

I don't much expect this to trigger, but since a lot of data structures
(and the thread struct pages in particular) are protected with an atomic
counter, this just adds a bit of overflow protection.

In particular, if anything ends up releasing the thread structures too
many times or something like that, this should give a big warning.


 arch/i386/lib/dec_and_lock.c |    2 +-
 include/asm-i386/atomic.h    |   13 ++++++++++---
 2 files changed, 11 insertions(+), 4 deletions(-)

diff -puN arch/i386/lib/dec_and_lock.c~atomic_dec-debug arch/i386/lib/dec_and_lock.c
--- 25/arch/i386/lib/dec_and_lock.c~atomic_dec-debug	2003-11-02 10:09:11.000000000 -0800
+++ 25-akpm/arch/i386/lib/dec_and_lock.c	2003-11-02 10:09:11.000000000 -0800
@@ -19,7 +19,7 @@
 repeat:
 	counter = atomic_read(atomic);
 	newcount = counter-1;
-	if (!newcount)
+	if (newcount <= 0)
 		goto slow_path;

 	asm volatile("lock; cmpxchgl %1,%2"
diff -puN include/asm-i386/atomic.h~atomic_dec-debug include/asm-i386/atomic.h
--- 25/include/asm-i386/atomic.h~atomic_dec-debug	2003-11-02 10:09:11.000000000 -0800
+++ 25-akpm/include/asm-i386/atomic.h	2003-11-02 10:09:11.000000000 -0800
@@ -2,6 +2,8 @@
 #define __ARCH_I386_ATOMIC__

 #include
+#include
+#include

 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -136,12 +138,17 @@ static __inline__ void atomic_dec(atomic
  */
 static __inline__ int atomic_dec_and_test(atomic_t *v)
 {
-	unsigned char c;
+	static int count = 2;
+	unsigned char c, neg;

 	__asm__ __volatile__(
-		LOCK "decl %0; sete %1"
-		:"=m" (v->counter), "=qm" (c)
+		LOCK "decl %0; sete %1; sets %2"
+		:"=m" (v->counter), "=qm" (c), "=qm" (neg)
 		:"m" (v->counter) : "memory");
+	if (count && neg) {
+		count--;
+		WARN_ON(neg);
+	}
 	return c != 0;
 }
_
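
A note on why the dec_and_lock.c hunk is needed: the fast path used to
punt to the slow path only when the new count would be exactly zero, so
an already-negative counter would sail through the cmpxchg loop unseen.
Widening the test to "newcount <= 0" routes the broken case through the
locked slow path, whose atomic_dec_and_test() call then trips the new
warning.  Below is a minimal userspace sketch of the same underflow
check, assuming GCC's __atomic builtins; the names (ref_dec_and_test,
warn_budget) are hypothetical, and this is only the logic of the patch
rebuilt for illustration, not the kernel code:

	/* Hypothetical userspace rebuild of the patch's logic: warn (a
	 * bounded number of times, like the static count above) when a
	 * refcount decrement goes negative. */
	#include <stdio.h>

	static int warn_budget = 2;	/* like the patch: warn at most twice */

	static int ref_dec_and_test(int *counter)
	{
		int newcount = __atomic_sub_fetch(counter, 1, __ATOMIC_SEQ_CST);

		if (newcount < 0 && warn_budget) {
			warn_budget--;
			fprintf(stderr, "refcount went negative: %d\n", newcount);
		}
		return newcount == 0;
	}

	int main(void)
	{
		int refs = 1;

		if (ref_dec_and_test(&refs))	/* last ref: returns true */
			printf("counter hit zero\n");
		ref_dec_and_test(&refs);	/* double release: warns */
		return 0;
	}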
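
As for the asm change itself: "decl" updates both ZF and SF, so adding
"sets" next to the existing "sete" captures "went negative" alongside
"hit zero" from the one locked decrement, with no second atomic
operation.  The sketch below runs the same instruction sequence in
userspace (x86 only); it is an illustration under assumptions, not the
kernel function: the LOCK macro becomes a literal "lock;" prefix and
fprintf stands in for WARN_ON.

	#include <stdio.h>

	/* Same sete/sets trick as the patch, on a plain int. */
	static int dec_and_test_dbg(int *v)
	{
		unsigned char c, neg;

		__asm__ __volatile__(
			"lock; decl %0; sete %1; sets %2"
			: "=m" (*v), "=qm" (c), "=qm" (neg)
			: "m" (*v) : "memory");
		if (neg)			/* stand-in for WARN_ON(neg) */
			fprintf(stderr, "decrement went negative\n");
		return c != 0;
	}

	int main(void)
	{
		int v = 1;

		printf("hit zero: %d\n", dec_and_test_dbg(&v));	/* 1 */
		printf("hit zero: %d\n", dec_and_test_dbg(&v));	/* 0 + warning */
		return 0;
	}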