From: Zwane Mwaikambo

This is a follow-up to the previous patches for ia64 and i386; it allows
x86_64 to re-enable interrupts while spinning on a contended lock,
depending on the previous interrupt enable status.  It has been runtime
and compile tested on UP and 2x SMP Linux-tiny/x86_64.

Signed-off-by: Zwane Mwaikambo
Signed-off-by: Andrew Morton
---

 25-akpm/include/asm-x86_64/spinlock.h |   32 +++++++++++++++++++++++++++++++-
 1 files changed, 31 insertions(+), 1 deletion(-)

diff -puN include/asm-x86_64/spinlock.h~allow-x86_64-to-reenable-interrupts-on-contention include/asm-x86_64/spinlock.h
--- 25/include/asm-x86_64/spinlock.h~allow-x86_64-to-reenable-interrupts-on-contention	2004-09-21 01:53:39.131974400 -0700
+++ 25-akpm/include/asm-x86_64/spinlock.h	2004-09-21 01:53:39.134973944 -0700
@@ -44,7 +44,6 @@ typedef struct {
 
 #define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
 #define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 
 #define spin_lock_string \
 	"\n1:\t" \
@@ -58,6 +57,23 @@ typedef struct {
 	"jmp 1b\n" \
 	LOCK_SECTION_END
 
+#define spin_lock_string_flags \
+	"\n1:\t" \
+	"lock ; decb %0\n\t" \
+	"js 2f\n\t" \
+	LOCK_SECTION_START("") \
+	"2:\t" \
+	"test $0x200, %1\n\t" \
+	"jz 3f\n\t" \
+	"sti\n\t" \
+	"3:\t" \
+	"rep;nop\n\t" \
+	"cmpb $0, %0\n\t" \
+	"jle 3b\n\t" \
+	"cli\n\t" \
+	"jmp 1b\n" \
+	LOCK_SECTION_END
+
 /*
  * This works. Despite all the confusion.
  * (except on PPro SMP or if we are using OOSTORE)
@@ -126,6 +142,20 @@ static inline void _raw_spin_lock(spinlo
 		:"=m" (lock->lock) : : "memory");
 }
 
+static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+	__label__ here;
+here:
+	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
+		printk("eip: %p\n", &&here);
+		BUG();
+	}
+#endif
+	__asm__ __volatile__(spin_lock_string_flags
+		:"=m" (lock->lock) : "r" (flags) : "memory");
+}
+
 /*
  * Read-write spinlocks, allowing multiple readers
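
For readers who would rather not decode the inline assembly, below is a
minimal C sketch of the algorithm that spin_lock_string_flags encodes.
It is illustrative only, not the kernel's implementation: the atomic
decrement is written with a GCC __atomic builtin instead of lock ; decb,
the names sketch_spin_lock_flags and X86_EFLAGS_IF_BIT are made up for
this example, and local_irq_enable()/local_irq_disable()/cpu_relax()
stand in for the literal sti/cli/rep;nop instructions.  The 0x200 mask
tested by the assembly is bit 9 of the saved EFLAGS, the IF (interrupt
enable) flag, i.e. "interrupts were on when the caller did
spin_lock_irqsave()".

/*
 * Illustrative sketch only -- mirrors spin_lock_string_flags above.
 * All names here are invented for the example, not kernel API.
 */
#define X86_EFLAGS_IF_BIT	0x200	/* bit 9 of EFLAGS: interrupts enabled */

static inline void sketch_spin_lock_flags(volatile signed char *lock,
					  unsigned long flags)
{
	for (;;) {
		/* "lock ; decb %0": atomically decrement the lock byte.
		 * A non-negative result (1 -> 0) means we now own the lock;
		 * a negative result ("js 2f") means it was already held. */
		if (__atomic_sub_fetch(lock, 1, __ATOMIC_ACQUIRE) >= 0)
			return;

		/* Contended slow path: if the caller had interrupts enabled
		 * before taking the lock, re-enable them while we busy-wait
		 * ("test $0x200, %1; jz 3f; sti"). */
		if (flags & X86_EFLAGS_IF_BIT)
			local_irq_enable();		/* sti */

		/* Spin until the lock looks free ("rep;nop" + "cmpb"/"jle"). */
		while (*lock <= 0)
			cpu_relax();			/* rep;nop */

		/* Interrupts must be off again before retrying the atomic
		 * decrement, since the caller asked for an irq-off lock. */
		local_irq_disable();			/* cli */
	}
}

The win over the old behaviour (the deleted _raw_spin_lock_flags alias,
which simply called _raw_spin_lock with interrupts still disabled) is
lower interrupt latency: a CPU stuck waiting for a contended lock can
still service interrupts, as the earlier i386 and ia64 patches already
allowed.  The __label__ here / &&here construct in the
CONFIG_DEBUG_SPINLOCK block is GCC's labels-as-values extension, used to
print the approximate code address when the magic check fails.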