Binary files rwsemref/ID and rwsem/ID differ diff -urN rwsemref/arch/alpha/config.in rwsem/arch/alpha/config.in --- rwsemref/arch/alpha/config.in Sat Apr 21 20:04:05 2001 +++ rwsem/arch/alpha/config.in Sun Apr 22 23:22:22 2001 @@ -6,7 +6,6 @@ define_bool CONFIG_ALPHA y define_bool CONFIG_UID16 n define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_name "Kernel configuration of Linux for Alpha machines" diff -urN rwsemref/arch/alpha/kernel/alpha_ksyms.c rwsem/arch/alpha/kernel/alpha_ksyms.c --- rwsemref/arch/alpha/kernel/alpha_ksyms.c Sat Apr 21 20:04:05 2001 +++ rwsem/arch/alpha/kernel/alpha_ksyms.c Sun Apr 22 23:22:22 2001 @@ -173,10 +173,6 @@ EXPORT_SYMBOL(down_interruptible); EXPORT_SYMBOL(down_trylock); EXPORT_SYMBOL(up); -EXPORT_SYMBOL(down_read); -EXPORT_SYMBOL(down_write); -EXPORT_SYMBOL(up_read); -EXPORT_SYMBOL(up_write); /* * SMP-specific symbols. diff -urN rwsemref/arch/arm/config.in rwsem/arch/arm/config.in --- rwsemref/arch/arm/config.in Sat Apr 21 20:04:05 2001 +++ rwsem/arch/arm/config.in Sun Apr 22 23:22:22 2001 @@ -10,8 +10,6 @@ define_bool CONFIG_MCA n define_bool CONFIG_UID16 y define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n - mainmenu_option next_comment comment 'Code maturity level options' diff -urN rwsemref/arch/cris/config.in rwsem/arch/cris/config.in --- rwsemref/arch/cris/config.in Sat Apr 21 20:04:05 2001 +++ rwsem/arch/cris/config.in Sun Apr 22 23:22:22 2001 @@ -6,7 +6,6 @@ define_bool CONFIG_UID16 y define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_option next_comment comment 'Code maturity level options' diff -urN rwsemref/arch/i386/config.in rwsem/arch/i386/config.in --- rwsemref/arch/i386/config.in Sat Apr 21 20:04:05 2001 +++ rwsem/arch/i386/config.in Tue Apr 24 04:49:59 2001 @@ -51,7 +51,6 @@ define_bool CONFIG_X86_XADD n define_int CONFIG_X86_L1_CACHE_SHIFT 4 define_bool 
CONFIG_RWSEM_GENERIC_SPINLOCK y - define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n else define_bool CONFIG_X86_WP_WORKS_OK y define_bool CONFIG_X86_INVLPG y @@ -59,8 +58,7 @@ define_bool CONFIG_X86_XADD y define_bool CONFIG_X86_BSWAP y define_bool CONFIG_X86_POPAD_OK y - define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n - define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y + define_bool CONFIG_RWSEM_XCHGADD y fi if [ "$CONFIG_M486" = "y" ]; then define_int CONFIG_X86_L1_CACHE_SHIFT 4 diff -urN rwsemref/arch/ia64/config.in rwsem/arch/ia64/config.in --- rwsemref/arch/ia64/config.in Sat Apr 21 20:04:06 2001 +++ rwsem/arch/ia64/config.in Sun Apr 22 23:22:22 2001 @@ -24,7 +24,6 @@ define_bool CONFIG_MCA n define_bool CONFIG_SBUS n define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n choice 'IA-64 processor type' \ "Itanium CONFIG_ITANIUM \ diff -urN rwsemref/arch/m68k/config.in rwsem/arch/m68k/config.in --- rwsemref/arch/m68k/config.in Sat Apr 21 20:04:08 2001 +++ rwsem/arch/m68k/config.in Sun Apr 22 23:22:22 2001 @@ -5,7 +5,6 @@ define_bool CONFIG_UID16 y define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_name "Linux/68k Kernel Configuration" diff -urN rwsemref/arch/mips/config.in rwsem/arch/mips/config.in --- rwsemref/arch/mips/config.in Sat Apr 21 20:04:09 2001 +++ rwsem/arch/mips/config.in Sun Apr 22 23:22:22 2001 @@ -29,7 +29,6 @@ bool 'Support for SNI RM200 PCI' CONFIG_SNI_RM200_PCI define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n # # Select some configuration options automatically for certain systems. 
diff -urN rwsemref/arch/mips64/config.in rwsem/arch/mips64/config.in --- rwsemref/arch/mips64/config.in Sat Apr 21 20:04:09 2001 +++ rwsem/arch/mips64/config.in Sun Apr 22 23:22:22 2001 @@ -26,7 +26,6 @@ endmenu define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n # # Select some configuration options automatically based on user selections diff -urN rwsemref/arch/parisc/config.in rwsem/arch/parisc/config.in --- rwsemref/arch/parisc/config.in Sat Apr 21 20:04:09 2001 +++ rwsem/arch/parisc/config.in Sun Apr 22 23:22:22 2001 @@ -8,7 +8,6 @@ define_bool CONFIG_PARISC y define_bool CONFIG_UID16 n define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_option next_comment comment 'Code maturity level options' diff -urN rwsemref/arch/ppc/config.in rwsem/arch/ppc/config.in --- rwsemref/arch/ppc/config.in Sat Apr 21 20:04:09 2001 +++ rwsem/arch/ppc/config.in Sun Apr 22 23:22:22 2001 @@ -4,7 +4,6 @@ # define_bool CONFIG_UID16 n define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_name "Linux/PowerPC Kernel Configuration" diff -urN rwsemref/arch/ppc/kernel/ppc_ksyms.c rwsem/arch/ppc/kernel/ppc_ksyms.c --- rwsemref/arch/ppc/kernel/ppc_ksyms.c Sun Apr 1 01:17:11 2001 +++ rwsem/arch/ppc/kernel/ppc_ksyms.c Sun Apr 22 23:22:22 2001 @@ -332,8 +332,6 @@ EXPORT_SYMBOL(__down); EXPORT_SYMBOL(__down_interruptible); EXPORT_SYMBOL(__down_trylock); -EXPORT_SYMBOL(down_read_failed); -EXPORT_SYMBOL(down_write_failed); #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) extern void (*debugger)(struct pt_regs *regs); diff -urN rwsemref/arch/s390/config.in rwsem/arch/s390/config.in --- rwsemref/arch/s390/config.in Sat Apr 21 20:04:09 2001 +++ rwsem/arch/s390/config.in Sun Apr 22 23:22:22 2001 @@ -8,7 +8,6 @@ define_bool CONFIG_MCA n define_bool CONFIG_UID16 y define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_name "Linux 
Kernel Configuration" define_bool CONFIG_ARCH_S390 y diff -urN rwsemref/arch/s390x/config.in rwsem/arch/s390x/config.in --- rwsemref/arch/s390x/config.in Sat Apr 21 20:04:09 2001 +++ rwsem/arch/s390x/config.in Sun Apr 22 23:22:22 2001 @@ -7,7 +7,6 @@ define_bool CONFIG_EISA n define_bool CONFIG_MCA n define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_name "Linux Kernel Configuration" define_bool CONFIG_ARCH_S390 y diff -urN rwsemref/arch/sh/config.in rwsem/arch/sh/config.in --- rwsemref/arch/sh/config.in Sat Apr 21 20:04:09 2001 +++ rwsem/arch/sh/config.in Sun Apr 22 23:22:22 2001 @@ -8,7 +8,6 @@ define_bool CONFIG_UID16 y define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_option next_comment comment 'Code maturity level options' diff -urN rwsemref/arch/sparc/config.in rwsem/arch/sparc/config.in --- rwsemref/arch/sparc/config.in Sat Apr 21 20:04:10 2001 +++ rwsem/arch/sparc/config.in Sun Apr 22 23:22:22 2001 @@ -49,7 +49,6 @@ define_bool CONFIG_SUN_AUXIO y define_bool CONFIG_SUN_IO y define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n bool 'Support for SUN4 machines (disables SUN4[CDM] support)' CONFIG_SUN4 if [ "$CONFIG_SUN4" != "y" ]; then diff -urN rwsemref/arch/sparc64/config.in rwsem/arch/sparc64/config.in --- rwsemref/arch/sparc64/config.in Sat Apr 21 20:04:10 2001 +++ rwsem/arch/sparc64/config.in Tue Apr 24 04:51:06 2001 @@ -33,8 +33,8 @@ # Global things across all Sun machines. 
define_bool CONFIG_HAVE_DEC_LOCK y -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y +# sorry I broke it again +define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y define_bool CONFIG_ISA n define_bool CONFIG_EISA n define_bool CONFIG_MCA n diff -urN rwsemref/include/asm-alpha/compiler.h rwsem/include/asm-alpha/compiler.h --- rwsemref/include/asm-alpha/compiler.h Tue Nov 28 18:40:01 2000 +++ rwsem/include/asm-alpha/compiler.h Sun Apr 22 23:22:22 2001 @@ -72,13 +72,4 @@ __asm__("stw %1,%0" : "=m"(mem) : "r"(val)) #endif -/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented - a mechanism by which the user can annotate likely branch directions and - expect the blocks to be reordered appropriately. Define __builtin_expect - to nothing for earlier compilers. */ - -#if __GNUC__ == 2 && __GNUC_MINOR__ < 96 -#define __builtin_expect(x, expected_value) (x) -#endif - #endif /* __ALPHA_COMPILER_H */ diff -urN rwsemref/include/asm-alpha/rwsem_xchgadd.h rwsem/include/asm-alpha/rwsem_xchgadd.h --- rwsemref/include/asm-alpha/rwsem_xchgadd.h Thu Jan 1 01:00:00 1970 +++ rwsem/include/asm-alpha/rwsem_xchgadd.h Tue Apr 24 04:19:10 2001 @@ -0,0 +1,27 @@ +#ifndef _ALPHA_RWSEM_XCHGADD_H +#define _ALPHA_RWSEM_XCHGADD_H + +/* WRITEME */ + +static inline void __down_read(struct rw_semaphore *sem) +{ +} + +static inline void __down_write(struct rw_semaphore *sem) +{ +} + +static inline void __up_read(struct rw_semaphore *sem) +{ +} + +static inline void __up_write(struct rw_semaphore *sem) +{ +} + +static inline long rwsem_xchgadd(long value, long * count) +{ + return value; +} + +#endif diff -urN rwsemref/include/asm-alpha/semaphore.h rwsem/include/asm-alpha/semaphore.h --- rwsemref/include/asm-alpha/semaphore.h Sat Apr 21 20:04:21 2001 +++ rwsem/include/asm-alpha/semaphore.h Tue Apr 24 04:21:54 2001 @@ -11,7 +11,7 @@ #include #include #include -#include /* __builtin_expect */ +#include /* __builtin_expect */ #include #include diff 
-urN rwsemref/include/asm-i386/rwsem.h rwsem/include/asm-i386/rwsem.h --- rwsemref/include/asm-i386/rwsem.h Tue Apr 24 02:20:46 2001 +++ rwsem/include/asm-i386/rwsem.h Thu Jan 1 01:00:00 1970 @@ -1,225 +0,0 @@ -/* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+ - * - * Written by David Howells (dhowells@redhat.com). - * - * Derived from asm-i386/semaphore.h - */ - -#ifndef _I386_RWSEM_H -#define _I386_RWSEM_H - -#ifndef _LINUX_RWSEM_H -#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead -#endif - -#ifdef __KERNEL__ - -#include -#include - -struct rwsem_waiter; - -/* - * the semaphore definition - */ -struct rw_semaphore { - signed long count; -#define RWSEM_UNLOCKED_VALUE 0x00000000 -#define RWSEM_ACTIVE_BIAS 0x00000001 -#define RWSEM_ACTIVE_MASK 0x0000ffff -#define RWSEM_WAITING_BIAS (-0x00010000) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t wait_lock; - struct rwsem_waiter *wait_front; - struct rwsem_waiter **wait_back; -#if RWSEM_DEBUG - int debug; -#endif -}; - -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - -#define __RWSEM_INITIALIZER(name) \ -{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, NULL, &(name).wait_front \ - __RWSEM_DEBUG_INIT } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->wait_lock); - sem->wait_front = NULL; - sem->wait_back = &sem->wait_front; -#if RWSEM_DEBUG - sem->debug = 0; -#endif -} - -/* - * lock for reading - */ -static inline void __down_read(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning down_read\n\t" -LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ - " js 2f\n\t" /* jump if we weren't granted the lock */ - "1:\n\t" - 
".section .text.lock,\"ax\"\n" - "2:\n\t" - " pushl %%ecx\n\t" - " pushl %%edx\n\t" - " call rwsem_down_read_failed\n\t" - " popl %%edx\n\t" - " popl %%ecx\n\t" - " jmp 1b\n" - ".previous" - "# ending down_read\n\t" - : "=m"(sem->count) - : "a"(sem), "m"(sem->count) - : "memory"); -} - -/* - * lock for writing - */ -static inline void __down_write(struct rw_semaphore *sem) -{ - int tmp; - - tmp = RWSEM_ACTIVE_WRITE_BIAS; - __asm__ __volatile__( - "# beginning down_write\n\t" -LOCK_PREFIX " xadd %0,(%%eax)\n\t" /* subtract 0x0000ffff, returns the old value */ - " testl %0,%0\n\t" /* was the count 0 before? */ - " jnz 2f\n\t" /* jump if we weren't granted the lock */ - "1:\n\t" - ".section .text.lock,\"ax\"\n" - "2:\n\t" - " pushl %%ecx\n\t" - " call rwsem_down_write_failed\n\t" - " popl %%ecx\n\t" - " jmp 1b\n" - ".previous\n" - "# ending down_write" - : "+d"(tmp), "=m"(sem->count) - : "a"(sem), "m"(sem->count) - : "memory"); -} - -/* - * unlock after reading - */ -static inline void __up_read(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning __up_read\n\t" -LOCK_PREFIX " xadd %%eax,(%%edx)\n\t" /* subtracts 1, returns the old value */ - " js 2f\n\t" /* jump if the lock is being waited upon */ - "1:\n\t" - ".section .text.lock,\"ax\"\n" - "2:\n\t" - " decl %%eax\n\t" /* xadd gave us the old count */ - " testl %3,%%eax\n\t" /* do nothing if still outstanding active readers */ - " jnz 1b\n\t" - " call rwsem_up_read_wake\n\t" - " jmp 1b\n" - ".previous\n" - "# ending __up_read\n" - : "=m"(sem->count) - : "d"(sem), "a"(-RWSEM_ACTIVE_READ_BIAS), "i"(RWSEM_ACTIVE_MASK), "m"(sem->count) - : "memory", "ecx"); -} - -/* - * unlock after writing - */ -static inline void __up_write(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning __up_write\n\t" -LOCK_PREFIX " cmpxchgl %%ecx,(%%edx)\n\t" /* tries to transition 0xffff0001 -> 0x00000000 */ - " jnz 2f\n\t" /* jump if the lock is being waited upon */ - "1:\n\t" - ".section .text.lock,\"ax\"\n" - 
"2:\n\t" - " call rwsem_up_write_wake\n\t" - " jmp 1b\n" - ".previous\n" - "# ending __up_write\n" - : "=m"(sem->count) - : "d"(sem), "a"(RWSEM_ACTIVE_WRITE_BIAS), "c"(0), "m"(sem->count) - : "memory"); -} - -/* - * implement atomic add functionality - */ -static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) -{ - __asm__ __volatile__( -LOCK_PREFIX "addl %1,%0" - :"=m"(sem->count) - :"ir"(delta), "m"(sem->count)); -} - -/* - * implement exchange and add functionality - */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) -{ - int tmp = delta; - - __asm__ __volatile__( -LOCK_PREFIX "xadd %0,(%2)" - : "+r"(tmp), "=m"(sem->count) - : "r"(sem), "m"(sem->count) - : "memory"); - - return tmp+delta; -} - -/* - * implement compare and exchange functionality on the rw-semaphore count LSW - */ -static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new) -{ - __u16 tmp = old; - - __asm__ __volatile__( -LOCK_PREFIX "cmpxchgw %w2,%3" - : "=a"(tmp), "=m"(sem->count) - : "r"(new), "m1"(sem->count), "a"(tmp) - : "memory"); - - return tmp; -} - -/* - * implement compare and exchange functionality on the rw-semaphore count - */ -static inline signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new) -{ - signed long tmp = old; - - __asm__ __volatile__( -LOCK_PREFIX "cmpxchgl %2,%3" - : "=a"(tmp), "=m"(sem->count) - : "r"(new), "m1"(sem->count), "a"(tmp) - : "memory"); - - return tmp; -} - -#endif /* __KERNEL__ */ -#endif /* _I386_RWSEM_H */ diff -urN rwsemref/include/asm-i386/rwsem_xchgadd.h rwsem/include/asm-i386/rwsem_xchgadd.h --- rwsemref/include/asm-i386/rwsem_xchgadd.h Thu Jan 1 01:00:00 1970 +++ rwsem/include/asm-i386/rwsem_xchgadd.h Tue Apr 24 04:15:18 2001 @@ -0,0 +1,81 @@ +#ifndef _X86_RWSEM_XCHGADD_H +#define _X86_RWSEM_XCHGADD_H + +static inline void __down_read(struct rw_semaphore *sem) +{ + __asm__ __volatile__(LOCK "incl %0\n\t" + "js 2f\n" + "1:\n" + ".section 
.text.lock,\"ax\"\n" + "2:\t" + "movl %2, %%edx\n\t" + "call down_failed\n\t" + "jmp 1b\n" + ".previous" + : "+m" (sem->count) + : "a" (sem), "i" (RWSEM_READ_BLOCKING_BIAS) + : "memory", "cc"); +} + +static inline void __down_write(struct rw_semaphore *sem) +{ + long count; + + count = RWSEM_WRITE_BIAS + RWSEM_READ_BIAS; + __asm__ __volatile(LOCK "xaddl %0, %1\n\t" + "testl %0,%0\n\t" + "jnz 2f\n" + "1:\n" + ".section .text.lock,\"ax\"\n" + "2:\t" + "movl %3, %%edx\n\t" + "call down_failed\n\t" + "jmp 1b\n" + ".previous" + : "+r" (count), "+m" (sem->count) + : "a" (sem), "i" (RWSEM_WRITE_BLOCKING_BIAS) + : "memory", "cc"); +} + +static inline void __up_read(struct rw_semaphore *sem) +{ + long count; + + count = -RWSEM_READ_BIAS; + __asm__ __volatile__(LOCK "xaddl %0, %1\n\t" + "js 2f\n" + "1:\n" + ".section .text.lock,\"ax\"\n" + "2:\t" + "cmpw $1, %w0\n\t" + "jnz 1b\n\t" + "call rwsem_wake\n\t" + "jmp 1b\n" + ".previous" + : "+r" (count), "+m" (sem->count) + : "a" (sem) + : "memory", "cc"); +} +static inline void __up_write(struct rw_semaphore *sem) +{ + __asm__ __volatile__(LOCK "subl %2, %0\n\t" + "js 2f\n" + "1:\n" + ".section .text.lock,\"ax\"\n" + "2:\t" + "call rwsem_wake\n\t" + "jmp 1b\n" + ".previous" + : "+m" (sem->count) + : "a" (sem), "i" (RWSEM_READ_BIAS + RWSEM_WRITE_BIAS) + : "memory", "cc"); +} + +static inline long rwsem_xchgadd(long value, long * count) +{ + __asm__ __volatile__(LOCK "xaddl %0,%1" + : "+r" (value), "+m" (*count)); + return value; +} + +#endif diff -urN rwsemref/include/asm-sparc64/rwsem.h rwsem/include/asm-sparc64/rwsem.h --- rwsemref/include/asm-sparc64/rwsem.h Tue Apr 24 02:20:46 2001 +++ rwsem/include/asm-sparc64/rwsem.h Tue Apr 24 04:27:29 2001 @@ -2,7 +2,7 @@ * rwsem.h: R/W semaphores implemented using CAS * * Written by David S. Miller (davem@redhat.com), 2001. 
- * Derived from asm-i386/rwsem.h + * Derived from asm-i386/rwsem-xadd.h */ #ifndef _SPARC64_RWSEM_H #define _SPARC64_RWSEM_H @@ -127,15 +127,14 @@ "save %%sp, -160, %%sp\n\t" "mov %%g2, %%l2\n\t" "mov %%g3, %%l3\n\t" - " mov %%g7, %%o0\n\t" "call %1\n\t" - " mov %%g5, %%o1\n\t" + " mov %%g5, %%o0\n\t" "mov %%l2, %%g2\n\t" "ba,pt %%xcc, 2b\n\t" " restore %%l3, %%g0, %%g3\n\t" ".previous\n\t" "! ending __up_read" - : : "r" (sem), "i" (rwsem_up_read_wake), + : : "r" (sem), "i" (rwsem_wake), "i" (RWSEM_ACTIVE_MASK) : "g1", "g5", "g7", "memory", "cc"); } @@ -146,28 +145,31 @@ "! beginning __up_write\n\t" "sethi %%hi(%2), %%g1\n\t" "or %%g1, %%lo(%2), %%g1\n" - "sub %%g5, %%g5, %%g5\n\t" - "cas [%0], %%g1, %%g5\n\t" - "cmp %%g1, %%g5\n\t" - "bne,pn %%icc, 1f\n\t" + "1:\tlduw [%0], %%g5\n\t" + "sub %%g5, %%g1, %%g7\n\t" + "cas [%0], %%g5, %%g7\n\t" + "cmp %%g5, %%g7\n\t" + "bne,pn %%icc, 1b\n\t" + " sub %%g7, %%g1, %%g7\n\t" + "cmp %%g7, 0\n\t" + "bl,pn %%icc, 3f\n\t" " membar #StoreStore\n" "2:\n\t" ".subsection 2\n" - "3:\tmov %0, %%g1\n\t" + "3:\tmov %0, %%g5\n\t" "save %%sp, -160, %%sp\n\t" "mov %%g2, %%l2\n\t" "mov %%g3, %%l3\n\t" - "mov %%g1, %%o0\n\t" "call %1\n\t" - " mov %%g5, %%o1\n\t" + " mov %%g5, %%o0\n\t" "mov %%l2, %%g2\n\t" "ba,pt %%xcc, 2b\n\t" " restore %%l3, %%g0, %%g3\n\t" ".previous\n\t" "! 
ending __up_write" - : : "r" (sem), "i" (rwsem_up_write_wake), + : : "r" (sem), "i" (rwsem_wake), "i" (RWSEM_ACTIVE_WRITE_BIAS) - : "g1", "g5", "memory", "cc"); + : "g1", "g5", "g7", "memory", "cc"); } static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) @@ -189,8 +191,6 @@ return tmp + delta; } -#define rwsem_atomic_add rwsem_atomic_update - static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new) { u32 old = (sem->count & 0xffff0000) | (u32) __old; @@ -212,11 +212,6 @@ goto again; return prev & 0xffff; -} - -static inline signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new) -{ - return cmpxchg(&sem->count,old,new); } #endif /* __KERNEL__ */ diff -urN rwsemref/include/linux/compiler.h rwsem/include/linux/compiler.h --- rwsemref/include/linux/compiler.h Thu Jan 1 01:00:00 1970 +++ rwsem/include/linux/compiler.h Sun Apr 22 23:22:22 2001 @@ -0,0 +1,13 @@ +#ifndef _LINUX_COMPILER_H +#define _LINUX_COMPILER_H + +/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented + a mechanism by which the user can annotate likely branch directions and + expect the blocks to be reordered appropriately. Define __builtin_expect + to nothing for earlier compilers. */ + +#if __GNUC__ == 2 && __GNUC_MINOR__ < 96 +#define __builtin_expect(x, expected_value) (x) +#endif + +#endif diff -urN rwsemref/include/linux/rwsem-spinlock.h rwsem/include/linux/rwsem-spinlock.h --- rwsemref/include/linux/rwsem-spinlock.h Tue Apr 24 02:20:46 2001 +++ rwsem/include/linux/rwsem-spinlock.h Thu Jan 1 01:00:00 1970 @@ -1,57 +0,0 @@ -/* rwsem-spinlock.h: fallback C implementation - * - * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
- */ - -#ifndef _LINUX_RWSEM_SPINLOCK_H -#define _LINUX_RWSEM_SPINLOCK_H - -#ifndef _LINUX_RWSEM_H -#error please dont include asm/rwsem-spinlock.h directly, use linux/rwsem.h instead -#endif - -#include - -#ifdef __KERNEL__ - -#include - -struct rwsem_waiter; - -/* - * the semaphore definition - */ -struct rw_semaphore { - __u32 active; - __u32 waiting; - spinlock_t wait_lock; - struct rwsem_waiter *wait_front; - struct rwsem_waiter **wait_back; -#if RWSEM_DEBUG - int debug; -#endif -}; - -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - -#define __RWSEM_INITIALIZER(name) \ -{ 0, 0, SPIN_LOCK_UNLOCKED, NULL, &(name).wait_front __RWSEM_DEBUG_INIT } - -#define DECLARE_RWSEM(name) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name) - -extern void FASTCALL(init_rwsem(struct rw_semaphore *sem)); -extern void FASTCALL(__down_read(struct rw_semaphore *sem)); -extern void FASTCALL(__down_write(struct rw_semaphore *sem)); -extern void FASTCALL(__up_read(struct rw_semaphore *sem)); -extern void FASTCALL(__up_write(struct rw_semaphore *sem)); - -#endif /* __KERNEL__ */ -#endif /* _LINUX_RWSEM_SPINLOCK_H */ diff -urN rwsemref/include/linux/rwsem.h rwsem/include/linux/rwsem.h --- rwsemref/include/linux/rwsem.h Tue Apr 24 02:20:46 2001 +++ rwsem/include/linux/rwsem.h Tue Apr 24 04:42:57 2001 @@ -1,113 +1,19 @@ -/* rwsem.h: R/W semaphores, public interface - * - * Written by David Howells (dhowells@redhat.com). - * Derived from asm-i386/semaphore.h - * - * - * The MSW of the count is the negated number of active writers and waiting - * lockers, and the LSW is the total number of active locks - * - * The lock count is initialized to 0 (no active and no waiting lockers). - * - * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an - * uncontended lock. This can be determined because XADD returns the old value. 
- * Readers increment by 1 and see a positive value when uncontended, negative - * if there are writers (and maybe) readers waiting (in which case it goes to - * sleep). - * - * The value of WAITING_BIAS supports up to 32766 waiting processes. This can - * be extended to 65534 by manually checking the whole MSW rather than relying - * on the S flag. - * - * The value of ACTIVE_BIAS supports up to 65535 active processes. - * - * This should be totally fair - if anything is waiting, a process that wants a - * lock will go to the back of the queue. When the currently active lock is - * released, if there's a writer at the front of the queue, then that and only - * that will be woken up; if there's a bunch of consequtive readers at the - * front, then they'll all be woken up, but no other readers will be. - */ - #ifndef _LINUX_RWSEM_H #define _LINUX_RWSEM_H -#include - -#define RWSEM_DEBUG 0 - #ifdef __KERNEL__ -#include -#include -#include +#include -struct rw_semaphore; - -/* defined contention handler functions for the generic case - * - these are also used for the exchange-and-add based algorithm - */ -#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM -/* we use FASTCALL convention for the helpers */ -extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); -extern void FASTCALL(rwsem_up_read_wake(signed long, struct rw_semaphore *)); -extern void FASTCALL(rwsem_up_write_wake(signed long, struct rw_semaphore *)); -#endif +#undef RWSEM_DEBUG #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -#include /* use a generic implementation */ -#else -#include /* use an arch-specific implementation */ -#endif - -#ifndef rwsemtrace -#if RWSEM_DEBUG -extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str)); +#include +#elif defined(CONFIG_RWSEM_XCHGADD) +#include #else -#define rwsemtrace(SEM,FMT) -#endif +#include #endif - -/* - * lock for reading - */ -static 
inline void down_read(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering down_read"); - __down_read(sem); - rwsemtrace(sem,"Leaving down_read"); -} - -/* - * lock for writing - */ -static inline void down_write(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering down_write"); - __down_write(sem); - rwsemtrace(sem,"Leaving down_write"); -} - -/* - * release a read lock - */ -static inline void up_read(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering up_read"); - __up_read(sem); - rwsemtrace(sem,"Leaving up_read"); -} - -/* - * release a write lock - */ -static inline void up_write(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering up_write"); - __up_write(sem); - rwsemtrace(sem,"Leaving up_write"); -} - #endif /* __KERNEL__ */ #endif /* _LINUX_RWSEM_H */ diff -urN rwsemref/include/linux/rwsem_spinlock.h rwsem/include/linux/rwsem_spinlock.h --- rwsemref/include/linux/rwsem_spinlock.h Thu Jan 1 01:00:00 1970 +++ rwsem/include/linux/rwsem_spinlock.h Tue Apr 24 04:10:07 2001 @@ -0,0 +1,61 @@ +#ifndef _LINUX_RWSEM_SPINLOCK_H +#define _LINUX_RWSEM_SPINLOCK_H + +#include + +struct rw_semaphore +{ + spinlock_t lock; + long count; +#define RWSEM_READ_BIAS 1 +#define RWSEM_WRITE_BIAS (~(~0UL >> (BITS_PER_LONG>>1))) + struct list_head wait; +#if RWSEM_DEBUG + long __magic; +#endif +}; + +#if RWSEM_DEBUG +#define __SEM_DEBUG_INIT(name) \ + , (int)&(name).__magic +#define CHECK_MAGIC(x) \ + do { \ + if ((x) != (long)&(x)) { \ + printk("rwsem bad magic %lx (should be %lx), ", \ + (long)x, (long)&(x)); \ + BUG(); \ + } \ + } while (0) +#else +#define __SEM_DEBUG_INIT(name) +#define CHECK_MAGIC(x) +#endif + +#define __RWSEM_INITIALIZER(name, count) \ +{ \ + SPIN_LOCK_UNLOCKED, \ + (count), \ + LIST_HEAD_INIT((name).wait) \ + __SEM_DEBUG_INIT(name) \ +} +#define RWSEM_INITIALIZER(name) __RWSEM_INITIALIZER(name, 0) + +#define __DECLARE_RWSEM(name, count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name, count) +#define DECLARE_RWSEM(name) 
__DECLARE_RWSEM(name, 0) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM(name, RWSEM_READ_BIAS) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM(name, RWSEM_WRITE_BIAS) + +#define RWSEM_READ_BLOCKING_BIAS (RWSEM_WRITE_BIAS-RWSEM_READ_BIAS) +#define RWSEM_WRITE_BLOCKING_BIAS (0) + +#define RWSEM_READ_MASK (~RWSEM_WRITE_BIAS) +#define RWSEM_WRITE_MASK (RWSEM_WRITE_BIAS) + +extern void FASTCALL(init_rwsem(struct rw_semaphore *)); +extern void FASTCALL(down_read(struct rw_semaphore *)); +extern void FASTCALL(down_write(struct rw_semaphore *)); +extern void FASTCALL(up_read(struct rw_semaphore *)); +extern void FASTCALL(up_write(struct rw_semaphore *)); + +#endif /* _LINUX_RWSEM_SPINLOCK_H */ diff -urN rwsemref/include/linux/rwsem_xchgadd.h rwsem/include/linux/rwsem_xchgadd.h --- rwsemref/include/linux/rwsem_xchgadd.h Thu Jan 1 01:00:00 1970 +++ rwsem/include/linux/rwsem_xchgadd.h Tue Apr 24 04:31:14 2001 @@ -0,0 +1,96 @@ +#ifndef _LINUX_RWSEM_XCHGADD_H +#define _LINUX_RWSEM_XCHGADD_H + +struct rw_semaphore +{ + long count; + spinlock_t lock; +#define RWSEM_READ_BIAS 1 +#define RWSEM_WRITE_BIAS (~(~0UL >> (BITS_PER_LONG>>1))) + struct list_head wait; +#if RWSEM_DEBUG + long __magic; +#endif +}; + +#if RWSEM_DEBUG +#define __SEM_DEBUG_INIT(name) \ + , (int)&(name).__magic +#define CHECK_MAGIC(x) \ + do { \ + if ((x) != (long)&(x)) { \ + printk("rwsem bad magic %lx (should be %lx), ", \ + (long)x, (long)&(x)); \ + BUG(); \ + } \ + } while (0) +#else +#define __SEM_DEBUG_INIT(name) +#define CHECK_MAGIC(x) +#endif + +#define __RWSEM_INITIALIZER(name, count) \ +{ \ + (count), \ + SPIN_LOCK_UNLOCKED, \ + LIST_HEAD_INIT((name).wait) \ + __SEM_DEBUG_INIT(name) \ +} +#define RWSEM_INITIALIZER(name) __RWSEM_INITIALIZER(name, 0) + +#define __DECLARE_RWSEM(name, count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name, count) +#define DECLARE_RWSEM(name) __DECLARE_RWSEM(name, 0) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM(name, RWSEM_READ_BIAS) 
+#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM(name, RWSEM_WRITE_BIAS+RWSEM_READ_BIAS) + +#define RWSEM_READ_BLOCKING_BIAS (RWSEM_WRITE_BIAS-RWSEM_READ_BIAS) +#define RWSEM_WRITE_BLOCKING_BIAS (-RWSEM_READ_BIAS) + +#define RWSEM_READ_MASK (~RWSEM_WRITE_BIAS) +#define RWSEM_WRITE_MASK (RWSEM_WRITE_BIAS) + +extern void FASTCALL(down_failed(struct rw_semaphore *, long)); +extern void FASTCALL(rwsem_wake(struct rw_semaphore *)); + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + sem->count = 0; + spin_lock_init(&sem->lock); + INIT_LIST_HEAD(&sem->wait); +#if RWSEM_DEBUG + sem->__magic = (long)&sem->__magic; +#endif +} + +#include + +static inline void down_read(struct rw_semaphore *sem) +{ + CHECK_MAGIC(sem->__magic); + + __down_read(sem); +} + +static inline void down_write(struct rw_semaphore *sem) +{ + CHECK_MAGIC(sem->__magic); + + __down_write(sem); +} + +static inline void up_read(struct rw_semaphore *sem) +{ + CHECK_MAGIC(sem->__magic); + + __up_read(sem); +} + +static inline void up_write(struct rw_semaphore *sem) +{ + CHECK_MAGIC(sem->__magic); + + __up_write(sem); +} + +#endif /* _LINUX_RWSEM_XCHGADD_H */ diff -urN rwsemref/include/linux/sched.h rwsem/include/linux/sched.h --- rwsemref/include/linux/sched.h Tue Apr 24 00:20:48 2001 +++ rwsem/include/linux/sched.h Tue Apr 24 04:42:57 2001 @@ -239,7 +239,7 @@ mm_users: ATOMIC_INIT(2), \ mm_count: ATOMIC_INIT(1), \ map_count: 1, \ - mmap_sem: __RWSEM_INITIALIZER(name.mmap_sem), \ + mmap_sem: RWSEM_INITIALIZER(name.mmap_sem), \ page_table_lock: SPIN_LOCK_UNLOCKED, \ mmlist: LIST_HEAD_INIT(name.mmlist), \ } diff -urN rwsemref/lib/Makefile rwsem/lib/Makefile --- rwsemref/lib/Makefile Tue Apr 24 02:20:46 2001 +++ rwsem/lib/Makefile Tue Apr 24 04:48:48 2001 @@ -8,12 +8,12 @@ L_TARGET := lib.a -export-objs := cmdline.o rwsem-spinlock.o rwsem.o +export-objs := cmdline.o rwsem_spinlock.o rwsem_xchgadd.o obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o 
-obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o -obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o +obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem_spinlock.o +obj-$(CONFIG_RWSEM_XCHGADD) += rwsem_xchgadd.o ifneq ($(CONFIG_HAVE_DEC_LOCK),y) obj-y += dec_and_lock.o diff -urN rwsemref/lib/rwsem-spinlock.c rwsem/lib/rwsem-spinlock.c --- rwsemref/lib/rwsem-spinlock.c Tue Apr 24 02:20:46 2001 +++ rwsem/lib/rwsem-spinlock.c Thu Jan 1 01:00:00 1970 @@ -1,245 +0,0 @@ -/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock - * implementation - * - * Copyright (c) 2001 David Howells (dhowells@redhat.com). - */ -#include -#include -#include - -struct rwsem_waiter { - struct rwsem_waiter *next; - struct task_struct *task; - unsigned int flags; -#define RWSEM_WAITING_FOR_READ 0x00000001 -#define RWSEM_WAITING_FOR_WRITE 0x00000002 -}; - -#if RWSEM_DEBUG -void rwsemtrace(struct rw_semaphore *sem, const char *str) -{ - if (sem->debug) - printk("[%d] %s({%d,%d})\n",current->pid,str,sem->active,sem->waiting); -} -#endif - -/* - * initialise the semaphore - */ -void init_rwsem(struct rw_semaphore *sem) -{ - sem->active = 0; - sem->waiting = 0; - spin_lock_init(&sem->wait_lock); - sem->wait_front = NULL; - sem->wait_back = &sem->wait_front; -#if RWSEM_DEBUG - sem->debug = 0; -#endif -} - -/* - * handle the lock being released whilst there are processes blocked on it that can now run - * - if we come here, then: - * - the 'active count' _reached_ zero - * - the 'waiting count' is non-zero - * - the spinlock must be held by the caller - * - woken process blocks are discarded from the list after having flags zeroised - */ -static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem) -{ - struct rwsem_waiter *waiter, *next; - int woken, loop; - - rwsemtrace(sem,"Entering __rwsem_do_wake"); - - waiter = sem->wait_front; - - if (!waiter) - goto list_unexpectedly_empty; - - next = NULL; - - /* try to grant a single write lock if there's 
a writer at the front of the queue - * - we leave the 'waiting count' incremented to signify potential contention - */ - if (waiter->flags & RWSEM_WAITING_FOR_WRITE) { - sem->active++; - next = waiter->next; - waiter->flags = 0; - wake_up_process(waiter->task); - goto discard_woken_processes; - } - - /* grant an infinite number of read locks to the readers at the front of the queue */ - woken = 0; - do { - woken++; - waiter = waiter->next; - } while (waiter && waiter->flags&RWSEM_WAITING_FOR_READ); - - sem->active += woken; - sem->waiting -= woken; - - waiter = sem->wait_front; - for (loop=woken; loop>0; loop--) { - next = waiter->next; - waiter->flags = 0; - wake_up_process(waiter->task); - waiter = next; - } - - discard_woken_processes: - sem->wait_front = next; - if (!next) sem->wait_back = &sem->wait_front; - - out: - rwsemtrace(sem,"Leaving __rwsem_do_wake"); - return sem; - - list_unexpectedly_empty: - printk("__rwsem_do_wake(): wait_list unexpectedly empty\n"); - printk("[%d] %p = { %d, %d })\n",current->pid,sem,sem->active,sem->waiting); - BUG(); - goto out; -} - -/* - * get a read lock on the semaphore - */ -void __down_read(struct rw_semaphore *sem) -{ - struct rwsem_waiter waiter; - struct task_struct *tsk = current; - - rwsemtrace(sem,"Entering __down_read"); - - spin_lock(&sem->wait_lock); - - if (!sem->waiting) { - /* granted */ - sem->active++; - spin_unlock(&sem->wait_lock); - goto out; - } - sem->waiting++; - - set_task_state(tsk,TASK_UNINTERRUPTIBLE); - - /* set up my own style of waitqueue */ - waiter.next = NULL; - waiter.task = tsk; - waiter.flags = RWSEM_WAITING_FOR_READ; - - *sem->wait_back = &waiter; /* add to back of queue */ - sem->wait_back = &waiter.next; - - /* we don't need to touch the semaphore struct anymore */ - spin_unlock(&sem->wait_lock); - - /* wait to be given the lock */ - for (;;) { - if (!waiter.flags) - break; - schedule(); - set_task_state(tsk, TASK_UNINTERRUPTIBLE); - } - - tsk->state = TASK_RUNNING; - - out: - 
rwsemtrace(sem,"Leaving __down_read"); -} - -/* - * get a write lock on the semaphore - * - note that we increment the waiting count anyway to indicate an exclusive lock - */ -void __down_write(struct rw_semaphore *sem) -{ - struct rwsem_waiter waiter; - struct task_struct *tsk = current; - - rwsemtrace(sem,"Entering __down_write"); - - spin_lock(&sem->wait_lock); - - if (!sem->waiting && !sem->active) { - /* granted */ - sem->active++; - sem->waiting++; - spin_unlock(&sem->wait_lock); - goto out; - } - sem->waiting++; - - set_task_state(tsk,TASK_UNINTERRUPTIBLE); - - /* set up my own style of waitqueue */ - waiter.next = NULL; - waiter.task = tsk; - waiter.flags = RWSEM_WAITING_FOR_WRITE; - - *sem->wait_back = &waiter; /* add to back of queue */ - sem->wait_back = &waiter.next; - - /* we don't need to touch the semaphore struct anymore */ - spin_unlock(&sem->wait_lock); - - /* wait to be given the lock */ - for (;;) { - if (!waiter.flags) - break; - schedule(); - set_task_state(tsk, TASK_UNINTERRUPTIBLE); - } - - tsk->state = TASK_RUNNING; - - out: - rwsemtrace(sem,"Leaving __down_write"); -} - -/* - * release a read lock on the semaphore - */ -void __up_read(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering __up_read"); - - spin_lock(&sem->wait_lock); - - if (--sem->active==0 && sem->waiting) - __rwsem_do_wake(sem); - - spin_unlock(&sem->wait_lock); - - rwsemtrace(sem,"Leaving __up_read"); -} - -/* - * release a write lock on the semaphore - */ -void __up_write(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering __up_write"); - - spin_lock(&sem->wait_lock); - - sem->waiting--; - if (--sem->active==0 && sem->waiting) - __rwsem_do_wake(sem); - - spin_unlock(&sem->wait_lock); - - rwsemtrace(sem,"Leaving __up_write"); -} - -EXPORT_SYMBOL(init_rwsem); -EXPORT_SYMBOL(__down_read); -EXPORT_SYMBOL(__down_write); -EXPORT_SYMBOL(__up_read); -EXPORT_SYMBOL(__up_write); -#if RWSEM_DEBUG -EXPORT_SYMBOL(rwsemtrace); -#endif diff -urN rwsemref/lib/rwsem.c 
rwsem/lib/rwsem.c --- rwsemref/lib/rwsem.c Tue Apr 24 02:20:46 2001 +++ rwsem/lib/rwsem.c Thu Jan 1 01:00:00 1970 @@ -1,265 +0,0 @@ -/* rwsem.c: R/W semaphores: contention handling functions - * - * Written by David Howells (dhowells@redhat.com). - * Derived from arch/i386/kernel/semaphore.c - */ -#include -#include -#include - -struct rwsem_waiter { - struct rwsem_waiter *next; - struct task_struct *task; - unsigned int flags; -#define RWSEM_WAITING_FOR_READ 0x00000001 -#define RWSEM_WAITING_FOR_WRITE 0x00000002 -}; - -#if RWSEM_DEBUG -void rwsemtrace(struct rw_semaphore *sem, const char *str) -{ - if (sem->debug) - printk("[%d] %s({%08lx})\n",current->pid,str,sem->count); -} -#endif - -/* - * handle the lock being released whilst there are processes blocked on it that can now run - * - the caller can specify an adjustment that will need to be made to the semaphore count to - * help reduce the number of atomic operations invoked - * - if we come here, then: - * - the 'active part' of the count (&0x0000ffff) reached zero but has been re-incremented - * - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so) - * - the spinlock must be held by the caller - * - woken process blocks are discarded from the list after having flags zeroised - */ -static inline struct rw_semaphore *__rwsem_do_wake(int adjustment, struct rw_semaphore *sem) -{ - struct rwsem_waiter *waiter, *next; - int woken, loop; - - rwsemtrace(sem,"Entering __rwsem_do_wake"); - - waiter = sem->wait_front; - - if (!waiter) - goto list_unexpectedly_empty; - - next = NULL; - - /* try to grant a single write lock if there's a writer at the front of the queue - * - note we leave the 'active part' of the count incremented by 1 and the waiting part - * incremented by 0x00010000 - */ - if (waiter->flags & RWSEM_WAITING_FOR_WRITE) { - if (adjustment) - rwsem_atomic_add(adjustment,sem); - next = waiter->next; - waiter->flags = 0; - wake_up_process(waiter->task); - goto 
discard_woken_processes; - } - - /* grant an infinite number of read locks to the readers at the front of the queue - * - note we increment the 'active part' of the count by the number of readers (less one - * for the activity decrement we've already done) before waking any processes up - */ - woken = 0; - do { - woken++; - waiter = waiter->next; - } while (waiter && waiter->flags&RWSEM_WAITING_FOR_READ); - - loop = woken; - woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS; - woken -= RWSEM_ACTIVE_BIAS; - woken += adjustment; - rwsem_atomic_add(woken,sem); - - waiter = sem->wait_front; - for (; loop>0; loop--) { - next = waiter->next; - waiter->flags = 0; - wake_up_process(waiter->task); - waiter = next; - } - - discard_woken_processes: - sem->wait_front = next; - if (!next) sem->wait_back = &sem->wait_front; - - out: - rwsemtrace(sem,"Leaving __rwsem_do_wake"); - return sem; - - list_unexpectedly_empty: - printk("__rwsem_do_wake(): wait_list unexpectedly empty\n"); - printk("[%d] %p = { %08lx })\n",current->pid,sem,sem->count); - BUG(); - goto out; -} - -/* - * wait for the read lock to be granted - * - need to repeal the increment made inline by the caller - * - need to throw a write-lock style spanner into the works (sub 0x00010000 from count) - */ -struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem) -{ - struct rwsem_waiter waiter; - struct task_struct *tsk = current; - signed long count; - - rwsemtrace(sem,"Entering rwsem_down_read_failed"); - - set_task_state(tsk,TASK_UNINTERRUPTIBLE); - - /* set up my own style of waitqueue */ - waiter.next = NULL; - waiter.task = tsk; - waiter.flags = RWSEM_WAITING_FOR_READ; - - spin_lock(&sem->wait_lock); - - *sem->wait_back = &waiter; /* add to back of queue */ - sem->wait_back = &waiter.next; - - /* note that we're now waiting on the lock, but no longer actively read-locking */ - count = rwsem_atomic_update(RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS,sem); - - /* if there are no longer active locks, wake the 
front queued process(es) up - * - it might even be this process, since the waker takes a more active part - * - should only enter __rwsem_do_wake() only on a transition 0->1 in the LSW - */ - if (!(count & RWSEM_ACTIVE_MASK)) - if (rwsem_cmpxchgw(sem,0,RWSEM_ACTIVE_BIAS)==0) - __rwsem_do_wake(0,sem); - - spin_unlock(&sem->wait_lock); - - /* wait to be given the lock */ - for (;;) { - if (!waiter.flags) - break; - schedule(); - set_task_state(tsk, TASK_UNINTERRUPTIBLE); - } - - tsk->state = TASK_RUNNING; - - rwsemtrace(sem,"Leaving rwsem_down_read_failed"); - return sem; -} - -/* - * wait for the write lock to be granted - */ -struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem) -{ - struct rwsem_waiter waiter; - struct task_struct *tsk = current; - signed long count; - - rwsemtrace(sem,"Entering rwsem_down_write_failed"); - - set_task_state(tsk, TASK_UNINTERRUPTIBLE); - - /* set up my own style of waitqueue */ - waiter.next = NULL; - waiter.task = tsk; - waiter.flags = RWSEM_WAITING_FOR_WRITE; - - spin_lock(&sem->wait_lock); - - *sem->wait_back = &waiter; /* add to back of queue */ - sem->wait_back = &waiter.next; - - /* note that we're waiting on the lock, but no longer actively locking */ - count = rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem); - - /* if there are no longer active locks, wake the front queued process(es) up - * - it might even be this process, since the waker takes a more active part - * - should only enter __rwsem_do_wake() only on a transition 0->1 in the LSW - */ - if (!(count & RWSEM_ACTIVE_MASK)) - if (rwsem_cmpxchgw(sem,0,RWSEM_ACTIVE_BIAS)==0) - __rwsem_do_wake(0,sem); - - spin_unlock(&sem->wait_lock); - - /* wait to be given the lock */ - for (;;) { - if (!waiter.flags) - break; - schedule(); - set_task_state(tsk, TASK_UNINTERRUPTIBLE); - } - - tsk->state = TASK_RUNNING; - - rwsemtrace(sem,"Leaving rwsem_down_write_failed"); - return sem; -} - -/* - * handle up_read() finding a waiter on the semaphore - * - up_read has 
decremented the active part of the count if we come here - */ -void rwsem_up_read_wake(signed long count, struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering rwsem_up_read_wake"); - - spin_lock(&sem->wait_lock); - - /* need to wake up a waiter unless the semaphore has gone active again - * - should only enter __rwsem_do_wake() only on a transition 0->1 in the LSW - */ - if (rwsem_cmpxchgw(sem,0,RWSEM_ACTIVE_BIAS)==0) - sem = __rwsem_do_wake(0,sem); - - spin_unlock(&sem->wait_lock); - - rwsemtrace(sem,"Leaving rwsem_up_read_wake"); -} - -/* - * handle up_write() finding a waiter on the semaphore - * - up_write has not modified the count if we come here - */ -void rwsem_up_write_wake(signed long count, struct rw_semaphore *sem) -{ - signed long new; - - rwsemtrace(sem,"Entering rwsem_up_write_wake"); - - spin_lock(&sem->wait_lock); - - try_again: - /* if the active part of the count is 1, we should perform a wake-up, else we should - * decrement the count and return - */ - if ((count&RWSEM_ACTIVE_MASK)==RWSEM_ACTIVE_BIAS) { - sem = __rwsem_do_wake(-RWSEM_WAITING_BIAS,sem); - } - else { - /* tricky - we mustn't return the active part of the count to 0 */ - new = count - RWSEM_ACTIVE_WRITE_BIAS; - new = rwsem_cmpxchg(sem,count,new); - if (count!=new) { - count = new; - goto try_again; - } - } - - spin_unlock(&sem->wait_lock); - - rwsemtrace(sem,"Leaving rwsem_up_write_wake"); -} - -EXPORT_SYMBOL(rwsem_down_read_failed); -EXPORT_SYMBOL(rwsem_down_write_failed); -EXPORT_SYMBOL(rwsem_up_read_wake); -EXPORT_SYMBOL(rwsem_up_write_wake); -#if RWSEM_DEBUG -EXPORT_SYMBOL(rwsemtrace); -#endif diff -urN rwsemref/lib/rwsem_spinlock.c rwsem/lib/rwsem_spinlock.c --- rwsemref/lib/rwsem_spinlock.c Thu Jan 1 01:00:00 1970 +++ rwsem/lib/rwsem_spinlock.c Tue Apr 24 00:32:06 2001 @@ -0,0 +1,124 @@ +/* + * rw_semaphores generic spinlock version + * Copyright (C) 2001 Andrea Arcangeli SuSE + */ + +#include +#include +#include + +struct rwsem_wait_queue { + unsigned long retire; + 
struct task_struct * task; + struct list_head task_list; +}; + +static void FASTCALL(down_failed(struct rw_semaphore *, long)); +static void down_failed(struct rw_semaphore *sem, long retire) +{ + struct task_struct *tsk = current; + struct rwsem_wait_queue wait; + + sem->count += retire; + wait.retire = retire; + wait.task = tsk; + INIT_LIST_HEAD(&wait.task_list); + list_add(&wait.task_list, &sem->wait); + + do { + __set_task_state(tsk, TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&sem->lock); + schedule(); + spin_lock_irq(&sem->lock); + } while(wait.task_list.next); +} + +static void FASTCALL(rwsem_wake(struct rw_semaphore *)); +static void rwsem_wake(struct rw_semaphore *sem) +{ + struct list_head * entry, * head = &sem->wait; + int last = 0; + + while ((entry = head->prev) != head) { + struct rwsem_wait_queue * wait; + + wait = list_entry(entry, struct rwsem_wait_queue, task_list); + + if (wait->retire == RWSEM_WRITE_BLOCKING_BIAS) { + if (sem->count & RWSEM_READ_MASK) + break; + last = 1; + } + + /* convert write lock into read lock when read become active */ + sem->count -= wait->retire; + list_del(entry); + entry->next = NULL; + wake_up_process(wait->task); + + if (last) + break; + } +} + +void init_rwsem(struct rw_semaphore *sem) +{ + spin_lock_init(&sem->lock); + sem->count = 0; + INIT_LIST_HEAD(&sem->wait); +#if RWSEM_DEBUG + sem->__magic = (long)&sem->__magic; +#endif +} + +void down_read(struct rw_semaphore *sem) +{ + CHECK_MAGIC(sem->__magic); + + spin_lock(&sem->lock); + sem->count += RWSEM_READ_BIAS; + if (__builtin_expect(sem->count, 0) < 0) + down_failed(sem, RWSEM_READ_BLOCKING_BIAS); + spin_unlock(&sem->lock); +} + +void down_write(struct rw_semaphore *sem) +{ + long count; + CHECK_MAGIC(sem->__magic); + + spin_lock(&sem->lock); + count = sem->count; + sem->count += RWSEM_WRITE_BIAS; + if (__builtin_expect(count, 0)) + down_failed(sem, RWSEM_WRITE_BLOCKING_BIAS); + spin_unlock(&sem->lock); +} + +void up_read(struct rw_semaphore *sem) +{ + 
CHECK_MAGIC(sem->__magic); + + spin_lock(&sem->lock); + sem->count -= RWSEM_READ_BIAS; + if (__builtin_expect(sem->count < 0 && !(sem->count & RWSEM_READ_MASK), 0)) + rwsem_wake(sem); + spin_unlock(&sem->lock); +} + +void up_write(struct rw_semaphore *sem) +{ + CHECK_MAGIC(sem->__magic); + + spin_lock(&sem->lock); + sem->count -= RWSEM_WRITE_BIAS; + if (__builtin_expect(sem->count, 0)) + rwsem_wake(sem); + spin_unlock(&sem->lock); +} + +EXPORT_SYMBOL(init_rwsem); +EXPORT_SYMBOL(down_read); +EXPORT_SYMBOL(down_write); +EXPORT_SYMBOL(up_read); +EXPORT_SYMBOL(up_write); diff -urN rwsemref/lib/rwsem_xchgadd.c rwsem/lib/rwsem_xchgadd.c --- rwsemref/lib/rwsem_xchgadd.c Thu Jan 1 01:00:00 1970 +++ rwsem/lib/rwsem_xchgadd.c Tue Apr 24 03:36:44 2001 @@ -0,0 +1,88 @@ +/* + * rw_semaphores xchgadd version + * Copyright (C) 2001 Andrea Arcangeli SuSE + */ + +#include +#include +#include + +struct rwsem_wait_queue { + unsigned long retire; + struct task_struct * task; + struct list_head task_list; +}; + +static void FASTCALL(__rwsem_wake(struct rw_semaphore *)); +static void __rwsem_wake(struct rw_semaphore *sem) +{ + struct list_head * entry, * head = &sem->wait; + int wake_write = 0, wake_read = 0; + + while ((entry = head->prev) != head) { + struct rwsem_wait_queue * wait; + long count; + + wait = list_entry(entry, struct rwsem_wait_queue, task_list); + + if (wait->retire == RWSEM_WRITE_BLOCKING_BIAS) { + if (wake_read) + break; + wake_write = 1; + } + + again: + count = rwsem_xchgadd(-wait->retire, &sem->count); + if (!wake_read && (count & RWSEM_READ_MASK)) { + count = rwsem_xchgadd(wait->retire, &sem->count); + if ((count & RWSEM_READ_MASK) == 1) + goto again; + break; + } + + list_del(entry); + entry->next = NULL; + wake_up_process(wait->task); + + if (wake_write) + break; + wake_read = 1; + } +} + +void down_failed(struct rw_semaphore *sem, long retire) +{ + struct task_struct *tsk = current; + struct rwsem_wait_queue wait; + long count; + + wait.retire = retire; + 
wait.task = tsk; + INIT_LIST_HEAD(&wait.task_list); + + spin_lock(&sem->lock); + list_add(&wait.task_list, &sem->wait); + + count = rwsem_xchgadd(retire, &sem->count); + if ((count & RWSEM_READ_MASK) == 1) + __rwsem_wake(sem); + + while (wait.task_list.next) { + __set_task_state(tsk, TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&sem->lock); + schedule(); + spin_lock_irq(&sem->lock); + } + + spin_unlock(&sem->lock); +} + +void rwsem_wake(struct rw_semaphore *sem) +{ + spin_lock(&sem->lock); + __rwsem_wake(sem); + spin_unlock(&sem->lock); +} + +EXPORT_SYMBOL(down_failed); +EXPORT_SYMBOL(rwsem_wake);