diff -urN 2.4.4pre4/arch/alpha/config.in 2.4.4pre4-rwsem/arch/alpha/config.in --- 2.4.4pre4/arch/alpha/config.in Thu Apr 19 02:17:05 2001 +++ 2.4.4pre4-rwsem/arch/alpha/config.in Thu Apr 19 02:20:51 2001 @@ -5,8 +5,7 @@ define_bool CONFIG_ALPHA y define_bool CONFIG_UID16 n -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n +define_bool CONFIG_GENERIC_RWSEM y mainmenu_name "Kernel configuration of Linux for Alpha machines" diff -urN 2.4.4pre4/arch/alpha/kernel/alpha_ksyms.c 2.4.4pre4-rwsem/arch/alpha/kernel/alpha_ksyms.c --- 2.4.4pre4/arch/alpha/kernel/alpha_ksyms.c Thu Apr 19 02:17:05 2001 +++ 2.4.4pre4-rwsem/arch/alpha/kernel/alpha_ksyms.c Thu Apr 19 02:20:51 2001 @@ -173,13 +173,6 @@ EXPORT_SYMBOL(down_interruptible); EXPORT_SYMBOL(down_trylock); EXPORT_SYMBOL(up); -EXPORT_SYMBOL(__down_read_failed); -EXPORT_SYMBOL(__down_write_failed); -EXPORT_SYMBOL(__rwsem_wake); -EXPORT_SYMBOL(down_read); -EXPORT_SYMBOL(down_write); -EXPORT_SYMBOL(up_read); -EXPORT_SYMBOL(up_write); /* * SMP-specific symbols. diff -urN 2.4.4pre4/arch/arm/config.in 2.4.4pre4-rwsem/arch/arm/config.in --- 2.4.4pre4/arch/arm/config.in Thu Apr 19 02:17:05 2001 +++ 2.4.4pre4-rwsem/arch/arm/config.in Thu Apr 19 02:23:22 2001 @@ -9,8 +9,6 @@ define_bool CONFIG_SBUS n define_bool CONFIG_MCA n define_bool CONFIG_UID16 y -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_option next_comment diff -urN 2.4.4pre4/arch/arm/kernel/semaphore.c 2.4.4pre4-rwsem/arch/arm/kernel/semaphore.c --- 2.4.4pre4/arch/arm/kernel/semaphore.c Thu Apr 19 02:17:05 2001 +++ 2.4.4pre4-rwsem/arch/arm/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -165,3 +165,321 @@ spin_unlock_irqrestore(&semaphore_lock, flags); return 1; } + +struct rw_semaphore *down_read_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->read_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + return sem; +} + +struct rw_semaphore *down_write_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->write_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); + + return sem; +} + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. 
=) + */ +struct rw_semaphore *down_read_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + /* this takes care of granting the lock */ + __up_op_read(sem, __rwsem_wake); + + add_wait_queue(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + return sem; +} + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. + */ +struct rw_semaphore *down_write_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + /* this takes care of granting the lock */ + __up_op_write(sem, __rwsem_wake); + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; /* we must attempt to acquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + return sem; +} + +/* Called when someone has done an up that transitioned from + * negative to non-negative, meaning that the lock has been + * granted to whomever owned the bias. + */ +struct rw_semaphore *rwsem_wake_readers(struct rw_semaphore *sem) +{ + if (xchg(&sem->read_bias_granted, 1)) + BUG(); + wake_up(&sem->wait); + return sem; +} + +struct rw_semaphore *rwsem_wake_writer(struct rw_semaphore *sem) +{ + if (xchg(&sem->write_bias_granted, 1)) + BUG(); + wake_up(&sem->write_bias_wait); + return sem; +} + +/* + * The semaphore operations have a special calling sequence that + * allow us to do a simpler in-line version of them. These routines + * need to convert that sequence back into the C sequence when + * there is contention on the semaphore. + * + * ip contains the semaphore pointer on entry. Save the C-clobbered + * registers (r0 to r3 and lr), but not ip, as we use it as a return + * value in some cases.. 
+ */ +#ifdef CONFIG_CPU_26 +asm(" .section .text.lock, \"ax\" + .align 5 + .globl __down_failed +__down_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bl __down + ldmfd sp!, {r0 - r3, pc}^ + + .align 5 + .globl __down_interruptible_failed +__down_interruptible_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bl __down_interruptible + mov ip, r0 + ldmfd sp!, {r0 - r3, pc}^ + + .align 5 + .globl __down_trylock_failed +__down_trylock_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bl __down_trylock + mov ip, r0 + ldmfd sp!, {r0 - r3, pc}^ + + .align 5 + .globl __up_wakeup +__up_wakeup: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bl __up + ldmfd sp!, {r0 - r3, pc}^ + + .align 5 + .globl __down_read_failed +__down_read_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bcc 1f +1: bl down_read_failed_biased + ldmfd sp!, {r0 - r3, pc}^ +2: bl down_read_failed + mov r1, pc + orr r2, r1, # + teqp r2, #0 + + ldr r3, [r0] + subs r3, r3, #1 + str r3, [r0] + ldmplfd sp!, {r0 - r3, pc}^ + orrcs r1, r1, #0x20000000 @ Set carry + teqp r1, #0 + bcc 2b + b 1b + + .align 5 + .globl __down_write_failed +__down_write_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bcc 1f +1: bl down_write_failed_biased + ldmfd sp!, {r0 - r3, pc}^ +2: bl down_write_failed + mov r1, pc + orr r2, r1, #128 + teqp r2, #0 + + ldr r3, [r0] + subs r3, r3, #"RW_LOCK_BIAS_STR" + str r3, [r0] + ldmeqfd sp!, {r0 - r3, pc}^ + orrcs r1, r1, #0x20000000 @ Set carry + teqp r1, #0 + bcc 2b + b 1b + + .align 5 + .globl __rwsem_wake +__rwsem_wake: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + beq 1f + bl rwsem_wake_readers + ldmfd sp!, {r0 - r3, pc}^ +1: bl rwsem_wake_writer + ldmfd sp!, {r0 - r3, pc}^ + + .previous + "); + +#else +/* 32 bit version */ +asm(" .section .text.lock, \"ax\" + .align 5 + .globl __down_failed +__down_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bl __down + ldmfd sp!, {r0 - r3, pc} + + .align 5 + .globl __down_interruptible_failed +__down_interruptible_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bl __down_interruptible + mov ip, r0 + ldmfd sp!, {r0 - r3, pc} + + .align 5 + .globl __down_trylock_failed +__down_trylock_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bl __down_trylock + mov ip, r0 + ldmfd sp!, {r0 - r3, pc} + + .align 5 + .globl __up_wakeup +__up_wakeup: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bl __up + ldmfd sp!, {r0 - r3, pc} + + .align 5 + .globl __down_read_failed +__down_read_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bcc 1f +1: bl down_read_failed_biased + ldmfd sp!, {r0 - r3, pc} +2: bl down_read_failed + mrs r1, cpsr + orr r2, r1, #128 + msr cpsr_c, r2 + ldr r3, [r0] + subs r3, r3, #1 + str r3, [r0] + msr cpsr_c, r1 + ldmplfd sp!, {r0 - r3, pc} + bcc 2b + b 1b + + .align 5 + .globl __down_write_failed +__down_write_failed: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + bcc 1f +1: bl down_write_failed_biased + ldmfd sp!, {r0 - r3, pc} +2: bl down_write_failed + mrs r1, cpsr + orr r2, r1, #128 + msr cpsr_c, r2 + ldr r3, [r0] + subs r3, r3, #"RW_LOCK_BIAS_STR" + str r3, [r0] + msr cpsr_c, r1 + ldmeqfd sp!, {r0 - r3, pc} + bcc 2b + b 1b + + .align 5 + .globl __rwsem_wake +__rwsem_wake: + stmfd sp!, {r0 - r3, lr} + mov r0, ip + beq 1f + bl rwsem_wake_readers + ldmfd sp!, {r0 - r3, pc} +1: bl rwsem_wake_writer + ldmfd sp!, {r0 - r3, pc} + + .previous + "); + +#endif diff -urN 2.4.4pre4/arch/cris/config.in 2.4.4pre4-rwsem/arch/cris/config.in --- 2.4.4pre4/arch/cris/config.in Thu Apr 19 02:17:06 2001 +++ 2.4.4pre4-rwsem/arch/cris/config.in Thu Apr 19 02:20:42 2001 @@ -5,8 +5,6 @@ 
mainmenu_name "Linux/CRIS Kernel Configuration" define_bool CONFIG_UID16 y -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_option next_comment comment 'Code maturity level options' diff -urN 2.4.4pre4/arch/cris/kernel/semaphore.c 2.4.4pre4-rwsem/arch/cris/kernel/semaphore.c --- 2.4.4pre4/arch/cris/kernel/semaphore.c Thu Apr 19 02:17:06 2001 +++ 2.4.4pre4-rwsem/arch/cris/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -127,3 +127,112 @@ { return waking_non_zero_trylock(sem); } + +/* + * RW Semaphores + */ +void +__down_read(struct rw_semaphore *sem, int count) +{ + DOWN_VAR; + + retry_down: + if (count < 0) { + /* Wait for the lock to become unbiased. Readers + are non-exclusive. */ + + /* This takes care of granting the lock. */ + up_read(sem); + + add_wait_queue(&sem->wait, &wait); + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + mb(); + count = atomic_dec_return(&sem->count); + if (count <= 0) + goto retry_down; + } else { + add_wait_queue(&sem->wait, &wait); + + while (1) { + if (test_and_clear_bit(0, &sem->granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if ((sem->granted & 1) == 0) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + } +} + +void +__down_write(struct rw_semaphore *sem, int count) +{ + DOWN_VAR; + + retry_down: + if (count + RW_LOCK_BIAS < 0) { + up_write(sem); + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= RW_LOCK_BIAS) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + mb(); + count = atomic_sub_return(RW_LOCK_BIAS, &sem->count); + if (count != 0) + goto retry_down; + } else { + /* Put ourselves at the end of the list. */ + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); + + while (1) { + if (test_and_clear_bit(1, &sem->granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if ((sem->granted & 2) == 0) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* If the lock is currently unbiased, awaken the sleepers. + FIXME: This wakes up the readers early in a bit of a + stampede -> bad! 
*/ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); + } +} + +void +__rwsem_wake(struct rw_semaphore *sem, unsigned long readers) +{ + if (readers) { + if (test_and_set_bit(0, &sem->granted)) + BUG(); + wake_up(&sem->wait); + } else { + if (test_and_set_bit(1, &sem->granted)) + BUG(); + wake_up(&sem->write_bias_wait); + } +} diff -urN 2.4.4pre4/arch/i386/config.in 2.4.4pre4-rwsem/arch/i386/config.in --- 2.4.4pre4/arch/i386/config.in Thu Apr 19 02:17:06 2001 +++ 2.4.4pre4-rwsem/arch/i386/config.in Thu Apr 19 02:20:51 2001 @@ -7,10 +7,9 @@ define_bool CONFIG_X86 y define_bool CONFIG_ISA y define_bool CONFIG_SBUS n +define_bool CONFIG_GENERIC_RWSEM y define_bool CONFIG_UID16 y -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y mainmenu_option next_comment comment 'Code maturity level options' diff -urN 2.4.4pre4/arch/i386/kernel/i386_ksyms.c 2.4.4pre4-rwsem/arch/i386/kernel/i386_ksyms.c --- 2.4.4pre4/arch/i386/kernel/i386_ksyms.c Thu Apr 19 02:17:06 2001 +++ 2.4.4pre4-rwsem/arch/i386/kernel/i386_ksyms.c Thu Apr 19 02:20:51 2001 @@ -80,9 +80,6 @@ EXPORT_SYMBOL_NOVERS(__down_failed_interruptible); EXPORT_SYMBOL_NOVERS(__down_failed_trylock); EXPORT_SYMBOL_NOVERS(__up_wakeup); -EXPORT_SYMBOL_NOVERS(__rwsem_down_write_failed); -EXPORT_SYMBOL_NOVERS(__rwsem_down_read_failed); -EXPORT_SYMBOL_NOVERS(__rwsem_wake); /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial_copy_generic); /* Delay loops */ diff -urN 2.4.4pre4/arch/i386/lib/Makefile 2.4.4pre4-rwsem/arch/i386/lib/Makefile --- 2.4.4pre4/arch/i386/lib/Makefile Thu Apr 19 02:17:06 2001 +++ 2.4.4pre4-rwsem/arch/i386/lib/Makefile Thu Apr 19 02:20:51 2001 @@ -9,7 +9,7 @@ obj-y = checksum.o old-checksum.o delay.o \ usercopy.o getuser.o putuser.o \ - memcpy.o strstr.o rwsem.o + memcpy.o strstr.o obj-$(CONFIG_X86_USE_3DNOW) += mmx.o obj-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o diff -urN 2.4.4pre4/arch/i386/lib/rwsem.S 2.4.4pre4-rwsem/arch/i386/lib/rwsem.S --- 2.4.4pre4/arch/i386/lib/rwsem.S Thu Apr 19 02:17:06 2001 +++ 2.4.4pre4-rwsem/arch/i386/lib/rwsem.S Thu Jan 1 01:00:00 1970 @@ -1,36 +0,0 @@ -/* rwsem.S: R/W semaphores, register saving wrapper function stubs - * - * Written by David Howells (dhowells@redhat.com). 
- * Derived from arch/i386/kernel/semaphore.c - */ - -.text -.align 4 -.globl __rwsem_down_read_failed -__rwsem_down_read_failed: - pushl %edx - pushl %ecx - call rwsem_down_read_failed - popl %ecx - popl %edx - ret - -.align 4 -.globl __rwsem_down_write_failed -__rwsem_down_write_failed: - pushl %edx - pushl %ecx - call rwsem_down_write_failed - popl %ecx - popl %edx - ret - -.align 4 -.globl __rwsem_wake -__rwsem_wake: - pushl %edx - pushl %ecx - call rwsem_wake - popl %ecx - popl %edx - ret diff -urN 2.4.4pre4/arch/ia64/config.in 2.4.4pre4-rwsem/arch/ia64/config.in --- 2.4.4pre4/arch/ia64/config.in Thu Apr 19 02:17:07 2001 +++ 2.4.4pre4-rwsem/arch/ia64/config.in Thu Apr 19 02:20:42 2001 @@ -23,8 +23,6 @@ define_bool CONFIG_EISA n define_bool CONFIG_MCA n define_bool CONFIG_SBUS n -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n choice 'IA-64 processor type' \ "Itanium CONFIG_ITANIUM \ diff -urN 2.4.4pre4/arch/ia64/kernel/semaphore.c 2.4.4pre4-rwsem/arch/ia64/kernel/semaphore.c --- 2.4.4pre4/arch/ia64/kernel/semaphore.c Thu Apr 19 02:17:07 2001 +++ 2.4.4pre4-rwsem/arch/ia64/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -155,3 +155,180 @@ spin_unlock_irqrestore(&semaphore_lock, flags); return 1; } + +/* + * Helper routines for rw semaphores. These could be optimized some + * more, but since they're off the critical path, I prefer clarity for + * now... + */ + +/* + * This gets called if we failed to acquire the lock, but we're biased + * to acquire the lock by virtue of causing the count to change from 0 + * to -1. Being biased, we sleep and attempt to grab the lock until + * we succeed. When this function returns, we own the lock. + */ +static inline void +down_read_failed_biased (struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->read_bias_granted) + schedule(); + } + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* + * This gets called if we failed to acquire the lock and we are not + * biased to acquire the lock. We undo the decrement that was + * done earlier, go to sleep, and then attempt to re-acquire the + * lock afterwards. + */ +static inline void +down_read_failed (struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + /* + * Undo the decrement we did in down_read() and check if we + * need to wake up someone. + */ + __up_read(sem); + + add_wait_queue(&sem->wait, &wait); + while (sem->count < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->count >= 0) + break; + schedule(); + } + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* + * Wait for the lock to become unbiased. Readers are non-exclusive. 
+ */ +void +__down_read_failed (struct rw_semaphore *sem, long count) +{ + while (1) { + if (count == -1) { + down_read_failed_biased(sem); + return; + } + /* unbiased */ + down_read_failed(sem); + + count = ia64_fetch_and_add(-1, &sem->count); + if (count >= 0) + return; + } +} + +static inline void +down_write_failed_biased (struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + /* put ourselves at the end of the list */ + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); + + for (;;) { + if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->write_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* + * If the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (sem->count >= 0) + wake_up(&sem->wait); +} + + +static inline void +down_write_failed (struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + __up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (sem->count < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->count >= 0) + break; /* we must attempt to acquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + + +/* + * Wait for the lock to become unbiased. Since we're a writer, we'll + * make ourselves exclusive. + */ +void +__down_write_failed (struct rw_semaphore *sem, long count) +{ + long old_count; + + while (1) { + if (count == -RW_LOCK_BIAS) { + down_write_failed_biased(sem); + return; + } + down_write_failed(sem); + + do { + old_count = sem->count; + count = old_count - RW_LOCK_BIAS; + } while (cmpxchg_acq(&sem->count, old_count, count) != old_count); + + if (count == 0) + return; + } +} + +void +__rwsem_wake (struct rw_semaphore *sem, long count) +{ + wait_queue_head_t *wq; + + if (count == 0) { + /* wake a writer */ + if (xchg(&sem->write_bias_granted, 1)) + BUG(); + wq = &sem->write_bias_wait; + } else { + /* wake reader(s) */ + if (xchg(&sem->read_bias_granted, 1)) + BUG(); + wq = &sem->wait; + } + wake_up(wq); /* wake up everyone on the wait queue */ +} diff -urN 2.4.4pre4/arch/m68k/config.in 2.4.4pre4-rwsem/arch/m68k/config.in --- 2.4.4pre4/arch/m68k/config.in Thu Apr 19 02:17:08 2001 +++ 2.4.4pre4-rwsem/arch/m68k/config.in Thu Apr 19 02:20:42 2001 @@ -4,8 +4,6 @@ # define_bool CONFIG_UID16 y -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_name "Linux/68k Kernel Configuration" diff -urN 2.4.4pre4/arch/m68k/kernel/semaphore.c 2.4.4pre4-rwsem/arch/m68k/kernel/semaphore.c --- 2.4.4pre4/arch/m68k/kernel/semaphore.c Thu Apr 19 02:17:09 2001 +++ 2.4.4pre4-rwsem/arch/m68k/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -130,3 +130,111 @@ { return waking_non_zero_trylock(sem); } + + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. 
=) + */ +void down_read_failed(struct rw_semaphore *sem) +{ + DECLARE_WAITQUEUE(wait, current); + + __up_read(sem); /* this takes care of granting the lock */ + + add_wait_queue(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(current, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + current->state = TASK_RUNNING; +} + +void down_read_failed_biased(struct rw_semaphore *sem) +{ + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0)) + break; + set_task_state(current, TASK_UNINTERRUPTIBLE); + if (!sem->read_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + current->state = TASK_RUNNING; +} + + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. + */ +void down_write_failed(struct rw_semaphore *sem) +{ + DECLARE_WAITQUEUE(wait, current); + + __up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(current, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; /* we must attempt to acquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + current->state = TASK_RUNNING; +} + +void down_write_failed_biased(struct rw_semaphore *sem) +{ + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0)) + break; + set_task_state(current, TASK_UNINTERRUPTIBLE); + if (!sem->write_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + current->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); +} + + +/* Called when someone has done an up that transitioned from + * negative to non-negative, meaning that the lock has been + * granted to whomever owned the bias. + */ +void rwsem_wake_readers(struct rw_semaphore *sem) +{ + if (xchg(&sem->read_bias_granted, 1)) + BUG(); + wake_up(&sem->wait); +} + +void rwsem_wake_writer(struct rw_semaphore *sem) +{ + if (xchg(&sem->write_bias_granted, 1)) + BUG(); + wake_up(&sem->write_bias_wait); +} diff -urN 2.4.4pre4/arch/mips/config.in 2.4.4pre4-rwsem/arch/mips/config.in --- 2.4.4pre4/arch/mips/config.in Thu Apr 19 02:17:09 2001 +++ 2.4.4pre4-rwsem/arch/mips/config.in Thu Apr 19 02:20:42 2001 @@ -28,9 +28,6 @@ bool 'Support for SGI IP22' CONFIG_SGI_IP22 bool 'Support for SNI RM200 PCI' CONFIG_SNI_RM200_PCI -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n - # # Select some configuration options automatically for certain systems. 
# diff -urN 2.4.4pre4/arch/mips/kernel/semaphore.c 2.4.4pre4-rwsem/arch/mips/kernel/semaphore.c --- 2.4.4pre4/arch/mips/kernel/semaphore.c Thu Apr 19 02:17:09 2001 +++ 2.4.4pre4-rwsem/arch/mips/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -127,3 +127,112 @@ { return waking_non_zero_trylock(sem); } + +/* + * RW Semaphores + */ +void +__down_read(struct rw_semaphore *sem, int count) +{ + DOWN_VAR; + + retry_down: + if (count < 0) { + /* Wait for the lock to become unbiased. Readers + are non-exclusive. */ + + /* This takes care of granting the lock. */ + up_read(sem); + + add_wait_queue(&sem->wait, &wait); + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + mb(); + count = atomic_dec_return(&sem->count); + if (count <= 0) + goto retry_down; + } else { + add_wait_queue(&sem->wait, &wait); + + while (1) { + if (test_and_clear_bit(0, &sem->granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if ((sem->granted & 1) == 0) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + } +} + +void +__down_write(struct rw_semaphore *sem, int count) +{ + DOWN_VAR; + + retry_down: + if (count + RW_LOCK_BIAS < 0) { + up_write(sem); + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= RW_LOCK_BIAS) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + mb(); + count = atomic_sub_return(RW_LOCK_BIAS, &sem->count); + if (count != 0) + goto retry_down; + } else { + /* Put ourselves at the end of the list. */ + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); + + while (1) { + if (test_and_clear_bit(1, &sem->granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if ((sem->granted & 2) == 0) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* If the lock is currently unbiased, awaken the sleepers. + FIXME: This wakes up the readers early in a bit of a + stampede -> bad! */ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); + } +} + +void +__rwsem_wake(struct rw_semaphore *sem, unsigned long readers) +{ + if (readers) { + if (test_and_set_bit(0, &sem->granted)) + BUG(); + wake_up(&sem->wait); + } else { + if (test_and_set_bit(1, &sem->granted)) + BUG(); + wake_up(&sem->write_bias_wait); + } +} diff -urN 2.4.4pre4/arch/mips64/config.in 2.4.4pre4-rwsem/arch/mips64/config.in --- 2.4.4pre4/arch/mips64/config.in Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/mips64/config.in Thu Apr 19 02:20:42 2001 @@ -25,9 +25,6 @@ fi endmenu -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n - # # Select some configuration options automatically based on user selections # diff -urN 2.4.4pre4/arch/mips64/kernel/semaphore.c 2.4.4pre4-rwsem/arch/mips64/kernel/semaphore.c --- 2.4.4pre4/arch/mips64/kernel/semaphore.c Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/mips64/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -127,3 +127,112 @@ { return waking_non_zero_trylock(sem); } + +/* + * RW Semaphores + */ +void +__down_read(struct rw_semaphore *sem, int count) +{ + DOWN_VAR; + + retry_down: + if (count < 0) { + /* Wait for the lock to become unbiased. Readers + are non-exclusive. */ + + /* This takes care of granting the lock. 
*/ + up_read(sem); + + add_wait_queue(&sem->wait, &wait); + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + mb(); + count = atomic_dec_return(&sem->count); + if (count <= 0) + goto retry_down; + } else { + add_wait_queue(&sem->wait, &wait); + + while (1) { + if (test_and_clear_bit(0, &sem->granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if ((sem->granted & 1) == 0) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + } +} + +void +__down_write(struct rw_semaphore *sem, int count) +{ + DOWN_VAR; + + retry_down: + if (count + RW_LOCK_BIAS < 0) { + up_write(sem); + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= RW_LOCK_BIAS) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + mb(); + count = atomic_sub_return(RW_LOCK_BIAS, &sem->count); + if (count != 0) + goto retry_down; + } else { + /* Put ourselves at the end of the list. */ + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); + + while (1) { + if (test_and_clear_bit(1, &sem->granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if ((sem->granted & 2) == 0) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* If the lock is currently unbiased, awaken the sleepers. + FIXME: This wakes up the readers early in a bit of a + stampede -> bad! */ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); + } +} + +void +__rwsem_wake(struct rw_semaphore *sem, unsigned long readers) +{ + if (readers) { + if (test_and_set_bit(0, &sem->granted)) + BUG(); + wake_up(&sem->wait); + } else { + if (test_and_set_bit(1, &sem->granted)) + BUG(); + wake_up(&sem->write_bias_wait); + } +} diff -urN 2.4.4pre4/arch/parisc/config.in 2.4.4pre4-rwsem/arch/parisc/config.in --- 2.4.4pre4/arch/parisc/config.in Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/parisc/config.in Thu Apr 19 02:20:42 2001 @@ -7,8 +7,6 @@ define_bool CONFIG_PARISC y define_bool CONFIG_UID16 n -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_option next_comment comment 'Code maturity level options' diff -urN 2.4.4pre4/arch/parisc/kernel/semaphore.c 2.4.4pre4-rwsem/arch/parisc/kernel/semaphore.c --- 2.4.4pre4/arch/parisc/kernel/semaphore.c Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/parisc/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -129,3 +129,111 @@ { return waking_non_zero_trylock(sem); } + + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. 
=) + */ +void down_read_failed(struct rw_semaphore *sem) +{ + DECLARE_WAITQUEUE(wait, current); + + __up_read(sem); /* this takes care of granting the lock */ + + add_wait_queue(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(current, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + current->state = TASK_RUNNING; +} + +void down_read_failed_biased(struct rw_semaphore *sem) +{ + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0)) + break; + set_task_state(current, TASK_UNINTERRUPTIBLE); + if (!sem->read_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + current->state = TASK_RUNNING; +} + + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. + */ +void down_write_failed(struct rw_semaphore *sem) +{ + DECLARE_WAITQUEUE(wait, current); + + __up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(current, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE); + if (atomic_read(&sem->count) >= 0) + break; /* we must attempt to aquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + current->state = TASK_RUNNING; +} + +void down_write_failed_biased(struct rw_semaphore *sem) +{ + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0)) + break; + set_task_state(current, TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE); + if (!sem->write_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + current->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); +} + + +/* Called when someone has done an up that transitioned from + * negative to non-negative, meaning that the lock has been + * granted to whomever owned the bias. + */ +void rwsem_wake_readers(struct rw_semaphore *sem) +{ + if (xchg(&sem->read_bias_granted, 1)) + BUG(); + wake_up(&sem->wait); +} + +void rwsem_wake_writer(struct rw_semaphore *sem) +{ + if (xchg(&sem->write_bias_granted, 1)) + BUG(); + wake_up(&sem->write_bias_wait); +} diff -urN 2.4.4pre4/arch/ppc/config.in 2.4.4pre4-rwsem/arch/ppc/config.in --- 2.4.4pre4/arch/ppc/config.in Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/ppc/config.in Thu Apr 19 02:20:51 2001 @@ -3,8 +3,7 @@ # see Documentation/kbuild/config-language.txt. 
# define_bool CONFIG_UID16 n -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n +define_bool CONFIG_GENERIC_RWSEM y mainmenu_name "Linux/PowerPC Kernel Configuration" diff -urN 2.4.4pre4/arch/ppc/kernel/ppc_ksyms.c 2.4.4pre4-rwsem/arch/ppc/kernel/ppc_ksyms.c --- 2.4.4pre4/arch/ppc/kernel/ppc_ksyms.c Sun Apr 1 01:17:11 2001 +++ 2.4.4pre4-rwsem/arch/ppc/kernel/ppc_ksyms.c Thu Apr 19 02:20:51 2001 @@ -332,8 +332,6 @@ EXPORT_SYMBOL(__down); EXPORT_SYMBOL(__down_interruptible); EXPORT_SYMBOL(__down_trylock); -EXPORT_SYMBOL(down_read_failed); -EXPORT_SYMBOL(down_write_failed); #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) extern void (*debugger)(struct pt_regs *regs); diff -urN 2.4.4pre4/arch/s390/config.in 2.4.4pre4-rwsem/arch/s390/config.in --- 2.4.4pre4/arch/s390/config.in Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/s390/config.in Thu Apr 19 02:20:42 2001 @@ -7,8 +7,6 @@ define_bool CONFIG_EISA n define_bool CONFIG_MCA n define_bool CONFIG_UID16 y -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_name "Linux Kernel Configuration" define_bool CONFIG_ARCH_S390 y diff -urN 2.4.4pre4/arch/s390/kernel/semaphore.c 2.4.4pre4-rwsem/arch/s390/kernel/semaphore.c --- 2.4.4pre4/arch/s390/kernel/semaphore.c Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/s390/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -158,3 +158,145 @@ spin_unlock_irqrestore(&semaphore_lock, flags); return 1; } + +void down_read_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->read_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +void down_write_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->write_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); +} + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. =) + */ +void down_read_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + up_read(sem); /* this takes care of granting the lock */ + + add_wait_queue(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. 
+ */ +void down_write_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; /* we must attempt to acquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* Called when someone has done an up that transitioned from + * negative to non-negative, meaning that the lock has been + * granted to whomever owned the bias. + */ +void rwsem_wake_readers(struct rw_semaphore *sem) +{ + if (xchg(&sem->read_bias_granted, 1)) + BUG(); + wake_up(&sem->wait); +} + +void rwsem_wake_writers(struct rw_semaphore *sem) +{ + if (xchg(&sem->write_bias_granted, 1)) + BUG(); + wake_up(&sem->write_bias_wait); +} + +void __down_read_failed(int count, struct rw_semaphore *sem) +{ + do { + if (count == -1) { + down_read_failed_biased(sem); + break; + } + down_read_failed(sem); + count = atomic_dec_return(&sem->count); + } while (count != 0); +} + +void __down_write_failed(int count, struct rw_semaphore *sem) +{ + do { + if (count < 0 && count > -RW_LOCK_BIAS) { + down_write_failed_biased(sem); + break; + } + down_write_failed(sem); + count = atomic_add_return(-RW_LOCK_BIAS, &sem->count); + } while (count != 0); +} + +void __rwsem_wake(int count, struct rw_semaphore *sem) +{ + if (count == 0) + rwsem_wake_readers(sem); + else + rwsem_wake_writers(sem); +} + diff -urN 2.4.4pre4/arch/s390x/config.in 2.4.4pre4-rwsem/arch/s390x/config.in --- 2.4.4pre4/arch/s390x/config.in Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/s390x/config.in Thu Apr 19 02:20:42 2001 @@ -6,8 +6,6 @@ define_bool CONFIG_ISA n define_bool CONFIG_EISA n define_bool CONFIG_MCA n -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_name "Linux Kernel Configuration" define_bool CONFIG_ARCH_S390 y diff -urN 2.4.4pre4/arch/s390x/kernel/semaphore.c 2.4.4pre4-rwsem/arch/s390x/kernel/semaphore.c --- 2.4.4pre4/arch/s390x/kernel/semaphore.c Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/s390x/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -158,3 +158,145 @@ spin_unlock_irqrestore(&semaphore_lock, flags); return 1; } + +void down_read_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->read_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +void down_write_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->write_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! 
+ */ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); +} + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. =) + */ +void down_read_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + up_read(sem); /* this takes care of granting the lock */ + + add_wait_queue(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. + */ +void down_write_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; /* we must attempt to acquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* Called when someone has done an up that transitioned from + * negative to non-negative, meaning that the lock has been + * granted to whomever owned the bias. + */ +void rwsem_wake_readers(struct rw_semaphore *sem) +{ + if (xchg(&sem->read_bias_granted, 1)) + BUG(); + wake_up(&sem->wait); +} + +void rwsem_wake_writers(struct rw_semaphore *sem) +{ + if (xchg(&sem->write_bias_granted, 1)) + BUG(); + wake_up(&sem->write_bias_wait); +} + +void __down_read_failed(int count, struct rw_semaphore *sem) +{ + do { + if (count == -1) { + down_read_failed_biased(sem); + break; + } + down_read_failed(sem); + count = atomic_dec_return(&sem->count); + } while (count != 0); +} + +void __down_write_failed(int count, struct rw_semaphore *sem) +{ + do { + if (count < 0 && count > -RW_LOCK_BIAS) { + down_write_failed_biased(sem); + break; + } + down_write_failed(sem); + count = atomic_add_return(-RW_LOCK_BIAS, &sem->count); + } while (count != 0); +} + +void __rwsem_wake(int count, struct rw_semaphore *sem) +{ + if (count == 0) + rwsem_wake_readers(sem); + else + rwsem_wake_writers(sem); +} + diff -urN 2.4.4pre4/arch/sh/config.in 2.4.4pre4-rwsem/arch/sh/config.in --- 2.4.4pre4/arch/sh/config.in Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/sh/config.in Thu Apr 19 02:20:42 2001 @@ -7,8 +7,6 @@ define_bool CONFIG_SUPERH y define_bool CONFIG_UID16 y -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n mainmenu_option next_comment comment 'Code maturity level options' diff -urN 2.4.4pre4/arch/sh/kernel/semaphore.c 2.4.4pre4-rwsem/arch/sh/kernel/semaphore.c --- 2.4.4pre4/arch/sh/kernel/semaphore.c Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/sh/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -135,3 +135,162 @@ { return waking_non_zero_trylock(sem); } + +/* Called when someone has done an up that transitioned from + * negative to non-negative, meaning that the lock has been + * granted to whomever owned the bias. 
+ */ +struct rw_semaphore *rwsem_wake_readers(struct rw_semaphore *sem) +{ + if (xchg(&sem->read_bias_granted, 1)) + BUG(); + wake_up(&sem->wait); + return sem; +} + +struct rw_semaphore *rwsem_wake_writer(struct rw_semaphore *sem) +{ + if (xchg(&sem->write_bias_granted, 1)) + BUG(); + wake_up(&sem->write_bias_wait); + return sem; +} + +struct rw_semaphore * __rwsem_wake(struct rw_semaphore *sem) +{ + if (atomic_read(&sem->count) == 0) + return rwsem_wake_writer(sem); + else + return rwsem_wake_readers(sem); +} + +struct rw_semaphore *down_read_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (sem->read_bias_granted && xchg(&sem->read_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->read_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + return sem; +} + +struct rw_semaphore *down_write_failed_biased(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (sem->write_bias_granted && xchg(&sem->write_bias_granted, 0)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!sem->write_bias_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (atomic_read(&sem->count) >= 0) + wake_up(&sem->wait); + + return sem; +} + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. =) + */ +struct rw_semaphore *down_read_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + __up_read(sem); /* this takes care of granting the lock */ + + add_wait_queue(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + return sem; +} + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. 
+ */ +struct rw_semaphore *down_write_failed(struct rw_semaphore *sem) +{ + struct task_struct *tsk = current; + DECLARE_WAITQUEUE(wait, tsk); + + __up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (atomic_read(&sem->count) < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (atomic_read(&sem->count) >= 0) + break; /* we must attempt to acquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; + + return sem; +} + +struct rw_semaphore *__down_read(struct rw_semaphore *sem, int carry) +{ + if (carry) { + int saved, new; + + do { + down_read_failed(sem); + saved = atomic_read(&sem->count); + if ((new = atomic_dec_return(&sem->count)) >= 0) + return sem; + } while (!(new < 0 && saved >=0)); + } + + return down_read_failed_biased(sem); +} + +struct rw_semaphore *__down_write(struct rw_semaphore *sem, int carry) +{ + if (carry) { + int saved, new; + + do { + down_write_failed(sem); + saved = atomic_read(&sem->count); + if ((new = atomic_sub_return(RW_LOCK_BIAS, &sem->count) ) == 0) + return sem; + } while (!(new < 0 && saved >=0)); + } + + return down_write_failed_biased(sem); +} diff -urN 2.4.4pre4/arch/sparc/config.in 2.4.4pre4-rwsem/arch/sparc/config.in --- 2.4.4pre4/arch/sparc/config.in Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/sparc/config.in Thu Apr 19 02:20:42 2001 @@ -48,8 +48,6 @@ define_bool CONFIG_SUN_CONSOLE y define_bool CONFIG_SUN_AUXIO y define_bool CONFIG_SUN_IO y -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n bool 'Support for SUN4 machines (disables SUN4[CDM] support)' CONFIG_SUN4 if [ "$CONFIG_SUN4" != "y" ]; then diff -urN 2.4.4pre4/arch/sparc/kernel/semaphore.c 2.4.4pre4-rwsem/arch/sparc/kernel/semaphore.c --- 2.4.4pre4/arch/sparc/kernel/semaphore.c Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/sparc/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -151,3 +151,119 @@ spin_unlock_irqrestore(&semaphore_lock, flags); return 1; } + +/* rw mutexes + * Implemented by Jakub Jelinek (jakub@redhat.com) based on + * i386 implementation by Ben LaHaise (bcrl@redhat.com). + */ + +extern inline int ldstub(unsigned char *p) +{ + int ret; + asm volatile("ldstub %1, %0" : "=r" (ret) : "m" (*p) : "memory"); + return ret; +} + +#define DOWN_VAR \ + struct task_struct *tsk = current; \ + DECLARE_WAITQUEUE(wait, tsk); + +void down_read_failed_biased(struct rw_semaphore *sem) +{ + DOWN_VAR + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (!ldstub(&sem->read_not_granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->read_not_granted) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +void down_write_failed_biased(struct rw_semaphore *sem) +{ + DOWN_VAR + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (!ldstub(&sem->write_not_granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->write_not_granted) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (sem->count >= 0) + wake_up(&sem->wait); +} + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. 
=) + */ +void down_read_failed(struct rw_semaphore *sem) +{ + DOWN_VAR + + __up_read(sem); /* this takes care of granting the lock */ + + add_wait_queue(&sem->wait, &wait); + + while (sem->count < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->count >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. + */ +void down_write_failed(struct rw_semaphore *sem) +{ + DOWN_VAR + + __up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (sem->count < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->count >= 0) + break; /* we must attempt to acquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers) +{ + if (readers) { + /* Due to lame ldstub we don't do here + a BUG() consistency check */ + sem->read_not_granted = 0; + wake_up(&sem->wait); + } else { + sem->write_not_granted = 0; + wake_up(&sem->write_bias_wait); + } +} diff -urN 2.4.4pre4/arch/sparc64/config.in 2.4.4pre4-rwsem/arch/sparc64/config.in --- 2.4.4pre4/arch/sparc64/config.in Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/sparc64/config.in Thu Apr 19 02:20:42 2001 @@ -33,8 +33,6 @@ # Global things across all Sun machines. define_bool CONFIG_HAVE_DEC_LOCK y -define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y -define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n define_bool CONFIG_ISA n define_bool CONFIG_EISA n define_bool CONFIG_MCA n diff -urN 2.4.4pre4/arch/sparc64/kernel/semaphore.c 2.4.4pre4-rwsem/arch/sparc64/kernel/semaphore.c --- 2.4.4pre4/arch/sparc64/kernel/semaphore.c Thu Apr 19 02:17:10 2001 +++ 2.4.4pre4-rwsem/arch/sparc64/kernel/semaphore.c Thu Apr 19 02:20:42 2001 @@ -126,3 +126,172 @@ { return waking_non_zero_trylock(sem); } + +/* rw mutexes + * Implemented by Jakub Jelinek (jakub@redhat.com) based on + * i386 implementation by Ben LaHaise (bcrl@redhat.com). 
+ */ + +asm(" + .text + .align 32 + .globl __down_read_failed +__down_read_failed: + save %sp, -160, %sp + membar #StoreStore + brz,pt %g5, 3f + mov %g7, %l0 +1: call down_read_failed + mov %l0, %o0 +2: lduw [%l0], %l1 + sub %l1, 1, %l2 + cas [%l0], %l1, %l2 + + cmp %l1, %l2 + bne,pn %icc, 2b + membar #StoreStore + subcc %l1, 1, %g0 + bpos,pt %icc, 4f + nop + bcc,pn %icc, 1b + nop + +3: call down_read_failed_biased + mov %l0, %o0 +4: ret + restore + .previous +"); + +asm(" + .text + .align 32 + .globl __down_write_failed +__down_write_failed: + save %sp, -160, %sp + membar #StoreStore + tst %g5 + bge,pt %icc, 3f + mov %g7, %l0 +1: call down_write_failed + mov %l0, %o0 +2: lduw [%l0], %l1 + sethi %hi (" RW_LOCK_BIAS_STR "), %l3 + sub %l1, %l3, %l2 + cas [%l0], %l1, %l2 + + cmp %l1, %l2 + bne,pn %icc, 2b + membar #StoreStore + subcc %l1, %l3, %g0 + be,pt %icc, 4f + nop + bcc,pn %icc, 1b + nop + +3: call down_write_failed_biased + mov %l0, %o0 +4: ret + restore + .previous +"); + +void down_read_failed_biased(struct rw_semaphore *sem) +{ + DOWN_VAR + + add_wait_queue(&sem->wait, &wait); /* put ourselves at the head of the list */ + + for (;;) { + if (test_and_clear_le_bit(0, &sem->granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!test_le_bit(0, &sem->granted)) + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +void down_write_failed_biased(struct rw_semaphore *sem) +{ + DOWN_VAR + + add_wait_queue_exclusive(&sem->write_bias_wait, &wait); /* put ourselves at the end of the list */ + + for (;;) { + if (test_and_clear_le_bit(1, &sem->granted)) + break; + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (!test_le_bit(1, &sem->granted)) + schedule(); + } + + remove_wait_queue(&sem->write_bias_wait, &wait); + tsk->state = TASK_RUNNING; + + /* if the lock is currently unbiased, awaken the sleepers + * FIXME: this wakes up the readers early in a bit of a + * stampede -> bad! + */ + if (sem->count >= 0) + wake_up(&sem->wait); +} + +/* Wait for the lock to become unbiased. Readers + * are non-exclusive. =) + */ +void down_read_failed(struct rw_semaphore *sem) +{ + DOWN_VAR + + __up_read(sem); /* this takes care of granting the lock */ + + add_wait_queue(&sem->wait, &wait); + + while (sem->count < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->count >= 0) + break; + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +/* Wait for the lock to become unbiased. Since we're + * a writer, we'll make ourselves exclusive. 
+ */ +void down_write_failed(struct rw_semaphore *sem) +{ + DOWN_VAR + + __up_write(sem); /* this takes care of granting the lock */ + + add_wait_queue_exclusive(&sem->wait, &wait); + + while (sem->count < 0) { + set_task_state(tsk, TASK_UNINTERRUPTIBLE); + if (sem->count >= 0) + break; /* we must attempt to acquire or bias the lock */ + schedule(); + } + + remove_wait_queue(&sem->wait, &wait); + tsk->state = TASK_RUNNING; +} + +void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers) +{ + if (readers) { + if (test_and_set_le_bit(0, &sem->granted)) + BUG(); + wake_up(&sem->wait); + } else { + if (test_and_set_le_bit(1, &sem->granted)) + BUG(); + wake_up(&sem->write_bias_wait); + } +} diff -urN 2.4.4pre4/include/asm-alpha/semaphore.h 2.4.4pre4-rwsem/include/asm-alpha/semaphore.h --- 2.4.4pre4/include/asm-alpha/semaphore.h Thu Apr 19 02:17:16 2001 +++ 2.4.4pre4-rwsem/include/asm-alpha/semaphore.h Thu Apr 19 02:20:51 2001 @@ -225,5 +225,3 @@ #endif #endif - -#endif diff -urN 2.4.4pre4/include/asm-arm/rwsem.h 2.4.4pre4-rwsem/include/asm-arm/rwsem.h --- 2.4.4pre4/include/asm-arm/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-arm/rwsem.h Thu Apr 19 02:25:41 2001 @@ -0,0 +1,132 @@ +#ifndef __ASM_ARM_RWSEM_H +#define __ASM_ARM_RWSEM_H +/* rw mutexes (should that be mutices? =) -- throw rw + * spinlocks and semaphores together, and this is what we + * end up with... + * + * The lock is initialized to BIAS. This way, a writer + * subtracts BIAS ands gets 0 for the case of an uncontended + * lock. Readers decrement by 1 and see a positive value + * when uncontended, negative if there are writers waiting + * (in which case it goes to sleep). + * + * In terms of fairness, this should result in the lock + * flopping back and forth between readers and writers + * under heavy use. 
+ * + * -ben + */ +struct rw_semaphore { + atomic_t count; + volatile unsigned char write_bias_granted; + volatile unsigned char read_bias_granted; + volatile unsigned char pad1; + volatile unsigned char pad2; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name,count) \ +{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) + +extern inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->read_bias_granted = 0; + sem->write_bias_granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +extern struct rw_semaphore *__down_read_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *__down_write_failed(struct rw_semaphore *sem); +extern struct rw_semaphore *__rwsem_wake(struct rw_semaphore *sem); + +extern inline void down_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + __down_op_read(sem, __down_read_failed); +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +extern inline void down_write(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + __down_op_write(sem, __down_write_failed); +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (sem->read_bias_granted) + BUG(); + if (sem->write_bias_granted) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +extern inline void up_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + __up_op_read(sem, __rwsem_wake); +} + +extern inline void up_write(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->read_bias_granted) + BUG(); + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + __up_op_write(sem, __rwsem_wake); +} + +#endif diff -urN 2.4.4pre4/include/asm-cris/rwsem.h 2.4.4pre4-rwsem/include/asm-cris/rwsem.h --- 2.4.4pre4/include/asm-cris/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-cris/rwsem.h Thu Apr 19 02:27:01 2001 @@ -0,0 +1,165 @@ +#ifndef _CRIS_RWSEM_H +#define _CRIS_RWSEM_H + +/* rw mutexes (should that be mutices? =) -- throw rw + * spinlocks and semaphores together, and this is what we + * end up with... + * + * The lock is initialized to BIAS. This way, a writer + * subtracts BIAS ands gets 0 for the case of an uncontended + * lock. 
Readers decrement by 1 and see a positive value + * when uncontended, negative if there are writers waiting + * (in which case it goes to sleep). + * + * In terms of fairness, this should result in the lock + * flopping back and forth between readers and writers + * under heavy use. + * + * -ben + */ + +struct rw_semaphore { + atomic_t count; + /* bit 0 means read bias granted; + bit 1 means write bias granted. */ + unsigned granted; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name,count) \ +{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) + +extern inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +/* The expensive part is outlined. */ +extern void __down_read(struct rw_semaphore *sem, int count); +extern void __down_write(struct rw_semaphore *sem, int count); +extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers); + +extern inline void down_read(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + count = atomic_dec_return(&sem->count); + if (count < 0) { + __down_read(sem, count); + } + mb(); + +#if WAITQUEUE_DEBUG + if (sem->granted & 2) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +extern inline void down_write(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + count = atomic_sub_return(RW_LOCK_BIAS, &sem->count); + if (count) { + __down_write(sem, count); + } + mb(); + +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (sem->granted & 3) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +/* When a reader does a release, the only significant case is when + there was a writer waiting, and we've bumped the count to 0: we must + wake the writer up. */ + +extern inline void up_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); + if (sem->granted & 2) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + + mb(); + if (atomic_inc_return(&sem->count) == 0) + __rwsem_wake(sem, 0); +} + +/* + * Releasing the writer is easy -- just release it and wake up any sleepers. 
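The up_write() that follows only calls __rwsem_wake() when the release actually crossed from negative to non-negative. A standalone restatement of that test (the helper name and the asserts are invented for the sketch):

	#include <assert.h>

	#define RW_LOCK_BIAS 0x01000000

	/* new_count is what atomic_add_return(RW_LOCK_BIAS, &sem->count)
	 * returned, so new_count - RW_LOCK_BIAS is what it saw before the
	 * add: wake only if that was negative (someone was queued behind
	 * us) and the new value is non-negative (nobody still holds it). */
	static int writer_release_must_wake(int new_count)
	{
		int old_count = new_count - RW_LOCK_BIAS;

		return old_count < 0 && new_count >= 0;
	}

	int main(void)
	{
		/* uncontended: the write lock held the count at 0, releasing
		 * takes it straight back to BIAS -- nobody to wake */
		assert(!writer_release_must_wake(RW_LOCK_BIAS));

		/* three readers queued while we held it (count was -3), so
		 * releasing leaves BIAS - 3 and they must be woken now */
		assert(writer_release_must_wake(RW_LOCK_BIAS - 3));
		return 0;
	}
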
+ */ +extern inline void up_write(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); + if (sem->granted & 3) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + + mb(); + count = atomic_add_return(RW_LOCK_BIAS, &sem->count); + if (count - RW_LOCK_BIAS < 0 && count >= 0) { + /* Only do the wake if we're no longer negative. */ + __rwsem_wake(sem, count); + } +} + +#endif diff -urN 2.4.4pre4/include/asm-i386/rwsem-spin.h 2.4.4pre4-rwsem/include/asm-i386/rwsem-spin.h --- 2.4.4pre4/include/asm-i386/rwsem-spin.h Thu Apr 19 02:17:17 2001 +++ 2.4.4pre4-rwsem/include/asm-i386/rwsem-spin.h Thu Jan 1 01:00:00 1970 @@ -1,322 +0,0 @@ -/* rwsem.h: R/W semaphores based on spinlocks - * - * Written by David Howells (dhowells@redhat.com). - * - * Derived from asm-i386/semaphore.h and asm-i386/spinlock.h - */ - -#ifndef _I386_RWSEM_SPIN_H -#define _I386_RWSEM_SPIN_H - -#include - -#ifndef _LINUX_RWSEM_H -#error please dont include asm/rwsem-spin.h directly, use linux/rwsem.h instead -#endif - -#include - -#ifdef __KERNEL__ - -/* - * the semaphore definition - */ -struct rw_semaphore { - signed long count; -#define RWSEM_UNLOCKED_VALUE 0x00000000 -#define RWSEM_ACTIVE_BIAS 0x00000001 -#define RWSEM_ACTIVE_MASK 0x0000ffff -#define RWSEM_WAITING_BIAS (-0x00010000) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t lock; -#define RWSEM_SPINLOCK_OFFSET_STR "4" /* byte offset of spinlock */ - wait_queue_head_t wait; -#define RWSEM_WAITING_FOR_READ WQ_FLAG_CONTEXT_0 /* bits to use in wait_queue_t.flags */ -#define RWSEM_WAITING_FOR_WRITE WQ_FLAG_CONTEXT_1 -#if RWSEM_DEBUG - int debug; -#endif -#if RWSEM_DEBUG_MAGIC - long __magic; - atomic_t readers; - atomic_t writers; -#endif -}; - -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif -#if RWSEM_DEBUG_MAGIC -#define __RWSEM_DEBUG_MINIT(name) , (int)&(name).__magic, ATOMIC_INIT(0), ATOMIC_INIT(0) -#else -#define __RWSEM_DEBUG_MINIT(name) /* */ -#endif - -#define __RWSEM_INITIALIZER(name,count) \ -{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ - __RWSEM_DEBUG_INIT __RWSEM_DEBUG_MINIT(name) } - -#define __DECLARE_RWSEM_GENERIC(name,count) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) - -#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) -#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) -#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->lock); - init_waitqueue_head(&sem->wait); -#if RWSEM_DEBUG - sem->debug = 0; -#endif -#if RWSEM_DEBUG_MAGIC - sem->__magic = (long)&sem->__magic; - atomic_set(&sem->readers, 0); - atomic_set(&sem->writers, 0); -#endif -} - -/* - * lock for reading - */ -static inline void __down_read(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning down_read\n\t" -#ifdef CONFIG_SMP -LOCK_PREFIX " decb "RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* try to grab the spinlock */ - " js 3f\n" /* jump if failed */ - "1:\n\t" -#endif - " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ -#ifdef CONFIG_SMP - " movb $1,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* release the spinlock */ 
-#endif - " js 4f\n\t" /* jump if we weren't granted the lock */ - "2:\n" - ".section .text.lock,\"ax\"\n" -#ifdef CONFIG_SMP - "3:\n\t" /* spin on the spinlock till we get it */ - " cmpb $0,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" - " rep;nop \n\t" - " jle 3b\n\t" - " jmp 1b\n" -#endif - "4:\n\t" - " call __rwsem_down_read_failed\n\t" - " jmp 2b\n" - ".previous" - "# ending __down_read\n\t" - : "=m"(sem->count), "=m"(sem->lock) - : "a"(sem), "m"(sem->count), "m"(sem->lock) - : "memory"); -} - -/* - * lock for writing - */ -static inline void __down_write(struct rw_semaphore *sem) -{ - int tmp; - - tmp = RWSEM_ACTIVE_WRITE_BIAS; - __asm__ __volatile__( - "# beginning down_write\n\t" -#ifdef CONFIG_SMP -LOCK_PREFIX " decb "RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* try to grab the spinlock */ - " js 3f\n" /* jump if failed */ - "1:\n\t" -#endif - " xchg %0,(%%eax)\n\t" /* retrieve the old value */ - " add %0,(%%eax)\n\t" /* add 0xffff0001, result in memory */ -#ifdef CONFIG_SMP - " movb $1,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* release the spinlock */ -#endif - " testl %0,%0\n\t" /* was the count 0 before? */ - " jnz 4f\n\t" /* jump if we weren't granted the lock */ - "2:\n\t" - ".section .text.lock,\"ax\"\n" -#ifdef CONFIG_SMP - "3:\n\t" /* spin on the spinlock till we get it */ - " cmpb $0,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" - " rep;nop \n\t" - " jle 3b\n\t" - " jmp 1b\n" -#endif - "4:\n\t" - " call __rwsem_down_write_failed\n\t" - " jmp 2b\n" - ".previous\n" - "# ending down_write" - : "+r"(tmp), "=m"(sem->count), "=m"(sem->lock) - : "a"(sem), "m"(sem->count), "m"(sem->lock) - : "memory"); -} - -/* - * unlock after reading - */ -static inline void __up_read(struct rw_semaphore *sem) -{ - int tmp; - - tmp = -RWSEM_ACTIVE_READ_BIAS; - __asm__ __volatile__( - "# beginning __up_read\n\t" -#ifdef CONFIG_SMP -LOCK_PREFIX " decb "RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* try to grab the spinlock */ - " js 3f\n" /* jump if failed */ - "1:\n\t" -#endif - " xchg %0,(%%eax)\n\t" /* retrieve the old value */ - " addl %0,(%%eax)\n\t" /* subtract 1, result in memory */ -#ifdef CONFIG_SMP - " movb $1,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* release the spinlock */ -#endif - " js 4f\n\t" /* jump if the lock is being waited upon */ - "2:\n\t" - ".section .text.lock,\"ax\"\n" -#ifdef CONFIG_SMP - "3:\n\t" /* spin on the spinlock till we get it */ - " cmpb $0,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" - " rep;nop \n\t" - " jle 3b\n\t" - " jmp 1b\n" -#endif - "4:\n\t" - " decl %0\n\t" /* xchg gave us the old count */ - " testl %4,%0\n\t" /* do nothing if still outstanding active readers */ - " jnz 2b\n\t" - " call __rwsem_wake\n\t" - " jmp 2b\n" - ".previous\n" - "# ending __up_read\n" - : "+r"(tmp), "=m"(sem->count), "=m"(sem->lock) - : "a"(sem), "i"(RWSEM_ACTIVE_MASK), "m"(sem->count), "m"(sem->lock) - : "memory"); -} - -/* - * unlock after writing - */ -static inline void __up_write(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning __up_write\n\t" -#ifdef CONFIG_SMP -LOCK_PREFIX " decb "RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* try to grab the spinlock */ - " js 3f\n" /* jump if failed */ - "1:\n\t" -#endif - " addl %3,(%%eax)\n\t" /* adds 0x00010001 */ -#ifdef CONFIG_SMP - " movb $1,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* release the spinlock */ -#endif - " js 4f\n\t" /* jump if the lock is being waited upon */ - "2:\n\t" - ".section .text.lock,\"ax\"\n" -#ifdef CONFIG_SMP - "3:\n\t" /* spin on the spinlock till we get it */ - " cmpb $0,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" - 
" rep;nop \n\t" - " jle 3b\n\t" - " jmp 1b\n" -#endif - "4:\n\t" - " call __rwsem_wake\n\t" - " jmp 2b\n" - ".previous\n" - "# ending __up_write\n" - : "=m"(sem->count), "=m"(sem->lock) - : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count), "m"(sem->lock) - : "memory"); -} - -/* - * implement exchange and add functionality - */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) -{ - int tmp = delta; - - __asm__ __volatile__( - "# beginning rwsem_atomic_update\n\t" -#ifdef CONFIG_SMP -LOCK_PREFIX " decb "RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t" /* try to grab the spinlock */ - " js 3f\n" /* jump if failed */ - "1:\n\t" -#endif - " xchgl %0,(%1)\n\t" /* retrieve the old value */ - " addl %0,(%1)\n\t" /* add 0xffff0001, result in memory */ -#ifdef CONFIG_SMP - " movb $1,"RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t" /* release the spinlock */ -#endif - ".section .text.lock,\"ax\"\n" -#ifdef CONFIG_SMP - "3:\n\t" /* spin on the spinlock till we get it */ - " cmpb $0,"RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t" - " rep;nop \n\t" - " jle 3b\n\t" - " jmp 1b\n" -#endif - ".previous\n" - "# ending rwsem_atomic_update\n\t" - : "+r"(tmp) - : "r"(sem) - : "memory"); - - return tmp+delta; -} - -/* - * implement compare and exchange functionality on the rw-semaphore count LSW - */ -static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new) -{ - __u16 prev; - - __asm__ __volatile__( - "# beginning rwsem_cmpxchgw\n\t" -#ifdef CONFIG_SMP -LOCK_PREFIX " decb "RWSEM_SPINLOCK_OFFSET_STR"(%3)\n\t" /* try to grab the spinlock */ - " js 3f\n" /* jump if failed */ - "1:\n\t" -#endif - " cmpw %w1,(%3)\n\t" - " jne 4f\n\t" /* jump if old doesn't match sem->count LSW */ - " movw %w2,(%3)\n\t" /* replace sem->count LSW with the new value */ - "2:\n\t" -#ifdef CONFIG_SMP - " movb $1,"RWSEM_SPINLOCK_OFFSET_STR"(%3)\n\t" /* release the spinlock */ -#endif - ".section .text.lock,\"ax\"\n" -#ifdef CONFIG_SMP - "3:\n\t" /* spin on the spinlock till we get it */ - " cmpb $0,"RWSEM_SPINLOCK_OFFSET_STR"(%3)\n\t" - " rep;nop \n\t" - " jle 3b\n\t" - " jmp 1b\n" -#endif - "4:\n\t" - " movw (%3),%w0\n" /* we'll want to return the current value */ - " jmp 2b\n" - ".previous\n" - "# ending rwsem_cmpxchgw\n\t" - : "=r"(prev) - : "r0"(old), "r"(new), "r"(sem) - : "memory"); - - return prev; -} - -#endif /* __KERNEL__ */ -#endif /* _I386_RWSEM_SPIN_H */ diff -urN 2.4.4pre4/include/asm-i386/rwsem-xadd.h 2.4.4pre4-rwsem/include/asm-i386/rwsem-xadd.h --- 2.4.4pre4/include/asm-i386/rwsem-xadd.h Thu Apr 19 02:17:17 2001 +++ 2.4.4pre4-rwsem/include/asm-i386/rwsem-xadd.h Thu Jan 1 01:00:00 1970 @@ -1,198 +0,0 @@ -/* rwsem-xadd.h: R/W semaphores implemented using XADD/CMPXCHG - * - * Written by David Howells (dhowells@redhat.com), 2001. 
- * Derived from asm-i386/semaphore.h - */ - -#ifndef _I386_RWSEM_XADD_H -#define _I386_RWSEM_XADD_H - -#ifndef _LINUX_RWSEM_H -#error please dont include asm/rwsem-xadd.h directly, use linux/rwsem.h instead -#endif - -#ifdef __KERNEL__ - -/* - * the semaphore definition - */ -struct rw_semaphore { - signed long count; -#define RWSEM_UNLOCKED_VALUE 0x00000000 -#define RWSEM_ACTIVE_BIAS 0x00000001 -#define RWSEM_ACTIVE_MASK 0x0000ffff -#define RWSEM_WAITING_BIAS (-0x00010000) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - wait_queue_head_t wait; -#define RWSEM_WAITING_FOR_READ WQ_FLAG_CONTEXT_0 /* bits to use in wait_queue_t.flags */ -#define RWSEM_WAITING_FOR_WRITE WQ_FLAG_CONTEXT_1 -#if RWSEM_DEBUG - int debug; -#endif -#if RWSEM_DEBUG_MAGIC - long __magic; - atomic_t readers; - atomic_t writers; -#endif -}; - -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif -#if RWSEM_DEBUG_MAGIC -#define __RWSEM_DEBUG_MINIT(name) , (int)&(name).__magic, ATOMIC_INIT(0), ATOMIC_INIT(0) -#else -#define __RWSEM_DEBUG_MINIT(name) /* */ -#endif - -#define __RWSEM_INITIALIZER(name,count) \ -{ RWSEM_UNLOCKED_VALUE, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ - __RWSEM_DEBUG_INIT __RWSEM_DEBUG_MINIT(name) } - -#define __DECLARE_RWSEM_GENERIC(name,count) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) - -#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) -#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) -#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - init_waitqueue_head(&sem->wait); -#if RWSEM_DEBUG - sem->debug = 0; -#endif -#if RWSEM_DEBUG_MAGIC - sem->__magic = (long)&sem->__magic; - atomic_set(&sem->readers, 0); - atomic_set(&sem->writers, 0); -#endif -} - -/* - * lock for reading - */ -static inline void __down_read(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning down_read\n\t" -LOCK_PREFIX " incl (%%eax)\n\t" /* adds 0x00000001, returns the old value */ - " js 2f\n\t" /* jump if we weren't granted the lock */ - "1:\n\t" - ".section .text.lock,\"ax\"\n" - "2:\n\t" - " call __rwsem_down_read_failed\n\t" - " jmp 1b\n" - ".previous" - "# ending down_read\n\t" - : "=m"(sem->count) - : "a"(sem), "m"(sem->count) - : "memory"); -} - -/* - * lock for writing - */ -static inline void __down_write(struct rw_semaphore *sem) -{ - int tmp; - - tmp = RWSEM_ACTIVE_WRITE_BIAS; - __asm__ __volatile__( - "# beginning down_write\n\t" -LOCK_PREFIX " xadd %0,(%%eax)\n\t" /* subtract 0x00010001, returns the old value */ - " testl %0,%0\n\t" /* was the count 0 before? 
*/ - " jnz 2f\n\t" /* jump if we weren't granted the lock */ - "1:\n\t" - ".section .text.lock,\"ax\"\n" - "2:\n\t" - " call __rwsem_down_write_failed\n\t" - " jmp 1b\n" - ".previous\n" - "# ending down_write" - : "+r"(tmp), "=m"(sem->count) - : "a"(sem), "m"(sem->count) - : "memory"); -} - -/* - * unlock after reading - */ -static inline void __up_read(struct rw_semaphore *sem) -{ - int tmp; - - tmp = -RWSEM_ACTIVE_READ_BIAS; - __asm__ __volatile__( - "# beginning __up_read\n\t" -LOCK_PREFIX " xadd %0,(%%eax)\n\t" /* subtracts 1, returns the old value */ - " js 2f\n\t" /* jump if the lock is being waited upon */ - "1:\n\t" - ".section .text.lock,\"ax\"\n" - "2:\n\t" - " decl %0\n\t" /* xadd gave us the old count */ - " testl %3,%0\n\t" /* do nothing if still outstanding active readers */ - " jnz 1b\n\t" - " call __rwsem_wake\n\t" - " jmp 1b\n" - ".previous\n" - "# ending __up_read\n" - : "+r"(tmp), "=m"(sem->count) - : "a"(sem), "i"(RWSEM_ACTIVE_MASK), "m"(sem->count) - : "memory"); -} - -/* - * unlock after writing - */ -static inline void __up_write(struct rw_semaphore *sem) -{ - __asm__ __volatile__( - "# beginning __up_write\n\t" -LOCK_PREFIX " addl %2,(%%eax)\n\t" /* adds 0x0000ffff */ - " js 2f\n\t" /* jump if the lock is being waited upon */ - "1:\n\t" - ".section .text.lock,\"ax\"\n" - "2:\n\t" - " call __rwsem_wake\n\t" - " jmp 1b\n" - ".previous\n" - "# ending __up_write\n" - : "=m"(sem->count) - : "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count) - : "memory"); -} - -/* - * implement exchange and add functionality - */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) -{ - int tmp = delta; - - __asm__ __volatile__( - LOCK_PREFIX "xadd %0,(%1)" - : "+r"(tmp) - : "r"(sem) - : "memory"); - - return tmp+delta; -} - -/* - * implement compare and exchange functionality on the rw-semaphore count LSW - */ -static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new) -{ - return cmpxchg((__u16*)&sem->count,0,RWSEM_ACTIVE_BIAS); -} - -#endif /* __KERNEL__ */ -#endif /* _I386_RWSEM_XADD_H */ diff -urN 2.4.4pre4/include/asm-i386/rwsem.h 2.4.4pre4-rwsem/include/asm-i386/rwsem.h --- 2.4.4pre4/include/asm-i386/rwsem.h Thu Apr 19 02:17:17 2001 +++ 2.4.4pre4-rwsem/include/asm-i386/rwsem.h Thu Jan 1 01:00:00 1970 @@ -1,29 +0,0 @@ -/* rwsem.h: R/W semaphores optimised using i386 assembly - * - * Written by David Howells (dhowells@redhat.com). - * - * Derived from asm-i386/semaphore.h - */ - -#ifndef _I386_RWSEM_H -#define _I386_RWSEM_H - -#ifndef _LINUX_RWSEM_H -#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead -#endif - -#ifdef __KERNEL__ - -#ifdef CONFIG_X86_XADD -#include /* use XADD based semaphores if possible */ -#else -#include /* use optimised spinlock based semaphores otherwise */ -#endif - -/* we use FASTCALL convention for the helpers */ -extern struct rw_semaphore *FASTCALL(__rwsem_down_read_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(__rwsem_down_write_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(__rwsem_wake(struct rw_semaphore *sem)); - -#endif /* __KERNEL__ */ -#endif /* _I386_RWSEM_H */ diff -urN 2.4.4pre4/include/asm-ia64/rwsem.h 2.4.4pre4-rwsem/include/asm-ia64/rwsem.h --- 2.4.4pre4/include/asm-ia64/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-ia64/rwsem.h Thu Apr 19 02:28:00 2001 @@ -0,0 +1,206 @@ +#ifndef _ASM_IA64_RWSEM_H +#define _ASM_IA64_RWSEM_H + +/* + * rw mutexes (should that be mutices? 
=) -- throw rw spinlocks and + * semaphores together, and this is what we end up with... + * + * The lock is initialized to BIAS. This way, a writer subtracts BIAS + * ands gets 0 for the case of an uncontended lock. Readers decrement + * by 1 and see a positive value when uncontended, negative if there + * are writers waiting (in which case it goes to sleep). BIAS must be + * chosen such that subtracting BIAS once per CPU will result either + * in zero (uncontended case) or in a negative value (contention + * case). On the other hand, BIAS must be at least as big as the + * number of processes in the system. + * + * On IA-64, we use a BIAS value of 0x100000000, which supports up to + * 2 billion (2^31) processors and 4 billion processes. + * + * In terms of fairness, when there is heavy use of the lock, we want + * to see the lock being passed back and forth between readers and + * writers (like in a producer/consumer style of communication). + * + * -ben (with clarifications & IA-64 comments by davidm) + */ +#define RW_LOCK_BIAS 0x100000000ul + +struct rw_semaphore { + volatile long count; + volatile __u8 write_bias_granted; + volatile __u8 read_bias_granted; + __u16 pad1; + __u32 pad2; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +# define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +# define __RWSEM_DEBUG_INIT +#endif + +#define __RWSEM_INITIALIZER(name,count) \ +{ \ + (count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT \ +} + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS - 1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, 0) + +extern void __down_read_failed (struct rw_semaphore *sem, long count); +extern void __down_write_failed (struct rw_semaphore *sem, long count); +extern void __rwsem_wake (struct rw_semaphore *sem, long count); + +static inline void +init_rwsem (struct rw_semaphore *sem) +{ + sem->count = RW_LOCK_BIAS; + sem->read_bias_granted = 0; + sem->write_bias_granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +static inline void +down_read (struct rw_semaphore *sem) +{ + long count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + count = ia64_fetch_and_add(-1, &sem->count); + if (count < 0) + __down_read_failed(sem, count); + +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +static inline void +down_write (struct rw_semaphore *sem) +{ + long old_count, new_count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + do { + old_count = sem->count; + new_count = old_count - RW_LOCK_BIAS; + } while (cmpxchg_acq(&sem->count, old_count, new_count) != old_count); + + if (new_count != 0) + __down_write_failed(sem, new_count); +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (sem->read_bias_granted) + BUG(); + if 
(sem->write_bias_granted) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +/* + * When a reader does a release, the only significant + * case is when there was a writer waiting, and we've + * bumped the count to 0: we must wake the writer up. + */ +static inline void +__up_read (struct rw_semaphore *sem) +{ + long count; + + count = ia64_fetch_and_add(1, &sem->count); + if (count == 0) + /* + * Other processes are blocked already; resolve + * contention by letting either a writer or a reader + * proceed... + */ + __rwsem_wake(sem, count); +} + +/* + * Releasing the writer is easy -- just release it and + * wake up any sleepers. + */ +static inline void +__up_write (struct rw_semaphore *sem) +{ + long old_count, new_count; + + do { + old_count = sem->count; + new_count = old_count + RW_LOCK_BIAS; + } while (cmpxchg_rel(&sem->count, old_count, new_count) != old_count); + + /* + * Note: new_count <u old_count <=> old_count < 0 && new_count >= 0 + * (where <u is the unsigned less-than comparison). + */ + if ((unsigned long) new_count < (unsigned long) old_count) + /* Only do the wake if we're no longer negative. */ + __rwsem_wake(sem, new_count); +} + +static inline void +up_read (struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + __up_read(sem); +} + +static inline void +up_write (struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->read_bias_granted) + BUG(); + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + __up_write(sem); +} + +#endif diff -urN 2.4.4pre4/include/asm-m68k/rwsem.h 2.4.4pre4-rwsem/include/asm-m68k/rwsem.h --- 2.4.4pre4/include/asm-m68k/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-m68k/rwsem.h Thu Apr 19 02:29:07 2001 @@ -0,0 +1,200 @@ +#ifndef _M68K_RWSEM_H +#define _M68K_RWSEM_H + +/* rw mutexes (should that be mutices? =) -- throw rw + * spinlocks and semaphores together, and this is what we + * end up with...
+ * + * m68k version by Roman Zippel + */ + +struct rw_semaphore { + atomic_t count; + volatile unsigned char write_bias_granted; + volatile unsigned char read_bias_granted; + volatile unsigned char pad1; + volatile unsigned char pad2; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name,count) \ +{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) + +extern inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->read_bias_granted = 0; + sem->write_bias_granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +extern inline void down_read(struct rw_semaphore *sem) +{ + register struct rw_semaphore *__sem __asm__ ("%a1") = sem; + +#if WAITQUEUE_DEBUG + if (sem->__magic != (long)&sem->__magic) + BUG(); +#endif + __asm__ __volatile__( + "| atomic down_read operation\n\t" + "subql #1,%0@\n\t" + "jmi 2f\n" + "1:\n" + ".section .text.lock,\"ax\"\n" + ".even\n" + "2:\n\t" + "pea 1b\n\t" + "jbra __down_read_failed\n" + ".previous" + : /* no outputs */ + : "a" (__sem) + : "memory"); +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +extern inline void down_write(struct rw_semaphore *sem) +{ + register struct rw_semaphore *__sem __asm__ ("%a1") = sem; + +#if WAITQUEUE_DEBUG + if (sem->__magic != (long)&sem->__magic) + BUG(); +#endif + __asm__ __volatile__( + "| atomic down_write operation\n\t" + "subl %1,%0@\n\t" + "jne 2f\n" + "1:\n" + ".section .text.lock,\"ax\"\n" + ".even\n" + "2:\n\t" + "pea 1b\n\t" + "jbra __down_write_failed\n" + ".previous" + : /* no outputs */ + : "a" (__sem), "id" (RW_LOCK_BIAS) + : "memory"); +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (sem->read_bias_granted) + BUG(); + if (sem->write_bias_granted) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +/* When a reader does a release, the only significant + * case is when there was a writer waiting, and we've + * bumped the count to 0: we must wake the writer up. 
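Why does exactly 0 identify that case? A waiting writer has already subtracted BIAS, which leaves the count at minus the number of readers still inside, so the last reader's increment is the one that reaches 0. A quick standalone check of that arithmetic (plain C, illustrative only):

	#include <assert.h>

	#define RW_LOCK_BIAS 0x01000000

	int main(void)
	{
		int count = RW_LOCK_BIAS;

		count -= 1;			/* reader A in */
		count -= 1;			/* reader B in */
		count -= RW_LOCK_BIAS;		/* a writer queues up and sleeps */
		assert(count == -2);		/* == -(readers still holding it) */

		count += 1;			/* reader A leaves: still negative, */
		assert(count < 0);		/* nothing to do */

		count += 1;			/* reader B leaves: hits 0, so B is */
		assert(count == 0);		/* the one who wakes the writer */
		return 0;
	}
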
+ */ +extern inline void __up_read(struct rw_semaphore *sem) +{ + register struct rw_semaphore *__sem __asm__ ("%a1") = sem; + + __asm__ __volatile__( + "| atomic up_read operation\n\t" + "addql #1,%0@\n\t" + "jeq 2f\n" + "1:\n" + ".section .text.lock,\"ax\"\n" + ".even\n" + "2:\n\t" + "pea 1b\n\t" + "jbra __rwsem_wake\n" + ".previous" + : /* no outputs */ + : "a" (__sem) + : "memory"); +} + +extern inline void up_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + __up_read(sem); +} + +/* releasing the writer is easy -- just release it and + * wake up any sleepers. + */ +extern inline void __up_write(struct rw_semaphore *sem) +{ + register struct rw_semaphore *__sem __asm__ ("%a1") = sem; + + __asm__ __volatile__( + "| atomic up_write operation\n\t" + "addl %1,%0@\n\t" + "jcs 2f\n" + "1:\n" + ".section .text.lock,\"ax\"\n" + ".even\n" + "2:\n\t" + "pea 1b\n\t" + "jbra __rwsem_wake\n" + ".previous" + : /* no outputs */ + : "a" (__sem), "id" (RW_LOCK_BIAS) + : "memory"); +} + +extern inline void up_write(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->read_bias_granted) + BUG(); + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + __up_write(sem); +} +#endif /* __ASSEMBLY__ */ + +#endif diff -urN 2.4.4pre4/include/asm-m68k/semaphore.h 2.4.4pre4-rwsem/include/asm-m68k/semaphore.h --- 2.4.4pre4/include/asm-m68k/semaphore.h Thu Apr 19 02:17:18 2001 +++ 2.4.4pre4-rwsem/include/asm-m68k/semaphore.h Thu Apr 19 02:30:13 2001 @@ -186,6 +186,4 @@ : "memory"); } -#endif /* __ASSEMBLY__ */ - #endif diff -urN 2.4.4pre4/include/asm-mips/rwsem.h 2.4.4pre4-rwsem/include/asm-mips/rwsem.h --- 2.4.4pre4/include/asm-mips/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-mips/rwsem.h Thu Apr 19 02:29:50 2001 @@ -0,0 +1,344 @@ +#ifndef _ASM_SEMAPHORE_H +#define _ASM_SEMAPHORE_H + +/* + * rw mutexes (should that be mutices? =) -- throw rw spinlocks and + * semaphores together, and this is what we end up with... + * + * The lock is initialized to BIAS. This way, a writer subtracts BIAS ands + * gets 0 for the case of an uncontended lock. Readers decrement by 1 and + * see a positive value when uncontended, negative if there are writers + * waiting (in which case it goes to sleep). + * + * The value 0x01000000 supports up to 128 processors and lots of processes. + * BIAS must be chosen such that subtracting BIAS once per CPU will result + * in the int remaining negative. In terms of fairness, this should result + * in the lock flopping back and forth between readers and writers under + * heavy use. + * + * Once we start supporting machines with more than 128 CPUs, we should go + * for using a 64bit atomic type instead of 32bit as counter. We shall + * probably go for bias 0x80000000 then, so that single sethi can set it. + * */ + +#define RW_LOCK_BIAS 0x01000000 + +struct rw_semaphore { + atomic_t count; + /* bit 0 means read bias granted; + bit 1 means write bias granted. 
*/ + unsigned granted; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name,count) \ + { ATOMIC_INIT(count), 0, \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) \ + __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) \ + __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) \ + __DECLARE_RWSEM_GENERIC(name, 0) + +extern inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +/* The expensive part is outlined. */ +extern void __down_read(struct rw_semaphore *sem, int count); +extern void __down_write(struct rw_semaphore *sem, int count); +extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers); + +extern inline void down_read(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + count = atomic_dec_return(&sem->count); + if (count < 0) { + __down_read(sem, count); + } + mb(); + +#if WAITQUEUE_DEBUG + if (sem->granted & 2) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +extern inline void down_write(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + count = atomic_sub_return(RW_LOCK_BIAS, &sem->count); + if (count) { + __down_write(sem, count); + } + mb(); + +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (sem->granted & 3) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +/* When a reader does a release, the only significant case is when + there was a writer waiting, and we've bumped the count to 0: we must + wake the writer up. */ + +extern inline void up_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); + if (sem->granted & 2) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + + mb(); + if (atomic_inc_return(&sem->count) == 0) + __rwsem_wake(sem, 0); +} + +/* + * Releasing the writer is easy -- just release it and wake up any sleepers. + */ +extern inline void up_write(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); + if (sem->granted & 3) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + + mb(); + count = atomic_add_return(RW_LOCK_BIAS, &sem->count); + if (count - RW_LOCK_BIAS < 0 && count >= 0) { + /* Only do the wake if we're no longer negative. */ + __rwsem_wake(sem, count); + } +} + +/* + * rw mutexes (should that be mutices? =) -- throw rw spinlocks and + * semaphores together, and this is what we end up with... + * + * The lock is initialized to BIAS. 
This way, a writer subtracts BIAS ands + * gets 0 for the case of an uncontended lock. Readers decrement by 1 and + * see a positive value when uncontended, negative if there are writers + * waiting (in which case it goes to sleep). + * + * The value 0x01000000 supports up to 128 processors and lots of processes. + * BIAS must be chosen such that subtracting BIAS once per CPU will result + * in the int remaining negative. In terms of fairness, this should result + * in the lock flopping back and forth between readers and writers under + * heavy use. + * + * Once we start supporting machines with more than 128 CPUs, we should go + * for using a 64bit atomic type instead of 32bit as counter. We shall + * probably go for bias 0x80000000 then, so that single sethi can set it. + * */ + +#define RW_LOCK_BIAS 0x01000000 + +struct rw_semaphore { + atomic_t count; + /* bit 0 means read bias granted; + bit 1 means write bias granted. */ + unsigned long granted; /* pedant: long req'd for set_bit */ + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name,count) \ + { ATOMIC_INIT(count), 0, \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) \ + __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) \ + __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) \ + __DECLARE_RWSEM_GENERIC(name, 0) + +extern inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +/* The expensive part is outlined. */ +extern void __down_read(struct rw_semaphore *sem, int count); +extern void __down_write(struct rw_semaphore *sem, int count); +extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers); + +extern inline void down_read(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + count = atomic_dec_return(&sem->count); + if (count < 0) { + __down_read(sem, count); + } + mb(); + +#if WAITQUEUE_DEBUG + if (sem->granted & 2) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +extern inline void down_write(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + count = atomic_sub_return(RW_LOCK_BIAS, &sem->count); + if (count) { + __down_write(sem, count); + } + mb(); + +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (sem->granted & 3) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +/* When a reader does a release, the only significant case is when + there was a writer waiting, and we've bumped the count to 0: we must + wake the writer up. 
*/ + +extern inline void up_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); + if (sem->granted & 2) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + + mb(); + if (atomic_inc_return(&sem->count) == 0) + __rwsem_wake(sem, 0); +} + +/* + * Releasing the writer is easy -- just release it and wake up any sleepers. + */ +extern inline void up_write(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); + if (sem->granted & 3) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + + mb(); + count = atomic_add_return(RW_LOCK_BIAS, &sem->count); + if (count - RW_LOCK_BIAS < 0 && count >= 0) { + /* Only do the wake if we're no longer negative. */ + __rwsem_wake(sem, count); + } +} + +#endif diff -urN 2.4.4pre4/include/asm-mips/semaphore.h 2.4.4pre4-rwsem/include/asm-mips/semaphore.h --- 2.4.4pre4/include/asm-mips/semaphore.h Thu Apr 19 02:17:18 2001 +++ 2.4.4pre4-rwsem/include/asm-mips/semaphore.h Thu Apr 19 02:29:58 2001 @@ -190,174 +190,4 @@ __up(sem); } -/* - * rw mutexes (should that be mutices? =) -- throw rw spinlocks and - * semaphores together, and this is what we end up with... - * - * The lock is initialized to BIAS. This way, a writer subtracts BIAS ands - * gets 0 for the case of an uncontended lock. Readers decrement by 1 and - * see a positive value when uncontended, negative if there are writers - * waiting (in which case it goes to sleep). - * - * The value 0x01000000 supports up to 128 processors and lots of processes. - * BIAS must be chosen such that subtracting BIAS once per CPU will result - * in the int remaining negative. In terms of fairness, this should result - * in the lock flopping back and forth between readers and writers under - * heavy use. - * - * Once we start supporting machines with more than 128 CPUs, we should go - * for using a 64bit atomic type instead of 32bit as counter. We shall - * probably go for bias 0x80000000 then, so that single sethi can set it. - * */ - -#define RW_LOCK_BIAS 0x01000000 - -struct rw_semaphore { - atomic_t count; - /* bit 0 means read bias granted; - bit 1 means write bias granted. 
*/ - unsigned long granted; /* pedant: long req'd for set_bit */ - wait_queue_head_t wait; - wait_queue_head_t write_bias_wait; -#if WAITQUEUE_DEBUG - long __magic; - atomic_t readers; - atomic_t writers; -#endif -}; - -#if WAITQUEUE_DEBUG -#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif - -#define __RWSEM_INITIALIZER(name,count) \ - { ATOMIC_INIT(count), 0, \ - __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ - __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ - __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } - -#define __DECLARE_RWSEM_GENERIC(name,count) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) - -#define DECLARE_RWSEM(name) \ - __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS) -#define DECLARE_RWSEM_READ_LOCKED(name) \ - __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1) -#define DECLARE_RWSEM_WRITE_LOCKED(name) \ - __DECLARE_RWSEM_GENERIC(name, 0) - -extern inline void init_rwsem(struct rw_semaphore *sem) -{ - atomic_set(&sem->count, RW_LOCK_BIAS); - sem->granted = 0; - init_waitqueue_head(&sem->wait); - init_waitqueue_head(&sem->write_bias_wait); -#if WAITQUEUE_DEBUG - sem->__magic = (long)&sem->__magic; - atomic_set(&sem->readers, 0); - atomic_set(&sem->writers, 0); -#endif -} - -/* The expensive part is outlined. */ -extern void __down_read(struct rw_semaphore *sem, int count); -extern void __down_write(struct rw_semaphore *sem, int count); -extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers); - -extern inline void down_read(struct rw_semaphore *sem) -{ - int count; - -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = atomic_dec_return(&sem->count); - if (count < 0) { - __down_read(sem, count); - } - mb(); - -#if WAITQUEUE_DEBUG - if (sem->granted & 2) - BUG(); - if (atomic_read(&sem->writers)) - BUG(); - atomic_inc(&sem->readers); -#endif -} - -extern inline void down_write(struct rw_semaphore *sem) -{ - int count; - -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); -#endif - - count = atomic_sub_return(RW_LOCK_BIAS, &sem->count); - if (count) { - __down_write(sem, count); - } - mb(); - -#if WAITQUEUE_DEBUG - if (atomic_read(&sem->writers)) - BUG(); - if (atomic_read(&sem->readers)) - BUG(); - if (sem->granted & 3) - BUG(); - atomic_inc(&sem->writers); -#endif -} - -/* When a reader does a release, the only significant case is when - there was a writer waiting, and we've bumped the count to 0: we must - wake the writer up. */ - -extern inline void up_read(struct rw_semaphore *sem) -{ -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); - if (sem->granted & 2) - BUG(); - if (atomic_read(&sem->writers)) - BUG(); - atomic_dec(&sem->readers); -#endif - - mb(); - if (atomic_inc_return(&sem->count) == 0) - __rwsem_wake(sem, 0); -} - -/* - * Releasing the writer is easy -- just release it and wake up any sleepers. - */ -extern inline void up_write(struct rw_semaphore *sem) -{ - int count; - -#if WAITQUEUE_DEBUG - CHECK_MAGIC(sem->__magic); - if (sem->granted & 3) - BUG(); - if (atomic_read(&sem->readers)) - BUG(); - if (atomic_read(&sem->writers) != 1) - BUG(); - atomic_dec(&sem->writers); -#endif - - mb(); - count = atomic_add_return(RW_LOCK_BIAS, &sem->count); - if (count - RW_LOCK_BIAS < 0 && count >= 0) { - /* Only do the wake if we're no longer negative. 
*/ - __rwsem_wake(sem, count); - } -} - #endif /* _ASM_SEMAPHORE_H */ diff -urN 2.4.4pre4/include/asm-mips64/rwsem.h 2.4.4pre4-rwsem/include/asm-mips64/rwsem.h --- 2.4.4pre4/include/asm-mips64/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-mips64/rwsem.h Thu Apr 19 02:31:33 2001 @@ -0,0 +1,177 @@ +#ifndef _ASM_RWSEM_H +#define _ASM_RWSEM_H +/* + * rw mutexes (should that be mutices? =) -- throw rw spinlocks and + * semaphores together, and this is what we end up with... + * + * The lock is initialized to BIAS. This way, a writer subtracts BIAS ands + * gets 0 for the case of an uncontended lock. Readers decrement by 1 and + * see a positive value when uncontended, negative if there are writers + * waiting (in which case it goes to sleep). + * + * The value 0x01000000 supports up to 128 processors and lots of processes. + * BIAS must be chosen such that subtracting BIAS once per CPU will result + * in the int remaining negative. In terms of fairness, this should result + * in the lock flopping back and forth between readers and writers under + * heavy use. + * + * Once we start supporting machines with more than 128 CPUs, we should go + * for using a 64bit atomic type instead of 32bit as counter. We shall + * probably go for bias 0x80000000 then, so that single sethi can set it. + * */ + +#define RW_LOCK_BIAS 0x01000000 + +struct rw_semaphore { + atomic_t count; + /* bit 0 means read bias granted; + bit 1 means write bias granted. */ + unsigned long granted; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name,count) \ + { ATOMIC_INIT(count), 0, \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) \ + __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) \ + __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) \ + __DECLARE_RWSEM_GENERIC(name, 0) + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +/* The expensive part is outlined. 
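That split is what the down_read()/down_write() fast paths below implement: one atomic op inline, and only on contention a call into the out-of-line helper, handing it the count the atomic op returned. A stripped-down user-space sketch of the shape of that pattern (the down_read_fast/__down_read_slow names are invented for the example):

	#include <stdio.h>

	#define RW_LOCK_BIAS 0x01000000

	static int count = RW_LOCK_BIAS;	/* stands in for the atomic_t */

	static void __down_read_slow(int seen)	/* stands in for __down_read() */
	{
		printf("contended (saw %d), would go to sleep here\n", seen);
	}

	static void down_read_fast(void)
	{
		int seen = --count;	/* atomic_dec_return() in the real code */

		if (seen < 0)
			__down_read_slow(seen);	/* the outlined, expensive part */
	}

	int main(void)
	{
		down_read_fast();		/* uncontended: stays inline */
		count -= RW_LOCK_BIAS;		/* pretend a writer queued up */
		down_read_fast();		/* contended: hits the slow path */
		return 0;
	}
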
*/ +extern void __down_read(struct rw_semaphore *sem, int count); +extern void __down_write(struct rw_semaphore *sem, int count); +extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers); + +static inline void down_read(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + count = atomic_dec_return(&sem->count); + if (count < 0) { + __down_read(sem, count); + } + mb(); + +#if WAITQUEUE_DEBUG + if (sem->granted & 2) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +static inline void down_write(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + count = atomic_sub_return(RW_LOCK_BIAS, &sem->count); + if (count) { + __down_write(sem, count); + } + mb(); + +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (sem->granted & 3) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +/* When a reader does a release, the only significant case is when + there was a writer waiting, and we've bumped the count to 0: we must + wake the writer up. */ + +static inline void up_read(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); + if (sem->granted & 2) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + + mb(); + count = atomic_inc_return(&sem->count); + if (count == 0) { + __rwsem_wake(sem, 0); + } +} + +/* + * Releasing the writer is easy -- just release it and wake up any sleepers. + */ +static inline void up_write(struct rw_semaphore *sem) +{ + int count; + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); + if (sem->granted & 3) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + + mb(); + count = atomic_add_return(RW_LOCK_BIAS, &sem->count); + if (count - RW_LOCK_BIAS < 0 && count >= 0) { + /* Only do the wake if we're no longer negative. */ + __rwsem_wake(sem, count); + } +} + +#endif diff -urN 2.4.4pre4/include/asm-parisc/rwsem.h 2.4.4pre4-rwsem/include/asm-parisc/rwsem.h --- 2.4.4pre4/include/asm-parisc/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-parisc/rwsem.h Thu Apr 19 02:32:20 2001 @@ -0,0 +1,166 @@ +#ifndef _ASM_PARISC_RWSEM_H +#define _ASM_PARISC_RWSEM_H +/* rw mutexes (should that be mutices? =) -- throw rw + * spinlocks and semaphores together, and this is what we + * end up with... + * + * The lock is initialized to BIAS. This way, a writer + * subtracts BIAS ands gets 0 for the case of an uncontended + * lock. Readers decrement by 1 and see a positive value + * when uncontended, negative if there are writers waiting + * (in which case it goes to sleep). + * + * The value 0x01000000 supports up to 128 processors and + * lots of processes. BIAS must be chosen such that subl'ing + * BIAS once per CPU will result in the long remaining + * negative. + * + * In terms of fairness, this should result in the lock + * flopping back and forth between readers and writers + * under heavy use. 
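The "128 processors" figure quoted above is just 32-bit arithmetic: every CPU that is in the middle of down_write() costs one BIAS of head-room below zero, while readers cost 1 each. A quick check of those numbers (illustrative only):

	#include <stdio.h>

	#define RW_LOCK_BIAS 0x01000000			/* 2^24 */

	int main(void)
	{
		/* a signed 32-bit count has 2^31 of room below zero, and each
		 * CPU concurrently subtracting BIAS eats 2^24 of it */
		printf("CPUs before wrap: %lld\n", (1LL << 31) / RW_LOCK_BIAS);	/* 128 */

		/* readers only cost 1 each, hence "lots of processes" */
		printf("reader head-room: %d\n", RW_LOCK_BIAS);			/* 16777216 */

		return 0;
	}
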
+ * + * -ben + */ +struct rw_semaphore { + atomic_t count; + volatile unsigned char write_bias_granted; + volatile unsigned char read_bias_granted; + volatile unsigned char pad1; + volatile unsigned char pad2; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define RW_LOCK_BIAS 0x01000000 + +#define __RWSEM_INITIALIZER(name,count) \ +{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) + +extern inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->read_bias_granted = 0; + sem->write_bias_granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +#ifdef FIXME_WILLY_FIXME_FOR_REAL_THIS_TIME +extern struct rw_semaphore *__build_read_lock(struct rw_semaphore *sem, const char *what); +extern struct rw_semaphore *__build_write_lock(struct rw_semaphore *sem, const char *what); +#endif + +/* we use FASTCALL convention for the helpers */ +extern struct rw_semaphore *FASTCALL(__down_read_failed(struct rw_semaphore *sem)); +extern struct rw_semaphore *FASTCALL(__down_write_failed(struct rw_semaphore *sem)); +extern struct rw_semaphore *FASTCALL(__rwsem_wake(struct rw_semaphore *sem)); + +extern inline void down_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->__magic != (long)&sem->__magic) + BUG(); +#endif +#ifdef FIXME_WILLY_FIXME_FOR_REAL_THIS_TIME + __build_read_lock(sem, "__down_read_failed"); +#endif +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +extern inline void down_write(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->__magic != (long)&sem->__magic) + BUG(); +#endif +#ifdef FIXME_WILLY_FIXME_FOR_REAL_THIS_TIME + __build_write_lock(sem, "__down_write_failed"); +#endif +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (sem->read_bias_granted) + BUG(); + if (sem->write_bias_granted) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +/* When a reader does a release, the only significant + * case is when there was a writer waiting, and we've + * bumped the count to 0: we must wake the writer up. + */ +extern inline void __up_read(struct rw_semaphore *sem) +{ +} + +/* releasing the writer is easy -- just release it and + * wake up any sleepers. 
+ */ +extern inline void __up_write(struct rw_semaphore *sem) +{ +} + +extern inline void up_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + __up_read(sem); +} + +extern inline void up_write(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->read_bias_granted) + BUG(); + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + __up_write(sem); +} + +#endif diff -urN 2.4.4pre4/include/asm-parisc/semaphore.h 2.4.4pre4-rwsem/include/asm-parisc/semaphore.h --- 2.4.4pre4/include/asm-parisc/semaphore.h Thu Apr 19 02:17:19 2001 +++ 2.4.4pre4-rwsem/include/asm-parisc/semaphore.h Thu Apr 19 02:32:26 2001 @@ -2,6 +2,7 @@ #define _ASM_PARISC_SEMAPHORE_H #include +#include /* * SMP- and interrupt-safe semaphores. @@ -17,7 +18,6 @@ */ #include -#include #include #include diff -urN 2.4.4pre4/include/asm-ppc/semaphore.h 2.4.4pre4-rwsem/include/asm-ppc/semaphore.h --- 2.4.4pre4/include/asm-ppc/semaphore.h Thu Apr 19 02:17:19 2001 +++ 2.4.4pre4-rwsem/include/asm-ppc/semaphore.h Thu Apr 19 02:20:51 2001 @@ -14,7 +14,6 @@ #include #include #include -#include struct semaphore { atomic_t count; @@ -106,7 +105,6 @@ if (atomic_inc_return(&sem->count) <= 0) __up(sem); } - #endif /* __KERNEL__ */ diff -urN 2.4.4pre4/include/asm-s390/rwsem.h 2.4.4pre4-rwsem/include/asm-s390/rwsem.h --- 2.4.4pre4/include/asm-s390/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-s390/rwsem.h Thu Apr 19 02:33:23 2001 @@ -0,0 +1,101 @@ +#ifndef _S390_RWSEM_H +#define _S390_RWSEM_H + +/* rw mutexes (should that be mutices? =) -- throw rw + * spinlocks and semaphores together, and this is what we + * end up with... + * + * The lock is initialized to BIAS. This way, a writer + * subtracts BIAS ands gets 0 for the case of an uncontended + * lock. Readers decrement by 1 and see a positive value + * when uncontended, negative if there are writers waiting + * (in which case it goes to sleep). + * + * The value 0x01000000 supports up to 128 processors and + * lots of processes. BIAS must be chosen such that subl'ing + * BIAS once per CPU will result in the long remaining + * negative. + * + * In terms of fairness, this should result in the lock + * flopping back and forth between readers and writers + * under heavy use. 
+ * + * -ben + */ +struct rw_semaphore { + atomic_t count; + volatile unsigned int write_bias_granted; + volatile unsigned int read_bias_granted; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +}; + +#define RW_LOCK_BIAS 0x01000000 + +#define __RWSEM_DEBUG_INIT /* */ + +#define __RWSEM_INITIALIZER(name,count) \ +{ ATOMIC_INIT(count), 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->read_bias_granted = 0; + sem->write_bias_granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +} + +extern void __down_read_failed(int, struct rw_semaphore *); +extern void __down_write_failed(int, struct rw_semaphore *); +extern void __rwsem_wake(int, struct rw_semaphore *); + +static inline void down_read(struct rw_semaphore *sem) +{ + int count; + count = atomic_dec_return(&sem->count); + if (count < 0) + __down_read_failed(count, sem); +} + +static inline void down_write(struct rw_semaphore *sem) +{ + int count; + count = atomic_add_return (-RW_LOCK_BIAS, &sem->count); + if (count < 0) + __down_write_failed(count, sem); +} + +/* When a reader does a release, the only significant + * case is when there was a writer waiting, and we've + * bumped the count to 0: we must wake the writer up. + */ +static inline void up_read(struct rw_semaphore *sem) +{ + int count; + count = atomic_inc_return(&sem->count); + if (count == 0) + __rwsem_wake(count, sem); +} + +/* releasing the writer is easy -- just release it and + * wake up any sleepers. + */ +static inline void up_write(struct rw_semaphore *sem) +{ + int count; + count = atomic_add_return(RW_LOCK_BIAS, &sem->count); + if (count >= 0 && count < RW_LOCK_BIAS) + __rwsem_wake(count, sem); +} + +#endif diff -urN 2.4.4pre4/include/asm-s390x/rwsem.h 2.4.4pre4-rwsem/include/asm-s390x/rwsem.h --- 2.4.4pre4/include/asm-s390x/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-s390x/rwsem.h Thu Apr 19 02:33:59 2001 @@ -0,0 +1,101 @@ +#ifndef _S390_RWSEM_H +#define _S390_RWSEM_H + +/* rw mutexes (should that be mutices? =) -- throw rw + * spinlocks and semaphores together, and this is what we + * end up with... + * + * The lock is initialized to BIAS. This way, a writer + * subtracts BIAS ands gets 0 for the case of an uncontended + * lock. Readers decrement by 1 and see a positive value + * when uncontended, negative if there are writers waiting + * (in which case it goes to sleep). + * + * The value 0x01000000 supports up to 128 processors and + * lots of processes. BIAS must be chosen such that subl'ing + * BIAS once per CPU will result in the long remaining + * negative. + * + * In terms of fairness, this should result in the lock + * flopping back and forth between readers and writers + * under heavy use. 
+ * + * -ben + */ +struct rw_semaphore { + atomic_t count; + volatile unsigned int write_bias_granted; + volatile unsigned int read_bias_granted; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +}; + +#define RW_LOCK_BIAS 0x01000000 + +#define __RWSEM_DEBUG_INIT /* */ + +#define __RWSEM_INITIALIZER(name,count) \ +{ ATOMIC_INIT(count), 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->read_bias_granted = 0; + sem->write_bias_granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +} + +extern void __down_read_failed(int, struct rw_semaphore *); +extern void __down_write_failed(int, struct rw_semaphore *); +extern void __rwsem_wake(int, struct rw_semaphore *); + +static inline void down_read(struct rw_semaphore *sem) +{ + int count; + count = atomic_dec_return(&sem->count); + if (count < 0) + __down_read_failed(count, sem); +} + +static inline void down_write(struct rw_semaphore *sem) +{ + int count; + count = atomic_add_return (-RW_LOCK_BIAS, &sem->count); + if (count < 0) + __down_write_failed(count, sem); +} + +/* When a reader does a release, the only significant + * case is when there was a writer waiting, and we've + * bumped the count to 0: we must wake the writer up. + */ +static inline void up_read(struct rw_semaphore *sem) +{ + int count; + count = atomic_inc_return(&sem->count); + if (count == 0) + __rwsem_wake(count, sem); +} + +/* releasing the writer is easy -- just release it and + * wake up any sleepers. + */ +static inline void up_write(struct rw_semaphore *sem) +{ + int count; + count = atomic_add_return(RW_LOCK_BIAS, &sem->count); + if (count >= 0 && count < RW_LOCK_BIAS) + __rwsem_wake(count, sem); +} + +#endif diff -urN 2.4.4pre4/include/asm-sh/rwsem.h 2.4.4pre4-rwsem/include/asm-sh/rwsem.h --- 2.4.4pre4/include/asm-sh/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-sh/rwsem.h Thu Apr 19 02:35:04 2001 @@ -0,0 +1,149 @@ +#ifndef __ASM_SH_RWSEM_H +#define __ASM_SH_RWSEM_H + +/* rw mutexes (should that be mutices? =) -- throw rw + * spinlocks and semaphores together, and this is what we + * end up with... 
+ * + * SuperH version by Niibe Yutaka + */ +struct rw_semaphore { + atomic_t count; + volatile unsigned char write_bias_granted; + volatile unsigned char read_bias_granted; + volatile unsigned char pad1; + volatile unsigned char pad2; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#define RW_LOCK_BIAS 0x01000000 + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name,count) \ +{ ATOMIC_INIT(count), 0, 0, 0, 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) + +extern inline void init_rwsem(struct rw_semaphore *sem) +{ + atomic_set(&sem->count, RW_LOCK_BIAS); + sem->read_bias_granted = 0; + sem->write_bias_granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +extern inline void down_read(struct rw_semaphore *sem) +{ + int saved = atomic_read(&sem->count), new; +#if WAITQUEUE_DEBUG + if (sem->__magic != (long)&sem->__magic) + BUG(); +#endif + if ((new = atomic_dec_return(&sem->count)) < 0) + __down_read(sem, (new < 0 && saved >=0)); +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +extern inline void down_write(struct rw_semaphore *sem) +{ + int saved = atomic_read(&sem->count), new; +#if WAITQUEUE_DEBUG + if (sem->__magic != (long)&sem->__magic) + BUG(); +#endif + if ((new = atomic_sub_return(RW_LOCK_BIAS, &sem->count)) != 0) + __down_write(sem, (new < 0 && saved >=0)); +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (sem->read_bias_granted) + BUG(); + if (sem->write_bias_granted) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +/* When a reader does a release, the only significant + * case is when there was a writer waiting, and we've + * bumped the count to 0: we must wake the writer up. + */ +extern inline void __up_read(struct rw_semaphore *sem) +{ + if (atomic_inc_return(&sem->count) == 0) + __rwsem_wake(sem); +} + +/* releasing the writer is easy -- just release it and + * wake up any sleepers. 
+ */ +extern inline void __up_write(struct rw_semaphore *sem) +{ + int saved = atomic_read(&sem->count), new; + + new = atomic_add_return(RW_LOCK_BIAS, &sem->count); + if (saved < 0 && new >= 0) + __rwsem_wake(sem); +} + +extern inline void up_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + __up_read(sem); +} + +extern inline void up_write(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (sem->read_bias_granted) + BUG(); + if (sem->write_bias_granted) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + __up_write(sem); +} + +#endif diff -urN 2.4.4pre4/include/asm-sparc/rwsem.h 2.4.4pre4-rwsem/include/asm-sparc/rwsem.h --- 2.4.4pre4/include/asm-sparc/rwsem.h Thu Jan 1 01:00:00 1970 +++ 2.4.4pre4-rwsem/include/asm-sparc/rwsem.h Thu Apr 19 02:35:50 2001 @@ -0,0 +1,199 @@ +#ifndef _SPARC_RWSEM_H +#define _SPARC_RWSEM_H + +/* rw mutexes (should that be mutices? =) -- throw rw + * spinlocks and semaphores together, and this is what we + * end up with... + * + * The lock is initialized to BIAS. This way, a writer + * subtracts BIAS ands gets 0 for the case of an uncontended + * lock. Readers decrement by 1 and see a positive value + * when uncontended, negative if there are writers waiting + * (in which case it goes to sleep). + * + * The value 0x01000000 supports up to 128 processors and + * lots of processes. BIAS must be chosen such that subtracting + * BIAS once per CPU will result in the int remaining + * negative. + * In terms of fairness, this should result in the lock + * flopping back and forth between readers and writers + * under heavy use. 
+ * + * -ben + */ +#define RW_LOCK_BIAS 0x01000000 + +struct rw_semaphore { + int count; + unsigned char lock; + unsigned char read_not_granted; + unsigned char write_not_granted; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name,count) \ +{ (count), 0, 0xff, 0xff, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) + +static inline void init_rwsem(struct rw_semaphore *sem) +{ + sem->count = RW_LOCK_BIAS; + sem->lock = 0; + sem->read_not_granted = 0xff; + sem->write_not_granted = 0xff; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +extern void ___down_read(/* Special calling convention */ void); +extern void ___down_write(/* Special calling convention */ void); +extern void ___up_read(/* Special calling convention */ void); +extern void ___up_write(/* Special calling convention */ void); + +static inline void down_read(struct rw_semaphore *sem) +{ + register volatile int *ptr asm("g1"); + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + ptr = &sem->count; + + __asm__ __volatile__(" + mov %%o7, %%g4 + call %1 + add %%o7, 8, %%o7 + " + :: "r" (ptr), "i" (___down_read) + : "g2", "g3", "g4", "g7", "memory", "cc"); +#if WAITQUEUE_DEBUG + if (!sem->write_not_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +static inline void down_write(struct rw_semaphore *sem) +{ + register volatile int *ptr asm("g1"); + +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + + ptr = &sem->count; + + __asm__ __volatile__(" + mov %%o7, %%g4 + call %1 + add %%o7, 8, %%o7 + " + :: "r" (ptr), "i" (___down_write) + : "g2", "g3", "g4", "g7", "memory", "cc"); +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (!sem->read_not_granted) + BUG(); + if (!sem->write_not_granted) + BUG(); + atomic_inc(&sem->writers); +#endif +} + +/* When a reader does a release, the only significant + * case is when there was a writer waiting, and we've + * bumped the count to 0: we must wake the writer up. + */ +static inline void __up_read(struct rw_semaphore *sem) +{ + register volatile int *ptr asm("g1"); + + ptr = &sem->count; + + __asm__ __volatile__(" + mov %%o7, %%g4 + call %1 + add %%o7, 8, %%o7 + " + :: "r" (ptr), "i" (___up_read) + : "g2", "g3", "g4", "g7", "memory", "cc"); +} + +/* releasing the writer is easy -- just release it and + * wake up any sleepers. 
+ */ +static inline void __up_write(struct rw_semaphore *sem) +{ + register volatile int *ptr asm("g1"); + + ptr = &sem->count; + + __asm__ __volatile__(" + mov %%o7, %%g4 + call %1 + add %%o7, 8, %%o7 + " + :: "r" (ptr), "i" (___up_write) + : "g2", "g3", "g4", "g7", "memory", "cc"); +} + +static inline void up_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (!sem->write_not_granted) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + __up_read(sem); +} + +static inline void up_write(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (!sem->read_not_granted) + BUG(); + if (!sem->write_not_granted) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + __up_write(sem); +} + +#endif diff -urN 2.4.4pre4/include/asm-sparc64/rwsem.h 2.4.4pre4-rwsem/include/asm-sparc64/rwsem.h --- 2.4.4pre4/include/asm-sparc64/rwsem.h Thu Apr 19 02:17:19 2001 +++ 2.4.4pre4-rwsem/include/asm-sparc64/rwsem.h Thu Apr 19 02:36:44 2001 @@ -2,10 +2,266 @@ #ifndef _SPARC64_RWSEM_H #define _SPARC64_RWSEM_H -#ifndef _LINUX_RWSEM_H -#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead +/* rw mutexes (should that be mutices? =) -- throw rw + * spinlocks and semaphores together, and this is what we + * end up with... + * + * The lock is initialized to BIAS. This way, a writer + * subtracts BIAS ands gets 0 for the case of an uncontended + * lock. Readers decrement by 1 and see a positive value + * when uncontended, negative if there are writers waiting + * (in which case it goes to sleep). + * + * The value 0x01000000 supports up to 128 processors and + * lots of processes. BIAS must be chosen such that subtracting + * BIAS once per CPU will result in the int remaining + * negative. + * In terms of fairness, this should result in the lock + * flopping back and forth between readers and writers + * under heavy use. + * + * -ben + * + * Once we start supporting machines with more than 128 CPUs, + * we should go for using a 64bit atomic type instead of 32bit + * as counter. We shall probably go for bias 0x80000000 then, + * so that single sethi can set it. + * + * -jj + */ +#define RW_LOCK_BIAS 0x01000000 +#define RW_LOCK_BIAS_STR "0x01000000" + +struct rw_semaphore { + int count; + /* So that this does not have to be 64bit type, + * we'll use le bitops on it which use casa instead of casx. 
+ * bit 0 means read bias granted + * bit 1 means write bias granted + */ + unsigned granted; + wait_queue_head_t wait; + wait_queue_head_t write_bias_wait; +#if WAITQUEUE_DEBUG + long __magic; + atomic_t readers; + atomic_t writers; +#endif +}; + +#if WAITQUEUE_DEBUG +#define __RWSEM_DEBUG_INIT , ATOMIC_INIT(0), ATOMIC_INIT(0) +#else +#define __RWSEM_DEBUG_INIT /* */ +#endif + +#define __RWSEM_INITIALIZER(name,count) \ +{ (count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \ + __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait) \ + __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT } + +#define __DECLARE_RWSEM_GENERIC(name,count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) + +#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) + +extern inline void init_rwsem(struct rw_semaphore *sem) +{ + sem->count = RW_LOCK_BIAS; + sem->granted = 0; + init_waitqueue_head(&sem->wait); + init_waitqueue_head(&sem->write_bias_wait); +#if WAITQUEUE_DEBUG + sem->__magic = (long)&sem->__magic; + atomic_set(&sem->readers, 0); + atomic_set(&sem->writers, 0); +#endif +} + +extern void __down_read_failed(/* Special calling convention */ void); +extern void __down_write_failed(/* Special calling convention */ void); +extern void __rwsem_wake(struct rw_semaphore *sem, unsigned long readers); + +extern inline void down_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + __asm__ __volatile__(" + 1: lduw [%0], %%g5 + subcc %%g5, 1, %%g7 + cas [%0], %%g5, %%g7 + bneg,pn %%icc, 3f + cmp %%g5, %%g7 + bne,pn %%icc, 1b + membar #StoreStore + 2: + .subsection 2 + 3: bne,pn %%icc, 1b + mov %0, %%g7 + save %%sp, -160, %%sp + mov %%g1, %%l1 + mov %%g2, %%l2 + call %1 + mov %%g3, %%l3 + mov %%l1, %%g1 + mov %%l2, %%g2 + ba,pt %%xcc, 2b + restore %%l3, %%g0, %%g3 + .previous\n" + : : "r" (sem), "i" (__down_read_failed) + : "g5", "g7", "memory", "cc"); +#if WAITQUEUE_DEBUG + if (test_le_bit(1, &sem->granted)) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_inc(&sem->readers); +#endif +} + +extern inline void down_write(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + CHECK_MAGIC(sem->__magic); +#endif + __asm__ __volatile__(" + 1: lduw [%0], %%g5 + sethi %%hi(" RW_LOCK_BIAS_STR "), %%g7 + subcc %%g5, %%g7, %%g7 + cas [%0], %%g5, %%g7 + bne,pn %%icc, 3f + cmp %%g5, %%g7 + bne,pn %%icc, 1b + membar #StoreStore + 2: + .subsection 2 + 3: bne,pn %%icc, 1b + mov %0, %%g7 + save %%sp, -160, %%sp + mov %%g1, %%l1 + mov %%g2, %%l2 + call %1 + mov %%g3, %%l3 + mov %%l1, %%g1 + mov %%l2, %%g2 + ba,pt %%xcc, 2b + restore %%l3, %%g0, %%g3 + .previous\n" + : : "r" (sem), "i" (__down_write_failed) + : "g5", "g7", "memory", "cc"); +#if WAITQUEUE_DEBUG + if (atomic_read(&sem->writers)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (test_le_bit(0, &sem->granted)) + BUG(); + if (test_le_bit(1, &sem->granted)) + BUG(); + atomic_inc(&sem->writers); #endif +} -#undef __HAVE_ARCH_SPECIFIC_RWSEM_IMPLEMENTATION +/* When a reader does a release, the only significant + * case is when there was a writer waiting, and we've + * bumped the count to 0: we must wake the writer up. 
+ */ +extern inline void __up_read(struct rw_semaphore *sem) +{ + __asm__ __volatile__(" + membar #StoreLoad | #LoadLoad + 1: lduw [%0], %%g5 + addcc %%g5, 1, %%g7 + cas [%0], %%g5, %%g7 + be,pn %%icc, 3f + cmp %%g5, %%g7 + bne,pn %%icc, 1b + nop + 2: + .subsection 2 + 3: bne,pn %%icc, 1b + mov %0, %%g7 + save %%sp, -160, %%sp + mov %%g1, %%l1 + mov %%g2, %%l2 + clr %%o1 + mov %%g7, %%o0 + call %1 + mov %%g3, %%l3 + mov %%l1, %%g1 + mov %%l2, %%g2 + ba,pt %%xcc, 2b + restore %%l3, %%g0, %%g3 + .previous\n" + : : "r" (sem), "i" (__rwsem_wake) + : "g5", "g7", "memory", "cc"); +} + +/* releasing the writer is easy -- just release it and + * wake up any sleepers. + */ +extern inline void __up_write(struct rw_semaphore *sem) +{ + __asm__ __volatile__(" + membar #StoreLoad | #LoadLoad + 1: lduw [%0], %%g5 + sethi %%hi(" RW_LOCK_BIAS_STR "), %%g7 + add %%g5, %%g7, %%g7 + cas [%0], %%g5, %%g7 + cmp %%g5, %%g7 + bne,pn %%icc, 1b + sethi %%hi(" RW_LOCK_BIAS_STR "), %%g7 + addcc %%g5, %%g7, %%g5 + bcs,pn %%icc, 3f + nop + 2: + .subsection 2 + 3: mov %0, %%g7 + save %%sp, -160, %%sp + mov %%g1, %%l1 + mov %%g2, %%l2 + srl %%g5, 0, %%o1 + mov %%g7, %%o0 + call %1 + mov %%g3, %%l3 + mov %%l1, %%g1 + mov %%l2, %%g2 + ba,pt %%xcc, 2b + restore %%l3, %%g0, %%g3 + .previous\n" + : : "r" (sem), "i" (__rwsem_wake) + : "g5", "g7", "memory", "cc"); +} + +extern inline void up_read(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (test_le_bit(1, &sem->granted)) + BUG(); + if (atomic_read(&sem->writers)) + BUG(); + atomic_dec(&sem->readers); +#endif + __up_read(sem); +} + +extern inline void up_write(struct rw_semaphore *sem) +{ +#if WAITQUEUE_DEBUG + if (test_le_bit(0, &sem->granted)) + BUG(); + if (test_le_bit(1, &sem->granted)) + BUG(); + if (atomic_read(&sem->readers)) + BUG(); + if (atomic_read(&sem->writers) != 1) + BUG(); + atomic_dec(&sem->writers); +#endif + __up_write(sem); +} #endif /* _SPARC64_RWSEM_H */ diff -urN 2.4.4pre4/include/linux/rwsem-spinlock.h 2.4.4pre4-rwsem/include/linux/rwsem-spinlock.h --- 2.4.4pre4/include/linux/rwsem-spinlock.h Thu Apr 19 02:17:20 2001 +++ 2.4.4pre4-rwsem/include/linux/rwsem-spinlock.h Thu Jan 1 01:00:00 1970 @@ -1,172 +0,0 @@ -/* rwsem-spinlock.h: fallback C implementation - * - * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
- */ - -#ifndef _LINUX_RWSEM_SPINLOCK_H -#define _LINUX_RWSEM_SPINLOCK_H - -#ifndef _LINUX_RWSEM_H -#error please dont include asm/rwsem-spinlock.h directly, use linux/rwsem.h instead -#endif - -#include - -#ifdef __KERNEL__ - -/* - * the semaphore definition - */ -struct rw_semaphore { - signed long count; -#define RWSEM_UNLOCKED_VALUE 0x00000000 -#define RWSEM_ACTIVE_BIAS 0x00000001 -#define RWSEM_ACTIVE_MASK 0x0000ffff -#define RWSEM_WAITING_BIAS (-0x00010000) -#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS -#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) - spinlock_t lock; -#define RWSEM_SPINLOCK_OFFSET_STR "4" /* byte offset of spinlock */ - wait_queue_head_t wait; -#define RWSEM_WAITING_FOR_READ WQ_FLAG_CONTEXT_0 /* bits to use in wait_queue_t.flags */ -#define RWSEM_WAITING_FOR_WRITE WQ_FLAG_CONTEXT_1 -#if RWSEM_DEBUG - int debug; -#endif -#if RWSEM_DEBUG_MAGIC - long __magic; - atomic_t readers; - atomic_t writers; -#endif -}; - -/* - * initialisation - */ -#if RWSEM_DEBUG -#define __RWSEM_DEBUG_INIT , 0 -#else -#define __RWSEM_DEBUG_INIT /* */ -#endif -#if RWSEM_DEBUG_MAGIC -#define __RWSEM_DEBUG_MINIT(name) , (int)&(name).__magic, ATOMIC_INIT(0), ATOMIC_INIT(0) -#else -#define __RWSEM_DEBUG_MINIT(name) /* */ -#endif - -#define __RWSEM_INITIALIZER(name,count) \ -{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \ - __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \ - __RWSEM_DEBUG_INIT __RWSEM_DEBUG_MINIT(name) } - -#define __DECLARE_RWSEM_GENERIC(name,count) \ - struct rw_semaphore name = __RWSEM_INITIALIZER(name,count) - -#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS) -#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1) -#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0) - -static inline void init_rwsem(struct rw_semaphore *sem) -{ - sem->count = RWSEM_UNLOCKED_VALUE; - spin_lock_init(&sem->lock); - init_waitqueue_head(&sem->wait); -#if RWSEM_DEBUG - sem->debug = 0; -#endif -#if RWSEM_DEBUG_MAGIC - sem->__magic = (long)&sem->__magic; - atomic_set(&sem->readers, 0); - atomic_set(&sem->writers, 0); -#endif -} - -/* - * lock for reading - */ -static inline void __down_read(struct rw_semaphore *sem) -{ - int count; - spin_lock(&sem->lock); - sem->count += RWSEM_ACTIVE_READ_BIAS; - count = sem->count; - spin_unlock(&sem->lock); - if (count<0) - rwsem_down_read_failed(sem); -} - -/* - * lock for writing - */ -static inline void __down_write(struct rw_semaphore *sem) -{ - int count; - spin_lock(&sem->lock); - count = sem->count; - sem->count += RWSEM_ACTIVE_WRITE_BIAS; - spin_unlock(&sem->lock); - if (count) - rwsem_down_write_failed(sem); -} - -/* - * unlock after reading - */ -static inline void __up_read(struct rw_semaphore *sem) -{ - int count; - spin_lock(&sem->lock); - count = sem->count; - sem->count -= RWSEM_ACTIVE_READ_BIAS; - spin_unlock(&sem->lock); - if (count<0 && !((count-RWSEM_ACTIVE_READ_BIAS)&RWSEM_ACTIVE_MASK)) - rwsem_wake(sem); -} - -/* - * unlock after writing - */ -static inline void __up_write(struct rw_semaphore *sem) -{ - int count; - spin_lock(&sem->lock); - sem->count -= RWSEM_ACTIVE_WRITE_BIAS; - count = sem->count; - spin_unlock(&sem->lock); - if (count<0) - rwsem_wake(sem); -} - -/* - * implement exchange and add functionality - */ -static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem) -{ - int count; - - spin_lock(&sem->lock); - sem->count += delta; - count = sem->count; - spin_unlock(&sem->lock); - - return count; -} - -/* - * implement 
compare and exchange functionality on the rw-semaphore count LSW - */ -static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new) -{ - __u16 prev; - - spin_lock(&sem->lock); - prev = sem->count & RWSEM_ACTIVE_MASK; - if (prev==old) - sem->count = (sem->count & ~RWSEM_ACTIVE_MASK) | new; - spin_unlock(&sem->lock); - - return prev; -} - -#endif /* __KERNEL__ */ -#endif /* _LINUX_RWSEM_SPINLOCK_H */ diff -urN 2.4.4pre4/include/linux/rwsem.h 2.4.4pre4-rwsem/include/linux/rwsem.h --- 2.4.4pre4/include/linux/rwsem.h Thu Apr 19 02:17:20 2001 +++ 2.4.4pre4-rwsem/include/linux/rwsem.h Thu Apr 19 02:20:51 2001 @@ -1,156 +1,132 @@ -/* rwsem.h: R/W semaphores, public interface - * - * Written by David Howells (dhowells@redhat.com). - * Derived from asm-i386/semaphore.h - * - * - * The MSW of the count is the negated number of active writers and waiting - * lockers, and the LSW is the total number of active locks - * - * The lock count is initialized to 0 (no active and no waiting lockers). - * - * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an - * uncontended lock. This can be determined because XADD returns the old value. - * Readers increment by 1 and see a positive value when uncontended, negative - * if there are writers (and maybe) readers waiting (in which case it goes to - * sleep). - * - * The value of WAITING_BIAS supports up to 32766 waiting processes. This can - * be extended to 65534 by manually checking the whole MSW rather than relying - * on the S flag. - * - * The value of ACTIVE_BIAS supports up to 65535 active processes. - * - * This should be totally fair - if anything is waiting, a process that wants a - * lock will go to the back of the queue. When the currently active lock is - * released, if there's a writer at the front of the queue, then that and only - * that will be woken up; if there's a bunch of consequtive readers at the - * front, then they'll all be woken up, but no other readers will be. 
- */ - #ifndef _LINUX_RWSEM_H #define _LINUX_RWSEM_H -#include +#ifdef __KERNEL__ -#define RWSEM_DEBUG 0 -#define RWSEM_DEBUG_MAGIC 0 +#include -#ifdef __KERNEL__ +#ifndef CONFIG_GENERIC_RWSEM +#include +#else /* CONFIG_GENERIC_RWSEM */ -#include -#include #include -#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -#include /* use a generic implementation */ -#else -#include /* use an arch-specific implementation */ -#endif - -/* defined contention handler functions for the generic case - * - these are also used for the exchange-and-add based algorithm - */ -#if defined(CONFIG_RWSEM_GENERIC) || defined(CONFIG_RWSEM_XCHGADD_ALGORITHM) -/* we use FASTCALL convention for the helpers */ -extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem)); -extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *sem)); +struct rw_semaphore +{ + spinlock_t lock; + int count; + struct list_head wait; +#if RWSEM_DEBUG + long __magic; #endif +}; -#ifndef rwsemtrace #if RWSEM_DEBUG -#include -#define rwsemtrace(SEM,FMT) do { if ((SEM)->debug) printk("[%d] "FMT"(count=%08lx)\n",current->pid,(SEM)->count); } while(0) +#define __SEM_DEBUG_INIT(name) \ + , (int)&(name).__magic +#define RWSEM_MAGIC(x) \ + do { \ + if ((x) != (long)&(x)) { \ + printk("rwsem bad magic %lx (should be %lx), ", \ + (long)x, (long)&(x)); \ + BUG(); \ + } \ + } while (0) #else -#define rwsemtrace(SEM,FMT) -#endif +#define __SEM_DEBUG_INIT(name) +#define CHECK_MAGIC(x) #endif -/* - * lock for reading - */ -static inline void down_read(struct rw_semaphore *sem) -{ - rwsemtrace(sem,"Entering down_read"); +#define __RWSEM_INITIALIZER(name, count) \ +{ \ + SPIN_LOCK_UNLOCKED, \ + (count), \ + LIST_HEAD_INIT((name).wait) \ + __SEM_DEBUG_INIT(name) \ +} +#define RWSEM_INITIALIZER(name) __RWSEM_INITIALIZER(name, 0) -#if RWSEM_DEBUG_MAGIC - if (sem->__magic != (long)&sem->__magic) - BUG(); -#endif +#define __DECLARE_RWSEM(name, count) \ + struct rw_semaphore name = __RWSEM_INITIALIZER(name, count) +#define DECLARE_RWSEM(name) __DECLARE_RWSEM(name, 0) +#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM(name, 1) +#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM(name, -1) - __down_read(sem); +#define RWSEM_WAITQUEUE_READ 0 +#define RWSEM_WAITQUEUE_WRITE 1 -#if RWSEM_DEBUG_MAGIC - if (atomic_read(&sem->writers)) - BUG(); - atomic_inc(&sem->readers); -#endif +extern void down_failed(struct rw_semaphore *, int); +extern void rwsem_wake(struct rw_semaphore *); - rwsemtrace(sem,"Leaving down_read"); +static inline void init_rwsem(struct rw_semaphore *sem) +{ + spin_lock_init(&sem->lock); + sem->count = 0; + INIT_LIST_HEAD(&sem->wait); +#if RWSEM_DEBUG + sem->__magic = (long)&sem->__magic; +#endif } -/* - * lock for writing - */ -static inline void down_write(struct rw_semaphore *sem) +static inline void down_read(struct rw_semaphore *sem) { - rwsemtrace(sem,"Entering down_write"); - -#if RWSEM_DEBUG_MAGIC - if (sem->__magic != (long)&sem->__magic) - BUG(); -#endif + CHECK_MAGIC(sem->__magic); - __down_write(sem); + spin_lock_irq(&sem->lock); + if (sem->count < 0 || !list_empty(&sem->wait)) + goto slow_path; + sem->count++; + out: + spin_unlock_irq(&sem->lock); + return; + + slow_path: + down_failed(sem, RWSEM_WAITQUEUE_READ); + goto out; +} -#if RWSEM_DEBUG_MAGIC - if (atomic_read(&sem->writers)) - BUG(); - if (atomic_read(&sem->readers)) - BUG(); - atomic_inc(&sem->writers); -#endif +static inline void down_write(struct 
rw_semaphore *sem) +{ + CHECK_MAGIC(sem->__magic); - rwsemtrace(sem,"Leaving down_write"); + spin_lock_irq(&sem->lock); + if (sem->count || !list_empty(&sem->wait)) + goto slow_path; + sem->count = -1; + out: + spin_unlock_irq(&sem->lock); + return; + + slow_path: + down_failed(sem, RWSEM_WAITQUEUE_WRITE); + goto out; } -/* - * release a read lock - */ static inline void up_read(struct rw_semaphore *sem) { - rwsemtrace(sem,"Entering up_read"); + unsigned long flags; -#if RWSEM_DEBUG_MAGIC - if (atomic_read(&sem->writers)) - BUG(); - atomic_dec(&sem->readers); -#endif - __up_read(sem); + CHECK_MAGIC(sem->__magic); - rwsemtrace(sem,"Leaving up_read"); + spin_lock_irqsave(&sem->lock, flags); + if (!--sem->count && !list_empty(&sem->wait)) + rwsem_wake(sem); + spin_unlock_irqrestore(&sem->lock, flags); } -/* - * release a write lock - */ static inline void up_write(struct rw_semaphore *sem) { - rwsemtrace(sem,"Entering up_write"); + unsigned long flags; -#if RWSEM_DEBUG_MAGIC - if (atomic_read(&sem->readers)) - BUG(); - if (atomic_read(&sem->writers) != 1) - BUG(); - atomic_dec(&sem->writers); -#endif - __up_write(sem); + CHECK_MAGIC(sem->__magic); - rwsemtrace(sem,"Leaving up_write"); + spin_lock_irqsave(&sem->lock, flags); + sem->count = 0; + if (!list_empty(&sem->wait)) + rwsem_wake(sem); + spin_unlock_irqrestore(&sem->lock, flags); } - +#endif /* CONFIG_GENERIC_RWSEM */ #endif /* __KERNEL__ */ #endif /* _LINUX_RWSEM_H */ diff -urN 2.4.4pre4/include/linux/sched.h 2.4.4pre4-rwsem/include/linux/sched.h --- 2.4.4pre4/include/linux/sched.h Thu Apr 19 02:17:20 2001 +++ 2.4.4pre4-rwsem/include/linux/sched.h Thu Apr 19 02:20:51 2001 @@ -239,7 +239,7 @@ mm_users: ATOMIC_INIT(2), \ mm_count: ATOMIC_INIT(1), \ map_count: 1, \ - mmap_sem: __RWSEM_INITIALIZER(name.mmap_sem, RW_LOCK_BIAS), \ + mmap_sem: RWSEM_INITIALIZER(name.mmap_sem), \ page_table_lock: SPIN_LOCK_UNLOCKED, \ mmlist: LIST_HEAD_INIT(name.mmlist), \ } @@ -548,8 +548,6 @@ extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr)); extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)); -extern int FASTCALL(__wake_up_ctx(wait_queue_head_t *q, unsigned int mode, int count, int bit)); -extern int FASTCALL(__wake_up_sync_ctx(wait_queue_head_t *q, unsigned int mode, int count, int bit)); extern void FASTCALL(sleep_on(wait_queue_head_t *q)); extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q, signed long timeout)); diff -urN 2.4.4pre4/include/linux/wait.h 2.4.4pre4-rwsem/include/linux/wait.h --- 2.4.4pre4/include/linux/wait.h Thu Apr 19 02:17:20 2001 +++ 2.4.4pre4-rwsem/include/linux/wait.h Thu Apr 19 02:20:51 2001 @@ -26,14 +26,6 @@ struct __wait_queue { unsigned int flags; #define WQ_FLAG_EXCLUSIVE 0x01 -#define WQ_FLAG_CONTEXT_0 8 /* context specific flag bit numbers */ -#define WQ_FLAG_CONTEXT_1 9 -#define WQ_FLAG_CONTEXT_2 10 -#define WQ_FLAG_CONTEXT_3 11 -#define WQ_FLAG_CONTEXT_4 12 -#define WQ_FLAG_CONTEXT_5 13 -#define WQ_FLAG_CONTEXT_6 14 -#define WQ_FLAG_CONTEXT_7 15 struct task_struct * task; struct list_head task_list; #if WAITQUEUE_DEBUG diff -urN 2.4.4pre4/kernel/sched.c 2.4.4pre4-rwsem/kernel/sched.c --- 2.4.4pre4/kernel/sched.c Thu Apr 19 02:17:20 2001 +++ 2.4.4pre4-rwsem/kernel/sched.c Thu Apr 19 02:20:51 2001 @@ -765,75 +765,6 @@ } } -/* - * wake up processes in the wait queue depending on the state of a context bit in the flags - * - wakes up a process if the specified bit is set in the flags member - * - the context bit is cleared if the process is woken up - * - 
if the bit number is negative, then the loop stops at the first unset context bit encountered - * - returns the number of processes woken - */ -static inline int __wake_up_ctx_common (wait_queue_head_t *q, - int count, int bit, const int sync) -{ - struct list_head *tmp, *head; - struct task_struct *p; - int stop, woken; - - woken = 0; - stop = bit<0; - if (bit<0) bit = -bit; - - CHECK_MAGIC_WQHEAD(q); - head = &q->task_list; - WQ_CHECK_LIST_HEAD(head); - tmp = head->next; - while (tmp != head) { - wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list); - - tmp = tmp->next; - CHECK_MAGIC(curr->__magic); - p = curr->task; - if (!test_and_clear_bit(bit,&curr->flags)) { - if (stop) - break; - continue; - } - - WQ_NOTE_WAKER(curr); - try_to_wake_up(p,sync); - - woken++; - if (woken>=count) - break; - } - - return woken; -} - -int __wake_up_ctx(wait_queue_head_t *q, unsigned int mode, int count, int bit) -{ - int woken = 0; - if (q && count) { - unsigned long flags; - wq_read_lock_irqsave(&q->lock, flags); - woken = __wake_up_ctx_common(q, count, bit, 0); - wq_read_unlock_irqrestore(&q->lock, flags); - } - return woken; -} - -int __wake_up_ctx_sync(wait_queue_head_t *q, unsigned int mode, int count, int bit) -{ - int woken = 0; - if (q && count) { - unsigned long flags; - wq_read_lock_irqsave(&q->lock, flags); - woken = __wake_up_ctx_common(q, count, bit, 1); - wq_read_unlock_irqrestore(&q->lock, flags); - } - return woken; -} - #define SLEEP_ON_VAR \ unsigned long flags; \ wait_queue_t wait; \ diff -urN 2.4.4pre4/lib/Makefile 2.4.4pre4-rwsem/lib/Makefile --- 2.4.4pre4/lib/Makefile Thu Apr 19 02:17:20 2001 +++ 2.4.4pre4-rwsem/lib/Makefile Thu Apr 19 02:20:51 2001 @@ -8,17 +8,14 @@ L_TARGET := lib.a -export-objs := cmdline.o +export-objs := cmdline.o rwsem.o obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o -ifneq ($(CONFIG_RWSEM_GENERIC_SPINLOCK)$(CONFIG_RWSEM_XCHGADD_ALGORITHM),nn) -export-objs += rwsem.o -obj-y += rwsem.o -endif - ifneq ($(CONFIG_HAVE_DEC_LOCK),y) obj-y += dec_and_lock.o endif + +obj-$(CONFIG_GENERIC_RWSEM) += rwsem.o include $(TOPDIR)/Rules.make diff -urN 2.4.4pre4/lib/rwsem.c 2.4.4pre4-rwsem/lib/rwsem.c --- 2.4.4pre4/lib/rwsem.c Thu Apr 19 02:17:20 2001 +++ 2.4.4pre4-rwsem/lib/rwsem.c Thu Apr 19 02:20:51 2001 @@ -1,152 +1,52 @@ -/* rwsem.c: R/W semaphores: contention handling functions - * - * Written by David Howells (dhowells@redhat.com). 
- * Derived from arch/i386/kernel/semaphore.c +/* + * generic rw_semaphores + * Copyright (C) 2001 Andrea Arcangeli SuSE */ -#include + #include #include +#include -/* - * wait for the read lock to be granted - * - need to repeal the increment made inline by the caller - * - need to throw a write-lock style spanner into the works (sub 0x00010000 from count) - */ -struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem) +void down_failed(struct rw_semaphore *sem, int flags) { struct task_struct *tsk = current; - DECLARE_WAITQUEUE(wait,tsk); - signed long count; - - rwsemtrace(sem,"Entering rwsem_down_read_failed"); - - /* this waitqueue context flag will be cleared when we are granted the lock */ - __set_bit(RWSEM_WAITING_FOR_READ,&wait.flags); - set_task_state(tsk, TASK_UNINTERRUPTIBLE); - - add_wait_queue_exclusive(&sem->wait, &wait); /* FIFO */ - - /* note that we're now waiting on the lock, but no longer actively read-locking */ - count = rwsem_atomic_update(RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS,sem); + DECLARE_WAITQUEUE(wait, tsk); - /* if there are no longer active locks, wake the front queued process(es) up - * - it might even be this process, since the waker takes a more active part - */ - if (!(count & RWSEM_ACTIVE_MASK)) - rwsem_wake(sem); + wait.flags = flags; + list_add(&wait.task_list, &sem->wait); - /* wait to be given the lock */ - for (;;) { - if (!test_bit(RWSEM_WAITING_FOR_READ,&wait.flags)) - break; + do { + __set_task_state(tsk, TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&sem->lock); schedule(); - set_task_state(tsk, TASK_UNINTERRUPTIBLE); - } - - remove_wait_queue(&sem->wait,&wait); - tsk->state = TASK_RUNNING; - - rwsemtrace(sem,"Leaving rwsem_down_read_failed"); - return sem; + spin_lock_irq(&sem->lock); + } while(wait.task_list.next); } -/* - * wait for the write lock to be granted - */ -struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem) +void rwsem_wake(struct rw_semaphore *sem) { - struct task_struct *tsk = current; - DECLARE_WAITQUEUE(wait,tsk); - signed long count; + struct list_head * entry, * head = &sem->wait; - rwsemtrace(sem,"Entering rwsem_down_write_failed"); + while ((entry = head->prev) != head) { + wait_queue_t * wait; - /* this waitqueue context flag will be cleared when we are granted the lock */ - __set_bit(RWSEM_WAITING_FOR_WRITE,&wait.flags); - set_task_state(tsk, TASK_UNINTERRUPTIBLE); + wait = list_entry(entry, wait_queue_t, task_list); - add_wait_queue_exclusive(&sem->wait, &wait); /* FIFO */ - - /* note that we're waiting on the lock, but no longer actively locking */ - count = rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem); - - /* if there are no longer active locks, wake the front queued process(es) up - * - it might even be this process, since the waker takes a more active part - */ - if (!(count & RWSEM_ACTIVE_MASK)) - rwsem_wake(sem); - - /* wait to be given the lock */ - for (;;) { - if (!test_bit(RWSEM_WAITING_FOR_WRITE,&wait.flags)) + if (wait->flags == RWSEM_WAITQUEUE_WRITE) { + if (!sem->count) { + sem->count = -1; + list_del(entry); + entry->next = NULL; + wake_up_process(wait->task); + } break; - schedule(); - set_task_state(tsk, TASK_UNINTERRUPTIBLE); + } + sem->count++; + list_del(entry); + entry->next = NULL; + wake_up_process(wait->task); } - - remove_wait_queue(&sem->wait,&wait); - tsk->state = TASK_RUNNING; - - rwsemtrace(sem,"Leaving rwsem_down_write_failed"); - return sem; -} - -/* - * handle the lock being released whilst there are processes blocked on it that can now run - * - if we come here, 
then: - * - the 'active part' of the count (&0x0000ffff) reached zero (but may no longer be zero) - * - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so) - */ -struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem) -{ - signed long count; - int woken; - - rwsemtrace(sem,"Entering rwsem_wake"); - - try_again: - /* try to grab an 'activity' marker - * - need to make sure two copies of rwsem_wake() don't do this for two separate processes - * simultaneously - * - be horribly naughty, and only deal with the LSW of the atomic counter - */ - if (rwsem_cmpxchgw(sem,0,RWSEM_ACTIVE_BIAS)!=0) { - rwsemtrace(sem,"rwsem_wake: abort wakeup due to renewed activity"); - goto out; - } - - /* try to grant a single write lock if there's a writer at the front of the queue - * - note we leave the 'active part' of the count incremented by 1 and the waiting part - * incremented by 0x00010000 - */ - if (wake_up_ctx(&sem->wait,1,-RWSEM_WAITING_FOR_WRITE)==1) - goto out; - - /* grant an infinite number of read locks to the readers at the front of the queue - * - note we increment the 'active part' of the count by the number of readers just woken, - * less one for the activity decrement we've already done - */ - woken = wake_up_ctx(&sem->wait,65535,-RWSEM_WAITING_FOR_READ); - if (woken<=0) - goto counter_correction; - - woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS; - woken -= RWSEM_ACTIVE_BIAS; - rwsem_atomic_update(woken,sem); - - out: - rwsemtrace(sem,"Leaving rwsem_wake"); - return sem; - - /* come here if we need to correct the counter for odd SMP-isms */ - counter_correction: - count = rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem); - rwsemtrace(sem,"corrected count"); - if (!(count & RWSEM_ACTIVE_MASK)) - goto try_again; - goto out; } -EXPORT_SYMBOL(rwsem_down_read_failed); -EXPORT_SYMBOL(rwsem_down_write_failed); +EXPORT_SYMBOL(down_failed); EXPORT_SYMBOL(rwsem_wake);