Binary files 1/ID and 2/ID differ
diff -urN 1/arch/alpha/config.in 2/arch/alpha/config.in
--- 1/arch/alpha/config.in	Tue Apr 17 04:00:35 2001
+++ 2/arch/alpha/config.in	Tue Apr 17 02:41:40 2001
@@ -5,6 +5,7 @@
 
 define_bool CONFIG_ALPHA y
 define_bool CONFIG_UID16 n
+define_bool CONFIG_GENERIC_RWSEM y
 
 mainmenu_name "Kernel configuration of Linux for Alpha machines"
 
diff -urN 1/arch/alpha/kernel/alpha_ksyms.c 2/arch/alpha/kernel/alpha_ksyms.c
--- 1/arch/alpha/kernel/alpha_ksyms.c	Tue Apr 17 04:00:34 2001
+++ 2/arch/alpha/kernel/alpha_ksyms.c	Tue Apr 17 03:14:24 2001
@@ -173,13 +173,6 @@
 EXPORT_SYMBOL(down_interruptible);
 EXPORT_SYMBOL(down_trylock);
 EXPORT_SYMBOL(up);
-EXPORT_SYMBOL(__down_read_failed);
-EXPORT_SYMBOL(__down_write_failed);
-EXPORT_SYMBOL(__rwsem_wake);
-EXPORT_SYMBOL(down_read);
-EXPORT_SYMBOL(down_write);
-EXPORT_SYMBOL(up_read);
-EXPORT_SYMBOL(up_write);
 
 /* 
  * SMP-specific symbols.
diff -urN 1/arch/alpha/kernel/semaphore.c 2/arch/alpha/kernel/semaphore.c
--- 1/arch/alpha/kernel/semaphore.c	Tue Nov 28 18:39:59 2000
+++ 2/arch/alpha/kernel/semaphore.c	Tue Apr 17 03:13:53 2001
@@ -263,185 +263,3 @@
 #endif
 	__up(sem);
 }
-
-
-/*
- * RW Semaphores
- */
-
-void
-__down_read_failed(struct rw_semaphore *sem, int count)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
- retry_down:
-	if (count < 0) {
-		/* Waiting on multiple readers and/or writers.  */
-		
-		/* Undo the acquisition we started in down_read.  */
-		atomic_inc(&sem->count);
-
-		current->state = TASK_UNINTERRUPTIBLE;
-		wmb();
-		add_wait_queue(&sem->wait, &wait);
-		mb();
-		while (atomic_read(&sem->count) < 0) {
-			schedule();
-			set_task_state(current, TASK_UNINTERRUPTIBLE);
-		}
-
-		remove_wait_queue(&sem->wait, &wait);
-		current->state = TASK_RUNNING;
-
-		mb();
-		count = atomic_dec_return(&sem->count);
-		if (count <= 0)
-			goto retry_down;
-	} else {
-		/* Waiting on exactly one writer.  */
-
-		current->state = TASK_UNINTERRUPTIBLE;
-		wmb();
-		add_wait_queue(&sem->wait, &wait);
-		mb();
-
-		while (!test_and_clear_bit(0, &sem->granted)) {
-			schedule();
-			set_task_state(current, TASK_UNINTERRUPTIBLE);
-		}
-
-		remove_wait_queue(&sem->wait, &wait);
-		current->state = TASK_RUNNING;
-	}
-}
-
-void
-__down_write_failed(struct rw_semaphore *sem, int count)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
- retry_down:
-	if (count + RW_LOCK_BIAS < 0) {
-		/* Waiting on multiple readers and/or writers.  */
-
-		/* Undo the acquisition we started in down_write.  */
-		atomic_add(RW_LOCK_BIAS, &sem->count);
-
-		current->state = TASK_UNINTERRUPTIBLE;
-		wmb();
-		add_wait_queue_exclusive(&sem->wait, &wait);
-		mb();
-	
-		while (atomic_read(&sem->count) + RW_LOCK_BIAS < 0) {
-			schedule();
-			set_task_state(current, TASK_UNINTERRUPTIBLE);
-		}
-
-		remove_wait_queue(&sem->wait, &wait);
-		current->state = TASK_RUNNING;
-
-		count = atomic_sub_return(RW_LOCK_BIAS, &sem->count);
-		if (count != 0)
-			goto retry_down;
-	} else {
-		/* Waiting on exactly one writer.  */
-
-		current->state = TASK_UNINTERRUPTIBLE;
-		wmb();
-		add_wait_queue_exclusive(&sem->wait, &wait);
-		mb();
-
-		while (!test_and_clear_bit(1, &sem->granted)) {
-			schedule();
-			set_task_state(current, TASK_UNINTERRUPTIBLE);
-		}
-
-		remove_wait_queue(&sem->write_bias_wait, &wait);
-		current->state = TASK_RUNNING;
-
-		/* If the lock is currently unbiased, awaken the sleepers.
-		   FIXME: This wakes up the readers early in a bit of a
-		   stampede -> bad!  */
-		count = atomic_read(&sem->count);
-		if (__builtin_expect(count >= 0, 0))
-			wake_up(&sem->wait);
-	}
-}
-
-void
-__rwsem_wake(struct rw_semaphore *sem, int readers)
-{
-	if (readers) {
-		if (test_and_set_bit(0, &sem->granted))
-			BUG();
-		wake_up(&sem->wait);
-	} else {
-		if (test_and_set_bit(1, &sem->granted))
-			BUG();
-		wake_up(&sem->write_bias_wait);
-	}
-}
-
-void
-down_read(struct rw_semaphore *sem)
-{
-#if WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-	__down_read(sem);
-#if WAITQUEUE_DEBUG
-	if (sem->granted & 2)
-		BUG();
-	if (atomic_read(&sem->writers))
-		BUG();
-	atomic_inc(&sem->readers);
-#endif
-}
-
-void
-down_write(struct rw_semaphore *sem)
-{
-#if WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-#endif
-	__down_write(sem);
-#if WAITQUEUE_DEBUG
-	if (sem->granted & 3)
-		BUG();
-	if (atomic_read(&sem->writers))
-		BUG();
-	if (atomic_read(&sem->readers))
-		BUG();
-	atomic_inc(&sem->writers);
-#endif
-}
-
-void
-up_read(struct rw_semaphore *sem)
-{
-#if WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-	if (sem->granted & 2)
-		BUG();
-	if (atomic_read(&sem->writers))
-		BUG();
-	atomic_dec(&sem->readers);
-#endif
-	__up_read(sem);
-}
-
-void
-up_write(struct rw_semaphore *sem)
-{
-#if WAITQUEUE_DEBUG
-	CHECK_MAGIC(sem->__magic);
-	if (sem->granted & 3)
-		BUG();
-	if (atomic_read(&sem->readers))
-		BUG();
-	if (atomic_read(&sem->writers) != 1)
-		BUG();
-	atomic_dec(&sem->writers);
-#endif
-	__up_write(sem);
-}
diff -urN 1/arch/i386/config.in 2/arch/i386/config.in
--- 1/arch/i386/config.in	Tue Apr 17 04:00:35 2001
+++ 2/arch/i386/config.in	Tue Apr 17 02:41:50 2001
@@ -7,6 +7,7 @@
 define_bool CONFIG_X86 y
 define_bool CONFIG_ISA y
 define_bool CONFIG_SBUS n
+define_bool CONFIG_GENERIC_RWSEM y
 
 define_bool CONFIG_UID16 y
 
diff -urN 1/arch/i386/kernel/i386_ksyms.c 2/arch/i386/kernel/i386_ksyms.c
--- 1/arch/i386/kernel/i386_ksyms.c	Sat Apr 14 15:21:17 2001
+++ 2/arch/i386/kernel/i386_ksyms.c	Tue Apr 17 02:31:38 2001
@@ -80,9 +80,6 @@
 EXPORT_SYMBOL_NOVERS(__down_failed_interruptible);
 EXPORT_SYMBOL_NOVERS(__down_failed_trylock);
 EXPORT_SYMBOL_NOVERS(__up_wakeup);
-EXPORT_SYMBOL_NOVERS(__rwsem_down_write_failed);
-EXPORT_SYMBOL_NOVERS(__rwsem_down_read_failed);
-EXPORT_SYMBOL_NOVERS(__rwsem_wake);
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
 /* Delay loops */
diff -urN 1/arch/i386/lib/Makefile 2/arch/i386/lib/Makefile
--- 1/arch/i386/lib/Makefile	Sat Apr 14 15:21:17 2001
+++ 2/arch/i386/lib/Makefile	Tue Apr 17 02:32:18 2001
@@ -9,7 +9,7 @@
 
 obj-y = checksum.o old-checksum.o delay.o \
 	usercopy.o getuser.o putuser.o \
-	memcpy.o strstr.o rwsem.o
+	memcpy.o strstr.o
 
 obj-$(CONFIG_X86_USE_3DNOW) += mmx.o
 obj-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff -urN 1/arch/i386/lib/rwsem.S 2/arch/i386/lib/rwsem.S
--- 1/arch/i386/lib/rwsem.S	Sat Apr 14 15:21:17 2001
+++ 2/arch/i386/lib/rwsem.S	Thu Jan  1 01:00:00 1970
@@ -1,36 +0,0 @@
-/* rwsem.S: R/W semaphores, register saving wrapper function stubs
- *
- * Written by David Howells (dhowells@redhat.com).
- * Derived from arch/i386/kernel/semaphore.c
- */
-
-.text
-.align 4
-.globl __rwsem_down_read_failed
-__rwsem_down_read_failed:
-	pushl	%edx
-	pushl	%ecx
-	call	rwsem_down_read_failed
-	popl	%ecx
-	popl	%edx
-	ret
-
-.align 4
-.globl __rwsem_down_write_failed
-__rwsem_down_write_failed:
-	pushl	%edx
-	pushl	%ecx
-	call	rwsem_down_write_failed
-	popl	%ecx
-	popl	%edx
-	ret
-
-.align 4
-.globl __rwsem_wake
-__rwsem_wake:
-	pushl	%edx
-	pushl	%ecx
-	call	rwsem_wake
-	popl	%ecx
-	popl	%edx
-	ret
diff -urN 1/arch/ppc/config.in 2/arch/ppc/config.in
--- 1/arch/ppc/config.in	Sun Apr  1 01:17:09 2001
+++ 2/arch/ppc/config.in	Tue Apr 17 02:50:24 2001
@@ -3,6 +3,7 @@
 # see Documentation/kbuild/config-language.txt.
 #
 define_bool CONFIG_UID16 n
+define_bool CONFIG_GENERIC_RWSEM y
 
 mainmenu_name "Linux/PowerPC Kernel Configuration"
 
diff -urN 1/arch/ppc/kernel/ppc_ksyms.c 2/arch/ppc/kernel/ppc_ksyms.c
--- 1/arch/ppc/kernel/ppc_ksyms.c	Sun Apr  1 01:17:11 2001
+++ 2/arch/ppc/kernel/ppc_ksyms.c	Mon Apr 16 17:50:15 2001
@@ -332,8 +332,6 @@
 EXPORT_SYMBOL(__down);
 EXPORT_SYMBOL(__down_interruptible);
 EXPORT_SYMBOL(__down_trylock);
-EXPORT_SYMBOL(down_read_failed);
-EXPORT_SYMBOL(down_write_failed);
 
 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
 extern void (*debugger)(struct pt_regs *regs);
diff -urN 1/arch/ppc/kernel/semaphore.c 2/arch/ppc/kernel/semaphore.c
--- 1/arch/ppc/kernel/semaphore.c	Tue Nov 28 18:39:59 2000
+++ 2/arch/ppc/kernel/semaphore.c	Tue Apr 17 02:49:43 2001
@@ -137,44 +137,3 @@
 {
 	return waking_non_zero_trylock(sem);
 }
-
-
-/*
- * rw semaphores    Ani Joshi <ajoshi@unixbox.com>
- * based on alpha port by Andrea Arcangeli <andrea@suse.de>
- */
-
-void down_read_failed(struct rw_semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	do {
-		__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&sem->lock);
-		schedule();
-		spin_lock_irq(&sem->lock);
-	} while(sem->wr);
-
-	remove_wait_queue(&sem->wait, &wait);
-}
-
-void down_write_failed(struct rw_semaphore *sem)
-{
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
-
-	add_wait_queue_exclusive(&sem->wait, &wait);
-
-	do {
-		__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&sem->lock);
-		schedule();
-		spin_lock_irq(&sem->lock);
-	} while(sem->rd || sem->wr);
-
-	remove_wait_queue(&sem->wait, &wait);
-}
-
diff -urN 1/include/asm-alpha/semaphore.h 2/include/asm-alpha/semaphore.h
--- 1/include/asm-alpha/semaphore.h	Sun Apr  1 20:11:13 2001
+++ 2/include/asm-alpha/semaphore.h	Mon Apr 16 04:56:29 2001
@@ -13,6 +13,7 @@
 #include <asm/atomic.h>
 #include <asm/compiler.h>	/* __builtin_expect */
 #include <linux/wait.h>
+#include <linux/rwsem.h>
 
 #define DEBUG_SEMAPHORE 0
 #define DEBUG_RW_SEMAPHORE 0
@@ -220,153 +221,6 @@
 extern inline void up(struct semaphore *sem)
 {
 	__up(sem);
-}
-#endif
-
-/* rw mutexes (should that be mutices? =) -- throw rw
- * spinlocks and semaphores together, and this is what we
- * end up with...
- *
- * The lock is initialized to BIAS.  This way, a writer
- * subtracts BIAS ands gets 0 for the case of an uncontended
- * lock.  Readers decrement by 1 and see a positive value
- * when uncontended, negative if there are writers waiting
- * (in which case it goes to sleep).
- *
- * The value 0x01000000 supports up to 128 processors and
- * lots of processes.  BIAS must be chosen such that subtracting
- * BIAS once per CPU will result in the int remaining
- * negative.
- * In terms of fairness, this should result in the lock
- * flopping back and forth between readers and writers
- * under heavy use.
- *
- *	      -ben
- *
- * Once we start supporting machines with more than 128 CPUs,
- * we should go for using a 64bit atomic type instead of 32bit
- * as counter. We shall probably go for bias 0x80000000 then,
- * so that single sethi can set it.
- *
- *	      -jj
- */
-
-#define RW_LOCK_BIAS		0x01000000
-
-struct rw_semaphore {
-	atomic_t		count;
-	/* bit 0 means read bias granted;
-	   bit 1 means write bias granted.  */
-	unsigned		granted;
-	wait_queue_head_t	wait;
-	wait_queue_head_t	write_bias_wait;
-#if WAITQUEUE_DEBUG
-	long			__magic;
-	atomic_t		readers;
-	atomic_t		writers;
-#endif
-};
-
-#if WAITQUEUE_DEBUG
-#define __RWSEM_DEBUG_INIT	, ATOMIC_INIT(0), ATOMIC_INIT(0)
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-
-#define __RWSEM_INITIALIZER(name,count)					\
-	{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait), \
-	  __WAIT_QUEUE_HEAD_INITIALIZER((name).write_bias_wait)		\
-	  __SEM_DEBUG_INIT(name) __RWSEM_DEBUG_INIT }
-
-#define __DECLARE_RWSEM_GENERIC(name,count) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
-
-#define DECLARE_RWSEM(name) \
-	__DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS)
-#define DECLARE_RWSEM_READ_LOCKED(name) \
-	__DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1)
-#define DECLARE_RWSEM_WRITE_LOCKED(name) \
-	__DECLARE_RWSEM_GENERIC(name, 0)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	atomic_set (&sem->count, RW_LOCK_BIAS);
-	sem->granted = 0;
-	init_waitqueue_head(&sem->wait);
-	init_waitqueue_head(&sem->write_bias_wait);
-#if WAITQUEUE_DEBUG
-	sem->__magic = (long)&sem->__magic;
-	atomic_set(&sem->readers, 0);
-	atomic_set(&sem->writers, 0);
-#endif
-}
-
-extern void down_read(struct rw_semaphore *);
-extern void down_write(struct rw_semaphore *);
-extern void up_read(struct rw_semaphore *);
-extern void up_write(struct rw_semaphore *);
-extern void __down_read_failed(struct rw_semaphore *, int);
-extern void __down_write_failed(struct rw_semaphore *, int);
-extern void __rwsem_wake(struct rw_semaphore *, int);
-
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	long count = atomic_dec_return(&sem->count);
-	if (__builtin_expect(count < 0, 0))
-		__down_read_failed(sem, count);
-}
-
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	long count = atomic_sub_return(RW_LOCK_BIAS, &sem->count);
-	if (__builtin_expect(count != 0, 0))
-		__down_write_failed(sem, count);
-}
-
-/* When a reader does a release, the only significant case is when there
-   was a writer waiting, and we've bumped the count to 0, then we must
-   wake the writer up.  */
-
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	long count;
-	mb();
-	count = atomic_inc_return(&sem->count);
-	if (__builtin_expect(count == 0, 0))
-		__rwsem_wake(sem, 0);
-}
-
-/* Releasing the writer is easy -- just release it and wake up
-   any sleepers.  */
-
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	long count, wake;
-	mb();
-	count = atomic_add_return(RW_LOCK_BIAS, &sem->count);
-
-	/* Only do the wake if we were, but are no longer, negative.  */
-	wake = ((int)(count - RW_LOCK_BIAS) < 0) && count >= 0;
-	if (__builtin_expect(wake, 0))
-		__rwsem_wake(sem, count);
-}
-
-#if !WAITQUEUE_DEBUG && !DEBUG_RW_SEMAPHORE
-extern inline void down_read(struct rw_semaphore *sem)
-{
-	__down_read(sem);
-}
-extern inline void down_write(struct rw_semaphore *sem)
-{
-	__down_write(sem);
-}
-extern inline void up_read(struct rw_semaphore *sem)
-{
-	__up_read(sem);
-}
-extern inline void up_write(struct rw_semaphore *sem)
-{
-	__up_write(sem);
 }
 #endif
 
diff -urN 1/include/asm-i386/rwsem-spin.h 2/include/asm-i386/rwsem-spin.h
--- 1/include/asm-i386/rwsem-spin.h	Mon Apr 16 05:22:34 2001
+++ 2/include/asm-i386/rwsem-spin.h	Thu Jan  1 01:00:00 1970
@@ -1,324 +0,0 @@
-/* rwsem.h: R/W semaphores based on spinlocks
- *
- * Written by David Howells (dhowells@redhat.com).
- *
- * Derived from asm-i386/semaphore.h and asm-i386/spinlock.h
- */
-
-#ifndef _I386_RWSEM_SPIN_H
-#define _I386_RWSEM_SPIN_H
-
-#include <linux/config.h>
-
-#ifndef _LINUX_RWSEM_H
-#error please dont include asm/rwsem-spin.h directly, use linux/rwsem.h instead
-#endif
-
-#include <linux/spinlock.h>
-
-#ifdef __KERNEL__
-
-#define CONFIG_USING_SPINLOCK_BASED_RWSEM 1
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long		count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	spinlock_t		lock;
-#define RWSEM_SPINLOCK_OFFSET_STR	"4" /* byte offset of spinlock */
-	wait_queue_head_t	wait;
-#define RWSEM_WAITING_FOR_READ	WQ_FLAG_CONTEXT_0	/* bits to use in wait_queue_t.flags */
-#define RWSEM_WAITING_FOR_WRITE	WQ_FLAG_CONTEXT_1
-#if RWSEM_DEBUG
-	int			debug;
-#endif
-#if RWSEM_DEBUG_MAGIC
-	long			__magic;
-	atomic_t		readers;
-	atomic_t		writers;
-#endif
-};
-
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-#if RWSEM_DEBUG_MAGIC
-#define __RWSEM_DEBUG_MINIT(name)	, (int)&(name).__magic, ATOMIC_INIT(0), ATOMIC_INIT(0)
-#else
-#define __RWSEM_DEBUG_MINIT(name)	/* */
-#endif
-
-#define __RWSEM_INITIALIZER(name,count) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-	__WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-	__RWSEM_DEBUG_INIT __RWSEM_DEBUG_MINIT(name) }
-
-#define __DECLARE_RWSEM_GENERIC(name,count) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
-
-#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
-#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
-#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->lock);
-	init_waitqueue_head(&sem->wait);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
-#if RWSEM_DEBUG_MAGIC
-	sem->__magic = (long)&sem->__magic;
-	atomic_set(&sem->readers, 0);
-	atomic_set(&sem->writers, 0);
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning down_read\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX	"  decb      "RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* try to grab the spinlock */
-		"  js        3f\n" /* jump if failed */
-		"1:\n\t"
-#endif
-		"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
-#ifdef CONFIG_SMP
-		"  movb      $1,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* release the spinlock */
-#endif
-		"  js        4f\n\t" /* jump if we weren't granted the lock */
-		"2:\n"
-		".section .text.lock,\"ax\"\n"
-#ifdef CONFIG_SMP
-		"3:\n\t" /* spin on the spinlock till we get it */
-		"  cmpb      $0,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t"
-		"  rep;nop   \n\t"
-		"  jle       3b\n\t"
-		"  jmp       1b\n"
-#endif
-		"4:\n\t"
-		"  call      __rwsem_down_read_failed\n\t"
-		"  jmp       2b\n"
-		".previous"
-		"# ending __down_read\n\t"
-		: "=m"(sem->count), "=m"(sem->lock)
-		: "a"(sem), "m"(sem->count), "m"(sem->lock)
-		: "memory");
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = RWSEM_ACTIVE_WRITE_BIAS;
-	__asm__ __volatile__(
-		"# beginning down_write\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX	"  decb      "RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* try to grab the spinlock */
-		"  js        3f\n" /* jump if failed */
-		"1:\n\t"
-#endif
-		"  xchg      %0,(%%eax)\n\t" /* retrieve the old value */
-		"  add       %0,(%%eax)\n\t" /* add 0xffff0001, result in memory */
-#ifdef CONFIG_SMP
-		"  movb      $1,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* release the spinlock */
-#endif
-		"  testl     %0,%0\n\t" /* was the count 0 before? */
-		"  jnz       4f\n\t" /* jump if we weren't granted the lock */
-		"2:\n\t"
-		".section .text.lock,\"ax\"\n"
-#ifdef CONFIG_SMP
-		"3:\n\t" /* spin on the spinlock till we get it */
-		"  cmpb      $0,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t"
-		"  rep;nop   \n\t"
-		"  jle       3b\n\t"
-		"  jmp       1b\n"
-#endif
-		"4:\n\t"
-		"  call     __rwsem_down_write_failed\n\t"
-		"  jmp      2b\n"
-		".previous\n"
-		"# ending down_write"
-		: "+r"(tmp), "=m"(sem->count), "=m"(sem->lock)
-		: "a"(sem), "m"(sem->count), "m"(sem->lock)
-		: "memory");
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = -RWSEM_ACTIVE_READ_BIAS;
-	__asm__ __volatile__(
-		"# beginning __up_read\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX	"  decb      "RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* try to grab the spinlock */
-		"  js        3f\n" /* jump if failed */
-		"1:\n\t"
-#endif
-		"  xchg      %0,(%%eax)\n\t" /* retrieve the old value */
-		"  addl      %0,(%%eax)\n\t" /* subtract 1, result in memory */
-#ifdef CONFIG_SMP
-		"  movb      $1,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* release the spinlock */
-#endif
-		"  js        4f\n\t" /* jump if the lock is being waited upon */
-		"2:\n\t"
-		".section .text.lock,\"ax\"\n"
-#ifdef CONFIG_SMP
-		"3:\n\t" /* spin on the spinlock till we get it */
-		"  cmpb      $0,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t"
-		"  rep;nop   \n\t"
-		"  jle       3b\n\t"
-		"  jmp       1b\n"
-#endif
-		"4:\n\t"
-		"  decl      %0\n\t" /* xchg gave us the old count */
-		"  testl     %4,%0\n\t" /* do nothing if still outstanding active readers */
-		"  jnz       2b\n\t"
-		"  call      __rwsem_wake\n\t"
-		"  jmp       2b\n"
-		".previous\n"
-		"# ending __up_read\n"
-		: "+r"(tmp), "=m"(sem->count), "=m"(sem->lock)
-		: "a"(sem), "i"(RWSEM_ACTIVE_MASK), "m"(sem->count), "m"(sem->lock)
-		: "memory");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning __up_write\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX	"  decb      "RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* try to grab the spinlock */
-		"  js        3f\n" /* jump if failed */
-		"1:\n\t"
-#endif
-		"  addl      %3,(%%eax)\n\t" /* adds 0x00010001 */
-#ifdef CONFIG_SMP
-		"  movb      $1,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t" /* release the spinlock */
-#endif
-		"  js        4f\n\t" /* jump if the lock is being waited upon */
-		"2:\n\t"
-		".section .text.lock,\"ax\"\n"
-#ifdef CONFIG_SMP
-		"3:\n\t" /* spin on the spinlock till we get it */
-		"  cmpb      $0,"RWSEM_SPINLOCK_OFFSET_STR"(%%eax)\n\t"
-		"  rep;nop   \n\t"
-		"  jle       3b\n\t"
-		"  jmp       1b\n"
-#endif
-		"4:\n\t"
-		"  call     __rwsem_wake\n\t"
-		"  jmp      2b\n"
-		".previous\n"
-		"# ending __up_write\n"
-		: "=m"(sem->count), "=m"(sem->lock)
-		: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count), "m"(sem->lock)
-		: "memory");
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	int tmp = delta;
-
-	__asm__ __volatile__(
-		"# beginning rwsem_atomic_update\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX	"  decb      "RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t" /* try to grab the spinlock */
-		"  js        3f\n" /* jump if failed */
-		"1:\n\t"
-#endif
-		"  xchgl     %0,(%1)\n\t" /* retrieve the old value */
-		"  addl      %0,(%1)\n\t" /* add 0xffff0001, result in memory */
-#ifdef CONFIG_SMP
-		"  movb      $1,"RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t" /* release the spinlock */
-#endif
-		".section .text.lock,\"ax\"\n"
-#ifdef CONFIG_SMP
-		"3:\n\t" /* spin on the spinlock till we get it */
-		"  cmpb      $0,"RWSEM_SPINLOCK_OFFSET_STR"(%1)\n\t"
-		"  rep;nop   \n\t"
-		"  jle       3b\n\t"
-		"  jmp       1b\n"
-#endif
-		".previous\n"
-		"# ending rwsem_atomic_update\n\t"
-		: "+r"(tmp)
-		: "r"(sem)
-		: "memory");
-
-	return tmp+delta;
-}
-
-/*
- * implement compare and exchange functionality on the rw-semaphore count LSW
- */
-static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new)
-{
-	__u16 prev;
-
-	__asm__ __volatile__(
-		"# beginning rwsem_cmpxchgw\n\t"
-#ifdef CONFIG_SMP
-LOCK_PREFIX	"  decb      "RWSEM_SPINLOCK_OFFSET_STR"(%3)\n\t" /* try to grab the spinlock */
-		"  js        3f\n" /* jump if failed */
-		"1:\n\t"
-#endif
-		"  cmpw      %w1,(%3)\n\t"
-		"  jne       4f\n\t" /* jump if old doesn't match sem->count LSW */
-		"  movw      %w2,(%3)\n\t" /* replace sem->count LSW with the new value */
-		"2:\n\t"
-#ifdef CONFIG_SMP
-		"  movb      $1,"RWSEM_SPINLOCK_OFFSET_STR"(%3)\n\t" /* release the spinlock */
-#endif
-		".section .text.lock,\"ax\"\n"
-#ifdef CONFIG_SMP
-		"3:\n\t" /* spin on the spinlock till we get it */
-		"  cmpb      $0,"RWSEM_SPINLOCK_OFFSET_STR"(%3)\n\t"
-		"  rep;nop   \n\t"
-		"  jle       3b\n\t"
-		"  jmp       1b\n"
-#endif
-		"4:\n\t"
-		"  movw      (%3),%w0\n" /* we'll want to return the current value */
-		"  jmp       2b\n"
-		".previous\n"
-		"# ending rwsem_cmpxchgw\n\t"
-		: "=r"(prev)
-		: "r0"(old), "r"(new), "r"(sem)
-		: "memory");
-
-	return prev;
-}
-
-#endif /* __KERNEL__ */
-#endif /* _I386_RWSEM_SPIN_H */
diff -urN 1/include/asm-i386/rwsem-xadd.h 2/include/asm-i386/rwsem-xadd.h
--- 1/include/asm-i386/rwsem-xadd.h	Sat Apr 14 15:21:27 2001
+++ 2/include/asm-i386/rwsem-xadd.h	Thu Jan  1 01:00:00 1970
@@ -1,198 +0,0 @@
-/* rwsem-xadd.h: R/W semaphores implemented using XADD/CMPXCHG
- *
- * Written by David Howells (dhowells@redhat.com), 2001.
- * Derived from asm-i386/semaphore.h
- */
-
-#ifndef _I386_RWSEM_XADD_H
-#define _I386_RWSEM_XADD_H
-
-#ifndef _LINUX_RWSEM_H
-#error please dont include asm/rwsem-xadd.h directly, use linux/rwsem.h instead
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long		count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	wait_queue_head_t	wait;
-#define RWSEM_WAITING_FOR_READ	WQ_FLAG_CONTEXT_0	/* bits to use in wait_queue_t.flags */
-#define RWSEM_WAITING_FOR_WRITE	WQ_FLAG_CONTEXT_1
-#if RWSEM_DEBUG
-	int			debug;
-#endif
-#if RWSEM_DEBUG_MAGIC
-	long			__magic;
-	atomic_t		readers;
-	atomic_t		writers;
-#endif
-};
-
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-#if RWSEM_DEBUG_MAGIC
-#define __RWSEM_DEBUG_MINIT(name)	, (int)&(name).__magic, ATOMIC_INIT(0), ATOMIC_INIT(0)
-#else
-#define __RWSEM_DEBUG_MINIT(name)	/* */
-#endif
-
-#define __RWSEM_INITIALIZER(name,count) \
-{ RWSEM_UNLOCKED_VALUE, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-	__RWSEM_DEBUG_INIT __RWSEM_DEBUG_MINIT(name) }
-
-#define __DECLARE_RWSEM_GENERIC(name,count) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
-
-#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
-#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
-#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	init_waitqueue_head(&sem->wait);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
-#if RWSEM_DEBUG_MAGIC
-	sem->__magic = (long)&sem->__magic;
-	atomic_set(&sem->readers, 0);
-	atomic_set(&sem->writers, 0);
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning down_read\n\t"
-LOCK_PREFIX	"  incl      (%%eax)\n\t" /* adds 0x00000001, returns the old value */
-		"  js        2f\n\t" /* jump if we weren't granted the lock */
-		"1:\n\t"
-		".section .text.lock,\"ax\"\n"
-		"2:\n\t"
-		"  call      __rwsem_down_read_failed\n\t"
-		"  jmp       1b\n"
-		".previous"
-		"# ending down_read\n\t"
-		: "=m"(sem->count)
-		: "a"(sem), "m"(sem->count)
-		: "memory");
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = RWSEM_ACTIVE_WRITE_BIAS;
-	__asm__ __volatile__(
-		"# beginning down_write\n\t"
-LOCK_PREFIX	"  xadd      %0,(%%eax)\n\t" /* subtract 0x00010001, returns the old value */
-		"  testl     %0,%0\n\t" /* was the count 0 before? */
-		"  jnz       2f\n\t" /* jump if we weren't granted the lock */
-		"1:\n\t"
-		".section .text.lock,\"ax\"\n"
-		"2:\n\t"
-		"  call      __rwsem_down_write_failed\n\t"
-		"  jmp       1b\n"
-		".previous\n"
-		"# ending down_write"
-		: "+r"(tmp), "=m"(sem->count)
-		: "a"(sem), "m"(sem->count)
-		: "memory");
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	int tmp;
-
-	tmp = -RWSEM_ACTIVE_READ_BIAS;
-	__asm__ __volatile__(
-		"# beginning __up_read\n\t"
-LOCK_PREFIX	"  xadd      %0,(%%eax)\n\t" /* subtracts 1, returns the old value */
-		"  js        2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		".section .text.lock,\"ax\"\n"
-		"2:\n\t"
-		"  decl      %0\n\t" /* xadd gave us the old count */
-		"  testl     %3,%0\n\t" /* do nothing if still outstanding active readers */
-		"  jnz       1b\n\t"
-		"  call      __rwsem_wake\n\t"
-		"  jmp       1b\n"
-		".previous\n"
-		"# ending __up_read\n"
-		: "+r"(tmp), "=m"(sem->count)
-		: "a"(sem), "i"(RWSEM_ACTIVE_MASK), "m"(sem->count)
-		: "memory");
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	__asm__ __volatile__(
-		"# beginning __up_write\n\t"
-LOCK_PREFIX	"  addl      %2,(%%eax)\n\t" /* adds 0x0000ffff */
-		"  js        2f\n\t" /* jump if the lock is being waited upon */
-		"1:\n\t"
-		".section .text.lock,\"ax\"\n"
-		"2:\n\t"
-		"  call      __rwsem_wake\n\t"
-		"  jmp       1b\n"
-		".previous\n"
-		"# ending __up_write\n"
-		: "=m"(sem->count)
-		: "a"(sem), "i"(-RWSEM_ACTIVE_WRITE_BIAS), "m"(sem->count)
-		: "memory");
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	int tmp = delta;
-
-	__asm__ __volatile__(
-		LOCK_PREFIX "xadd %0,(%1)"
-		: "+r"(tmp)
-		: "r"(sem)
-		: "memory");
-
-	return tmp+delta;
-}
-
-/*
- * implement compare and exchange functionality on the rw-semaphore count LSW
- */
-static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new)
-{
-	return cmpxchg((__u16*)&sem->count,0,RWSEM_ACTIVE_BIAS);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _I386_RWSEM_XADD_H */
diff -urN 1/include/asm-i386/rwsem.h 2/include/asm-i386/rwsem.h
--- 1/include/asm-i386/rwsem.h	Mon Apr 16 05:22:34 2001
+++ 2/include/asm-i386/rwsem.h	Thu Jan  1 01:00:00 1970
@@ -1,30 +0,0 @@
-/* rwsem.h: R/W semaphores based on spinlocks
- *
- * Written by David Howells (dhowells@redhat.com).
- *
- * Derived from asm-i386/semaphore.h
- */
-
-#ifndef _I386_RWSEM_H
-#define _I386_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
-#endif
-
-#ifdef __KERNEL__
-
-#define __HAVE_ARCH_SPECIFIC_RWSEM_IMPLEMENTATION 1
-#ifdef CONFIG_X86_XADD
-#include <asm/rwsem-xadd.h> /* use XADD based semaphores if possible */
-#else
-#include <asm/rwsem-spin.h> /* use optimised spinlock based semaphores otherwise */
-#endif
-
-/* we use FASTCALL convention for the helpers */
-extern struct rw_semaphore *FASTCALL(__rwsem_down_read_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(__rwsem_down_write_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(__rwsem_wake(struct rw_semaphore *sem));
-
-#endif /* __KERNEL__ */
-#endif /* _I386_RWSEM_H */
diff -urN 1/include/asm-ppc/semaphore.h 2/include/asm-ppc/semaphore.h
--- 1/include/asm-ppc/semaphore.h	Sun Apr  1 01:17:32 2001
+++ 2/include/asm-ppc/semaphore.h	Mon Apr 16 17:53:17 2001
@@ -106,100 +106,6 @@
 		__up(sem);
 }	
 
-
-/* RW spinlock-based semaphores */
-
-struct rw_semaphore
-{
-	spinlock_t lock;
-	int rd, wr;
-	wait_queue_head_t wait;
-#if WAITQUEUE_DEBUG
-	long __magic;
-#endif
-};
-
-#define RW_LOCK_BIAS	2	/* XXX bogus */
-#define __RWSEM_INITIALIZER(name, count)		\
-{							\
-	SPIN_LOCK_UNLOCKED,				\
-	(count) == 1, (count) == 0,			\
-	__WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
-	__SEM_DEBUG_INIT(name)				\
-}
-
-#define __DECLARE_RWSEM_GENERIC(name, count)		\
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name, count)
-
-#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS)
-#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, RW_LOCK_BIAS-1)
-#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name, 0)
-
-extern inline void init_rwsem(struct rw_semaphore *sem)
-{
-	spin_lock_init(&sem->lock);
-	sem->rd = sem->wr = 0;
-	init_waitqueue_head(&sem->wait);
-#if WAITQUEUE_DEBUG
-	sem->__magic = (long)&sem->__magic;
-#endif
-}
-
-#ifndef CHECK_MAGIC
-#define CHECK_MAGIC(x)
-#endif
-
-extern void down_read_failed(struct rw_semaphore *);
-extern void down_write_failed(struct rw_semaphore *);
-
-extern inline void down_read(struct rw_semaphore *sem)
-{
-	CHECK_MAGIC(sem->__magic);
-
-	spin_lock_irq(&sem->lock);
-	if (sem->wr)
-		down_read_failed(sem);
-	sem->rd++;
-	spin_unlock_irq(&sem->lock);
-}
-
-extern inline void down_write(struct rw_semaphore *sem)
-{
-	CHECK_MAGIC(sem->__magic);
-
-	spin_lock(&sem->lock);
-	if(sem->rd || sem->wr)
-		down_write_failed(sem);
-	sem->wr = 1;
-	spin_unlock(&sem->lock);
-}
-
-#define up_read(sem)							\
-	do {								\
-		unsigned long flags;					\
-									\
-		CHECK_MAGIC((sem)->__magic);				\
-									\
-		spin_lock_irqsave(&(sem)->lock, flags);			\
-		if (!--(sem)->rd && waitqueue_active(&(sem)->wait))	\
-			wake_up(&(sem)->wait);				\
-		spin_unlock_irqrestore(&(sem)->lock, flags);		\
-	} while (0)
-
-#define up_write(sem)							\
-	do {								\
-		unsigned long flags;					\
-									\
-		CHECK_MAGIC((sem)->__magic);				\
-									\
-		spin_lock_irqsave(&(sem)->lock, flags);			\
-		(sem)->wr = 0;						\
-		if (waitqueue_active(&(sem)->wait))			\
-			wake_up(&(sem)->wait);				\
-		spin_unlock_irqrestore(&(sem)->lock, flags);		\
-	} while (0)
-
-
 #endif /* __KERNEL__ */
 
 #endif /* !(_PPC_SEMAPHORE_H) */
diff -urN 1/include/linux/rwsem-spinlock.h 2/include/linux/rwsem-spinlock.h
--- 1/include/linux/rwsem-spinlock.h	Mon Apr 16 05:22:34 2001
+++ 2/include/linux/rwsem-spinlock.h	Thu Jan  1 01:00:00 1970
@@ -1,174 +0,0 @@
-/* rwsem-spinlock.h: fallback C implementation
- *
- * Copyright (c) 2001   David Howells (dhowells@redhat.com).
- */
-
-#ifndef _LINUX_RWSEM_SPINLOCK_H
-#define _LINUX_RWSEM_SPINLOCK_H
-
-#ifndef _LINUX_RWSEM_H
-#error please dont include asm/rwsem-spinlock.h directly, use linux/rwsem.h instead
-#endif
-
-#include <linux/spinlock.h>
-
-#ifdef __KERNEL__
-
-#define CONFIG_USING_SPINLOCK_BASED_RWSEM 1
-
-/*
- * the semaphore definition
- */
-struct rw_semaphore {
-	signed long			count;
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-	spinlock_t		lock;
-#define RWSEM_SPINLOCK_OFFSET_STR	"4" /* byte offset of spinlock */
-	wait_queue_head_t	wait;
-#define RWSEM_WAITING_FOR_READ	WQ_FLAG_CONTEXT_0	/* bits to use in wait_queue_t.flags */
-#define RWSEM_WAITING_FOR_WRITE	WQ_FLAG_CONTEXT_1
-#if RWSEM_DEBUG
-	int			debug;
-#endif
-#if RWSEM_DEBUG_MAGIC
-	long			__magic;
-	atomic_t		readers;
-	atomic_t		writers;
-#endif
-};
-
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-#if RWSEM_DEBUG_MAGIC
-#define __RWSEM_DEBUG_MINIT(name)	, (int)&(name).__magic, ATOMIC_INIT(0), ATOMIC_INIT(0)
-#else
-#define __RWSEM_DEBUG_MINIT(name)	/* */
-#endif
-
-#define __RWSEM_INITIALIZER(name,count) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
-	__WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
-	__RWSEM_DEBUG_INIT __RWSEM_DEBUG_MINIT(name) }
-
-#define __DECLARE_RWSEM_GENERIC(name,count) \
-	struct rw_semaphore name = __RWSEM_INITIALIZER(name,count)
-
-#define DECLARE_RWSEM(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS)
-#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,RW_LOCK_BIAS-1)
-#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM_GENERIC(name,0)
-
-static inline void init_rwsem(struct rw_semaphore *sem)
-{
-	sem->count = RWSEM_UNLOCKED_VALUE;
-	spin_lock_init(&sem->lock);
-	init_waitqueue_head(&sem->wait);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
-#if RWSEM_DEBUG_MAGIC
-	sem->__magic = (long)&sem->__magic;
-	atomic_set(&sem->readers, 0);
-	atomic_set(&sem->writers, 0);
-#endif
-}
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	int count;
-	spin_lock(&sem->lock);
-	sem->count += RWSEM_ACTIVE_READ_BIAS;
-	count = sem->count;
-	spin_unlock(&sem->lock);
-	if (count<0)
-		rwsem_down_read_failed(sem);
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	int count;
-	spin_lock(&sem->lock);
-	count = sem->count;
-	sem->count += RWSEM_ACTIVE_WRITE_BIAS;
-	spin_unlock(&sem->lock);
-	if (count)
-		rwsem_down_write_failed(sem);
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	int count;
-	spin_lock(&sem->lock);
-	count = sem->count;
-	sem->count -= RWSEM_ACTIVE_READ_BIAS;
-	spin_unlock(&sem->lock);
-	if (count<0 && !((count-RWSEM_ACTIVE_READ_BIAS)&RWSEM_ACTIVE_MASK))
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	int count;
-	spin_lock(&sem->lock);
-	sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
-	count = sem->count;
-	spin_unlock(&sem->lock);
-	if (count<0)
-		rwsem_wake(sem);
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	int count;
-
-	spin_lock(&sem->lock);
-	sem->count += delta;
-	count = sem->count;
-	spin_unlock(&sem->lock);
-
-	return count;
-}
-
-/*
- * implement compare and exchange functionality on the rw-semaphore count LSW
- */
-static inline __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 old, __u16 new)
-{
-	__u16 prev;
-
-	spin_lock(&sem->lock);
-	prev = sem->count & RWSEM_ACTIVE_MASK;
-	if (prev==old)
-		sem->count = (sem->count & ~RWSEM_ACTIVE_MASK) | new;
-	spin_unlock(&sem->lock);
-
-	return prev;
-}
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff -urN 1/include/linux/rwsem.h 2/include/linux/rwsem.h
--- 1/include/linux/rwsem.h	Mon Apr 16 05:22:34 2001
+++ 2/include/linux/rwsem.h	Tue Apr 17 04:01:03 2001
@@ -1,148 +1,65 @@
-/* rwsem.h: R/W semaphores, public interface
- *
- * Written by David Howells (dhowells@redhat.com).
- * Derived from asm-i386/semaphore.h
- *
- *
- * The MSW of the count is the negated number of active writers and waiting
- * lockers, and the LSW is the total number of active locks
- *
- * The lock count is initialized to 0 (no active and no waiting lockers).
- *
- * When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
- * uncontended lock. This can be determined because XADD returns the old value.
- * Readers increment by 1 and see a positive value when uncontended, negative
- * if there are writers (and maybe) readers waiting (in which case it goes to
- * sleep).
- *
- * The value of WAITING_BIAS supports up to 32766 waiting processes. This can
- * be extended to 65534 by manually checking the whole MSW rather than relying
- * on the S flag.
- *
- * The value of ACTIVE_BIAS supports up to 65535 active processes.
- *
- * This should be totally fair - if anything is waiting, a process that wants a
- * lock will go to the back of the queue. When the currently active lock is
- * released, if there's a writer at the front of the queue, then that and only
- * that will be woken up; if there's a bunch of consequtive readers at the
- * front, then they'll all be woken up, but no other readers will be.
- */
-
 #ifndef _LINUX_RWSEM_H
 #define _LINUX_RWSEM_H
 
-#include <linux/linkage.h>
-
-#define RWSEM_DEBUG 0
-#define RWSEM_DEBUG_MAGIC 0
-
 #ifdef __KERNEL__
 
-#include <asm/system.h>
-#include <asm/atomic.h>
-#include <linux/wait.h>
-
-#if RWSEM_DEBUG
-#define rwsemdebug(FMT, ARGS...) do { if (sem->debug) printk(FMT,##ARGS); } while(0)
-#else
-#define rwsemdebug(FMT, ARGS...)
-#endif
+#include <linux/config.h>
 
-/* we use FASTCALL convention for the helpers */
-extern struct rw_semaphore *FASTCALL(rwsem_down_read_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(rwsem_down_write_failed(struct rw_semaphore *sem));
-extern struct rw_semaphore *FASTCALL(rwsem_wake(struct rw_semaphore *sem));
+#ifndef CONFIG_GENERIC_RWSEM
+#include <asm/rwsem.h>
+#else /* CONFIG_GENERIC_RWSEM */
 
-#include <asm/rwsem.h> /* find the arch specific bits */
-
-#ifndef __HAVE_ARCH_SPECIFIC_RWSEM_IMPLEMENTATION
-#include <linux/rwsem-spinlock.h>
-#endif
-
-/*
- * lock for reading
- */
-static inline void down_read(struct rw_semaphore *sem)
-{
-	rwsemdebug("Entering down_read(count=%08lx)\n",sem->count);
-
-#if RWSEM_DEBUG_MAGIC
-	if (sem->__magic != (long)&sem->__magic)
-		BUG();
-#endif
-
-	__down_read(sem);
-
-#if RWSEM_DEBUG_MAGIC
-	if (atomic_read(&sem->writers))
-		BUG();
-	atomic_inc(&sem->readers);
-#endif
-
-	rwsemdebug("Leaving down_read(count=%08lx)\n",sem->count);
-}
-
-/*
- * lock for writing
- */
-static inline void down_write(struct rw_semaphore *sem)
-{
-	rwsemdebug("Entering down_write(count=%08lx)\n",sem->count);
-
-#if RWSEM_DEBUG_MAGIC
-	if (sem->__magic != (long)&sem->__magic)
-		BUG();
-#endif
-
-	__down_write(sem);
-
-#if RWSEM_DEBUG_MAGIC
-	if (atomic_read(&sem->writers))
-		BUG();
-	if (atomic_read(&sem->readers))
-		BUG();
-	atomic_inc(&sem->writers);
-#endif
-
-	rwsemdebug("Leaving down_write(count=%08lx)\n",sem->count);
-}
+#include <linux/wait.h>
 
-/*
- * release a read lock
- */
-static inline void up_read(struct rw_semaphore *sem)
+struct rw_semaphore
 {
-	rwsemdebug("Entering up_read(count=%08lx)\n",sem->count);
-
-#if RWSEM_DEBUG_MAGIC
-	if (atomic_read(&sem->writers))
-		BUG();
-	atomic_dec(&sem->readers);
+	spinlock_t lock;
+	int rd; unsigned int wr:1;
+	wait_queue_head_t read_wait;
+	wait_queue_head_t write_wait;
+#if RWSEM_DEBUG
+	long __magic;
 #endif
-	__up_read(sem);
-
-	rwsemdebug("Leaving up_read(count=%08lx)\n",sem->count);
-}
+};
 
-/*
- * release a write lock
- */
-static inline void up_write(struct rw_semaphore *sem)
-{
-	rwsemdebug("Entering up_write(count=%08lx)\n",sem->count);
-
-#if RWSEM_DEBUG_MAGIC
-	if (atomic_read(&sem->readers))
-		BUG();
-	if (atomic_read(&sem->writers) != 1)
-		BUG();
-	atomic_dec(&sem->writers);
+#if RWSEM_DEBUG
+#define __SEM_DEBUG_INIT(name) \
+	, (long)&(name).__magic
+#define CHECK_MAGIC(x)							\
+	do {								\
+		if ((x) != (long)&(x)) {				\
+			printk("rwsem bad magic %lx (should be %lx), ",	\
+				(long)x, (long)&(x));			\
+			BUG();						\
+		}							\
+	} while (0)
+#else
+#define __SEM_DEBUG_INIT(name)
+#define CHECK_MAGIC(x)
 #endif
-	__up_write(sem);
 
-	rwsemdebug("Leaving up_write(count=%08lx)\n",sem->count);
+#define __RWSEM_INITIALIZER(name, rd, wr)			\
+{								\
+	SPIN_LOCK_UNLOCKED,					\
+	(rd), (wr),						\
+	__WAIT_QUEUE_HEAD_INITIALIZER((name).read_wait),	\
+	__WAIT_QUEUE_HEAD_INITIALIZER((name).write_wait)	\
+	__SEM_DEBUG_INIT(name)					\
 }
+#define RWSEM_INITIALIZER(name) __RWSEM_INITIALIZER(name, 0, 0)
 
+#define __DECLARE_RWSEM(name, rd, wr)		\
+	struct rw_semaphore name = __RWSEM_INITIALIZER(name, rd, wr)
+#define DECLARE_RWSEM(name) __DECLARE_RWSEM(name, 0, 0)
+#define DECLARE_RWSEM_READ_LOCKED(name) __DECLARE_RWSEM(name, 1, 0)
+#define DECLARE_RWSEM_WRITE_LOCKED(name) __DECLARE_RWSEM(name, 0, 1)
+
+extern void FASTCALL(init_rwsem(struct rw_semaphore *));
+extern void FASTCALL(down_read(struct rw_semaphore *));
+extern void FASTCALL(down_write(struct rw_semaphore *));
+extern void FASTCALL(up_read(struct rw_semaphore *));
+extern void FASTCALL(up_write(struct rw_semaphore *));
 
+#endif /* CONFIG_GENERIC_RWSEM */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_H */
diff -urN 1/include/linux/sched.h 2/include/linux/sched.h
--- 1/include/linux/sched.h	Tue Apr 17 04:00:35 2001
+++ 2/include/linux/sched.h	Tue Apr 17 04:01:03 2001
@@ -240,7 +240,7 @@
 	mm_users:	ATOMIC_INIT(2), 		\
 	mm_count:	ATOMIC_INIT(1), 		\
 	map_count:	1, 				\
-	mmap_sem:	__RWSEM_INITIALIZER(name.mmap_sem, RW_LOCK_BIAS), \
+	mmap_sem:	RWSEM_INITIALIZER(name.mmap_sem), \
 	page_table_lock: SPIN_LOCK_UNLOCKED, 		\
 	mmlist:		LIST_HEAD_INIT(name.mmlist),	\
 }
@@ -552,8 +552,7 @@
 
 extern void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr));
 extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
-extern int FASTCALL(__wake_up_ctx(wait_queue_head_t *q, unsigned int mode, int count, int bit));
-extern int FASTCALL(__wake_up_sync_ctx(wait_queue_head_t *q, unsigned int mode, int count, int bit));
+extern void FASTCALL(__hard_wake_up(wait_queue_head_t *q, unsigned int mode, int nr, int sync));
 extern void FASTCALL(sleep_on(wait_queue_head_t *q));
 extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
 				      signed long timeout));
diff -urN 1/include/linux/wait.h 2/include/linux/wait.h
--- 1/include/linux/wait.h	Mon Apr 16 05:22:34 2001
+++ 2/include/linux/wait.h	Tue Apr 17 04:01:03 2001
@@ -26,14 +26,6 @@
 struct __wait_queue {
 	unsigned int flags;
 #define WQ_FLAG_EXCLUSIVE	0x01
-#define WQ_FLAG_CONTEXT_0	8	/* context specific flag bit numbers */
-#define WQ_FLAG_CONTEXT_1	9
-#define WQ_FLAG_CONTEXT_2	10
-#define WQ_FLAG_CONTEXT_3	11
-#define WQ_FLAG_CONTEXT_4	12
-#define WQ_FLAG_CONTEXT_5	13
-#define WQ_FLAG_CONTEXT_6	14
-#define WQ_FLAG_CONTEXT_7	15
 	struct task_struct * task;
 	struct list_head task_list;
 #if WAITQUEUE_DEBUG
diff -urN 1/kernel/sched.c 2/kernel/sched.c
--- 1/kernel/sched.c	Tue Apr 17 04:00:35 2001
+++ 2/kernel/sched.c	Tue Apr 17 03:26:40 2001
@@ -836,73 +836,9 @@
 	}
 }
 
-/*
- * wake up processes in the wait queue depending on the state of a context bit in the flags
- * - wakes up a process if the specified bit is set in the flags member
- * - the context bit is cleared if the process is woken up
- * - if the bit number is negative, then the loop stops at the first unset context bit encountered
- * - returns the number of processes woken
- */
-static inline int __wake_up_ctx_common (wait_queue_head_t *q,
-					int count, int bit, const int sync)
+void __hard_wake_up(wait_queue_head_t *q, unsigned int mode, int nr, int sync)
 {
-	struct list_head *tmp, *head;
-	struct task_struct *p;
-	int stop, woken;
-
-	woken = 0;
-	stop = bit<0;
-	if (bit<0) bit = -bit;
-
-	CHECK_MAGIC_WQHEAD(q);
-	head = &q->task_list;
-	WQ_CHECK_LIST_HEAD(head);
-	tmp = head->next;
-	while (tmp != head) {
-                wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
-
-		tmp = tmp->next;
-		CHECK_MAGIC(curr->__magic);
-		p = curr->task;
-		if (!test_and_clear_bit(bit,&curr->flags)) {
-			if (stop)
-				break;
-			continue;
-		}
-
-		WQ_NOTE_WAKER(curr);
-		try_to_wake_up(p,sync);
-
-		woken++;
-		if (woken>=count)
-			break;
-	}
-
-	return woken;
-}
-
-int __wake_up_ctx(wait_queue_head_t *q, unsigned int mode, int count, int bit)
-{
-	int woken = 0;
-	if (q && count) {
-		unsigned long flags;
-		wq_read_lock_irqsave(&q->lock, flags);
-		woken = __wake_up_ctx_common(q, count, bit, 0);
-		wq_read_unlock_irqrestore(&q->lock, flags);
-	}
-	return woken;
-}
-
-int __wake_up_ctx_sync(wait_queue_head_t *q, unsigned int mode, int count, int bit)
-{
-	int woken = 0;
-	if (q && count) {
-		unsigned long flags;
-		wq_read_lock_irqsave(&q->lock, flags);
-		woken = __wake_up_ctx_common(q, count, bit, 1);
-		wq_read_unlock_irqrestore(&q->lock, flags);
-	}
-	return woken;
+	__wake_up_common(q, mode, nr, sync);
 }
 
 #define	SLEEP_ON_VAR				\
diff -urN 1/lib/Makefile 2/lib/Makefile
--- 1/lib/Makefile	Sat Apr 14 15:21:29 2001
+++ 2/lib/Makefile	Tue Apr 17 03:42:54 2001
@@ -8,12 +8,17 @@
 
 L_TARGET := lib.a
 
-export-objs := cmdline.o rwsem.o
+export-objs := cmdline.o
 
-obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o rwsem.o
+obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y) 
   obj-y += dec_and_lock.o
+endif
+
+ifeq ($(CONFIG_GENERIC_RWSEM),y)
+  obj-y += rwsem.o
+  export-objs += rwsem.o
 endif
 
 include $(TOPDIR)/Rules.make
diff -urN 1/lib/rwsem.c 2/lib/rwsem.c
--- 1/lib/rwsem.c	Sat Apr 14 15:21:29 2001
+++ 2/lib/rwsem.c	Tue Apr 17 03:52:06 2001
@@ -1,156 +1,110 @@
-/* rwsem.c: R/W semaphores: contention handling functions
- *
- * Written by David Howells (dhowells@redhat.com).
- * Derived from arch/i386/kernel/semaphore.c
+/*
+ *  generic rw_semaphores
+ *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
  */
+
 #include <linux/rwsem.h>
 #include <linux/sched.h>
 #include <linux/module.h>
 
-/*
- * wait for the read lock to be granted
- * - need to repeal the increment made inline by the caller
- * - need to throw a write-lock style spanner into the works (sub 0x00010000 from count)
- */
-struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
+void init_rwsem(struct rw_semaphore *sem)
+{
+	spin_lock_init(&sem->lock);
+	sem->rd = sem->wr = 0;
+	init_waitqueue_head(&sem->read_wait);
+	init_waitqueue_head(&sem->write_wait);
+#if RWSEM_DEBUG
+	sem->__magic = (long)&sem->__magic;
+#endif
+}
+
+static inline void down_read_failed(struct rw_semaphore *sem)
 {
 	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait,tsk);
-	signed long count;
+	DECLARE_WAITQUEUE(wait, tsk);
 
-	rwsemdebug("[%d] Entering rwsem_down_read_failed(%08lx)\n",current->pid,sem->count);
+	wait.flags &= ~WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue(&sem->read_wait, &wait);
 
-	/* this waitqueue context flag will be cleared when we are granted the lock */
-	__set_bit(RWSEM_WAITING_FOR_READ,&wait.flags);
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-	add_wait_queue_exclusive(&sem->wait, &wait); /* FIFO */
-
-	/* note that we're now waiting on the lock, but no longer actively read-locking */
-	count = rwsem_atomic_update(RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS,sem);
-	rwsemdebug("X(%08lx)\n",count);
-
-	/* if there are no longer active locks, wake the front queued process(es) up
-	 * - it might even be this process, since the waker takes a more active part
-	 */
-	if (!(count & RWSEM_ACTIVE_MASK))
-		rwsem_wake(sem);
-
-	/* wait to be given the lock */
-	for (;;) {
-		if (!test_bit(RWSEM_WAITING_FOR_READ,&wait.flags))
-			break;
+	do {
+		__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		spin_unlock_irq(&sem->lock);
 		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
+		spin_lock_irq(&sem->lock);
+	} while(sem->wr);
 
-	remove_wait_queue(&sem->wait,&wait);
-	tsk->state = TASK_RUNNING;
-
-	rwsemdebug("[%d] Leaving rwsem_down_read_failed(%08lx)\n",current->pid,sem->count);
-
-	return sem;
+	__remove_wait_queue(&sem->read_wait, &wait);
 }
 
-/*
- * wait for the write lock to be granted
- */
-struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem)
+static inline void down_write_failed(struct rw_semaphore *sem)
 {
 	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait,tsk);
-	signed long count;
+	DECLARE_WAITQUEUE(wait, tsk);
 
-	rwsemdebug("[%d] Entering rwsem_down_write_failed(%08lx)\n",current->pid,sem->count);
+	wait.flags |= WQ_FLAG_EXCLUSIVE;
+	__add_wait_queue_tail(&sem->write_wait, &wait);
 
-	/* this waitqueue context flag will be cleared when we are granted the lock */
-	__set_bit(RWSEM_WAITING_FOR_WRITE,&wait.flags);
-	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
-	add_wait_queue_exclusive(&sem->wait, &wait); /* FIFO */
-
-	/* note that we're waiting on the lock, but no longer actively locking */
-	count = rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem);
-	rwsemdebug("[%d] updated(%08lx)\n",current->pid,count);
-
-	/* if there are no longer active locks, wake the front queued process(es) up
-	 * - it might even be this process, since the waker takes a more active part
-	 */
-	if (!(count & RWSEM_ACTIVE_MASK))
-		rwsem_wake(sem);
-
-	/* wait to be given the lock */
-	for (;;) {
-		if (!test_bit(RWSEM_WAITING_FOR_WRITE,&wait.flags))
-			break;
+	do {
+		__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+		spin_unlock_irq(&sem->lock);
 		schedule();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-	}
+		spin_lock_irq(&sem->lock);
+	} while(sem->rd || sem->wr);
+
+	__remove_wait_queue(&sem->write_wait, &wait);
+}
 
-	remove_wait_queue(&sem->wait,&wait);
-	tsk->state = TASK_RUNNING;
+void down_read(struct rw_semaphore *sem)
+{
+	CHECK_MAGIC(sem->__magic);
+
+	spin_lock_irq(&sem->lock);
+	if (sem->wr)
+		down_read_failed(sem);
+	sem->rd++;
+	spin_unlock_irq(&sem->lock);
+}
 
-	rwsemdebug("[%d] Leaving rwsem_down_write_failed(%08lx)\n",current->pid,sem->count);
+void down_write(struct rw_semaphore *sem)
+{
+	CHECK_MAGIC(sem->__magic);
 
-	return sem;
+	spin_lock_irq(&sem->lock);
+	if (sem->rd || sem->wr)
+		down_write_failed(sem);
+	sem->wr = 1;
+	spin_unlock_irq(&sem->lock);
 }
 
-/*
- * handle the lock being released whilst there are processes blocked on it that can now run
- * - if we come here, then:
- *   - the 'active part' of the count (&0x0000ffff) reached zero (but may no longer be zero)
- *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
- */
-struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
+void up_read(struct rw_semaphore *sem)
 {
-	signed long count;
-	int woken;
+	unsigned long flags;
+
+	CHECK_MAGIC(sem->__magic);
 
-	rwsemdebug("[%d] Entering rwsem_wake(%08lx)\n",current->pid,sem->count);
+	spin_lock_irqsave(&sem->lock, flags);
+	if (!--sem->rd && waitqueue_active(&sem->write_wait))
+		__hard_wake_up(&sem->write_wait, TASK_UNINTERRUPTIBLE, 1, 0);
+	spin_unlock_irqrestore(&sem->lock, flags);
+}
+
+void up_write(struct rw_semaphore *sem)
+{
+	unsigned long flags;
+
+	CHECK_MAGIC(sem->__magic);
+
+	spin_lock_irqsave(&sem->lock, flags);
+	sem->wr = 0;
+	if (waitqueue_active(&sem->read_wait))
+		__hard_wake_up(&sem->read_wait, TASK_UNINTERRUPTIBLE, 0, 0);
+	else if (waitqueue_active(&sem->write_wait))
+		__hard_wake_up(&sem->write_wait, TASK_UNINTERRUPTIBLE, 1, 0);
+	spin_unlock_irqrestore(&sem->lock, flags);
+}
 
- try_again:
-	/* try to grab an 'activity' marker
-	 * - need to make sure two copies of rwsem_wake() don't do this for two separate processes
-	 *   simultaneously
-	 * - be horribly naughty, and only deal with the LSW of the atomic counter
-	 */
-	if (rwsem_cmpxchgw(sem,0,RWSEM_ACTIVE_BIAS)!=0) {
-		rwsemdebug("[%d] rwsem_wake: abort wakeup due to renewed activity\n",current->pid);
-		goto out;
-	}
-
-	/* try to grant a single write lock if there's a writer at the front of the queue
-	 * - note we leave the 'active part' of the count incremented by 1 and the waiting part
-	 *   incremented by 0x00010000
-	 */
-	if (wake_up_ctx(&sem->wait,1,-RWSEM_WAITING_FOR_WRITE)==1)
-		goto out;
-
-	/* grant an infinite number of read locks to the readers at the front of the queue
-	 * - note we increment the 'active part' of the count by the number of readers just woken,
-	 *   less one for the activity decrement we've already done
-	 */
-	woken = wake_up_ctx(&sem->wait,65535,-RWSEM_WAITING_FOR_READ);
-	if (woken<=0)
-		goto counter_correction;
-
-	woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS;
-	woken -= RWSEM_ACTIVE_BIAS;
-	rwsem_atomic_update(woken,sem);
-
- out:
-	rwsemdebug("[%d] Leaving rwsem_wake(%08lx)\n",current->pid,sem->count);
-	return sem;
-
-	/* come here if we need to correct the counter for odd SMP-isms */
- counter_correction:
-	count = rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem);
-	rwsemdebug("[%d] corrected(%08lx)\n",current->pid,count);
-	if (!(count & RWSEM_ACTIVE_MASK))
-		goto try_again;
-	goto out;
-}
-
-EXPORT_SYMBOL(rwsem_down_read_failed);
-EXPORT_SYMBOL(rwsem_down_write_failed);
-EXPORT_SYMBOL(rwsem_wake);
+EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(down_read);
+EXPORT_SYMBOL(down_write);
+EXPORT_SYMBOL(up_read);
+EXPORT_SYMBOL(up_write);