diff -urpN -X /home/fletch/.diff.exclude 717-fs_aio_7_ext2getblk_wq/arch/ppc64/kernel/ppc_ksyms.c 718-fs_aio_8_down_wq-ppc64/arch/ppc64/kernel/ppc_ksyms.c
--- 717-fs_aio_7_ext2getblk_wq/arch/ppc64/kernel/ppc_ksyms.c	Tue Feb 25 23:03:44 2003
+++ 718-fs_aio_8_down_wq-ppc64/arch/ppc64/kernel/ppc_ksyms.c	Sat Jun 14 20:44:27 2003
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(strncmp);
 EXPORT_SYMBOL(__down_interruptible);
 EXPORT_SYMBOL(__up);
 EXPORT_SYMBOL(naca);
-EXPORT_SYMBOL(__down);
+EXPORT_SYMBOL(__down_wq);
 
 /* EXPORT_SYMBOL(csum_partial); already in net/netsyms.c */
 EXPORT_SYMBOL(csum_partial_copy_generic);
diff -urpN -X /home/fletch/.diff.exclude 717-fs_aio_7_ext2getblk_wq/arch/ppc64/kernel/semaphore.c 718-fs_aio_8_down_wq-ppc64/arch/ppc64/kernel/semaphore.c
--- 717-fs_aio_7_ext2getblk_wq/arch/ppc64/kernel/semaphore.c	Sun Nov 17 20:29:22 2002
+++ 718-fs_aio_8_down_wq-ppc64/arch/ppc64/kernel/semaphore.c	Sat Jun 14 20:44:28 2003
@@ -70,13 +70,18 @@ void __up(struct semaphore *sem)
  * Thus it is only when we decrement count from some value > 0
  * that we have actually got the semaphore.
  */
-void __down(struct semaphore *sem)
+int __down_wq(struct semaphore *sem, wait_queue_t *wait)
 {
 	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
+	DECLARE_WAITQUEUE(local_wait, tsk);
+	unsigned long flags;
 
-	tsk->state = TASK_UNINTERRUPTIBLE;
-	add_wait_queue_exclusive(&sem->wait, &wait);
+	if (is_sync_wait(wait))
+		tsk->state = TASK_UNINTERRUPTIBLE;
+	if (!wait)
+		wait = &local_wait;
+
+	add_wait_queue_exclusive(&sem->wait, wait);
 	smp_wmb();
 
 	/*
@@ -86,10 +91,15 @@ void __down(struct semaphore *sem)
 	 * that we are asleep, and then sleep.
 	 */
 	while (__sem_update_count(sem, -1) <= 0) {
+		if (!is_sync_wait(wait))
+			return -EIOCBRETRY;
 		schedule();
 		tsk->state = TASK_UNINTERRUPTIBLE;
 	}
-	remove_wait_queue(&sem->wait, &wait);
+	spin_lock_irqsave(&sem->wait.lock, flags);
+	if (is_sync_wait(wait) || !list_empty(&wait->task_list))
+		remove_wait_queue_locked(&sem->wait, wait);
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
 	tsk->state = TASK_RUNNING;
 
 	/*
@@ -98,6 +108,8 @@ void __down(struct semaphore *sem)
 	 * indicating that there are still processes sleeping.
 	 */
 	wake_up(&sem->wait);
+
+	return 0;
 }
 
 int __down_interruptible(struct semaphore * sem)
diff -urpN -X /home/fletch/.diff.exclude 717-fs_aio_7_ext2getblk_wq/include/asm-ppc64/semaphore.h 718-fs_aio_8_down_wq-ppc64/include/asm-ppc64/semaphore.h
--- 717-fs_aio_7_ext2getblk_wq/include/asm-ppc64/semaphore.h	Tue Feb 25 23:03:50 2003
+++ 718-fs_aio_8_down_wq-ppc64/include/asm-ppc64/semaphore.h	Sat Jun 14 20:44:28 2003
@@ -68,12 +68,14 @@ static inline void init_MUTEX_LOCKED (st
 	sema_init(sem, 0);
 }
 
-extern void __down(struct semaphore * sem);
+extern int __down_wq(struct semaphore * sem, wait_queue_t *wait);
 extern int __down_interruptible(struct semaphore * sem);
 extern void __up(struct semaphore * sem);
 
-static inline void down(struct semaphore * sem)
+static inline int down_wq(struct semaphore * sem, wait_queue_t *wait)
 {
+	int ret = 0;
+
 #ifdef WAITQUEUE_DEBUG
 	CHECK_MAGIC(sem->__magic);
 #endif
@@ -83,8 +85,14 @@ static inline void down(struct semaphore
 	 * Try to get the semaphore, take the slow path if we fail.
 	 */
 	if (atomic_dec_return(&sem->count) < 0)
-		__down(sem);
+		ret = __down_wq(sem, wait);
 	smp_wmb();
+	return ret;
+}
+
+static inline void down(struct semaphore * sem)
+{
+	down_wq(sem, NULL);
 }
 
 static inline int down_interruptible(struct semaphore * sem)
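
For context, a minimal caller sketch (not part of this patch) of the down_wq() interface introduced above: passing a NULL wait behaves like a plain down(), while passing an asynchronous wait_queue_t (one for which is_sync_wait() is false, as set up by the AIO retry machinery) makes a contended semaphore return -EIOCBRETRY instead of sleeping, leaving the wait entry queued so the operation can be retried when __up() wakes it. The function below, aio_style_lock_op(), and its wait argument are hypothetical illustrations only.

/* Hypothetical illustration -- not code from this patch series. */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <asm/semaphore.h>

static int aio_style_lock_op(struct semaphore *sem, wait_queue_t *wait)
{
	int ret;

	/*
	 * With wait == NULL this is equivalent to down(); with an async
	 * wait (is_sync_wait() false) a contended semaphore returns
	 * -EIOCBRETRY and leaves *wait queued on sem->wait.
	 */
	ret = down_wq(sem, wait);
	if (ret == -EIOCBRETRY)
		return ret;	/* retry later, once __up() wakes *wait */

	/* ... protected work would go here ... */

	up(sem);
	return 0;
}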