From: Nick Piggin

I ran the files through Lindent and made the comments fit in 80 columns.

---

 25-akpm/include/linux/rwsem-spinlock.h |   10 +-
 25-akpm/include/linux/rwsem.h          |   32 +++----
 25-akpm/lib/rwsem-spinlock.c           |  104 +++++++++++++------------
 25-akpm/lib/rwsem.c                    |  136 ++++++++++++++++++---------------
 4 files changed, 151 insertions(+), 131 deletions(-)

diff -puN include/linux/rwsem.h~lindent-rwsem include/linux/rwsem.h
--- 25/include/linux/rwsem.h~lindent-rwsem	2004-04-14 02:00:48.116721696 -0700
+++ 25-akpm/include/linux/rwsem.h	2004-04-14 02:00:48.128719872 -0700
@@ -22,9 +22,9 @@ struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-#include <linux/rwsem-spinlock.h> /* use a generic implementation */
+#include <linux/rwsem-spinlock.h>	/* use a generic implementation */
 #else
-#include <asm/rwsem.h> /* use an arch-specific implementation */
+#include <asm/rwsem.h>	/* use an arch-specific implementation */
 #endif
 
 #ifndef rwsemtrace
@@ -41,9 +41,9 @@ extern void FASTCALL(rwsemtrace(struct r
 static inline void down_read(struct rw_semaphore *sem)
 {
 	might_sleep();
-	rwsemtrace(sem,"Entering down_read");
+	rwsemtrace(sem, "Entering down_read");
 	__down_read(sem);
-	rwsemtrace(sem,"Leaving down_read");
+	rwsemtrace(sem, "Leaving down_read");
 }
 
 /*
@@ -52,9 +52,9 @@ static inline void down_read(struct rw_s
 static inline int down_read_trylock(struct rw_semaphore *sem)
 {
 	int ret;
-	rwsemtrace(sem,"Entering down_read_trylock");
+	rwsemtrace(sem, "Entering down_read_trylock");
 	ret = __down_read_trylock(sem);
-	rwsemtrace(sem,"Leaving down_read_trylock");
+	rwsemtrace(sem, "Leaving down_read_trylock");
 	return ret;
 }
 
@@ -64,9 +64,9 @@ static inline int down_read_trylock(stru
 static inline void down_write(struct rw_semaphore *sem)
 {
 	might_sleep();
-	rwsemtrace(sem,"Entering down_write");
+	rwsemtrace(sem, "Entering down_write");
 	__down_write(sem);
-	rwsemtrace(sem,"Leaving down_write");
+	rwsemtrace(sem, "Leaving down_write");
 }
 
 /*
@@ -75,9 +75,9 @@ static inline void down_write(struct rw_
 static inline int down_write_trylock(struct rw_semaphore *sem)
 {
 	int ret;
-	rwsemtrace(sem,"Entering down_write_trylock");
+	rwsemtrace(sem, "Entering down_write_trylock");
 	ret = __down_write_trylock(sem);
-	rwsemtrace(sem,"Leaving down_write_trylock");
+	rwsemtrace(sem, "Leaving down_write_trylock");
 	return ret;
 }
 
@@ -86,9 +86,9 @@ static inline int down_write_trylock(str
  */
 static inline void up_read(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering up_read");
+	rwsemtrace(sem, "Entering up_read");
 	__up_read(sem);
-	rwsemtrace(sem,"Leaving up_read");
+	rwsemtrace(sem, "Leaving up_read");
 }
 
 /*
@@ -96,9 +96,9 @@ static inline void up_read(struct rw_sem
  */
 static inline void up_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering up_write");
+	rwsemtrace(sem, "Entering up_write");
 	__up_write(sem);
-	rwsemtrace(sem,"Leaving up_write");
+	rwsemtrace(sem, "Leaving up_write");
 }
 
 /*
@@ -106,9 +106,9 @@ static inline void up_write(struct rw_se
  */
 static inline void downgrade_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering downgrade_write");
+	rwsemtrace(sem, "Entering downgrade_write");
 	__downgrade_write(sem);
-	rwsemtrace(sem,"Leaving downgrade_write");
+	rwsemtrace(sem, "Leaving downgrade_write");
 }
 
 #endif /* __KERNEL__ */
diff -puN include/linux/rwsem-spinlock.h~lindent-rwsem include/linux/rwsem-spinlock.h
--- 25/include/linux/rwsem-spinlock.h~lindent-rwsem	2004-04-14 02:00:48.118721392 -0700
+++ 25-akpm/include/linux/rwsem-spinlock.h	2004-04-14 02:00:48.129719720 -0700
@@ -26,14 +26,14 @@ struct rwsem_waiter;
  * - if activity is 0 then there are no active readers or writers
  * - if activity is +ve then that is the number of active readers
  * - if activity is -1 then there is one active writer
- * - if wait_list is not empty, then there are processes waiting for the semaphore
+ * - if wait_list is not empty, there are processes waiting for the semaphore
  */
 struct rw_semaphore {
-	__s32			activity;
-	spinlock_t		wait_lock;
-	struct list_head	wait_list;
+	__s32 activity;
+	spinlock_t wait_lock;
+	struct list_head wait_list;
 #if RWSEM_DEBUG
-	int			debug;
+	int debug;
 #endif
 };
diff -puN lib/rwsem.c~lindent-rwsem lib/rwsem.c
--- 25/lib/rwsem.c~lindent-rwsem	2004-04-14 02:00:48.120721088 -0700
+++ 25-akpm/lib/rwsem.c	2004-04-14 02:00:48.126720176 -0700
@@ -9,9 +9,9 @@
 #include <linux/module.h>
 
 struct rwsem_waiter {
-	struct list_head	list;
-	struct task_struct	*task;
-	unsigned int		flags;
+	struct list_head list;
+	struct task_struct *task;
+	unsigned int flags;
 #define RWSEM_WAITING_FOR_READ	0x00000001
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
@@ -20,46 +20,53 @@ struct rwsem_waiter {
 #undef rwsemtrace
 void rwsemtrace(struct rw_semaphore *sem, const char *str)
 {
-	printk("sem=%p\n",sem);
-	printk("(sem)=%08lx\n",sem->count);
+	printk("sem=%p\n", sem);
+	printk("(sem)=%08lx\n", sem->count);
 	if (sem->debug)
-		printk("[%d] %s({%08lx})\n",current->pid,str,sem->count);
+		printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
 }
 #endif
 
 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock being released whilst there are processes blocked on it
+ * that can now run
  * - if we come here, then:
- *   - the 'active part' of the count (&0x0000ffff) reached zero but has been re-incremented
- *   - the 'waiting part' of the count (&0xffff0000) is negative (and will still be so)
+ *   - the 'active part' of the count (&0x0000ffff) reached zero but has been
+ *     re-incremented
+ *   - the 'waiting part' of the count (&0xffff0000) is negative (and will
+ *     still be so)
  *   - there must be someone on the queue
  * - the spinlock must be held by the caller
- * - woken process blocks are discarded from the list after having flags zeroised
+ * - woken process blocks are discarded from the list after having flags
+ *   zeroised
  * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
 	struct rwsem_waiter *waiter;
 	struct list_head *next;
 	signed long oldcount;
 	int woken, loop;
 
-	rwsemtrace(sem,"Entering __rwsem_do_wake");
+	rwsemtrace(sem, "Entering __rwsem_do_wake");
 
 	if (!wakewrite)
 		goto dont_wake_writers;
 
-	/* only wake someone up if we can transition the active part of the count from 0 -> 1 */
- try_again:
-	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
+	/* only wake someone if we can transition the active part of the count
+	 * from 0 -> 1 */
+try_again:
+	oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS, sem)
+	    - RWSEM_ACTIVE_BIAS;
 	if (oldcount & RWSEM_ACTIVE_MASK)
 		goto undo;
 
-	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
-	/* try to grant a single write lock if there's a writer at the front of the queue
-	 * - note we leave the 'active part' of the count incremented by 1 and the waiting part
-	 *   incremented by 0x00010000
+	/* try to grant a single write lock if there's a writer at the front
+	 * of the queue - note we leave the 'active part' of the count
+	 * incremented by 1 and the waiting part incremented by 0x00010000
 	 */
 	if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
 		goto readers_only;
@@ -70,35 +77,37 @@ static inline struct rw_semaphore *__rws
 	goto out;
 
 	/* don't want to wake any writers */
- dont_wake_writers:
-	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+dont_wake_writers:
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 	if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
 		goto out;
 
-	/* grant an infinite number of read locks to the readers at the front of the queue
-	 * - note we increment the 'active part' of the count by the number of readers (less one
-	 *   for the activity decrement we've already done) before waking any processes up
+	/* grant an infinite number of read locks to the readers at the front
+	 * of the queue - note we increment the 'active part' of the count by
+	 * the number of readers (less one for the activity decrement we've
+	 * already done) before waking any processes up
 	 */
- readers_only:
+readers_only:
 	woken = 0;
 	do {
 		woken++;
-		if (waiter->list.next==&sem->wait_list)
+		if (waiter->list.next == &sem->wait_list)
 			break;
-		waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);
+		waiter = list_entry(waiter->list.next,
+				    struct rwsem_waiter, list);
 	} while (waiter->flags & RWSEM_WAITING_FOR_READ);
 
 	loop = woken;
-	woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS;
-	woken -= RWSEM_ACTIVE_BIAS;
-	rwsem_atomic_add(woken,sem);
+	woken *= RWSEM_ACTIVE_BIAS - RWSEM_WAITING_BIAS;
+	woken -= RWSEM_ACTIVE_BIAS;
+	rwsem_atomic_add(woken, sem);
 
 	next = sem->wait_list.next;
-	for (; loop>0; loop--) {
-		waiter = list_entry(next,struct rwsem_waiter,list);
+	for (; loop > 0; loop--) {
+		waiter = list_entry(next, struct rwsem_waiter, list);
 		next = waiter->list.next;
 		waiter->flags = 0;
 		wake_up_process(waiter->task);
@@ -107,13 +116,13 @@ static inline struct rw_semaphore *__rws
 	sem->wait_list.next = next;
 	next->prev = &sem->wait_list;
 
- out:
-	rwsemtrace(sem,"Leaving __rwsem_do_wake");
+out:
+	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 
 	/* undo the change to count, but check for a transition 1->0 */
- undo:
-	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem)!=0)
+undo:
+	if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS, sem) != 0)
 		goto out;
 	goto try_again;
 }
@@ -121,29 +130,31 @@ static inline struct rw_semaphore *__rws
 /*
  * wait for a lock to be granted
  */
-static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
-						 struct rwsem_waiter *waiter,
-						 signed long adjustment)
+static inline struct rw_semaphore *
+rwsem_down_failed_common(struct rw_semaphore *sem,
+			 struct rwsem_waiter *waiter, signed long adjustment)
 {
 	struct task_struct *tsk = current;
 	signed long count;
 
-	set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
 	spin_lock(&sem->wait_lock);
 	waiter->task = tsk;
 
-	list_add_tail(&waiter->list,&sem->wait_list);
+	list_add_tail(&waiter->list, &sem->wait_list);
 
-	/* note that we're now waiting on the lock, but no longer actively read-locking */
-	count = rwsem_atomic_update(adjustment,sem);
-
-	/* if there are no longer active locks, wake the front queued process(es) up
-	 * - it might even be this process, since the waker takes a more active part
+	/* note that we're now waiting on the lock, but no longer actively
+	 * read-locking */
+	count = rwsem_atomic_update(adjustment, sem);
+
+	/* if there are no longer active locks, wake the front queued
+	 * process(es) up - it might even be this process, since the waker
+	 * takes a more active part
 	 */
 	if (!(count & RWSEM_ACTIVE_MASK))
-		sem = __rwsem_do_wake(sem,1);
+		sem = __rwsem_do_wake(sem, 1);
 
 	spin_unlock(&sem->wait_lock);
 
@@ -163,32 +174,35 @@ static inline struct rw_semaphore *rwsem
 /*
  * wait for the read lock to be granted
  */
-struct rw_semaphore fastcall __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_read_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem,"Entering rwsem_down_read_failed");
+	rwsemtrace(sem, "Entering rwsem_down_read_failed");
 
 	waiter.flags = RWSEM_WAITING_FOR_READ;
-	rwsem_down_failed_common(sem,&waiter,RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS);
+	rwsem_down_failed_common(sem, &waiter,
+				 RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
 
-	rwsemtrace(sem,"Leaving rwsem_down_read_failed");
+	rwsemtrace(sem, "Leaving rwsem_down_read_failed");
 	return sem;
 }
 
 /*
  * wait for the write lock to be granted
 */
-struct rw_semaphore fastcall __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
+struct rw_semaphore fastcall __sched *
+rwsem_down_write_failed(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem,"Entering rwsem_down_write_failed");
+	rwsemtrace(sem, "Entering rwsem_down_write_failed");
 
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
-	rwsem_down_failed_common(sem,&waiter,-RWSEM_ACTIVE_BIAS);
+	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
 
-	rwsemtrace(sem,"Leaving rwsem_down_write_failed");
+	rwsemtrace(sem, "Leaving rwsem_down_write_failed");
 	return sem;
 }
 
@@ -198,39 +212,39 @@ struct rw_semaphore fastcall __sched *rw
  */
 struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering rwsem_wake");
+	rwsemtrace(sem, "Entering rwsem_wake");
 
 	spin_lock(&sem->wait_lock);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem,1);
+		sem = __rwsem_do_wake(sem, 1);
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem,"Leaving rwsem_wake");
+	rwsemtrace(sem, "Leaving rwsem_wake");
 
 	return sem;
 }
 
 /*
  * downgrade a write lock into a read lock
- * - caller incremented waiting part of count, and discovered it to be still negative
+ * - caller incremented waiting part of count, and discovered it to be still -ve
  * - just wake up any readers at the front of the queue
  */
 struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering rwsem_downgrade_wake");
+	rwsemtrace(sem, "Entering rwsem_downgrade_wake");
 
 	spin_lock(&sem->wait_lock);
 
 	/* do nothing if list empty */
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem,0);
+		sem = __rwsem_do_wake(sem, 0);
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem,"Leaving rwsem_downgrade_wake");
+	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
 
 	return sem;
 }
diff -puN lib/rwsem-spinlock.c~lindent-rwsem lib/rwsem-spinlock.c
--- 25/lib/rwsem-spinlock.c~lindent-rwsem	2004-04-14 02:00:48.121720936 -0700
+++ 25-akpm/lib/rwsem-spinlock.c	2004-04-14 02:00:48.127720024 -0700
@@ -1,5 +1,5 @@
-/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic spinlock
- * implementation
+/* rwsem-spinlock.c: R/W semaphores: contention handling functions for generic
+ * spinlock implementation
  *
  * Copyright (c) 2001 David Howells (dhowells@redhat.com).
  * - Derived partially from idea by Andrea Arcangeli
@@ -10,9 +10,9 @@
 #include <linux/module.h>
 
 struct rwsem_waiter {
-	struct list_head	list;
-	struct task_struct	*task;
-	unsigned int		flags;
+	struct list_head list;
+	struct task_struct *task;
+	unsigned int flags;
 #define RWSEM_WAITING_FOR_READ	0x00000001
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
@@ -22,7 +22,8 @@ void rwsemtrace(struct rw_semaphore *sem
 {
 	if (sem->debug)
 		printk("[%d] %s({%d,%d})\n",
-		       current->pid,str,sem->activity,list_empty(&sem->wait_list)?0:1);
+		       current->pid, str, sem->activity,
+		       list_empty(&sem->wait_list) ? 0 : 1);
 }
 #endif
 
@@ -40,22 +41,24 @@ void fastcall init_rwsem(struct rw_semap
 }
 
 /*
- * handle the lock being released whilst there are processes blocked on it that can now run
+ * handle the lock being released whilst there are processes blocked on it
+ * that can now run
  * - if we come here, then:
  *   - the 'active count' _reached_ zero
  *   - the 'waiting count' is non-zero
  * - the spinlock must be held by the caller
- * - woken process blocks are discarded from the list after having flags zeroised
+ * - woken process blocks are discarded from the list after having flags zeroed
  * - writers are only woken if wakewrite is non-zero
  */
-static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
+static inline struct rw_semaphore *
+__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
 {
 	struct rwsem_waiter *waiter;
 	int woken;
 
-	rwsemtrace(sem,"Entering __rwsem_do_wake");
+	rwsemtrace(sem, "Entering __rwsem_do_wake");
 
-	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
 	if (!wakewrite) {
 		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
@@ -63,9 +66,9 @@ static inline struct rw_semaphore *__rws
 		goto dont_wake_writers;
 	}
 
-	/* if we are allowed to wake writers try to grant a single write lock if there's a
-	 * writer at the front of the queue
-	 * - we leave the 'waiting count' incremented to signify potential contention
+	/* if we are allowed to wake writers try to grant a single write lock
+	 * if there's a writer at the front of the queue - we leave the
+	 * 'waiting count' incremented to signify potential contention
 	 */
 	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
 		sem->activity = -1;
@@ -75,10 +78,11 @@ static inline struct rw_semaphore *__rws
 		goto out;
 	}
 
-	/* grant an infinite number of read locks to the readers at the front of the queue */
- dont_wake_writers:
+	/* grant an infinite number of read locks to the readers at the front
+	 * of the queue */
+dont_wake_writers:
 	woken = 0;
-	while (waiter->flags&RWSEM_WAITING_FOR_READ) {
+	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
 		struct list_head *next = waiter->list.next;
 
 		list_del(&waiter->list);
@@ -87,26 +91,27 @@ static inline struct rw_semaphore *__rws
 		woken++;
 		if (list_empty(&sem->wait_list))
 			break;
-		waiter = list_entry(next,struct rwsem_waiter,list);
+		waiter = list_entry(next, struct rwsem_waiter, list);
 	}
 
 	sem->activity += woken;
 
- out:
-	rwsemtrace(sem,"Leaving __rwsem_do_wake");
+out:
+	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 }
 
 /*
  * wake a single writer
 */
-static inline struct rw_semaphore *__rwsem_wake_one_writer(struct rw_semaphore *sem)
+static inline struct rw_semaphore *
+__rwsem_wake_one_writer(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter *waiter;
 
 	sem->activity = -1;
 
-	waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);
+	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 	list_del(&waiter->list);
 
 	waiter->flags = 0;
@@ -122,11 +127,11 @@ void fastcall __down_read(struct rw_sema
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem,"Entering __down_read");
+	rwsemtrace(sem, "Entering __down_read");
 
 	spin_lock(&sem->wait_lock);
 
-	if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
 		spin_unlock(&sem->wait_lock);
@@ -134,13 +139,13 @@ void fastcall __down_read(struct rw_sema
 	}
 
 	tsk = current;
-	set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
 	waiter.task = tsk;
 	waiter.flags = RWSEM_WAITING_FOR_READ;
 
-	list_add_tail(&waiter.list,&sem->wait_list);
+	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
 	spin_unlock(&sem->wait_lock);
@@ -155,8 +160,8 @@ void fastcall __down_read(struct rw_sema
 
 	tsk->state = TASK_RUNNING;
 
- out:
-	rwsemtrace(sem,"Leaving __down_read");
+out:
+	rwsemtrace(sem, "Leaving __down_read");
 }
 
 /*
@@ -165,11 +170,11 @@ void fastcall __down_read(struct rw_sema
 int fastcall __down_read_trylock(struct rw_semaphore *sem)
 {
 	int ret = 0;
-	rwsemtrace(sem,"Entering __down_read_trylock");
+	rwsemtrace(sem, "Entering __down_read_trylock");
 
 	spin_lock(&sem->wait_lock);
 
-	if (sem->activity>=0 && list_empty(&sem->wait_list)) {
+	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity++;
 		ret = 1;
@@ -177,24 +182,25 @@ int fastcall __down_read_trylock(struct
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem,"Leaving __down_read_trylock");
+	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
 }
 
 /*
  * get a write lock on the semaphore
- * - note that we increment the waiting count anyway to indicate an exclusive lock
+ * - note that we increment the waiting count anyway to indicate an exclusive
+ *   lock
  */
 void fastcall __down_write(struct rw_semaphore *sem)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem,"Entering __down_write");
+	rwsemtrace(sem, "Entering __down_write");
 
 	spin_lock(&sem->wait_lock);
 
-	if (sem->activity==0 && list_empty(&sem->wait_list)) {
+	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
 		spin_unlock(&sem->wait_lock);
@@ -202,13 +208,13 @@ void fastcall __down_write(struct rw_sem
 	}
 
 	tsk = current;
-	set_task_state(tsk,TASK_UNINTERRUPTIBLE);
+	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
 
 	/* set up my own style of waitqueue */
 	waiter.task = tsk;
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
 
-	list_add_tail(&waiter.list,&sem->wait_list);
+	list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we don't need to touch the semaphore struct anymore */
 	spin_unlock(&sem->wait_lock);
@@ -223,8 +229,8 @@ void fastcall __down_write(struct rw_sem
 
 	tsk->state = TASK_RUNNING;
 
- out:
-	rwsemtrace(sem,"Leaving __down_write");
+out:
+	rwsemtrace(sem, "Leaving __down_write");
 }
 
 /*
@@ -233,11 +239,11 @@ void fastcall __down_write(struct rw_sem
 int fastcall __down_write_trylock(struct rw_semaphore *sem)
 {
 	int ret = 0;
-	rwsemtrace(sem,"Entering __down_write_trylock");
+	rwsemtrace(sem, "Entering __down_write_trylock");
 
 	spin_lock(&sem->wait_lock);
 
-	if (sem->activity==0 && list_empty(&sem->wait_list)) {
+	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
 		/* granted */
 		sem->activity = -1;
 		ret = 1;
@@ -245,7 +251,7 @@ int fastcall __down_write_trylock(struct
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem,"Leaving __down_write_trylock");
+	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
 }
 
@@ -254,16 +260,16 @@ int fastcall __down_write_trylock(struct
  */
 void fastcall __up_read(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering __up_read");
+	rwsemtrace(sem, "Entering __up_read");
 
 	spin_lock(&sem->wait_lock);
 
-	if (--sem->activity==0 && !list_empty(&sem->wait_list))
+	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem,"Leaving __up_read");
+	rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -271,7 +277,7 @@ void fastcall __up_read(struct rw_semaph
  */
 void fastcall __up_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering __up_write");
+	rwsemtrace(sem, "Entering __up_write");
 
 	spin_lock(&sem->wait_lock);
 
@@ -281,7 +287,7 @@ void fastcall __up_write(struct rw_semap
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem,"Leaving __up_write");
+	rwsemtrace(sem, "Leaving __up_write");
}
 
 /*
@@ -290,17 +296,17 @@ void fastcall __up_write(struct rw_semap
  */
 void fastcall __downgrade_write(struct rw_semaphore *sem)
 {
-	rwsemtrace(sem,"Entering __downgrade_write");
+	rwsemtrace(sem, "Entering __downgrade_write");
 
 	spin_lock(&sem->wait_lock);
 
 	sem->activity = 1;
 	if (!list_empty(&sem->wait_list))
-		sem = __rwsem_do_wake(sem,0);
+		sem = __rwsem_do_wake(sem, 0);
 
 	spin_unlock(&sem->wait_lock);
 
-	rwsemtrace(sem,"Leaving __downgrade_write");
+	rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
 EXPORT_SYMBOL(init_rwsem);
_