/*
 * BK Id: SCCS/s.semaphore.h 1.15 05/17/01 18:14:25 cort
 */
#ifndef _PPC_SEMAPHORE_H
#define _PPC_SEMAPHORE_H

/*
 * Swiped from asm-sparc/semaphore.h and modified
 * -- Cort (cort@cs.nmt.edu)
 *
 * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h
 * -- Ani Joshi (ajoshi@unixbox.com)
 *
 * Remove spinlock-based RW semaphores; RW semaphore definitions are
 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
 * Rework semaphores to use atomic_dec_if_positive.
 * -- Paul Mackerras (paulus@samba.org)
 */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
	long __magic;
#endif
};

#if WAITQUEUE_DEBUG
# define __SEM_DEBUG_INIT(name) \
		, (long)&(name).__magic
#else
# define __SEM_DEBUG_INIT(name)
#endif

#define __SEMAPHORE_INITIALIZER(name, count) \
	{ ATOMIC_INIT(count), \
	  __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
	  __SEM_DEBUG_INIT(name) }

#define __MUTEX_INITIALIZER(name) \
	__SEMAPHORE_INITIALIZER(name, 1)

#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)

static inline void sema_init (struct semaphore *sem, int val)
{
	atomic_set(&sem->count, val);
	init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
	sem->__magic = (long)&sem->__magic;
#endif
}

static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

extern void __down(struct semaphore * sem);
extern int  __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

extern inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	/*
	 * Try to get the semaphore, take the slow path if we fail.
	 */
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
	smp_wmb();
}

extern inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_interruptible(sem);
	smp_wmb();
	return ret;
}

extern inline int down_trylock(struct semaphore * sem)
{
	int ret;

#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	/*
	 * atomic_dec_if_positive only decrements count if the result
	 * stays non-negative, so ret is 0 if we got the semaphore and
	 * 1 if it was already held.  No sleeping in either case.
	 */
	ret = atomic_dec_if_positive(&sem->count) < 0;
	smp_wmb();
	return ret;
}

extern inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
	CHECK_MAGIC(sem->__magic);
#endif

	/* Release: if count goes from negative, someone is sleeping; wake them. */
	smp_wmb();
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}

#endif /* __KERNEL__ */

#endif /* !(_PPC_SEMAPHORE_H) */
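
/*
 * Usage sketch (illustrative only, not part of the original header): how a
 * hypothetical driver might use these primitives to serialize access to
 * shared state from process context.  The names example_lock, example_count
 * and example_update are assumptions for this example; the block is wrapped
 * in #if 0 so it is never compiled.
 */
#if 0
#include <linux/errno.h>
#include <asm/semaphore.h>

static DECLARE_MUTEX(example_lock);	/* binary semaphore, count starts at 1 */
static int example_count;

static int example_update(void)
{
	/* Sleep until the semaphore is free; process context only. */
	if (down_interruptible(&example_lock))
		return -EINTR;		/* woken by a signal before acquiring */

	example_count++;		/* critical section */

	up(&example_lock);		/* release; wakes one sleeper, if any */
	return 0;
}
#endif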