From: Stephen Rothwell I figured I might as well do some simple cleanups. This patch does: - single bit int bitfields are a bit suspect and Andrew pointed out recently that they are probably slower to access than ints - get rid of some more studly caps - define the semaphore and the atomic in struct alloc_parms rather than pointers to them since we just allocate them on the stack anyway. - one small white space cleanup - use the HvLpIndexInvalid constant instead of its value Built and booted on iSeries (which is the only place it is used). Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton --- 25-akpm/arch/ppc64/kernel/viopath.c | 47 ++++++++++++++++-------------------- 1 files changed, 22 insertions(+), 25 deletions(-) diff -puN arch/ppc64/kernel/viopath.c~ppc64-iseries-cleanup-viopath arch/ppc64/kernel/viopath.c --- 25/arch/ppc64/kernel/viopath.c~ppc64-iseries-cleanup-viopath 2005-03-14 19:58:18.000000000 -0800 +++ 25-akpm/arch/ppc64/kernel/viopath.c 2005-03-14 19:58:18.000000000 -0800 @@ -42,6 +42,7 @@ #include #include +#include #include #include #include @@ -56,8 +57,8 @@ * But this allows for other support in the future. */ static struct viopathStatus { - int isOpen:1; /* Did we open the path? */ - int isActive:1; /* Do we have a mon msg outstanding */ + int isOpen; /* Did we open the path? */ + int isActive; /* Do we have a mon msg outstanding */ int users[VIO_MAX_SUBTYPES]; HvLpInstanceId mSourceInst; HvLpInstanceId mTargetInst; @@ -81,10 +82,10 @@ static void handleMonitorEvent(struct Hv * blocks on the semaphore and the handler posts the semaphore. However, * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ... */ -struct doneAllocParms_t { - struct semaphore *sem; +struct alloc_parms { + struct semaphore sem; int number; - atomic_t *wait_atomic; + atomic_t wait_atomic; int used_wait_atomic; }; @@ -97,9 +98,9 @@ static u8 viomonseq = 22; /* Our hosting logical partition. 
We get this at startup * time, and different modules access this variable directly. */ -HvLpIndex viopath_hostLp = 0xff; /* HvLpIndexInvalid */ +HvLpIndex viopath_hostLp = HvLpIndexInvalid; EXPORT_SYMBOL(viopath_hostLp); -HvLpIndex viopath_ourLp = 0xff; +HvLpIndex viopath_ourLp = HvLpIndexInvalid; EXPORT_SYMBOL(viopath_ourLp); /* For each kind of incoming event we set a pointer to a @@ -200,7 +201,7 @@ EXPORT_SYMBOL(viopath_isactive); /* * We cache the source and target instance ids for each - * partition. + * partition. */ HvLpInstanceId viopath_sourceinst(HvLpIndex lp) { @@ -450,36 +451,33 @@ static void vio_handleEvent(struct HvLpE static void viopath_donealloc(void *parm, int number) { - struct doneAllocParms_t *parmsp = (struct doneAllocParms_t *)parm; + struct alloc_parms *parmsp = parm; parmsp->number = number; if (parmsp->used_wait_atomic) - atomic_set(parmsp->wait_atomic, 0); + atomic_set(&parmsp->wait_atomic, 0); else - up(parmsp->sem); + up(&parmsp->sem); } static int allocateEvents(HvLpIndex remoteLp, int numEvents) { - struct doneAllocParms_t parms; - DECLARE_MUTEX_LOCKED(Semaphore); - atomic_t wait_atomic; + struct alloc_parms parms; if (system_state != SYSTEM_RUNNING) { parms.used_wait_atomic = 1; - atomic_set(&wait_atomic, 1); - parms.wait_atomic = &wait_atomic; + atomic_set(&parms.wait_atomic, 1); } else { parms.used_wait_atomic = 0; - parms.sem = &Semaphore; + init_MUTEX_LOCKED(&parms.sem); } mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250, /* It would be nice to put a real number here! 
*/ numEvents, &viopath_donealloc, &parms); if (system_state != SYSTEM_RUNNING) { - while (atomic_read(&wait_atomic)) + while (atomic_read(&parms.wait_atomic)) mb(); } else - down(&Semaphore); + down(&parms.sem); return parms.number; } @@ -558,8 +556,7 @@ int viopath_close(HvLpIndex remoteLp, in unsigned long flags; int i; int numOpen; - struct doneAllocParms_t doneAllocParms; - DECLARE_MUTEX_LOCKED(Semaphore); + struct alloc_parms parms; if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid)) return -EINVAL; @@ -580,11 +577,11 @@ int viopath_close(HvLpIndex remoteLp, in spin_unlock_irqrestore(&statuslock, flags); - doneAllocParms.used_wait_atomic = 0; - doneAllocParms.sem = &Semaphore; + parms.used_wait_atomic = 0; + init_MUTEX_LOCKED(&parms.sem); mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, - numReq, &viopath_donealloc, &doneAllocParms); - down(&Semaphore); + numReq, &viopath_donealloc, &parms); + down(&parms.sem); spin_lock_irqsave(&statuslock, flags); for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++) _