grrrr!  Declaring async_poll() locally in fs/aio.c defeats typechecking: if
the definition in fs/aiopoll.c ever changed, the compiler could not catch the
mismatch.  slap.

Move the prototype into include/linux/aio.h, where both the definition and
its callers can see it.  While there, rename the `iocb' arguments in
fs/aiopoll.c to `kiocb', drop braces around single statements, wrap overlong
lines, trim trailing whitespace and give the slab cache a name without spaces
in it.

 fs/aio.c            |    2 -
 fs/aiopoll.c        |   59 +++++++++++++++++++++++++---------------------------
 include/linux/aio.h |    2 +
 3 files changed, 31 insertions(+), 32 deletions(-)
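
To make the failure mode concrete, here is the problem reduced to a two-file
userspace sketch (the signatures below are invented for illustration, not the
kernel's):

	/* poll.c: the real definition */
	int async_poll(int fd, int events)
	{
		return events;
	}

	/* submit.c: a stale file-local prototype.  The compiler never
	 * sees poll.c's definition from here, so the mismatched types
	 * go unnoticed and the call is undefined behaviour at runtime.
	 */
	int async_poll(long fd, long events);

	int submit(void)
	{
		return async_poll(0L, 1L);
	}

Had the prototype lived in a header included by both files, compiling poll.c
would have failed immediately with `conflicting types for 'async_poll''.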

diff -puN fs/aio.c~aio-poll-cleanup fs/aio.c
--- 25/fs/aio.c~aio-poll-cleanup	2003-05-16 23:11:56.000000000 -0700
+++ 25-akpm/fs/aio.c	2003-05-16 23:11:56.000000000 -0700
@@ -61,8 +61,6 @@ LIST_HEAD(fput_head);
 
 static void aio_kick_handler(void *);
 
-int async_poll(struct kiocb *iocb, int events);
-
 /* aio_setup
  *	Creates the slab caches used by the aio routines, panic on
  *	failure as this is done early during the boot sequence.
diff -puN include/linux/aio.h~aio-poll-cleanup include/linux/aio.h
--- 25/include/linux/aio.h~aio-poll-cleanup	2003-05-16 23:11:56.000000000 -0700
+++ 25-akpm/include/linux/aio.h	2003-05-16 23:11:56.000000000 -0700
@@ -167,6 +167,8 @@ extern int FASTCALL(io_submit_one(struct
 struct kioctx *lookup_ioctx(unsigned long ctx_id);
 int FASTCALL(io_submit_one(struct kioctx *ctx, struct iocb *user_iocb,
 				  struct iocb *iocb));
+int async_poll(struct kiocb *iocb, int events);
+
 
 #define get_ioctx(kioctx)	do { if (unlikely(atomic_read(&(kioctx)->users) <= 0)) BUG(); atomic_inc(&(kioctx)->users); } while (0)
 #define put_ioctx(kioctx)	do { if (unlikely(atomic_dec_and_test(&(kioctx)->users))) __put_ioctx(kioctx); else if (unlikely(atomic_read(&(kioctx)->users) < 0)) BUG(); } while (0)
diff -puN fs/aiopoll.c~aio-poll-cleanup fs/aiopoll.c
--- 25/fs/aiopoll.c~aio-poll-cleanup	2003-05-16 23:12:00.000000000 -0700
+++ 25-akpm/fs/aiopoll.c	2003-05-16 23:16:02.000000000 -0700
@@ -27,10 +27,10 @@ struct async_poll_iocb {
 
 static kmem_cache_t *async_poll_entry_cache;
 
-static inline struct async_poll_iocb *kiocb_to_apiocb(struct kiocb *iocb)
+static inline struct async_poll_iocb *kiocb_to_apiocb(struct kiocb *kiocb)
 {
 	BUG_ON(sizeof(struct async_poll_iocb) > KIOCB_PRIVATE_SIZE);
-	return (struct async_poll_iocb *)iocb->private;
+	return (struct async_poll_iocb *)kiocb->private;
 }
 
 static inline struct kiocb *apiocb_to_kiocb(struct async_poll_iocb *apiocb)
@@ -38,22 +38,21 @@ static inline struct kiocb *apiocb_to_ki
 	return container_of((void *)apiocb, struct kiocb, private);
 }
 
-static void async_poll_freewait(struct async_poll_iocb *apiocb, wait_queue_t *wait)
+static void async_poll_freewait(struct async_poll_iocb *apiocb,
+				wait_queue_t *wait)
 {
 	struct async_poll_entry *entry = apiocb->ehead;
 	struct async_poll_entry *old;
 
 	while (entry) {
-		if (wait != &entry->wait) {
+		if (wait != &entry->wait)
 			remove_wait_queue(entry->whead, &entry->wait);
-		} else {
+		else
 			__remove_wait_queue(entry->whead, &entry->wait);
-		}
 		old = entry;
 		entry = entry->next;
-		if (old != &apiocb->entry[0] && old != &apiocb->entry[1]) {
+		if (old != &apiocb->entry[0] && old != &apiocb->entry[1])
 			kmem_cache_free(async_poll_entry_cache, old);
-		}
 	}
 }
 
@@ -68,7 +67,7 @@ static int async_poll_waiter(wait_queue_
 	mask &= apiocb->events | POLLERR | POLLHUP;
 	if (mask) {
 		if (xchg(&apiocb->armed, NULL)) {
-			async_poll_freewait(apiocb, wait); 
+			async_poll_freewait(apiocb, wait);
 			aio_complete(iocb, mask, 0);
 			return 1;
 		}
@@ -76,27 +75,28 @@ static int async_poll_waiter(wait_queue_
 	return 0;
 }
 
-int async_poll_cancel(struct kiocb *iocb, struct io_event *res)
+int async_poll_cancel(struct kiocb *kiocb, struct io_event *res)
 {
-	struct async_poll_iocb *apiocb = kiocb_to_apiocb(iocb);
+	struct async_poll_iocb *apiocb = kiocb_to_apiocb(kiocb);
 	void *armed;
 
 	armed = xchg(&apiocb->armed, NULL);
-	aio_put_req(iocb);
+	aio_put_req(kiocb);
 	if (armed) {
-		async_poll_freewait(apiocb, NULL); 
+		async_poll_freewait(apiocb, NULL);
  		/*
  		 * Since async_poll_freewait() locks the wait queue, we
 		 * know that async_poll_waiter() is either not going to
 		 * be run or has finished all its work.
  		 */
-  		aio_put_req(iocb);
+  		aio_put_req(kiocb);
 		return 0;
 	}
 	return -EAGAIN;
 }
 
-static void async_poll_queue_proc(struct file *file, wait_queue_head_t *whead, poll_table *pt)
+static void async_poll_queue_proc(struct file *file,
+			wait_queue_head_t *whead, poll_table *pt)
 {
 	struct async_poll_iocb *apiocb = (struct async_poll_iocb *)pt;
 	struct async_poll_entry *entry;
@@ -120,14 +120,14 @@ static void async_poll_queue_proc(struct
 	apiocb->ehead = entry;
 }
 
-int async_poll(struct kiocb *iocb, int events)
+int async_poll(struct kiocb *kiocb, int events)
 {
 	unsigned int mask;
-	struct async_poll_iocb *apiocb = kiocb_to_apiocb(iocb);
+	struct async_poll_iocb *apiocb = kiocb_to_apiocb(kiocb);
 
 	/* Fast path */
-	if (iocb->ki_filp->f_op && iocb->ki_filp->f_op->poll) {
-		mask = iocb->ki_filp->f_op->poll(iocb->ki_filp, NULL);
+	if (kiocb->ki_filp->f_op && kiocb->ki_filp->f_op->poll) {
+		mask = kiocb->ki_filp->f_op->poll(kiocb->ki_filp, NULL);
 		mask &= events | POLLERR | POLLHUP;
 		if (mask & events)
 			return events;
@@ -139,37 +139,36 @@ int async_poll(struct kiocb *iocb, int e
 	apiocb->events = events;
 	apiocb->ehead = NULL;
 
-	iocb->ki_users++;
+	kiocb->ki_users++;
 	wmb();
 
 	mask = DEFAULT_POLLMASK;
-	if (iocb->ki_filp->f_op && iocb->ki_filp->f_op->poll)
-		mask = iocb->ki_filp->f_op->poll(iocb->ki_filp, &apiocb->pt);
+	if (kiocb->ki_filp->f_op && kiocb->ki_filp->f_op->poll)
+		mask = kiocb->ki_filp->f_op->poll(kiocb->ki_filp, &apiocb->pt);
 	mask &= events | POLLERR | POLLHUP;
  	if (mask && xchg(&apiocb->armed, NULL)) {
 		async_poll_freewait(apiocb, NULL);
-		aio_complete(iocb, mask, 0);
+		aio_complete(kiocb, mask, 0);
 	}
 	if (unlikely(apiocb->outofmem) && xchg(&apiocb->armed, NULL)) {
 		async_poll_freewait(apiocb, NULL);
-		aio_put_req(iocb);
-		aio_put_req(iocb);
+		aio_put_req(kiocb);
+		aio_put_req(kiocb);
 		return -ENOMEM;
 	}
 
-	iocb->ki_cancel = async_poll_cancel;
-	aio_put_req(iocb);
+	kiocb->ki_cancel = async_poll_cancel;
+	aio_put_req(kiocb);
 	return -EIOCBQUEUED;
 }
 
 static int __init async_poll_init(void)
 {
-	async_poll_entry_cache = kmem_cache_create("async poll entry",
-                        sizeof(struct async_poll_entry), 0, 0, NULL, NULL);
+	async_poll_entry_cache = kmem_cache_create("async_poll",
+			sizeof(struct async_poll_entry), 0, 0, NULL, NULL);
 	if (!async_poll_entry_cache)
 		panic("unable to alloc poll_entry_cache");
 	return 0;
 }
 
 module_init(async_poll_init);
-

_