diff -urN slab-kiobuf-ref/drivers/char/raw.c slab-kiobuf/drivers/char/raw.c --- slab-kiobuf-ref/drivers/char/raw.c Sat May 4 13:48:24 2002 +++ slab-kiobuf/drivers/char/raw.c Sat May 4 13:48:36 2002 @@ -376,9 +376,9 @@ break; for (i=0; i < blocks; i++) - iobuf->blocks[i] = blocknr++; + iobuf->kio_blocks[i] = blocknr++; - err = brw_kiovec(rw, 1, &iobuf, dev, iobuf->blocks, sector_size); + err = brw_kiovec(rw, 1, &iobuf, dev, iobuf->kio_blocks, sector_size); if (rw == READ && err > 0) mark_dirty_kiobuf(iobuf, err); diff -urN slab-kiobuf-ref/drivers/md/lvm-snap.c slab-kiobuf/drivers/md/lvm-snap.c --- slab-kiobuf-ref/drivers/md/lvm-snap.c Sat May 4 13:48:24 2002 +++ slab-kiobuf/drivers/md/lvm-snap.c Sat May 4 13:48:36 2002 @@ -353,7 +353,7 @@ unsigned long phys_start; int idx = lv_snap->lv_remap_ptr, chunk_size = lv_snap->lv_chunk_size; struct kiobuf * iobuf = lv_snap->lv_iobuf; - unsigned long *blocks = iobuf->blocks; + unsigned long *blocks = iobuf->kio_blocks; int blksize_snap, blksize_org, min_blksize, max_blksize; int max_sectors, nr_sectors; diff -urN slab-kiobuf-ref/drivers/mtd/devices/blkmtd.c slab-kiobuf/drivers/mtd/devices/blkmtd.c --- slab-kiobuf-ref/drivers/mtd/devices/blkmtd.c Mon Feb 25 22:05:07 2002 +++ slab-kiobuf/drivers/mtd/devices/blkmtd.c Sat May 4 13:48:42 2002 @@ -235,7 +235,7 @@ return -ENOMEM; } #else - blocks = iobuf->blocks; + blocks = iobuf->kio_blocks; #endif iobuf->offset = 0; @@ -329,7 +329,7 @@ return 0; } #else - blocks = iobuf->blocks; + blocks = iobuf->kio_blocks; #endif DEBUG(2, "blkmtd: writetask: entering main loop\n"); diff -urN slab-kiobuf-ref/fs/buffer.c slab-kiobuf/fs/buffer.c --- slab-kiobuf-ref/fs/buffer.c Sat May 4 13:48:24 2002 +++ slab-kiobuf/fs/buffer.c Sat May 4 13:48:36 2002 @@ -2103,7 +2103,7 @@ int generic_direct_IO(int rw, struct inode * inode, struct kiobuf * iobuf, unsigned long blocknr, int blocksize, get_block_t * get_block) { int i, nr_blocks, retval; - unsigned long * blocks = iobuf->blocks; + unsigned long * 
blocks = iobuf->kio_blocks; int length; length = iobuf->length; @@ -2145,11 +2145,10 @@ /* patch length to handle short I/O */ iobuf->length = i * blocksize; - retval = brw_kiovec(rw, 1, &iobuf, inode->i_dev, iobuf->blocks, blocksize); + retval = brw_kiovec(rw, 1, &iobuf, inode->i_dev, iobuf->kio_blocks, blocksize); /* restore orig length */ iobuf->length = length; out: - return retval; } @@ -2257,7 +2256,7 @@ length = iobuf->length; iobuf->errno = 0; if (!bhs) - bhs = iobuf->bh; + bhs = iobuf->kio_bh; for (pageind = 0; pageind < iobuf->nr_pages; pageind++) { map = iobuf->maplist[pageind]; diff -urN slab-kiobuf-ref/fs/dcache.c slab-kiobuf/fs/dcache.c --- slab-kiobuf-ref/fs/dcache.c Sat May 4 13:48:23 2002 +++ slab-kiobuf/fs/dcache.c Sat May 4 13:48:36 2002 @@ -1252,6 +1252,7 @@ extern void bdev_cache_init(void); extern void cdev_cache_init(void); +extern void kio_cache_init(void); void __init vfs_caches_init(unsigned long mempages) { @@ -1286,4 +1287,5 @@ mnt_init(mempages); bdev_cache_init(); cdev_cache_init(); + kio_cache_init(); } diff -urN slab-kiobuf-ref/fs/iobuf.c slab-kiobuf/fs/iobuf.c --- slab-kiobuf-ref/fs/iobuf.c Sat May 4 13:48:24 2002 +++ slab-kiobuf/fs/iobuf.c Sat May 4 13:48:36 2002 @@ -10,6 +10,8 @@ #include #include +static kmem_cache_t *kiobuf_cachep; + void end_kio_request(struct kiobuf *kiobuf, int uptodate) { if ((!uptodate) && !kiobuf->errno) @@ -22,40 +24,60 @@ } } -static void kiobuf_init(struct kiobuf *iobuf) +static int kiobuf_init(struct kiobuf *iobuf) { init_waitqueue_head(&iobuf->wait_queue); - iobuf->array_len = KIO_STATIC_PAGES; - iobuf->maplist = iobuf->map_array; + iobuf->array_len = 0; iobuf->nr_pages = 0; iobuf->locked = 0; iobuf->dovary = 0; atomic_set(&iobuf->io_count, 0); iobuf->end_io = NULL; + iobuf->kio_bh = NULL; + iobuf->kio_blocks = NULL; + return expand_kiobuf(iobuf, KIO_STATIC_PAGES); } int alloc_kiobuf_bhs(struct kiobuf * kiobuf) { int i; + kiobuf->kio_blocks = kmalloc(KIO_MAX_SECTORS * sizeof(unsigned long), 
GFP_KERNEL); + if (!kiobuf->kio_blocks) + goto nomem; + + kiobuf->kio_bh = kmalloc(sizeof(struct buffer_head *) * KIO_MAX_SECTORS, GFP_KERNEL); + if (!kiobuf->kio_bh) + goto nomem; + for (i = 0; i < KIO_MAX_SECTORS; i++) - if (!(kiobuf->bh[i] = kmem_cache_alloc(bh_cachep, SLAB_KERNEL))) { - while (i--) { - kmem_cache_free(bh_cachep, kiobuf->bh[i]); - kiobuf->bh[i] = NULL; - } - return -ENOMEM; + if (!(kiobuf->kio_bh[i] = kmem_cache_alloc(bh_cachep, SLAB_KERNEL))) { + while (i--) + kmem_cache_free(bh_cachep, kiobuf->kio_bh[i]); + memset(kiobuf->kio_bh, 0, sizeof(struct buffer_head *) * KIO_MAX_SECTORS); /* slow path */ + goto nomem; } return 0; + +nomem: + free_kiobuf_bhs(kiobuf); + return -ENOMEM; } void free_kiobuf_bhs(struct kiobuf * kiobuf) { int i; - for (i = 0; i < KIO_MAX_SECTORS; i++) { - kmem_cache_free(bh_cachep, kiobuf->bh[i]); - kiobuf->bh[i] = NULL; + if (kiobuf->kio_bh) { + for (i = 0; i < KIO_MAX_SECTORS; i++) + if (kiobuf->kio_bh[i]) + kmem_cache_free(bh_cachep, kiobuf->kio_bh[i]); + kfree(kiobuf->kio_bh); + kiobuf->kio_bh = NULL; + } + if (kiobuf->kio_blocks) { + kfree(kiobuf->kio_blocks); + kiobuf->kio_blocks = NULL; } } @@ -65,14 +87,18 @@ struct kiobuf *iobuf; for (i = 0; i < nr; i++) { - iobuf = vmalloc(sizeof(struct kiobuf)); + iobuf = kmem_cache_alloc(kiobuf_cachep, GFP_KERNEL); if (!iobuf) { free_kiovec(i, bufp); return -ENOMEM; } - kiobuf_init(iobuf); + if (kiobuf_init(iobuf)) { + kmem_cache_free(kiobuf_cachep, iobuf); + free_kiovec(i, bufp); + return -ENOMEM; + } if (alloc_kiobuf_bhs(iobuf)) { - vfree(iobuf); + kmem_cache_free(kiobuf_cachep, iobuf); free_kiovec(i, bufp); return -ENOMEM; } @@ -91,10 +117,9 @@ iobuf = bufp[i]; if (iobuf->locked) unlock_kiovec(1, &iobuf); - if (iobuf->array_len > KIO_STATIC_PAGES) - kfree (iobuf->maplist); + kfree (iobuf->maplist); free_kiobuf_bhs(iobuf); - vfree(bufp[i]); + kmem_cache_free(kiobuf_cachep, bufp[i]); } } @@ -105,20 +130,31 @@ if (iobuf->array_len >= wanted) return 0; - maplist = (struct page **) 
- kmalloc(wanted * sizeof(struct page **), GFP_KERNEL); + maplist = (struct page **) kmalloc(wanted * sizeof(struct page **), GFP_KERNEL); if (!maplist) return -ENOMEM; + /* + * The below check makes sense only under the big kernel lock, + * it should be changed in 2.5, current way is bad design. + * It's not needed anyways because even 2.4 doesn't hold the + * big kernel lock here. Left it here just because it doesn't hurt + * either. + */ /* Did it grow while we waited? */ if (iobuf->array_len >= wanted) { kfree(maplist); return 0; } - + + /* + * Let's hope every arch implements a memcpy that as expected + * first checks the size, and then only if the size is > 0 goes to + * dereference the buffer pointer. + */ memcpy (maplist, iobuf->maplist, iobuf->array_len * sizeof(struct page **)); - if (iobuf->array_len > KIO_STATIC_PAGES) + if (iobuf->array_len) kfree (iobuf->maplist); iobuf->maplist = maplist; @@ -148,5 +184,10 @@ remove_wait_queue(&kiobuf->wait_queue, &wait); } - - +void __init kio_cache_init(void) +{ + kiobuf_cachep = kmem_cache_create("kiobuf", sizeof(struct kiobuf), + 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + if (!kiobuf_cachep) + panic("Cannot create kiobuf SLAB cache"); +} diff -urN slab-kiobuf-ref/include/linux/iobuf.h slab-kiobuf/include/linux/iobuf.h --- slab-kiobuf-ref/include/linux/iobuf.h Sat May 4 13:48:24 2002 +++ slab-kiobuf/include/linux/iobuf.h Sat May 4 13:48:36 2002 @@ -50,10 +50,8 @@ unsigned int locked : 1, /* If set, pages has been locked */ dovary : 1; /* If set, do variable size IO */ - /* Always embed enough struct pages for atomic IO */ - struct page * map_array[KIO_STATIC_PAGES]; - struct buffer_head * bh[KIO_MAX_SECTORS]; - unsigned long blocks[KIO_MAX_SECTORS]; + struct buffer_head ** kio_bh; + unsigned long * kio_blocks; /* Dynamic state for IO completion: */ atomic_t io_count; /* IOs still in progress */