diff -urNp varyioref/drivers/block/ll_rw_blk.c varyio/drivers/block/ll_rw_blk.c
--- varyioref/drivers/block/ll_rw_blk.c	Mon Jul 29 00:31:01 2002
+++ varyio/drivers/block/ll_rw_blk.c	Mon Jul 29 00:31:39 2002
@@ -118,6 +118,13 @@ int * max_readahead[MAX_BLKDEV];
  */
 int * max_sectors[MAX_BLKDEV];
 
+/*
+ * blkdev_varyio indicates if variable size IO can be done on a device.
+ *
+ * Currently used for doing variable size IO on RAW devices.
+ */
+char * blkdev_varyio[MAX_BLKDEV];
+
 unsigned long blk_max_low_pfn, blk_max_pfn;
 int blk_nohighio = 0;
 
@@ -1180,6 +1187,39 @@ void submit_bh(int rw, struct buffer_hea
 
 	generic_make_request(rw, bh);
 
+	switch (rw) {
+	case WRITE:
+		kstat.pgpgout += count;
+		break;
+	default:
+		kstat.pgpgin += count;
+		break;
+	}
+	conditional_schedule();
+}
+
+/*
+ * submit_bh_blknr() - same as submit_bh() except that b_rsector is
+ * set to b_blocknr.  Used for RAW VARY.
+ */
+void submit_bh_blknr(int rw, struct buffer_head * bh)
+{
+	int count = bh->b_size >> 9;
+
+	if (!test_bit(BH_Lock, &bh->b_state))
+		BUG();
+
+	set_bit(BH_Req, &bh->b_state);
+
+	/*
+	 * First step, 'identity mapping' - RAID or LVM might
+	 * further remap this.
+	 */
+	bh->b_rdev = bh->b_dev;
+	bh->b_rsector = bh->b_blocknr;
+
+	generic_make_request(rw, bh);
+
 	switch (rw) {
 	case WRITE:
 		kstat.pgpgout += count;
diff -urNp varyioref/drivers/char/raw.c varyio/drivers/char/raw.c
--- varyioref/drivers/char/raw.c	Mon Jul 22 17:29:09 2002
+++ varyio/drivers/char/raw.c	Mon Jul 29 00:31:39 2002
@@ -23,6 +23,7 @@ typedef struct raw_device_data_s {
 	struct block_device *binding;
 	int inuse, sector_size, sector_bits;
 	struct semaphore mutex;
+	int can_do_vary;
 } raw_device_data_t;
 
 static raw_device_data_t raw_devices[256];
@@ -119,6 +120,8 @@ int raw_open(struct inode *inode, struct
 	if (raw_devices[minor].inuse++)
 		goto out;
 
+	raw_devices[minor].can_do_vary =
+		get_blkdev_varyio(MAJOR(rdev), MINOR(rdev));
 	/*
 	 * Don't interfere with mounted devices: we cannot safely set
 	 * the blocksize on a device which is already mounted.
@@ -128,6 +131,7 @@ int raw_open(struct inode *inode, struct
 	if (is_mounted(rdev)) {
 		if (blksize_size[MAJOR(rdev)])
 			sector_size = blksize_size[MAJOR(rdev)][MINOR(rdev)];
+		raw_devices[minor].can_do_vary = 0;
 	} else {
 		if (hardsect_size[MAJOR(rdev)])
 			sector_size = hardsect_size[MAJOR(rdev)][MINOR(rdev)];
@@ -135,6 +139,7 @@ int raw_open(struct inode *inode, struct
 
 	set_blocksize(rdev, sector_size);
 	raw_devices[minor].sector_size = sector_size;
+	filp->f_iobuf->dovary = raw_devices[minor].can_do_vary;
 
 	for (sector_bits = 0; !(sector_size & 1); )
 		sector_size>>=1, sector_bits++;
@@ -322,6 +327,7 @@ ssize_t rw_raw_dev(int rw, struct file *
 		if (err)
 			goto out;
 		new_iobuf = 1;
+		iobuf->dovary = raw_devices[minor].can_do_vary;
 	}
 
 	dev = to_kdev_t(raw_devices[minor].binding->bd_dev);
diff -urNp varyioref/drivers/scsi/aic7xxx/aic7xxx_host.h varyio/drivers/scsi/aic7xxx/aic7xxx_host.h
--- varyioref/drivers/scsi/aic7xxx/aic7xxx_host.h	Mon Jul 29 00:30:58 2002
+++ varyio/drivers/scsi/aic7xxx/aic7xxx_host.h	Mon Jul 29 00:31:39 2002
@@ -91,6 +91,7 @@ int ahc_linux_abort(Scsi_Cmnd *);
 	use_clustering: ENABLE_CLUSTERING,		\
 	use_new_eh_code: 1,				\
 	highmem_io: 1,					\
+	can_do_varyio: 1,				\
 }
 
 #endif /* _AIC7XXX_HOST_H_ */
diff -urNp varyioref/drivers/scsi/hosts.h varyio/drivers/scsi/hosts.h
--- varyioref/drivers/scsi/hosts.h	Mon Jul 29 00:30:58 2002
+++ varyio/drivers/scsi/hosts.h	Mon Jul 29 00:31:39 2002
@@ -294,6 +294,11 @@ typedef struct SHT
 	unsigned highmem_io:1;
 
 	/*
+	 * True for drivers which can handle variable length IO
+	 */
+	unsigned can_do_varyio:1;
+
+	/*
 	 * Name of proc directory
 	 */
 	char *proc_name;
diff -urNp varyioref/drivers/scsi/qlogicisp.h varyio/drivers/scsi/qlogicisp.h
--- varyioref/drivers/scsi/qlogicisp.h	Fri Nov 12 13:40:46 1999
+++ varyio/drivers/scsi/qlogicisp.h	Mon Jul 29 00:31:39 2002
@@ -84,7 +84,8 @@ int isp1020_biosparam(Disk *, kdev_t, in
 	cmd_per_lun:		1,			\
 	present:		0,			\
 	unchecked_isa_dma:	0,			\
-	use_clustering:		DISABLE_CLUSTERING	\
+	use_clustering:		DISABLE_CLUSTERING,	\
+	can_do_varyio:		1,			\
 }
 
 #endif /* _QLOGICISP_H */
diff -urNp varyioref/drivers/scsi/sd.c varyio/drivers/scsi/sd.c
--- varyioref/drivers/scsi/sd.c	Mon Jul 29 00:31:06 2002
+++ varyio/drivers/scsi/sd.c	Mon Jul 29 00:32:05 2002
@@ -95,6 +95,7 @@ static int *sd_sizes;
 static int *sd_blocksizes;
 static int *sd_hardsizes;	/* Hardware sector size */
 static int *sd_max_sectors;
+static char *sd_varyio;
 
 static int check_scsidisk_media_change(kdev_t);
 static int fop_revalidate_scsidisk(kdev_t);
@@ -1140,6 +1141,12 @@ static int sd_init()
 	if (!sd_max_sectors)
 		goto cleanup_max_sectors;
 
+	sd_varyio = kmalloc((sd_template.dev_max << 4), GFP_ATOMIC);
+	if (!sd_varyio)
+		goto cleanup_varyio;
+
+	memset(sd_varyio, 0, (sd_template.dev_max << 4));
+
 	for (i = 0; i < sd_template.dev_max << 4; i++) {
 		sd_blocksizes[i] = 1024;
 		sd_hardsizes[i] = 512;
@@ -1204,6 +1211,8 @@ cleanup_gendisks_de_arr:
 	kfree(sd_gendisks);
 	sd_gendisks = NULL;
 cleanup_sd_gendisks:
+	kfree(sd_varyio);
+cleanup_varyio:
 	kfree(sd_max_sectors);
 cleanup_max_sectors:
 	kfree(sd_hardsizes);
@@ -1268,6 +1277,8 @@ static int sd_detect(Scsi_Device * SDp)
 	return 1;
 }
 
+#define SD_DISK_MAJOR(i) SD_MAJOR((i) >> 4)
+
 static int sd_attach(Scsi_Device * SDp)
 {
 	unsigned int devnum;
@@ -1306,6 +1317,14 @@ static int sd_attach(Scsi_Device * SDp)
 	printk("Attached scsi %sdisk %s at scsi%d, channel %d, id %d, lun %d\n",
 	       SDp->removable ? "removable " : "",
 	       nbuff, SDp->host->host_no, SDp->channel, SDp->id, SDp->lun);
+
+	if (SDp->host->hostt->can_do_varyio) {
+		if (blkdev_varyio[SD_DISK_MAJOR(i)] == NULL) {
+			blkdev_varyio[SD_DISK_MAJOR(i)] =
+				sd_varyio + ((i / SCSI_DISKS_PER_MAJOR) << 8);
+		}
+		memset(blkdev_varyio[SD_DISK_MAJOR(i)] + (devnum << 4), 1, 16);
+	}
 	return 0;
 }
 
@@ -1438,6 +1457,7 @@ static void __exit exit_sd(void)
 	kfree(sd_sizes);
 	kfree(sd_blocksizes);
 	kfree(sd_hardsizes);
+	kfree(sd_varyio);
 	for (i = 0; i < N_USED_SD_MAJORS; i++) {
 		kfree(sd_gendisks[i].de_arr);
 		kfree(sd_gendisks[i].flags);
diff -urNp varyioref/fs/buffer.c varyio/fs/buffer.c
--- varyioref/fs/buffer.c	Mon Jul 29 00:31:07 2002
+++ varyio/fs/buffer.c	Mon Jul 29 00:31:39 2002
@@ -2191,9 +2191,9 @@ static int wait_kio(int rw, int nr, stru
 	err = 0;
 
 	for (i = nr; --i >= 0; ) {
-		iosize += size;
 		tmp = bh[i];
 		wait_on_buffer(tmp);
+		iosize += tmp->b_size;
 
 		if (!buffer_uptodate(tmp)) {
 			/* We are traversing bh'es in reverse order so
@@ -2235,6 +2235,7 @@ int brw_kiovec(int rw, int nr, struct ki
 	struct kiobuf *	iobuf = NULL;
 	struct page *	map;
 	struct buffer_head *tmp, **bhs = NULL;
+	int		iosize = size;
 
 	if (!nr)
 		return 0;
@@ -2271,7 +2272,7 @@
 			}
 
 			while (length > 0) {
-				blocknr = b[bufind++];
+				blocknr = b[bufind];
 				if (blocknr == -1UL) {
 					if (rw == READ) {
 						/* there was an hole in the filesystem */
@@ -2284,9 +2285,15 @@
 				} else
 					BUG();
 			}
+			if (iobuf->dovary && ((offset & RAWIO_BLOCKMASK) == 0)) {
+				iosize = RAWIO_BLOCKSIZE;
+				if (iosize > length)
+					iosize = length;
+			}
+			bufind += (iosize/size);
 
 			tmp = bhs[bhind++];
-			tmp->b_size = size;
+			tmp->b_size = iosize;
 			set_bh_page(tmp, map, offset);
 			tmp->b_this_page = tmp;
 
@@ -2302,7 +2309,10 @@
 				set_bit(BH_Uptodate, &tmp->b_state);
 
 			atomic_inc(&iobuf->io_count);
-			submit_bh(rw, tmp);
+			if (iobuf->dovary)
+				submit_bh_blknr(rw, tmp);
+			else
+				submit_bh(rw, tmp);
 			/*
 			 * Wait for IO if we have got too much
 			 */
@@ -2317,8 +2327,8 @@
 			}
 
 		skip_block:
-			length -= size;
-			offset += size;
+			length -= iosize;
+			offset += iosize;
 
 			if (offset >= PAGE_SIZE) {
 				offset = 0;
diff -urNp varyioref/fs/iobuf.c varyio/fs/iobuf.c
--- varyioref/fs/iobuf.c	Mon Jul 22 17:29:19 2002
+++ varyio/fs/iobuf.c	Mon Jul 29 00:31:39 2002
@@ -29,6 +29,7 @@ static void kiobuf_init(struct kiobuf *i
 	iobuf->maplist   = iobuf->map_array;
 	iobuf->nr_pages  = 0;
 	iobuf->locked    = 0;
+	iobuf->dovary    = 0;
 	atomic_set(&iobuf->io_count, 0);
 	iobuf->end_io    = NULL;
 }
diff -urNp varyioref/include/linux/blkdev.h varyio/include/linux/blkdev.h
--- varyioref/include/linux/blkdev.h	Mon Jul 29 00:31:00 2002
+++ varyio/include/linux/blkdev.h	Mon Jul 29 00:31:39 2002
@@ -225,6 +225,8 @@ extern int * max_sectors[MAX_BLKDEV];
 
 extern int * max_segments[MAX_BLKDEV];
 
+extern char * blkdev_varyio[MAX_BLKDEV];
+
 #define MAX_SEGMENTS 128
 #define MAX_SECTORS 255
 
@@ -278,4 +280,12 @@ static inline unsigned int block_size(kd
 	return retval;
 }
 
+static inline int get_blkdev_varyio(int major, int minor)
+{
+	int retval = 0;
+	if (blkdev_varyio[major]) {
+		retval = blkdev_varyio[major][minor];
+	}
+	return retval;
+}
 #endif
diff -urNp varyioref/include/linux/fs.h varyio/include/linux/fs.h
--- varyioref/include/linux/fs.h	Mon Jul 29 00:31:02 2002
+++ varyio/include/linux/fs.h	Mon Jul 29 00:31:39 2002
@@ -1375,6 +1375,7 @@ extern struct buffer_head * get_hash_tab
 extern struct buffer_head * getblk(kdev_t, int, int);
 extern void ll_rw_block(int, int, struct buffer_head * bh[]);
 extern void submit_bh(int, struct buffer_head *);
+extern void submit_bh_blknr(int, struct buffer_head *);
 extern int is_read_only(kdev_t);
 extern void __brelse(struct buffer_head *);
 static inline void brelse(struct buffer_head *buf)
diff -urNp varyioref/include/linux/iobuf.h varyio/include/linux/iobuf.h
--- varyioref/include/linux/iobuf.h	Tue Jul 16 23:56:43 2002
+++ varyio/include/linux/iobuf.h	Mon Jul 29 00:31:39 2002
@@ -28,6 +28,9 @@
 #define KIO_STATIC_PAGES	(KIO_MAX_ATOMIC_IO / (PAGE_SIZE >> 10) + 1)
 #define KIO_MAX_SECTORS		(KIO_MAX_ATOMIC_IO * 2)
 
+#define RAWIO_BLOCKSIZE		4096
+#define RAWIO_BLOCKMASK		(RAWIO_BLOCKSIZE-1)
+
 /* The main kiobuf struct used for all our IO! */
 
 struct kiobuf 
@@ -44,7 +47,8 @@ struct kiobuf
 
 	struct page **	maplist;
 
-	unsigned int	locked : 1;	/* If set, pages has been locked */
+	unsigned int	locked : 1,	/* If set, pages has been locked */
+			dovary : 1;	/* If set, do variable size IO */
 
 	/* Always embed enough struct pages for atomic IO */
 	struct page *	map_array[KIO_STATIC_PAGES];
diff -urNp varyioref/kernel/ksyms.c varyio/kernel/ksyms.c
--- varyioref/kernel/ksyms.c	Mon Jul 29 00:31:07 2002
+++ varyio/kernel/ksyms.c	Mon Jul 29 00:31:39 2002
@@ -325,6 +325,7 @@ EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(refile_buffer);
 EXPORT_SYMBOL(max_sectors);
 EXPORT_SYMBOL(max_readahead);
+EXPORT_SYMBOL(blkdev_varyio);
 
 /* tty routines */
 EXPORT_SYMBOL(tty_hangup);
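
Illustration only, not part of the patch: a minimal user-space sketch of the
access pattern this change is meant to speed up. It assumes a raw device has
already been bound with the raw(8) utility and that the underlying SCSI host
driver sets can_do_varyio; the device name (/dev/raw/raw1), the 64 KB transfer
size and the explicit 4 KB alignment below are placeholders, not values taken
from the patch. Raw IO still has to be sector aligned, but with iobuf->dovary
set, brw_kiovec() covers each 4 KB-aligned chunk with a single RAWIO_BLOCKSIZE
buffer_head submitted via submit_bh_blknr(), instead of one buffer_head per
sector-size block.

/*
 * Hypothetical test program - not part of the patch.  Reads 64 KB from a
 * previously bound raw device through a page-aligned buffer, the case the
 * varyio path can service with 4 KB buffer_heads.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define IO_SIZE	(64 * 1024)		/* multiple of RAWIO_BLOCKSIZE */

int main(void)
{
	void *buf;
	ssize_t n;
	int fd;

	fd = open("/dev/raw/raw1", O_RDONLY);	/* placeholder device */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* 4 KB alignment keeps every chunk on a RAWIO_BLOCKSIZE boundary. */
	if (posix_memalign(&buf, 4096, IO_SIZE)) {
		fprintf(stderr, "posix_memalign failed\n");
		close(fd);
		return 1;
	}

	n = read(fd, buf, IO_SIZE);	/* device offset 0 is also aligned */
	if (n < 0)
		perror("read");
	else
		printf("read %zd bytes\n", n);

	free(buf);
	close(fd);
	return 0;
}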