aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@athlon.transmeta.com>2002-02-04 23:59:17 -0800
committerLinus Torvalds <torvalds@athlon.transmeta.com>2002-02-04 23:59:17 -0800
commit51f4a834d5661caada395e99fd713065f04bfc69 (patch)
tree3c409bfa49dd93aefccd9a05dcec76899c059828
parentfe0976511d3b5cf2894da54bc451e561bd6b1482 (diff)
downloadhistory-51f4a834d5661caada395e99fd713065f04bfc69.tar.gz
v2.5.0.11 -> v2.5.1v2.5.1
- Al Viro: floppy_eject cleanup, mount cleanups - Jens Axboe: bio updates - Ingo Molnar: mempool fixes - GOTO Masanori: Fix O_DIRECT error handling
-rw-r--r--Makefile2
-rw-r--r--arch/m68k/kernel/setup.c6
-rw-r--r--arch/m68k/q40/config.c1
-rw-r--r--arch/ppc/kernel/apus_setup.c7
-rw-r--r--arch/sparc64/kernel/iommu_common.c23
-rw-r--r--arch/sparc64/kernel/iommu_common.h5
-rw-r--r--drivers/acorn/block/fd1772.c4
-rw-r--r--drivers/block/DAC960.c9
-rw-r--r--drivers/block/acsi.c4
-rw-r--r--drivers/block/amiflop.c4
-rw-r--r--drivers/block/ataflop.c4
-rw-r--r--drivers/block/block_ioctl.c3
-rw-r--r--drivers/block/cciss.c11
-rw-r--r--drivers/block/cpqarray.c11
-rw-r--r--drivers/block/floppy.c19
-rw-r--r--drivers/block/ll_rw_blk.c240
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/block/paride/pd.c38
-rw-r--r--drivers/block/paride/pf.c3
-rw-r--r--drivers/block/ps2esdi.c9
-rw-r--r--drivers/block/swim3.c13
-rw-r--r--drivers/block/swim_iop.c4
-rw-r--r--drivers/block/xd.c4
-rw-r--r--drivers/block/z2ram.c4
-rw-r--r--drivers/fc4/fc.c8
-rw-r--r--drivers/fc4/soc.c5
-rw-r--r--drivers/fc4/soc.h1
-rw-r--r--drivers/fc4/socal.c5
-rw-r--r--drivers/fc4/socal.h1
-rw-r--r--drivers/ide/hd.c52
-rw-r--r--drivers/ide/ide-dma.c4
-rw-r--r--drivers/ide/ide-probe.c7
-rw-r--r--drivers/ide/ide.c2
-rw-r--r--drivers/message/fusion/mptctl.c1
-rw-r--r--drivers/message/fusion/mptscsih.c38
-rw-r--r--drivers/message/i2o/i2o_block.c9
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/README.ncr53c8xx6
-rw-r--r--drivers/scsi/aic7xxx_old.c4
-rw-r--r--drivers/scsi/esp.c45
-rw-r--r--drivers/scsi/esp.h4
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/hosts.h1
-rw-r--r--drivers/scsi/ncr53c8xx.c13
-rw-r--r--drivers/scsi/ncr53c8xx.h2
-rw-r--r--drivers/scsi/qlogicfc.c9
-rw-r--r--drivers/scsi/qlogicisp.c5
-rw-r--r--drivers/scsi/qlogicpti.c4
-rw-r--r--drivers/scsi/qlogicpti.h1
-rw-r--r--drivers/scsi/scsi.c131
-rw-r--r--drivers/scsi/scsi.h12
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_dma.c450
-rw-r--r--drivers/scsi/scsi_ioctl.c13
-rw-r--r--drivers/scsi/scsi_lib.c25
-rw-r--r--drivers/scsi/scsi_merge.c688
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/scsi/scsi_syms.c4
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/sg.c12
-rw-r--r--drivers/scsi/sr.c125
-rw-r--r--drivers/scsi/sr_ioctl.c8
-rw-r--r--drivers/scsi/sr_vendor.c8
-rw-r--r--drivers/scsi/sym53c8xx.c17
-rw-r--r--drivers/scsi/sym53c8xx.h6
-rw-r--r--drivers/scsi/sym53c8xx_2/ChangeLog.txt18
-rw-r--r--drivers/scsi/sym53c8xx_2/sym53c8xx.h10
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c17
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.h3
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c16
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.h3
-rw-r--r--drivers/scsi/sym53c8xx_comm.h4
-rw-r--r--drivers/scsi/sym53c8xx_defs.h4
-rw-r--r--fs/adfs/adfs.h2
-rw-r--r--fs/adfs/dir_f.c2
-rw-r--r--fs/adfs/dir_fplus.c4
-rw-r--r--fs/adfs/inode.c2
-rw-r--r--fs/adfs/super.c7
-rw-r--r--fs/affs/file.c4
-rw-r--r--fs/affs/super.c2
-rw-r--r--fs/bfs/dir.c6
-rw-r--r--fs/bfs/file.c2
-rw-r--r--fs/bfs/inode.c8
-rw-r--r--fs/bio.c21
-rw-r--r--fs/cramfs/inode.c2
-rw-r--r--fs/efs/dir.c2
-rw-r--r--fs/efs/file.c2
-rw-r--r--fs/efs/inode.c6
-rw-r--r--fs/efs/namei.c2
-rw-r--r--fs/efs/super.c13
-rw-r--r--fs/efs/symlink.c4
-rw-r--r--fs/ext2/balloc.c2
-rw-r--r--fs/ext2/ialloc.c3
-rw-r--r--fs/ext2/inode.c13
-rw-r--r--fs/ext2/super.c8
-rw-r--r--fs/ext3/balloc.c8
-rw-r--r--fs/ext3/ialloc.c3
-rw-r--r--fs/ext3/inode.c19
-rw-r--r--fs/ext3/super.c8
-rw-r--r--fs/fat/buffer.c4
-rw-r--r--fs/fat/inode.c4
-rw-r--r--fs/freevxfs/vxfs_bmap.c5
-rw-r--r--fs/freevxfs/vxfs_inode.c2
-rw-r--r--fs/freevxfs/vxfs_subr.c4
-rw-r--r--fs/hfs/file.c9
-rw-r--r--fs/hfs/hfs.h2
-rw-r--r--fs/hfs/super.c4
-rw-r--r--fs/hfs/sysdep.c4
-rw-r--r--fs/hpfs/buffer.c18
-rw-r--r--fs/hpfs/file.c2
-rw-r--r--fs/hpfs/hpfs_fn.h2
-rw-r--r--fs/isofs/dir.c4
-rw-r--r--fs/isofs/inode.c26
-rw-r--r--fs/isofs/namei.c4
-rw-r--r--fs/isofs/rock.c4
-rw-r--r--fs/minix/bitmap.c4
-rw-r--r--fs/minix/inode.c10
-rw-r--r--fs/minix/itree_common.c8
-rw-r--r--fs/namespace.c6
-rw-r--r--fs/ncpfs/ncplib_kernel.c8
-rw-r--r--fs/ntfs/fs.c6
-rw-r--r--fs/ntfs/support.c2
-rw-r--r--fs/qnx4/bitmap.c6
-rw-r--r--fs/qnx4/dir.c2
-rw-r--r--fs/qnx4/fsync.c6
-rw-r--r--fs/qnx4/inode.c18
-rw-r--r--fs/reiserfs/fix_node.c2
-rw-r--r--fs/reiserfs/inode.c2
-rw-r--r--fs/reiserfs/journal.c43
-rw-r--r--fs/reiserfs/resize.c2
-rw-r--r--fs/reiserfs/stree.c2
-rw-r--r--fs/romfs/inode.c10
-rw-r--r--fs/sysv/balloc.c6
-rw-r--r--fs/sysv/ialloc.c2
-rw-r--r--fs/sysv/itree.c13
-rw-r--r--fs/sysv/super.c16
-rw-r--r--fs/udf/balloc.c11
-rw-r--r--fs/udf/dir.c4
-rw-r--r--fs/udf/directory.c10
-rw-r--r--fs/udf/file.c9
-rw-r--r--fs/udf/inode.c34
-rw-r--r--fs/udf/misc.c20
-rw-r--r--fs/udf/namei.c12
-rw-r--r--fs/udf/partition.c2
-rw-r--r--fs/udf/super.c15
-rw-r--r--fs/udf/symlink.c5
-rw-r--r--fs/udf/udfdecl.h4
-rw-r--r--fs/ufs/balloc.c4
-rw-r--r--fs/ufs/cylinder.c2
-rw-r--r--fs/ufs/dir.c2
-rw-r--r--fs/ufs/inode.c19
-rw-r--r--fs/ufs/super.c9
-rw-r--r--fs/ufs/truncate.c14
-rw-r--r--fs/ufs/util.c8
-rw-r--r--fs/ufs/util.h6
-rw-r--r--include/asm-i386/io.h7
-rw-r--r--include/asm-m68k/machdep.h1
-rw-r--r--include/asm-sparc64/dma.h8
-rw-r--r--include/asm-sparc64/io.h10
-rw-r--r--include/linux/amigaffs.h11
-rw-r--r--include/linux/bio.h26
-rw-r--r--include/linux/blkdev.h33
-rw-r--r--include/linux/blkdev.h.orig371
-rw-r--r--include/linux/fd.h6
-rw-r--r--include/linux/fs.h14
-rw-r--r--include/linux/highmem.h3
-rw-r--r--include/linux/iso_fs.h6
-rw-r--r--include/linux/qnx4_fs.h2
-rw-r--r--init/do_mounts.c15
-rw-r--r--kernel/ksyms.c2
-rw-r--r--kernel/signal.c8
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/highmem.c84
-rw-r--r--mm/mempool.c33
174 files changed, 1492 insertions, 2017 deletions
diff --git a/Makefile b/Makefile
index a62e69d0f39c9..a5aafb72c0801 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 5
SUBLEVEL = 1
-EXTRAVERSION =-pre11
+EXTRAVERSION =
KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 86007efb1d288..57a954d7e0194 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -93,7 +93,6 @@ void (*mach_power_off)( void ) = NULL;
long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-void (*mach_floppy_eject) (void) = NULL;
#endif
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int) = NULL;
@@ -514,11 +513,6 @@ void __init floppy_setup(char *str, int *ints)
mach_floppy_setup (str, ints);
}
-void floppy_eject(void)
-{
- if (mach_floppy_eject)
- mach_floppy_eject();
-}
#endif
/* for "kbd-reset" cmdline param */
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 23690901ce9b4..4989e67bc19df 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -36,7 +36,6 @@
#include <asm/q40_master.h>
#include <asm/keyboard.h>
-extern void floppy_eject(void);
extern void floppy_setup(char *str, int *ints);
extern int q40kbd_translate(unsigned char scancode, unsigned char *keycode,
diff --git a/arch/ppc/kernel/apus_setup.c b/arch/ppc/kernel/apus_setup.c
index bdbc452bc0e38..c3fe77cde972f 100644
--- a/arch/ppc/kernel/apus_setup.c
+++ b/arch/ppc/kernel/apus_setup.c
@@ -106,7 +106,6 @@ void (*mach_reset)( void );
long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
#if defined(CONFIG_AMIGA_FLOPPY)
void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-void (*mach_floppy_eject) (void) = NULL;
#endif
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int) = NULL;
@@ -404,12 +403,6 @@ void floppy_setup(char *str, int *ints)
if (mach_floppy_setup)
mach_floppy_setup (str, ints);
}
-
-void floppy_eject(void)
-{
- if (mach_floppy_eject)
- mach_floppy_eject();
-}
#endif
/*********************************************************** MEMORY */
diff --git a/arch/sparc64/kernel/iommu_common.c b/arch/sparc64/kernel/iommu_common.c
index e9d4bea7cc722..134be5cd793cf 100644
--- a/arch/sparc64/kernel/iommu_common.c
+++ b/arch/sparc64/kernel/iommu_common.c
@@ -1,4 +1,4 @@
-/* $Id: iommu_common.c,v 1.6 2001/10/09 02:24:33 davem Exp $
+/* $Id: iommu_common.c,v 1.8 2001/12/11 11:13:06 davem Exp $
* iommu_common.c: UltraSparc SBUS/PCI common iommu code.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -66,7 +66,9 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
daddr = dma_sg->dma_address;
sglen = sg->length;
- sgaddr = (unsigned long) sg->address;
+ sgaddr = (unsigned long) (sg->address ?
+ sg->address :
+ page_address(sg->page) + sg->offset);
while (dlen > 0) {
unsigned long paddr;
@@ -116,7 +118,9 @@ static int verify_one_map(struct scatterlist *dma_sg, struct scatterlist **__sg,
sg++;
if (--nents <= 0)
break;
- sgaddr = (unsigned long) sg->address;
+ sgaddr = (unsigned long) (sg->address ?
+ sg->address :
+ page_address(sg->page) + sg->offset);
sglen = sg->length;
}
if (dlen < 0) {
@@ -197,14 +201,21 @@ unsigned long prepare_sg(struct scatterlist *sg, int nents)
unsigned long prev;
u32 dent_addr, dent_len;
- prev = (unsigned long) sg->address;
+ prev = (unsigned long) (sg->address ?
+ sg->address :
+ page_address(sg->page) + sg->offset);
prev += (unsigned long) (dent_len = sg->length);
- dent_addr = (u32) ((unsigned long)sg->address & (IO_PAGE_SIZE - 1UL));
+ dent_addr = (u32) ((unsigned long)(sg->address ?
+ sg->address :
+ page_address(sg->page) + sg->offset)
+ & (IO_PAGE_SIZE - 1UL));
while (--nents) {
unsigned long addr;
sg++;
- addr = (unsigned long) sg->address;
+ addr = (unsigned long) (sg->address ?
+ sg->address :
+ page_address(sg->page) + sg->offset);
if (! VCONTIG(prev, addr)) {
dma_sg->dma_address = dent_addr;
dma_sg->dma_length = dent_len;
diff --git a/arch/sparc64/kernel/iommu_common.h b/arch/sparc64/kernel/iommu_common.h
index 4d0795ad4eff9..039744070ff67 100644
--- a/arch/sparc64/kernel/iommu_common.h
+++ b/arch/sparc64/kernel/iommu_common.h
@@ -1,4 +1,4 @@
-/* $Id: iommu_common.h,v 1.4 2001/10/09 02:24:33 davem Exp $
+/* $Id: iommu_common.h,v 1.5 2001/12/11 09:41:01 davem Exp $
* iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
@@ -6,8 +6,9 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
-#include <asm/page.h>
#include <asm/iommu.h>
#include <asm/scatterlist.h>
diff --git a/drivers/acorn/block/fd1772.c b/drivers/acorn/block/fd1772.c
index d9cf8b505ce68..1667797e2662d 100644
--- a/drivers/acorn/block/fd1772.c
+++ b/drivers/acorn/block/fd1772.c
@@ -1620,7 +1620,3 @@ int fd1772_init(void)
return 0;
}
-
-void floppy_eject(void)
-{
-}
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 55d3fbe7e7d47..0f0060dbb6a36 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1948,8 +1948,11 @@ static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
RequestQueue = BLK_DEFAULT_QUEUE(MajorNumber);
blk_init_queue(RequestQueue, DAC960_RequestFunction);
RequestQueue->queuedata = Controller;
- RequestQueue->max_segments = Controller->DriverScatterGatherLimit;
- RequestQueue->max_sectors = Controller->MaxBlocksPerCommand;
+ blk_queue_max_hw_segments(RequestQueue,
+ Controller->DriverScatterGatherLimit);
+ blk_queue_max_phys_segments(RequestQueue, ~0);
+ blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+
Controller->RequestQueue = RequestQueue;
/*
Initialize the Disk Partitions array, Partition Sizes array, Block Sizes
@@ -2889,7 +2892,7 @@ static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller,
Command->LogicalDriveNumber = DAC960_LogicalDriveNumber(Request->rq_dev);
Command->BlockNumber = Request->sector;
Command->BlockCount = Request->nr_sectors;
- Command->SegmentCount = Request->nr_segments;
+ Command->SegmentCount = Request->nr_phys_segments;
Command->BufferHeader = Request->bio;
Command->RequestBuffer = Request->buffer;
blkdev_dequeue_request(Request);
diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c
index 916a192e5e5ef..28e5ae8e04f53 100644
--- a/drivers/block/acsi.c
+++ b/drivers/block/acsi.c
@@ -253,6 +253,8 @@ static int CurrentNReq;
static int CurrentNSect;
static char *CurrentBuffer;
+static spinlock_t acsi_lock = SPIN_LOCK_UNLOCKED;
+
#define SET_TIMER() mod_timer(&acsi_timer, jiffies + ACSI_TIMEOUT)
#define CLEAR_TIMER() del_timer(&acsi_timer)
@@ -1784,7 +1786,7 @@ int acsi_init( void )
phys_acsi_buffer = virt_to_phys( acsi_buffer );
STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000;
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &acsi_lock);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */
add_gendisk(&acsi_gendisk);
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 45d30ce457d52..c2e353b88d9e2 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -174,6 +174,8 @@ static int writepending;
static int writefromint;
static char *raw_buf;
+static spinlock_t amiflop_lock = SPIN_LOCK_UNLOCKED;
+
#define RAW_BUF_SIZE 30000 /* size of raw disk data */
/*
@@ -1855,7 +1857,7 @@ int __init amiga_floppy_init(void)
post_write_timer.data = 0;
post_write_timer.function = post_write;
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &amiflop_lock);
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_size[MAJOR_NR] = floppy_sizes;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index eea26ff9e9d02..1386f3eba58de 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -156,6 +156,8 @@ static int StartDiskType[] = {
static int DriveType = TYPE_HD;
+static spinlock_t ataflop_lock = SPIN_LOCK_UNLOCKED;
+
/* Array for translating minors into disk formats */
static struct {
int index;
@@ -2013,7 +2015,7 @@ int __init atari_floppy_init (void)
blk_size[MAJOR_NR] = floppy_sizes;
blksize_size[MAJOR_NR] = floppy_blocksizes;
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &ataflop_lock);
printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
diff --git a/drivers/block/block_ioctl.c b/drivers/block/block_ioctl.c
index a894888483c97..75d71ca05c986 100644
--- a/drivers/block/block_ioctl.c
+++ b/drivers/block/block_ioctl.c
@@ -76,5 +76,8 @@ int block_ioctl(kdev_t dev, unsigned int cmd, unsigned long arg)
err = -ENOTTY;
}
+#if 0
+ blk_put_queue(q);
+#endif
return err;
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 74aca53678130..b038dde8b39d4 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1219,7 +1219,7 @@ queue:
goto startio;
creq = elv_next_request(q);
- if (creq->nr_segments > MAXSGENTRIES)
+ if (creq->nr_phys_segments > MAXSGENTRIES)
BUG();
if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR )
@@ -1866,9 +1866,16 @@ static int __init cciss_init_one(struct pci_dev *pdev,
q = BLK_DEFAULT_QUEUE(MAJOR_NR + i);
q->queuedata = hba[i];
+ spin_lock_init(&hba[i]->lock);
blk_init_queue(q, do_cciss_request, &hba[i]->lock);
blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
- blk_queue_max_segments(q, MAXSGENTRIES);
+
+ /* This is a hardware imposed limit. */
+ blk_queue_max_hw_segments(q, MAXSGENTRIES);
+
+ /* This is a limit in the driver and could be eliminated. */
+ blk_queue_max_phys_segments(q, MAXSGENTRIES);
+
blk_queue_max_sectors(q, 512);
/* fill in the other Kernel structs */
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 5f85cb0b5b6b9..5f2298ba9720d 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -467,9 +467,16 @@ int __init cpqarray_init(void)
q = BLK_DEFAULT_QUEUE(MAJOR_NR + i);
q->queuedata = hba[i];
+ spin_lock_init(&hba[i]->lock);
blk_init_queue(q, do_ida_request, &hba[i]->lock);
blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
- blk_queue_max_segments(q, SG_MAX);
+
+ /* This is a hardware imposed limit. */
+ blk_queue_max_hw_segments(q, SG_MAX);
+
+ /* This is a driver limit and could be eliminated. */
+ blk_queue_max_phys_segments(q, SG_MAX);
+
blksize_size[MAJOR_NR+i] = ida_blocksizes + (i*256);
read_ahead[MAJOR_NR+i] = READ_AHEAD;
@@ -864,7 +871,7 @@ queue_next:
goto startio;
creq = elv_next_request(q);
- if (creq->nr_segments > SG_MAX)
+ if (creq->nr_phys_segments > SG_MAX)
BUG();
if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR || h->ctlr > nr_ctlr)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 2417023debafe..2fcdcc59ace7e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -204,7 +204,7 @@ static int use_virtual_dma;
* record each buffers capabilities
*/
-static spinlock_t floppy_lock;
+static spinlock_t floppy_lock = SPIN_LOCK_UNLOCKED;
static unsigned short virtual_dma_port=0x3f0;
void floppy_interrupt(int irq, void *dev_id, struct pt_regs * regs);
@@ -4479,21 +4479,4 @@ MODULE_LICENSE("GPL");
__setup ("floppy=", floppy_setup);
module_init(floppy_init)
-
-/* eject the boot floppy (if we need the drive for a different root floppy) */
-/* This should only be called at boot time when we're sure that there's no
- * resource contention. */
-void floppy_eject(void)
-{
- int dummy;
- if (have_no_fdc)
- return;
- if(floppy_grab_irq_and_dma()==0)
- {
- lock_fdc(MAXTIMEOUT,0);
- dummy=fd_eject(0);
- process_fd_request();
- floppy_release_irq_and_dma();
- }
-}
#endif
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 9849061f045aa..e5c93889d2091 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -144,7 +144,8 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
/*
* set defaults
*/
- q->max_segments = MAX_SEGMENTS;
+ q->max_phys_segments = MAX_PHYS_SEGMENTS;
+ q->max_hw_segments = MAX_HW_SEGMENTS;
q->make_request_fn = mfn;
blk_queue_max_sectors(q, MAX_SECTORS);
blk_queue_hardsect_size(q, 512);
@@ -171,6 +172,18 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
static request_queue_t *last_q;
/*
+ * set appropriate bounce gfp mask -- unfortunately we don't have a
+ * full 4GB zone, so we have to resort to low memory for any bounces.
+ * ISA has its own < 16MB zone.
+ */
+ if (dma_addr == BLK_BOUNCE_ISA) {
+ init_emergency_isa_pool();
+ q->bounce_gfp = GFP_NOIO | GFP_DMA;
+ printk("isa pfn %lu, max low %lu, max %lu\n", bounce_pfn, blk_max_low_pfn, blk_max_pfn);
+ } else
+ q->bounce_gfp = GFP_NOHIGHIO;
+
+ /*
* keep this for debugging for now...
*/
if (dma_addr != BLK_BOUNCE_HIGH && q != last_q) {
@@ -178,7 +191,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
if (dma_addr == BLK_BOUNCE_ANY)
printk("no I/O memory limit\n");
else
- printk("I/O limit %luMb (mask 0x%Lx)\n", mb, (u64) dma_addr);
+ printk("I/O limit %luMb (mask 0x%Lx)\n", mb, (long long) dma_addr);
}
q->bounce_pfn = bounce_pfn;
@@ -201,17 +214,34 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
}
/**
- * blk_queue_max_segments - set max segments for a request for this queue
+ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
* @q: the request queue for the device
* @max_segments: max number of segments
*
* Description:
* Enables a low level driver to set an upper limit on the number of
- * data segments in a request
+ * physical data segments in a request. This would be the largest sized
+ * scatter list the driver could handle.
**/
-void blk_queue_max_segments(request_queue_t *q, unsigned short max_segments)
+void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
{
- q->max_segments = max_segments;
+ q->max_phys_segments = max_segments;
+}
+
+/**
+ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
+ * @q: the request queue for the device
+ * @max_segments: max number of segments
+ *
+ * Description:
+ * Enables a low level driver to set an upper limit on the number of
+ * hw data segments in a request. This would be the largest number of
+ * address/length pairs the host adapter can actually give as once
+ * to the device.
+ **/
+void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+{
+ q->max_hw_segments = max_segments;
}
/**
@@ -325,44 +355,78 @@ static int ll_10byte_cmd_build(request_queue_t *q, struct request *rq)
void blk_recount_segments(request_queue_t *q, struct bio *bio)
{
struct bio_vec *bv, *bvprv = NULL;
- int i, nr_segs, seg_size, cluster;
+ int i, nr_phys_segs, nr_hw_segs, seg_size, cluster;
if (unlikely(!bio->bi_io_vec))
return;
cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
- seg_size = nr_segs = 0;
+ seg_size = nr_phys_segs = nr_hw_segs = 0;
bio_for_each_segment(bv, bio, i) {
if (bvprv && cluster) {
- if (seg_size + bv->bv_len > q->max_segment_size)
+ int phys, seg;
+
+ if (seg_size + bv->bv_len > q->max_segment_size) {
+ nr_phys_segs++;
goto new_segment;
- if (!BIOVEC_MERGEABLE(bvprv, bv))
+ }
+
+ phys = BIOVEC_PHYS_MERGEABLE(bvprv, bv);
+ seg = BIOVEC_SEG_BOUNDARY(q, bvprv, bv);
+ if (!phys || !seg)
+ nr_phys_segs++;
+ if (!seg)
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+
+ if (!BIOVEC_VIRT_MERGEABLE(bvprv, bv))
goto new_segment;
seg_size += bv->bv_len;
bvprv = bv;
continue;
+ } else {
+ nr_phys_segs++;
}
new_segment:
- nr_segs++;
+ nr_hw_segs++;
bvprv = bv;
- seg_size = 0;
+ seg_size = bv->bv_len;
}
- bio->bi_hw_seg = nr_segs;
+ bio->bi_phys_segments = nr_phys_segs;
+ bio->bi_hw_segments = nr_hw_segs;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
-inline int blk_contig_segment(request_queue_t *q, struct bio *bio,
- struct bio *nxt)
+inline int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+ struct bio *nxt)
+{
+ if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+ return 0;
+
+ if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+ return 0;
+ if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+ return 0;
+
+ /*
+ * bio and nxt are contigous in memory, check if the queue allows
+ * these two to be merged into one
+ */
+ if (BIO_SEG_BOUNDARY(q, bio, nxt))
+ return 1;
+
+ return 0;
+}
+
+inline int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+ struct bio *nxt)
{
if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
return 0;
- if (!BIO_CONTIG(bio, nxt))
+ if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
return 0;
if (bio->bi_size + nxt->bi_size > q->max_segment_size)
return 0;
@@ -379,7 +443,7 @@ inline int blk_contig_segment(request_queue_t *q, struct bio *bio,
/*
* map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_segments entries
+ * must make sure sg can hold rq->nr_phys_segments entries
*/
int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
{
@@ -405,7 +469,7 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg
if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
goto new_segment;
- if (!BIOVEC_MERGEABLE(bvprv, bvec))
+ if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
goto new_segment;
if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
goto new_segment;
@@ -413,11 +477,6 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg
sg[nsegs - 1].length += nbytes;
} else {
new_segment:
- if (nsegs >= q->max_segments) {
- printk("map: %d >= %d, i %d, segs %d, size %ld\n", nsegs, q->max_segments, i, rq->nr_segments, rq->nr_sectors);
- BUG();
- }
-
sg[nsegs].address = NULL;
sg[nsegs].page = bvec->bv_page;
sg[nsegs].length = nbytes;
@@ -436,18 +495,44 @@ new_segment:
* the standard queue merge functions, can be overridden with device
* specific ones if so desired
*/
-static inline int ll_new_segment(request_queue_t *q, struct request *req,
- struct bio *bio)
+
+static inline int ll_new_mergeable(request_queue_t *q,
+ struct request *req,
+ struct bio *bio)
{
- int nr_segs = bio_hw_segments(q, bio);
+ int nr_phys_segs = bio_phys_segments(q, bio);
- if (req->nr_segments + nr_segs <= q->max_segments) {
- req->nr_segments += nr_segs;
- return 1;
+ if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+ req->flags |= REQ_NOMERGE;
+ return 0;
}
- req->flags |= REQ_NOMERGE;
- return 0;
+ /*
+ * A hw segment is just getting larger, bump just the phys
+ * counter.
+ */
+ req->nr_phys_segments += nr_phys_segs;
+ return 1;
+}
+
+static inline int ll_new_hw_segment(request_queue_t *q,
+ struct request *req,
+ struct bio *bio)
+{
+ int nr_hw_segs = bio_hw_segments(q, bio);
+
+ if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments) {
+ req->flags |= REQ_NOMERGE;
+ return 0;
+ }
+
+ /*
+ * This will form the start of a new hw segment. Bump both
+ * counters.
+ */
+ req->nr_hw_segments += nr_hw_segs;
+ req->nr_phys_segments += bio_phys_segments(q, bio);
+ return 1;
}
static int ll_back_merge_fn(request_queue_t *q, struct request *req,
@@ -458,7 +543,11 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
return 0;
}
- return ll_new_segment(q, req, bio);
+ if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail),
+ __BVEC_START(bio)))
+ return ll_new_mergeable(q, req, bio);
+
+ return ll_new_hw_segment(q, req, bio);
}
static int ll_front_merge_fn(request_queue_t *q, struct request *req,
@@ -469,21 +558,49 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
return 0;
}
- return ll_new_segment(q, req, bio);
+ if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio),
+ __BVEC_START(req->bio)))
+ return ll_new_mergeable(q, req, bio);
+
+ return ll_new_hw_segment(q, req, bio);
}
static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
struct request *next)
{
- int total_segments = req->nr_segments + next->nr_segments;
+ int total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+ int total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
- if (blk_contig_segment(q, req->biotail, next->bio))
- total_segments--;
+ /*
+ * First check if the either of the requests are re-queued
+ * requests. Can't merge them if they are.
+ */
+ if (req->special || next->special)
+ return 0;
+
+ /*
+ * Will it become to large?
+ */
+ if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+ return 0;
+
+ total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+ if (blk_phys_contig_segment(q, req->biotail, next->bio))
+ total_phys_segments--;
+
+ if (total_phys_segments > q->max_phys_segments)
+ return 0;
+
+ total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+ if (blk_hw_contig_segment(q, req->biotail, next->bio))
+ total_hw_segments--;
- if (total_segments > q->max_segments)
+ if (total_hw_segments > q->max_hw_segments)
return 0;
- req->nr_segments = total_segments;
+ /* Merge is OK... */
+ req->nr_phys_segments = total_phys_segments;
+ req->nr_hw_segments = total_hw_segments;
return 1;
}
@@ -1107,7 +1224,7 @@ get_rq:
req->hard_sector = req->sector = sector;
req->hard_nr_sectors = req->nr_sectors = nr_sectors;
req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
- req->nr_segments = bio->bi_vcnt;
+ req->nr_phys_segments = bio_phys_segments(q, bio);
req->nr_hw_segments = bio_hw_segments(q, bio);
req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL;
@@ -1201,7 +1318,7 @@ void generic_make_request(struct bio *bio)
printk(KERN_INFO "%s: rw=%ld, want=%ld, limit=%Lu\n",
kdevname(bio->bi_dev), bio->bi_rw,
(sector + nr_sectors)>>1,
- (u64) blk_size[major][minor]);
+ (long long) blk_size[major][minor]);
}
set_bit(BIO_EOF, &bio->bi_flags);
goto end_io;
@@ -1221,7 +1338,7 @@ void generic_make_request(struct bio *bio)
if (!q) {
printk(KERN_ERR
"generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n",
- kdevname(bio->bi_dev), (u64) bio->bi_sector);
+ kdevname(bio->bi_dev), (long long) bio->bi_sector);
end_io:
bio->bi_end_io(bio, nr_sectors);
break;
@@ -1433,7 +1550,27 @@ sorry:
extern int stram_device_init (void);
#endif
-inline void blk_recalc_request(struct request *rq, int nsect)
+inline void blk_recalc_rq_segments(struct request *rq)
+{
+ struct bio *bio;
+ int nr_phys_segs, nr_hw_segs;
+
+ rq->buffer = bio_data(rq->bio);
+
+ nr_phys_segs = nr_hw_segs = 0;
+ rq_for_each_bio(bio, rq) {
+ /* Force bio hw/phys segs to be recalculated. */
+ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
+ nr_phys_segs += bio_phys_segments(rq->q, bio);
+ nr_hw_segs += bio_hw_segments(rq->q, bio);
+ }
+
+ rq->nr_phys_segments = nr_phys_segs;
+ rq->nr_hw_segments = nr_hw_segs;
+}
+
+inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
rq->hard_sector += nsect;
rq->hard_nr_sectors -= nsect;
@@ -1451,8 +1588,6 @@ inline void blk_recalc_request(struct request *rq, int nsect)
printk("blk: request botched\n");
rq->nr_sectors = rq->current_nr_sectors;
}
-
- rq->buffer = bio_data(rq->bio);
}
/**
@@ -1495,7 +1630,8 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
bio->bi_size -= residual;
bio_iovec(bio)->bv_offset += residual;
bio_iovec(bio)->bv_len -= residual;
- blk_recalc_request(req, nr_sectors);
+ blk_recalc_rq_sectors(req, nr_sectors);
+ blk_recalc_rq_segments(req);
return 1;
}
@@ -1518,13 +1654,15 @@ int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
}
if ((bio = req->bio)) {
- blk_recalc_request(req, nsect);
+ blk_recalc_rq_sectors(req, nsect);
/*
* end more in this run, or just return 'not-done'
*/
- if (unlikely(nr_sectors <= 0))
+ if (unlikely(nr_sectors <= 0)) {
+ blk_recalc_rq_segments(req);
return 1;
+ }
}
}
@@ -1605,7 +1743,8 @@ EXPORT_SYMBOL(generic_unplug_device);
EXPORT_SYMBOL(blk_attempt_remerge);
EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_queue_max_sectors);
-EXPORT_SYMBOL(blk_queue_max_segments);
+EXPORT_SYMBOL(blk_queue_max_phys_segments);
+EXPORT_SYMBOL(blk_queue_max_hw_segments);
EXPORT_SYMBOL(blk_queue_max_segment_size);
EXPORT_SYMBOL(blk_queue_hardsect_size);
EXPORT_SYMBOL(blk_queue_segment_boundary);
@@ -1613,5 +1752,6 @@ EXPORT_SYMBOL(blk_rq_map_sg);
EXPORT_SYMBOL(blk_nohighio);
EXPORT_SYMBOL(blk_dump_rq_flags);
EXPORT_SYMBOL(submit_bio);
-EXPORT_SYMBOL(blk_contig_segment);
EXPORT_SYMBOL(blk_queue_assign_lock);
+EXPORT_SYMBOL(blk_phys_contig_segment);
+EXPORT_SYMBOL(blk_hw_contig_segment);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c16b6163af895..38b2514d71d08 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -62,7 +62,7 @@ static u64 nbd_bytesizes[MAX_NBD];
static struct nbd_device nbd_dev[MAX_NBD];
static devfs_handle_t devfs_handle;
-static spinlock_t nbd_lock;
+static spinlock_t nbd_lock = SPIN_LOCK_UNLOCKED;
#define DEBUG( s )
/* #define DEBUG( s ) printk( s )
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 1430fcb800109..8e1374c18d66c 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -166,6 +166,8 @@ static int pd_drive_count;
#include <asm/uaccess.h>
+static spinlock_t pd_lock = SPIN_LOCK_UNLOCKED;
+
#ifndef MODULE
#include "setup.h"
@@ -394,7 +396,7 @@ int pd_init (void)
return -1;
}
q = BLK_DEFAULT_QUEUE(MAJOR_NR);
- blk_init_queue(q, DEVICE_REQUEST);
+ blk_init_queue(q, DEVICE_REQUEST, &pd_lock);
blk_queue_max_sectors(q, cluster);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
@@ -875,9 +877,9 @@ static void pd_next_buf( int unit )
{ long saved_flags;
- spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+ spin_lock_irqsave(&pd_lock,saved_flags);
end_request(1);
- if (!pd_run) { spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+ if (!pd_run) { spin_unlock_irqrestore(&pd_lock,saved_flags);
return;
}
@@ -893,7 +895,7 @@ static void pd_next_buf( int unit )
pd_count = CURRENT->current_nr_sectors;
pd_buf = CURRENT->buffer;
- spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+ spin_unlock_irqrestore(&pd_lock,saved_flags);
}
static void do_pd_read( void )
@@ -916,11 +918,11 @@ static void do_pd_read_start( void )
pi_do_claimed(PI,do_pd_read_start);
return;
}
- spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+ spin_lock_irqsave(&pd_lock,saved_flags);
end_request(0);
pd_busy = 0;
do_pd_request(NULL);
- spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+ spin_unlock_irqrestore(&pd_lock,saved_flags);
return;
}
pd_ide_command(unit,IDE_READ,pd_block,pd_run);
@@ -940,11 +942,11 @@ static void do_pd_read_drq( void )
pi_do_claimed(PI,do_pd_read_start);
return;
}
- spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+ spin_lock_irqsave(&pd_lock,saved_flags);
end_request(0);
pd_busy = 0;
do_pd_request(NULL);
- spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+ spin_unlock_irqrestore(&pd_lock,saved_flags);
return;
}
pi_read_block(PI,pd_buf,512);
@@ -955,11 +957,11 @@ static void do_pd_read_drq( void )
if (!pd_count) pd_next_buf(unit);
}
pi_disconnect(PI);
- spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+ spin_lock_irqsave(&pd_lock,saved_flags);
end_request(1);
pd_busy = 0;
do_pd_request(NULL);
- spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+ spin_unlock_irqrestore(&pd_lock,saved_flags);
}
static void do_pd_write( void )
@@ -982,11 +984,11 @@ static void do_pd_write_start( void )
pi_do_claimed(PI,do_pd_write_start);
return;
}
- spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+ spin_lock_irqsave(&pd_lock,saved_flags);
end_request(0);
pd_busy = 0;
do_pd_request(NULL);
- spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+ spin_unlock_irqrestore(&pd_lock,saved_flags);
return;
}
pd_ide_command(unit,IDE_WRITE,pd_block,pd_run);
@@ -998,11 +1000,11 @@ static void do_pd_write_start( void )
pi_do_claimed(PI,do_pd_write_start);
return;
}
- spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+ spin_lock_irqsave(&pd_lock,saved_flags);
end_request(0);
pd_busy = 0;
do_pd_request(NULL);
- spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+ spin_unlock_irqrestore(&pd_lock,saved_flags);
return;
}
pi_write_block(PI,pd_buf,512);
@@ -1027,19 +1029,19 @@ static void do_pd_write_done( void )
pi_do_claimed(PI,do_pd_write_start);
return;
}
- spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+ spin_lock_irqsave(&pd_lock,saved_flags);
end_request(0);
pd_busy = 0;
do_pd_request(NULL);
- spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+ spin_unlock_irqrestore(&pd_lock,saved_flags);
return;
}
pi_disconnect(PI);
- spin_lock_irqsave(&QUEUE->queue_lock,saved_flags);
+ spin_lock_irqsave(&pd_lock,saved_flags);
end_request(1);
pd_busy = 0;
do_pd_request(NULL);
- spin_unlock_irqrestore(&QUEUE->queue_lock,saved_flags);
+ spin_unlock_irqrestore(&pd_lock,saved_flags);
}
/* end of pd.c */
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index e49565417eda4..c83901c70d366 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -361,7 +361,8 @@ int pf_init (void) /* preliminary initialisation */
}
q = BLK_DEFAULT_QUEUE(MAJOR_NR);
blk_init_queue(q, DEVICE_REQUEST, &pf_spin_lock);
- blk_queue_max_segments(q, cluster);
+ blk_queue_max_phys_segments(q, cluster);
+ blk_queue_max_hw_segments(q, cluster);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
for (i=0;i<PF_UNITS;i++) pf_blocksizes[i] = 1024;
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
index b248b437bf748..dec02f0f832eb 100644
--- a/drivers/block/ps2esdi.c
+++ b/drivers/block/ps2esdi.c
@@ -66,8 +66,6 @@
#define TYPE_0_CMD_BLK_LENGTH 2
#define TYPE_1_CMD_BLK_LENGTH 4
-#define PS2ESDI_LOCK (&((BLK_DEFAULT_QUEUE(MAJOR_NR))->queue_lock))
-
static void reset_ctrl(void);
int ps2esdi_init(void);
@@ -130,6 +128,7 @@ static int intg_esdi = 0; /* If integrated adapter */
struct ps2esdi_i_struct {
unsigned int head, sect, cyl, wpcom, lzone, ctl;
};
+static spinlock_t ps2esdi_lock = SPIN_LOCK_UNLOCKED;
#if 0
#if 0 /* try both - I don't know which one is better... UB */
@@ -180,7 +179,7 @@ int __init ps2esdi_init(void)
return -1;
}
/* set up some global information - indicating device specific info */
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &ps2esdi_lock);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
/* some minor housekeeping - setup the global gendisk structure */
@@ -954,10 +953,10 @@ static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
break;
}
if(ending != -1) {
- spin_lock_irqsave(PS2ESDI_LOCK, flags);
+ spin_lock_irqsave(&ps2esdi_lock, flags);
end_request(ending);
do_ps2esdi_request(BLK_DEFAULT_QUEUE(MAJOR_NR));
- spin_unlock_irqrestore(PS2ESDI_LOCK, flags);
+ spin_unlock_irqrestore(&ps2esdi_lock, flags);
}
} /* handle interrupts */
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index f4dee49d40f29..ad3ead3611b5d 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -203,6 +203,7 @@ struct floppy_state {
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
+static spinlock_t swim3_lock = SPIN_LOCK_UNLOCKED;
static unsigned short write_preamble[] = {
0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, /* gap field */
@@ -807,16 +808,6 @@ static int fd_eject(struct floppy_state *fs)
return err;
}
-int swim3_fd_eject(int devnum)
-{
- if (devnum >= floppy_count)
- return -ENODEV;
- /* Do not check this - this function should ONLY be called early
- * in the boot process! */
- /* if (floppy_states[devnum].ref_count != 1) return -EBUSY; */
- return fd_eject(&floppy_states[devnum]);
-}
-
static struct floppy_struct floppy_type =
{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */
@@ -1041,7 +1032,7 @@ int swim3_init(void)
MAJOR_NR);
return -EBUSY;
}
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST,&swim3_lock);
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_size[MAJOR_NR] = floppy_sizes;
}
diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c
index 29b8f8213dea1..0fbe21e399869 100644
--- a/drivers/block/swim_iop.c
+++ b/drivers/block/swim_iop.c
@@ -84,6 +84,8 @@ static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_blocksizes[2] = {512,512};
static int floppy_sizes[2] = {2880,2880};
+static spinlock_t swim_iop_lock = SPIN_LOCK_UNLOCKED;
+
static char *drive_names[7] = {
"not installed", /* DRV_NONE */
"unknown (1)", /* DRV_UNKNOWN */
@@ -147,7 +149,7 @@ int swimiop_init(void)
MAJOR_NR);
return -EBUSY;
}
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &swim_iop_lock);
blksize_size[MAJOR_NR] = floppy_blocksizes;
blk_size[MAJOR_NR] = floppy_sizes;
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 4357b317b28c4..55587b4640ad8 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -122,6 +122,8 @@ static struct hd_struct xd_struct[XD_MAXDRIVES << 6];
static int xd_sizes[XD_MAXDRIVES << 6], xd_access[XD_MAXDRIVES];
static int xd_blocksizes[XD_MAXDRIVES << 6];
+static spinlock_t xd_lock = SPIN_LOCK_UNLOCKED;
+
extern struct block_device_operations xd_fops;
static struct gendisk xd_gendisk = {
@@ -170,7 +172,7 @@ int __init xd_init (void)
return -1;
}
devfs_handle = devfs_mk_dir (NULL, xd_gendisk.major_name, NULL);
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &xd_lock);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read ahead */
add_gendisk(&xd_gendisk);
xd_geninit();
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index e050e31ce7058..f7d35d7305d8f 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -68,6 +68,8 @@ static int chip_count = 0;
static int list_count = 0;
static int current_device = -1;
+static spinlock_t z2ram_lock = SPIN_LOCK_UNLOCKED;
+
static void
do_z2_request( request_queue_t * q )
{
@@ -364,7 +366,7 @@ z2_init( void )
}
}
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &z2ram_lock);
blksize_size[ MAJOR_NR ] = z2_blocksizes;
blk_size[ MAJOR_NR ] = z2_sizes;
diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c
index 6a128292379a7..9068ede28f2b6 100644
--- a/drivers/fc4/fc.c
+++ b/drivers/fc4/fc.c
@@ -767,8 +767,12 @@ void fcp_release(fc_channel *fcchain, int count) /* count must > 0 */
static void fcp_scsi_done (Scsi_Cmnd *SCpnt)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&SCpnt->host->host_lock, flags);
if (FCP_CMND(SCpnt)->done)
FCP_CMND(SCpnt)->done(SCpnt);
+ spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
}
static int fcp_scsi_queue_it(fc_channel *fc, Scsi_Cmnd *SCpnt, fcp_cmnd *fcmd, int prepare)
@@ -913,8 +917,12 @@ int fcp_scsi_abort(Scsi_Cmnd *SCpnt)
*/
if (++fc->abort_count < (fc->can_queue >> 1)) {
+ unsigned long flags;
+
SCpnt->result = DID_ABORT;
+ spin_lock_irqsave(&SCpnt->host->host_lock, flags);
fcmd->done(SCpnt);
+ spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
printk("FC: soft abort\n");
return SUCCESS;
} else {
diff --git a/drivers/fc4/soc.c b/drivers/fc4/soc.c
index 924de1cb7eb05..19aee0628adfb 100644
--- a/drivers/fc4/soc.c
+++ b/drivers/fc4/soc.c
@@ -341,14 +341,14 @@ static void soc_intr(int irq, void *dev_id, struct pt_regs *regs)
unsigned long flags;
register struct soc *s = (struct soc *)dev_id;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&s->lock, flags);
cmd = sbus_readl(s->regs + CMD);
for (; (cmd = SOC_INTR (s, cmd)); cmd = sbus_readl(s->regs + CMD)) {
if (cmd & SOC_CMD_RSP_Q1) soc_unsolicited (s);
if (cmd & SOC_CMD_RSP_Q0) soc_solicited (s);
if (cmd & SOC_CMD_REQ_QALL) soc_request (s, cmd);
}
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&s->lock, flags);
}
#define TOKEN(proto, port, token) (((proto)<<12)|(token)|(port))
@@ -559,6 +559,7 @@ static inline void soc_init(struct sbus_dev *sdev, int no)
if (s == NULL)
return;
memset (s, 0, sizeof(struct soc));
+ spin_lock_init(&s->lock);
s->soc_no = no;
SOD(("socs %08lx soc_intr %08lx soc_hw_enque %08x\n",
diff --git a/drivers/fc4/soc.h b/drivers/fc4/soc.h
index 740e1a3955296..c9c6d1d9d9591 100644
--- a/drivers/fc4/soc.h
+++ b/drivers/fc4/soc.h
@@ -265,6 +265,7 @@ typedef struct {
} soc_cq;
struct soc {
+ spinlock_t lock;
soc_port port[2]; /* Every SOC has one or two FC ports */
soc_cq req[2]; /* Request CQs */
soc_cq rsp[2]; /* Response CQs */
diff --git a/drivers/fc4/socal.c b/drivers/fc4/socal.c
index bec5167335861..447a4de67f6a2 100644
--- a/drivers/fc4/socal.c
+++ b/drivers/fc4/socal.c
@@ -411,7 +411,7 @@ static void socal_intr(int irq, void *dev_id, struct pt_regs *regs)
unsigned long flags;
register struct socal *s = (struct socal *)dev_id;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&s->lock, flags);
cmd = sbus_readl(s->regs + CMD);
for (; (cmd = SOCAL_INTR (s, cmd)); cmd = sbus_readl(s->regs + CMD)) {
#ifdef SOCALDEBUG
@@ -428,7 +428,7 @@ static void socal_intr(int irq, void *dev_id, struct pt_regs *regs)
if (cmd & SOCAL_CMD_REQ_QALL)
socal_request (s, cmd);
}
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&s->lock, flags);
}
#define TOKEN(proto, port, token) (((proto)<<12)|(token)|(port))
@@ -667,6 +667,7 @@ static inline void socal_init(struct sbus_dev *sdev, int no)
s = kmalloc (sizeof (struct socal), GFP_KERNEL);
if (!s) return;
memset (s, 0, sizeof(struct socal));
+ spin_lock_init(&s->lock);
s->socal_no = no;
SOD(("socals %08lx socal_intr %08lx socal_hw_enque %08lx\n",
diff --git a/drivers/fc4/socal.h b/drivers/fc4/socal.h
index 8e8c7f4519829..a853fad92a755 100644
--- a/drivers/fc4/socal.h
+++ b/drivers/fc4/socal.h
@@ -290,6 +290,7 @@ typedef struct {
} socal_cq;
struct socal {
+ spinlock_t lock;
socal_port port[2]; /* Every SOCAL has one or two FC ports */
socal_cq req[4]; /* Request CQs */
socal_cq rsp[4]; /* Response CQs */
diff --git a/drivers/ide/hd.c b/drivers/ide/hd.c
index 38c0777473eaa..08485cf66fe35 100644
--- a/drivers/ide/hd.c
+++ b/drivers/ide/hd.c
@@ -62,6 +62,8 @@
#define HD_IRQ IRQ_HARDDISK
#endif
+static spinlock_t hd_lock = SPIN_LOCK_UNLOCKED;
+
static int revalidate_hddisk(kdev_t, int);
#define HD_DELAY 0
@@ -106,7 +108,7 @@ static int NR_HD;
static struct hd_struct hd[MAX_HD<<6];
static int hd_sizes[MAX_HD<<6];
static int hd_blocksizes[MAX_HD<<6];
-static int hd_hardsectsizes[MAX_HD<<6];
+
static struct timer_list device_timer;
@@ -464,7 +466,7 @@ ok_to_write:
i = --CURRENT->nr_sectors;
--CURRENT->current_nr_sectors;
CURRENT->buffer += 512;
- if (!i || (CURRENT->bh && !SUBSECTOR(i)))
+ if (!i || (CURRENT->bio && !SUBSECTOR(i)))
end_request(1);
if (i > 0) {
SET_INTR(&write_intr);
@@ -586,24 +588,29 @@ repeat:
dev+'a', (CURRENT->cmd == READ)?"read":"writ",
cyl, head, sec, nsect, (unsigned long) CURRENT->buffer);
#endif
- if (CURRENT->cmd == READ) {
- hd_out(dev,nsect,sec,head,cyl,WIN_READ,&read_intr);
- if (reset)
- goto repeat;
- return;
- }
- if (CURRENT->cmd == WRITE) {
- hd_out(dev,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
- if (reset)
- goto repeat;
- if (wait_DRQ()) {
- bad_rw_intr();
- goto repeat;
+ if(CURRENT->flags & REQ_CMD) {
+ switch (rq_data_dir(CURRENT)) {
+ case READ:
+ hd_out(dev,nsect,sec,head,cyl,WIN_READ,&read_intr);
+ if (reset)
+ goto repeat;
+ break;
+ case WRITE:
+ hd_out(dev,nsect,sec,head,cyl,WIN_WRITE,&write_intr);
+ if (reset)
+ goto repeat;
+ if (wait_DRQ()) {
+ bad_rw_intr();
+ goto repeat;
+ }
+ outsw(HD_DATA,CURRENT->buffer,256);
+ break;
+ default:
+ printk("unknown hd-command\n");
+ end_request(0);
+ break;
}
- outsw(HD_DATA,CURRENT->buffer,256);
- return;
}
- panic("unknown hd-command");
}
static void do_hd_request (request_queue_t * q)
@@ -723,12 +730,11 @@ static void __init hd_geninit(void)
{
int drive;
- for(drive=0; drive < (MAX_HD << 6); drive++) {
+ for(drive=0; drive < (MAX_HD << 6); drive++)
hd_blocksizes[drive] = 1024;
- hd_hardsectsizes[drive] = 512;
- }
+
blksize_size[MAJOR_NR] = hd_blocksizes;
- hardsect_size[MAJOR_NR] = hd_hardsectsizes;
+ blk_queue_hardsect_size(QUEUE, 512);
#ifdef __i386__
if (!NR_HD) {
@@ -830,7 +836,7 @@ int __init hd_init(void)
printk("hd: unable to get major %d for hard disk\n",MAJOR_NR);
return -1;
}
- blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST);
+ blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), DEVICE_REQUEST, &hd_lock);
blk_queue_max_sectors(BLK_DEFAULT_QUEUE(MAJOR_NR), 255);
read_ahead[MAJOR_NR] = 8; /* 8 sector (4kB) read-ahead */
add_gendisk(&hd_gendisk);
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index ecdcd85d5d28e..fb638ca306585 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -232,8 +232,8 @@ static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq)
nents = blk_rq_map_sg(q, rq, hwif->sg_table);
- if (rq->q && nents > rq->nr_segments)
- printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents);
+ if (rq->q && nents > rq->nr_phys_segments)
+ printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents);
if (rq_data_dir(rq) == READ)
hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 6201c2d1600d0..3f93dcc90fd31 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -608,8 +608,11 @@ static void ide_init_queue(ide_drive_t *drive)
#endif
blk_queue_max_sectors(q, max_sectors);
- /* IDE DMA can do PRD_ENTRIES number of segments */
- q->max_segments = PRD_ENTRIES;
+ /* IDE DMA can do PRD_ENTRIES number of segments. */
+ blk_queue_max_hw_segments(q, PRD_ENTRIES);
+
+ /* This is a driver limit and could be eliminated. */
+ blk_queue_max_phys_segments(q, PRD_ENTRIES);
}
/*
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index c1b19e1d92555..c4eb0a4a3c620 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -3686,6 +3686,7 @@ EXPORT_SYMBOL(ide_spin_wait_hwgroup);
*/
devfs_handle_t ide_devfs_handle;
+EXPORT_SYMBOL(ide_lock);
EXPORT_SYMBOL(ide_probe);
EXPORT_SYMBOL(drive_is_flashcard);
EXPORT_SYMBOL(ide_timer_expiry);
@@ -3718,6 +3719,7 @@ EXPORT_SYMBOL(ide_do_drive_cmd);
EXPORT_SYMBOL(ide_end_drive_cmd);
EXPORT_SYMBOL(ide_end_request);
EXPORT_SYMBOL(__ide_end_request);
+EXPORT_SYMBOL(ide_revalidate_drive);
EXPORT_SYMBOL(ide_revalidate_disk);
EXPORT_SYMBOL(ide_cmd);
EXPORT_SYMBOL(ide_wait_cmd);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 372583a05c216..6044b05311100 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -69,6 +69,7 @@
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/major.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 1e7c77b395afa..9b6d48e9e3e91 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -65,7 +65,7 @@
#include <linux/errno.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
-#include <linux/blk.h> /* for io_request_lock (spinlock) decl */
+#include <linux/blk.h>
#include "../../scsi/scsi.h"
#include "../../scsi/hosts.h"
#include "../../scsi/sd.h"
@@ -246,9 +246,9 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r)
mf_chk = search_taskQ(1,sc,MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK);
if (mf_chk != NULL) {
sc->result = DID_ABORT << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&sc->host->host_lock, flags);
sc->scsi_done(sc);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&sc->host->host_lock, flags);
return 1;
}
}
@@ -426,9 +426,9 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r)
scsi_to_pci_dma_dir(sc->sc_data_direction));
}
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&sc->host->host_lock, flags);
sc->scsi_done(sc);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&sc->host->host_lock, flags);
}
return 1;
@@ -928,9 +928,9 @@ mptscsih_qcmd(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
}
SCpnt->resid = SCpnt->request_bufflen - mpt_sdev->sense_sz;
SCpnt->result = 0;
-/* spin_lock(&io_request_lock); */
+/* spin_lock(&SCpnt->host->host_lock); */
SCpnt->scsi_done(SCpnt);
-/* spin_unlock(&io_request_lock); */
+/* spin_unlock(&SCpnt->host->host_lock); */
return 0;
}
}
@@ -1333,9 +1333,9 @@ mptscsih_abort(Scsi_Cmnd * SCpnt)
if (ctx2abort == -1) {
printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#2) for SCpnt=%p\n", SCpnt);
SCpnt->result = DID_SOFT_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&SCpnt->host->host_lock, flags);
SCpnt->scsi_done(SCpnt);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
} else {
dprintk((KERN_INFO MYNAM ":DbG: ctx2abort = %08x\n", ctx2abort));
@@ -1352,9 +1352,9 @@ mptscsih_abort(Scsi_Cmnd * SCpnt)
": WARNING[2] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n",
i, mf, SCpnt);
SCpnt->result = DID_SOFT_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&SCpnt->host->host_lock, flags);
SCpnt->scsi_done(SCpnt);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
}
}
@@ -1428,9 +1428,9 @@ mptscsih_dev_reset(Scsi_Cmnd * SCpnt)
": WARNING[3] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n",
i, mf, SCpnt);
SCpnt->result = DID_SOFT_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&SCpnt->host->host_lock, flags);
SCpnt->scsi_done(SCpnt);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
}
@@ -1502,9 +1502,9 @@ mptscsih_bus_reset(Scsi_Cmnd * SCpnt)
": WARNING[4] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n",
i, mf, SCpnt);
SCpnt->result = DID_SOFT_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&SCpnt->host->host_lock, flags);
SCpnt->scsi_done(SCpnt);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
}
@@ -1748,9 +1748,9 @@ mptscsih_taskmgmt_bh(void *sc)
if (ctx2abort == -1) {
printk(KERN_ERR MYNAM ": ERROR - ScsiLookup fail(#1) for SCpnt=%p\n", SCpnt);
SCpnt->result = DID_SOFT_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&SCpnt->host->host_lock, flags);
SCpnt->scsi_done(SCpnt);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
continue;
}
@@ -1797,9 +1797,9 @@ mptscsih_taskmgmt_bh(void *sc)
!= 0) {
printk(KERN_WARNING MYNAM ": WARNING[1] - IOC error (%d) processing TaskMgmt request (mf=%p:sc=%p)\n", i, mf, SCpnt);
SCpnt->result = DID_SOFT_ERROR << 16;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&SCpnt->host->host_lock, flags);
SCpnt->scsi_done(SCpnt);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&SCpnt->host->host_lock, flags);
mpt_free_msg_frame(ScsiTaskCtx, hd->ioc->id, mf);
} else {
/* Spin-Wait for TaskMgmt complete!!! */
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index bdf52592bd26b..c64b7393b4845 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -1301,7 +1301,8 @@ static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, i
request_queue_t *q = i2ob_dev[unit].req_queue;
blk_queue_max_sectors(q, 256);
- blk_queue_max_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
+ blk_queue_max_phys_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
+ blk_queue_max_hw_segments(q, (d->controller->status_block->inbound_frame_size - 8)/2);
if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 2)
i2ob_dev[i].depth = 32;
@@ -1309,14 +1310,16 @@ static int i2ob_install_device(struct i2o_controller *c, struct i2o_device *d, i
if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.queue_buggy == 1)
{
blk_queue_max_sectors(q, 32);
- blk_queue_max_segments(q, 8);
+ blk_queue_max_phys_segments(q, 8);
+ blk_queue_max_hw_segments(q, 8);
i2ob_dev[i].depth = 4;
}
if(d->controller->type == I2O_TYPE_PCI && d->controller->bus.pci.short_req)
{
blk_queue_max_sectors(q, 8);
- blk_queue_max_segments(q, 8);
+ blk_queue_max_phys_segments(q, 8);
+ blk_queue_max_hw_segments(q, 8);
}
}
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index f3528a29e44cd..2e51889ac9705 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -135,7 +135,7 @@ obj-$(CONFIG_CHR_DEV_SG) += sg.o
list-multi := scsi_mod.o sd_mod.o sr_mod.o initio.o a100u2w.o cpqfc.o
scsi_mod-objs := scsi.o hosts.o scsi_ioctl.o constants.o scsicam.o \
scsi_proc.o scsi_error.o scsi_queue.o scsi_lib.o \
- scsi_merge.o scsi_dma.o scsi_scan.o scsi_syms.o
+ scsi_merge.o scsi_scan.o scsi_syms.o
sd_mod-objs := sd.o
sr_mod-objs := sr.o sr_ioctl.o sr_vendor.o
initio-objs := ini9100u.o i91uscsi.o
diff --git a/drivers/scsi/README.ncr53c8xx b/drivers/scsi/README.ncr53c8xx
index 206233d1af2da..514c2be6ba12e 100644
--- a/drivers/scsi/README.ncr53c8xx
+++ b/drivers/scsi/README.ncr53c8xx
@@ -1,6 +1,6 @@
The Linux NCR53C8XX/SYM53C8XX drivers README file
-Written by Gerard Roudier <groudier@club-internet.fr>
+Written by Gerard Roudier <groudier@free.fr>
21 Rue Carnot
95170 DEUIL LA BARRE - FRANCE
@@ -87,7 +87,7 @@ Written by Gerard Roudier <groudier@club-internet.fr>
The initial Linux ncr53c8xx driver has been a port of the ncr driver from
FreeBSD that has been achieved in November 1995 by:
- Gerard Roudier <groudier@club-internet.fr>
+ Gerard Roudier <groudier@free.fr>
The original driver has been written for 386bsd and FreeBSD by:
Wolfgang Stanglmeier <wolf@cologne.de>
@@ -1287,7 +1287,7 @@ appropriate mailing lists or news-groups. Send me a copy in order to
be sure I will receive it. Obviously, a bug in the driver code is
possible.
- My email address: Gerard Roudier <groudier@club-internet.fr>
+ My email address: Gerard Roudier <groudier@free.fr>
Allowing disconnections is important if you use several devices on
your SCSI bus but often causes problems with buggy devices.
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index be4495fdf7ab0..9177efb7342b3 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -3084,7 +3084,7 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
* we check data_cmnd[0]. This catches the conditions for st.c, but
* I'm still not sure if request.cmd is valid for sg devices.
*/
- if ( (cmd->request.cmd == WRITE) || (cmd->data_cmnd[0] == WRITE_6) ||
+ if ( (rq_data_dir(&cmd->request) == WRITE) || (cmd->data_cmnd[0] == WRITE_6) ||
(cmd->data_cmnd[0] == WRITE_FILEMARKS) )
{
sp->w_total++;
@@ -4294,7 +4294,7 @@ aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
{
printk(INFO_LEAD "Underflow - Wanted %u, %s %u, residual SG "
"count %d.\n", p->host_no, CTL_OF_SCB(scb), cmd->underflow,
- (cmd->request.cmd == WRITE) ? "wrote" : "read", actual,
+ (rq_data_dir(&cmd->request) == WRITE) ? "wrote" : "read", actual,
hscb->residual_SG_segment_count);
printk(INFO_LEAD "status 0x%x.\n", p->host_no, CTL_OF_SCB(scb),
hscb->target_status);
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c
index 0ec9d562bd3d4..3f5bbb08cb4a6 100644
--- a/drivers/scsi/esp.c
+++ b/drivers/scsi/esp.c
@@ -1,4 +1,4 @@
-/* $Id: esp.c,v 1.99 2001/02/13 01:17:01 davem Exp $
+/* $Id: esp.c,v 1.100 2001/12/11 04:55:48 davem Exp $
* esp.c: EnhancedScsiProcessor Sun SCSI driver code.
*
* Copyright (C) 1995, 1998 David S. Miller (davem@caip.rutgers.edu)
@@ -1035,9 +1035,6 @@ static void __init esp_init_swstate(struct esp *esp)
{
int i;
- /* Driver spinlock... */
- spin_lock_init(&esp->lock);
-
/* Command queues... */
esp->current_SC = NULL;
esp->disconnected_SC = NULL;
@@ -1816,7 +1813,6 @@ after_nego_msg_built:
int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
{
struct esp *esp;
- unsigned long flags;
/* Set up func ptr and initial driver cmd-phase. */
SCpnt->scsi_done = done;
@@ -1834,8 +1830,6 @@ int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
SCpnt->SCp.Message = 0xff;
SCpnt->SCp.sent_command = 0;
- spin_lock_irqsave(&esp->lock, flags);
-
/* Place into our queue. */
if (SCpnt->cmnd[0] == REQUEST_SENSE) {
ESPQUEUE(("RQSENSE\n"));
@@ -1849,8 +1843,6 @@ int esp_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
if (!esp->current_SC && !esp->resetting_bus)
esp_exec_cmd(esp);
- spin_unlock_irqrestore(&esp->lock, flags);
-
return 0;
}
@@ -1926,7 +1918,7 @@ int esp_abort(Scsi_Cmnd *SCptr)
unsigned long flags;
int don;
- spin_lock_irqsave(&esp->lock, flags);
+ spin_lock_irqsave(&esp->ehost->host_lock, flags);
ESPLOG(("esp%d: Aborting command\n", esp->esp_id));
esp_dump_state(esp);
@@ -1942,7 +1934,7 @@ int esp_abort(Scsi_Cmnd *SCptr)
esp->msgout_len = 1;
esp->msgout_ctr = 0;
esp_cmd(esp, ESP_CMD_SATN);
- spin_unlock_irqrestore(&esp->lock, flags);
+ spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
return SCSI_ABORT_PENDING;
}
@@ -1964,14 +1956,14 @@ int esp_abort(Scsi_Cmnd *SCptr)
*prev = (Scsi_Cmnd *) this->host_scribble;
this->host_scribble = NULL;
- spin_unlock_irqrestore(&esp->lock, flags);
-
esp_release_dmabufs(esp, this);
this->result = DID_ABORT << 16;
this->scsi_done(this);
+
if (don)
ESP_INTSON(esp->dregs);
+ spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
return SCSI_ABORT_SUCCESS;
}
}
@@ -1985,7 +1977,7 @@ int esp_abort(Scsi_Cmnd *SCptr)
if (esp->current_SC) {
if (don)
ESP_INTSON(esp->dregs);
- spin_unlock_irqrestore(&esp->lock, flags);
+ spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
return SCSI_ABORT_BUSY;
}
@@ -1998,7 +1990,7 @@ int esp_abort(Scsi_Cmnd *SCptr)
if (don)
ESP_INTSON(esp->dregs);
- spin_unlock_irqrestore(&esp->lock, flags);
+ spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
return SCSI_ABORT_SNOOZE;
}
@@ -2014,16 +2006,11 @@ static int esp_finish_reset(struct esp *esp)
/* Clean up currently executing command, if any. */
if (sp != NULL) {
esp->current_SC = NULL;
- spin_unlock(&esp->lock);
esp_release_dmabufs(esp, sp);
sp->result = (DID_RESET << 16);
- spin_lock(&io_request_lock);
sp->scsi_done(sp);
- spin_unlock(&io_request_lock);
-
- spin_lock(&esp->lock);
}
/* Clean up disconnected queue, they have been invalidated
@@ -2031,16 +2018,10 @@ static int esp_finish_reset(struct esp *esp)
*/
if (esp->disconnected_SC) {
while ((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) {
- spin_unlock(&esp->lock);
-
esp_release_dmabufs(esp, sp);
sp->result = (DID_RESET << 16);
- spin_lock(&io_request_lock);
sp->scsi_done(sp);
- spin_unlock(&io_request_lock);
-
- spin_lock(&esp->lock);
}
}
@@ -2071,9 +2052,9 @@ int esp_reset(Scsi_Cmnd *SCptr, unsigned int how)
struct esp *esp = (struct esp *) SCptr->host->hostdata;
unsigned long flags;
- spin_lock_irqsave(&esp->lock, flags);
+ spin_lock_irqsave(&esp->ehost->host_lock, flags);
(void) esp_do_resetbus(esp);
- spin_unlock_irqrestore(&esp->lock, flags);
+ spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
return SCSI_RESET_PENDING;
}
@@ -2085,16 +2066,12 @@ static void esp_done(struct esp *esp, int error)
esp->current_SC = NULL;
- spin_unlock(&esp->lock);
esp_release_dmabufs(esp, done_SC);
done_SC->result = error;
- spin_lock(&io_request_lock);
done_SC->scsi_done(done_SC);
- spin_unlock(&io_request_lock);
/* Bus is free, issue any commands in the queue. */
- spin_lock(&esp->lock);
if (esp->issue_SC && !esp->current_SC)
esp_exec_cmd(esp);
@@ -4344,7 +4321,7 @@ static void esp_intr(int irq, void *dev_id, struct pt_regs *pregs)
struct esp *esp = dev_id;
unsigned long flags;
- spin_lock_irqsave(&esp->lock, flags);
+ spin_lock_irqsave(&esp->ehost->host_lock, flags);
if (ESP_IRQ_P(esp->dregs)) {
ESP_INTSOFF(esp->dregs);
@@ -4354,7 +4331,7 @@ static void esp_intr(int irq, void *dev_id, struct pt_regs *pregs)
ESP_INTSON(esp->dregs);
}
- spin_unlock_irqrestore(&esp->lock, flags);
+ spin_unlock_irqrestore(&esp->ehost->host_lock, flags);
}
int esp_revoke(Scsi_Device* SDptr)
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h
index 0cc5e37532dbe..70f1a7c6eae5a 100644
--- a/drivers/scsi/esp.h
+++ b/drivers/scsi/esp.h
@@ -1,4 +1,4 @@
-/* $Id: esp.h,v 1.28 2000/03/30 01:33:17 davem Exp $
+/* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $
* esp.h: Defines and structures for the Sparc ESP (Enhanced SCSI
* Processor) driver under Linux.
*
@@ -64,7 +64,6 @@ enum esp_rev {
/* We get one of these for each ESP probed. */
struct esp {
- spinlock_t lock;
unsigned long eregs; /* ESP controller registers */
unsigned long dregs; /* DMA controller registers */
struct sbus_dma *dma; /* DMA controller sw state */
@@ -416,6 +415,7 @@ extern int esp_revoke(Scsi_Device* SDptr);
sg_tablesize: SG_ALL, \
cmd_per_lun: 1, \
use_clustering: ENABLE_CLUSTERING, \
+ highmem_io: 1, \
}
/* For our interrupt engine. */
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index a33868d948b72..cf32a8a3acbdd 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -130,7 +130,8 @@ scsi_unregister(struct Scsi_Host * sh){
* pain to reverse this, so we try to avoid it
*/
extern int blk_nohighio;
-struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j){
+struct Scsi_Host * scsi_register(Scsi_Host_Template * tpnt, int j)
+{
struct Scsi_Host * retval, *shpnt, *o_shp;
Scsi_Host_Name *shn, *shn2;
int flag_new = 1;
diff --git a/drivers/scsi/hosts.h b/drivers/scsi/hosts.h
index 08f3ea2805b8e..9045cc4cbb14c 100644
--- a/drivers/scsi/hosts.h
+++ b/drivers/scsi/hosts.h
@@ -334,7 +334,6 @@ struct Scsi_Host
int resetting; /* if set, it means that last_reset is a valid value */
unsigned long last_reset;
-
/*
* These three parameters can be used to allow for wide scsi,
* and for host adapters that support multiple busses
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 263e1827d1654..ce2e2e0902125 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -22,7 +22,7 @@
** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
** and is currently maintained by
**
-** Gerard Roudier <groudier@club-internet.fr>
+** Gerard Roudier <groudier@free.fr>
**
** Being given that this driver originates from the FreeBSD version, and
** in order to keep synergy on both, any suggested enhancements and corrections
@@ -63,7 +63,7 @@
** August 18 1997 by Cort <cort@cs.nmt.edu>:
** Support for Power/PC (Big Endian).
**
-** June 20 1998 by Gerard Roudier <groudier@club-internet.fr>:
+** June 20 1998 by Gerard Roudier
** Support for up to 64 tags per lun.
** O(1) everywhere (C and SCRIPTS) for normal cases.
** Low PCI traffic for command handling when on-chip RAM is present.
@@ -8127,10 +8127,14 @@ static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
segment = 1;
}
}
- else if (use_sg <= MAX_SCATTER) {
+ else {
struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > MAX_SCATTER) {
+ unmap_scsi_data(np, cmd);
+ return -1;
+ }
data = &data[MAX_SCATTER - use_sg];
while (segment < use_sg) {
@@ -8143,9 +8147,6 @@ static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
++segment;
}
}
- else {
- return -1;
- }
return segment;
}
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index ac3f3b9e85da7..ac4e795a14034 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -22,7 +22,7 @@
** This driver has been ported to Linux from the FreeBSD NCR53C8XX driver
** and is currently maintained by
**
-** Gerard Roudier <groudier@club-internet.fr>
+** Gerard Roudier <groudier@free.fr>
**
** Being given that this driver originates from the FreeBSD version, and
** in order to keep synergy on both, any suggested enhancements and corrections
diff --git a/drivers/scsi/qlogicfc.c b/drivers/scsi/qlogicfc.c
index efe6d609db97b..0a6795243bf26 100644
--- a/drivers/scsi/qlogicfc.c
+++ b/drivers/scsi/qlogicfc.c
@@ -1375,7 +1375,7 @@ static void redo_port_db(unsigned long arg)
hostdata->explore_timer.data = 0;
del_timer(&hostdata->explore_timer);
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&host->host_lock, flags);
if (hostdata->adapter_state & AS_REDO_FABRIC_PORTDB || hostdata->adapter_state & AS_REDO_LOOP_PORTDB) {
isp2x00_make_portdb(host);
@@ -1422,7 +1422,7 @@ static void redo_port_db(unsigned long arg)
hostdata->adapter_state = AS_LOOP_GOOD;
}
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&host->host_lock, flags);
}
@@ -1430,11 +1430,12 @@ static void redo_port_db(unsigned long arg)
void do_isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
+ struct Scsi_Host *host = dev_id;
unsigned long flags;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&host->host_lock, flags);
isp2x00_intr_handler(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&host->host_lock, flags);
}
void isp2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
diff --git a/drivers/scsi/qlogicisp.c b/drivers/scsi/qlogicisp.c
index 6677a0f3e2e25..bc761211bd794 100644
--- a/drivers/scsi/qlogicisp.c
+++ b/drivers/scsi/qlogicisp.c
@@ -970,11 +970,12 @@ int isp1020_queuecommand(Scsi_Cmnd *Cmnd, void (*done)(Scsi_Cmnd *))
void do_isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
+ struct Scsi_Host *host = dev_id;
unsigned long flags;
- spin_lock_irqsave(&io_request_lock, flags);
+ spin_lock_irqsave(&host->host_lock, flags);
isp1020_intr_handler(irq, dev_id, regs);
- spin_unlock_irqrestore(&io_request_lock, flags);
+ spin_unlock_irqrestore(&host->host_lock, flags);
}
void isp1020_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 8561843c022b9..1b8acd2faa702 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1445,7 +1445,7 @@ static void qpti_intr(int irq, void *dev_id, struct pt_regs *regs)
spin_unlock(&qpti->lock);
if (dq != NULL) {
- spin_lock(&io_request_lock);
+ spin_lock(&qpti->qhost->host_lock);
do {
Scsi_Cmnd *next;
@@ -1453,7 +1453,7 @@ static void qpti_intr(int irq, void *dev_id, struct pt_regs *regs)
dq->scsi_done(dq);
dq = next;
} while (dq != NULL);
- spin_unlock(&io_request_lock);
+ spin_unlock(&qpti->qhost->host_lock);
}
__restore_flags(flags);
}
diff --git a/drivers/scsi/qlogicpti.h b/drivers/scsi/qlogicpti.h
index aad93471c7af4..6c49ea1df1c21 100644
--- a/drivers/scsi/qlogicpti.h
+++ b/drivers/scsi/qlogicpti.h
@@ -524,6 +524,7 @@ struct qlogicpti {
sg_tablesize: QLOGICPTI_MAX_SG(QLOGICPTI_REQ_QUEUE_LEN), \
cmd_per_lun: 1, \
use_clustering: ENABLE_CLUSTERING, \
+ highmem_io: 1, \
}
/* For our interrupt engine. */
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 656766c09f2d3..98a478083b0cb 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -55,6 +55,7 @@
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
+#include <linux/mempool.h>
#define __KERNEL_SYSCALLS__
@@ -83,6 +84,18 @@ static int scsi_proc_info(char *buffer, char **start, off_t offset, int length);
static void scsi_dump_status(int level);
#endif
+#define SG_MEMPOOL_NR 5
+#define SG_MEMPOOL_SIZE 32
+
+struct scsi_host_sg_pool {
+ int size;
+ kmem_cache_t *slab;
+ mempool_t *pool;
+};
+
+static const int scsi_host_sg_pool_sizes[SG_MEMPOOL_NR] = { 8, 16, 32, 64, MAX_PHYS_SEGMENTS };
+struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR];
+
/*
static const char RCSid[] = "$Header: /vger/u4/cvs/linux/drivers/scsi/scsi.c,v 1.38 1997/01/19 23:07:18 davem Exp $";
*/
@@ -181,23 +194,22 @@ void scsi_build_commandblocks(Scsi_Device * SDpnt);
void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt)
{
request_queue_t *q = &SDpnt->request_queue;
- int max_segments = SHpnt->sg_tablesize;
blk_init_queue(q, scsi_request_fn, &SHpnt->host_lock);
q->queuedata = (void *) SDpnt;
-#ifdef DMA_CHUNK_SIZE
- if (max_segments > 64)
- max_segments = 64;
-#endif
+ /* Hardware imposed limit. */
+ blk_queue_max_hw_segments(q, SHpnt->sg_tablesize);
+
+ /*
+ * When we remove scsi_malloc soonish, this can die too
+ */
+ blk_queue_max_phys_segments(q, PAGE_SIZE / sizeof(struct scatterlist));
- blk_queue_max_segments(q, max_segments);
blk_queue_max_sectors(q, SHpnt->max_sectors);
if (!SHpnt->use_clustering)
clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
- if (SHpnt->unchecked_isa_dma)
- blk_queue_segment_boundary(q, ISA_DMA_THRESHOLD);
}
#ifdef MODULE
@@ -1955,13 +1967,6 @@ static int scsi_register_host(Scsi_Host_Template * tpnt)
}
}
- /*
- * Now that we have all of the devices, resize the DMA pool,
- * as required. */
- if (!out_of_space)
- scsi_resize_dma_pool();
-
-
/* This does any final handling that is required. */
for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
if (sdtpnt->finish && sdtpnt->nr_dev) {
@@ -2160,14 +2165,6 @@ static int scsi_unregister_host(Scsi_Host_Template * tpnt)
tpnt->present--;
}
- /*
- * If there are absolutely no more hosts left, it is safe
- * to completely nuke the DMA pool. The resize operation will
- * do the right thing and free everything.
- */
- if (!scsi_hosts)
- scsi_resize_dma_pool();
-
if (pcount0 != next_scsi_host)
printk(KERN_INFO "scsi : %d host%s left.\n", next_scsi_host,
(next_scsi_host == 1) ? "" : "s");
@@ -2268,8 +2265,6 @@ static int scsi_register_device_module(struct Scsi_Device_Template *tpnt)
*/
if (tpnt->finish && tpnt->nr_dev)
(*tpnt->finish) ();
- if (!out_of_space)
- scsi_resize_dma_pool();
MOD_INC_USE_COUNT;
if (out_of_space) {
@@ -2535,16 +2530,81 @@ int __init scsi_setup(char *str)
__setup("scsihosts=", scsi_setup);
#endif
+static void *scsi_pool_alloc(int gfp_mask, void *data)
+{
+ return kmem_cache_alloc(data, gfp_mask);
+}
+
+static void scsi_pool_free(void *ptr, void *data)
+{
+ kmem_cache_free(data, ptr);
+}
+
+struct scatterlist *scsi_alloc_sgtable(Scsi_Cmnd *SCpnt, int gfp_mask)
+{
+ struct scsi_host_sg_pool *sgp;
+ struct scatterlist *sgl;
+
+ BUG_ON(!SCpnt->use_sg);
+
+ switch (SCpnt->use_sg) {
+ case 1 ... 8 : SCpnt->sglist_len = 0; break;
+ case 9 ... 16 : SCpnt->sglist_len = 1; break;
+ case 17 ... 32 : SCpnt->sglist_len = 2; break;
+ case 33 ... 64 : SCpnt->sglist_len = 3; break;
+ case 65 ... MAX_PHYS_SEGMENTS : SCpnt->sglist_len = 4; break;
+ default: return NULL;
+ }
+
+ sgp = scsi_sg_pools + SCpnt->sglist_len;
+
+ sgl = mempool_alloc(sgp->pool, gfp_mask);
+ if (sgl) {
+ memset(sgl, 0, sgp->size);
+ return sgl;
+ }
+
+ return sgl;
+}
+
+void scsi_free_sgtable(struct scatterlist *sgl, int index)
+{
+ struct scsi_host_sg_pool *sgp = scsi_sg_pools + index;
+
+ if (unlikely(index > SG_MEMPOOL_NR)) {
+ printk("scsi_free_sgtable: mempool %d\n", index);
+ BUG();
+ }
+
+ mempool_free(sgl, sgp->pool);
+}
+
static int __init init_scsi(void)
{
struct proc_dir_entry *generic;
+ char name[16];
+ int i;
printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
- if( scsi_init_minimal_dma_pool() != 0 )
- {
- return 1;
- }
+ /*
+ * setup sg memory pools
+ */
+ for (i = 0; i < SG_MEMPOOL_NR; i++) {
+ struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+ int size = scsi_host_sg_pool_sizes[i] * sizeof(struct scatterlist);
+
+ snprintf(name, sizeof(name) - 1, "sgpool-%d", scsi_host_sg_pool_sizes[i]);
+ sgp->slab = kmem_cache_create(name, size, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+ if (!sgp->slab)
+ panic("SCSI: can't init sg slab\n");
+
+ sgp->pool = mempool_create(SG_MEMPOOL_SIZE, scsi_pool_alloc, scsi_pool_free, sgp->slab);
+ if (!sgp->pool)
+ panic("SCSI: can't init sg mempool\n");
+
+ sgp->size = size;
+ }
/*
* This makes /proc/scsi and /proc/scsi/scsi visible.
@@ -2580,6 +2640,7 @@ static int __init init_scsi(void)
static void __exit exit_scsi(void)
{
Scsi_Host_Name *shn, *shn2 = NULL;
+ int i;
remove_bh(SCSI_BH);
@@ -2600,11 +2661,13 @@ static void __exit exit_scsi(void)
remove_proc_entry ("scsi", 0);
#endif
- /*
- * Free up the DMA pool.
- */
- scsi_resize_dma_pool();
-
+ for (i = 0; i < SG_MEMPOOL_NR; i++) {
+ struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
+ mempool_destroy(sgp->pool);
+ kmem_cache_destroy(sgp->slab);
+ sgp->pool = NULL;
+ sgp->slab = NULL;
+ }
}
module_init(init_scsi);
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h
index 3e6b1c3b34dc5..b8ad3f4aa887c 100644
--- a/drivers/scsi/scsi.h
+++ b/drivers/scsi/scsi.h
@@ -439,6 +439,12 @@ extern int scsi_partsize(struct buffer_head *bh, unsigned long capacity,
unsigned int *secs);
/*
+ * sg list allocations
+ */
+struct scatterlist *scsi_alloc_sgtable(Scsi_Cmnd *SCpnt, int gfp_mask);
+void scsi_free_sgtable(struct scatterlist *sgl, int index);
+
+/*
* Prototypes for functions in scsi_dma.c
*/
void scsi_resize_dma_pool(void);
@@ -449,8 +455,8 @@ int scsi_free(void *, unsigned int);
/*
* Prototypes for functions in scsi_merge.c
*/
-extern void recount_segments(Scsi_Cmnd * SCpnt);
-extern void initialize_merge_fn(Scsi_Device * SDpnt);
+extern void scsi_initialize_merge_fn(Scsi_Device *SDpnt);
+extern int scsi_init_io(Scsi_Cmnd *SCpnt);
/*
* Prototypes for functions in scsi_queue.c
@@ -555,8 +561,6 @@ struct scsi_device {
request_queue_t request_queue;
atomic_t device_active; /* commands checked out for device */
volatile unsigned short device_busy; /* commands actually active on low-level */
- int (*scsi_init_io_fn) (Scsi_Cmnd *); /* Used to initialize
- new request */
Scsi_Cmnd *device_queue; /* queue of SCSI Command structures */
/* public: */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 9b947ee8257f9..fda3c65283354 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -182,7 +182,6 @@ static void scsi_dump(Scsi_Cmnd * SCpnt, int flag)
};
printk("\n");
#endif
- printk("DMA free %d sectors.\n", scsi_dma_free_sectors);
}
int scsi_debug_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
@@ -653,7 +652,6 @@ int scsi_debug_abort(Scsi_Cmnd * SCpnt)
int scsi_debug_biosparam(Disk * disk, kdev_t dev, int *info)
{
- int size = disk->capacity;
info[0] = N_HEAD;
info[1] = N_SECTOR;
info[2] = N_CYLINDER;
diff --git a/drivers/scsi/scsi_dma.c b/drivers/scsi/scsi_dma.c
deleted file mode 100644
index 3de83513019ce..0000000000000
--- a/drivers/scsi/scsi_dma.c
+++ /dev/null
@@ -1,450 +0,0 @@
-/*
- * scsi_dma.c Copyright (C) 2000 Eric Youngdale
- *
- * mid-level SCSI DMA bounce buffer allocator
- *
- */
-
-#define __NO_VERSION__
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/blk.h>
-
-
-#include "scsi.h"
-#include "hosts.h"
-#include "constants.h"
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-/*
- * PAGE_SIZE must be a multiple of the sector size (512). True
- * for all reasonably recent architectures (even the VAX...).
- */
-#define SECTOR_SIZE 512
-#define SECTORS_PER_PAGE (PAGE_SIZE/SECTOR_SIZE)
-
-#if SECTORS_PER_PAGE <= 8
-typedef unsigned char FreeSectorBitmap;
-#elif SECTORS_PER_PAGE <= 32
-typedef unsigned int FreeSectorBitmap;
-#else
-#error You lose.
-#endif
-
-/*
- * Used for access to internal allocator used for DMA safe buffers.
- */
-static spinlock_t allocator_request_lock = SPIN_LOCK_UNLOCKED;
-
-static FreeSectorBitmap *dma_malloc_freelist = NULL;
-static int need_isa_bounce_buffers;
-static unsigned int dma_sectors = 0;
-unsigned int scsi_dma_free_sectors = 0;
-unsigned int scsi_need_isa_buffer = 0;
-static unsigned char **dma_malloc_pages = NULL;
-
-/*
- * Function: scsi_malloc
- *
- * Purpose: Allocate memory from the DMA-safe pool.
- *
- * Arguments: len - amount of memory we need.
- *
- * Lock status: No locks assumed to be held. This function is SMP-safe.
- *
- * Returns: Pointer to memory block.
- *
- * Notes: Prior to the new queue code, this function was not SMP-safe.
- * This function can only allocate in units of sectors
- * (i.e. 512 bytes).
- *
- * We cannot use the normal system allocator becuase we need
- * to be able to guarantee that we can process a complete disk
- * I/O request without touching the system allocator. Think
- * about it - if the system were heavily swapping, and tried to
- * write out a block of memory to disk, and the SCSI code needed
- * to allocate more memory in order to be able to write the
- * data to disk, you would wedge the system.
- */
-void *scsi_malloc(unsigned int len)
-{
- unsigned int nbits, mask;
- unsigned long flags;
-
- int i, j;
- if (len % SECTOR_SIZE != 0 || len > PAGE_SIZE)
- return NULL;
-
- nbits = len >> 9;
- mask = (1 << nbits) - 1;
-
- spin_lock_irqsave(&allocator_request_lock, flags);
-
- for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
- for (j = 0; j <= SECTORS_PER_PAGE - nbits; j++) {
- if ((dma_malloc_freelist[i] & (mask << j)) == 0) {
- dma_malloc_freelist[i] |= (mask << j);
- scsi_dma_free_sectors -= nbits;
-#ifdef DEBUG
- SCSI_LOG_MLQUEUE(3, printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9)));
- printk("SMalloc: %d %p [From:%p]\n", len, dma_malloc_pages[i] + (j << 9));
-#endif
- spin_unlock_irqrestore(&allocator_request_lock, flags);
- return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9));
- }
- }
- spin_unlock_irqrestore(&allocator_request_lock, flags);
- return NULL; /* Nope. No more */
-}
-
-/*
- * Function: scsi_free
- *
- * Purpose: Free memory into the DMA-safe pool.
- *
- * Arguments: ptr - data block we are freeing.
- * len - size of block we are freeing.
- *
- * Lock status: No locks assumed to be held. This function is SMP-safe.
- *
- * Returns: Nothing
- *
- * Notes: This function *must* only be used to free memory
- * allocated from scsi_malloc().
- *
- * Prior to the new queue code, this function was not SMP-safe.
- * This function can only allocate in units of sectors
- * (i.e. 512 bytes).
- */
-int scsi_free(void *obj, unsigned int len)
-{
- unsigned int page, sector, nbits, mask;
- unsigned long flags;
-
-#ifdef DEBUG
- unsigned long ret = 0;
-
-#ifdef __mips__
- __asm__ __volatile__("move\t%0,$31":"=r"(ret));
-#else
- ret = __builtin_return_address(0);
-#endif
- printk("scsi_free %p %d\n", obj, len);
- SCSI_LOG_MLQUEUE(3, printk("SFree: %p %d\n", obj, len));
-#endif
-
- spin_lock_irqsave(&allocator_request_lock, flags);
-
- for (page = 0; page < dma_sectors / SECTORS_PER_PAGE; page++) {
- unsigned long page_addr = (unsigned long) dma_malloc_pages[page];
- if ((unsigned long) obj >= page_addr &&
- (unsigned long) obj < page_addr + PAGE_SIZE) {
- sector = (((unsigned long) obj) - page_addr) >> 9;
-
- nbits = len >> 9;
- mask = (1 << nbits) - 1;
-
- if (sector + nbits > SECTORS_PER_PAGE)
- panic("scsi_free:Bad memory alignment");
-
- if ((dma_malloc_freelist[page] &
- (mask << sector)) != (mask << sector)) {
-#ifdef DEBUG
- printk("scsi_free(obj=%p, len=%d) called from %08lx\n",
- obj, len, ret);
-#endif
- panic("scsi_free:Trying to free unused memory");
- }
- scsi_dma_free_sectors += nbits;
- dma_malloc_freelist[page] &= ~(mask << sector);
- spin_unlock_irqrestore(&allocator_request_lock, flags);
- return 0;
- }
- }
- panic("scsi_free:Bad offset");
-}
-
-
-/*
- * Function: scsi_resize_dma_pool
- *
- * Purpose: Ensure that the DMA pool is sufficiently large to be
- * able to guarantee that we can always process I/O requests
- * without calling the system allocator.
- *
- * Arguments: None.
- *
- * Lock status: No locks assumed to be held. This function is SMP-safe.
- *
- * Returns: Nothing
- *
- * Notes: Prior to the new queue code, this function was not SMP-safe.
- * Go through the device list and recompute the most appropriate
- * size for the dma pool. Then grab more memory (as required).
- */
-void scsi_resize_dma_pool(void)
-{
- int i, k;
- unsigned long size;
- unsigned long flags;
- struct Scsi_Host *shpnt;
- struct Scsi_Host *host = NULL;
- Scsi_Device *SDpnt;
- FreeSectorBitmap *new_dma_malloc_freelist = NULL;
- unsigned int new_dma_sectors = 0;
- unsigned int new_need_isa_buffer = 0;
- unsigned char **new_dma_malloc_pages = NULL;
- int out_of_space = 0;
-
- spin_lock_irqsave(&allocator_request_lock, flags);
-
- if (!scsi_hostlist) {
- /*
- * Free up the DMA pool.
- */
- if (scsi_dma_free_sectors != dma_sectors)
- panic("SCSI DMA pool memory leak %d %d\n", scsi_dma_free_sectors, dma_sectors);
-
- for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
- free_pages((unsigned long) dma_malloc_pages[i], 0);
- if (dma_malloc_pages)
- kfree((char *) dma_malloc_pages);
- dma_malloc_pages = NULL;
- if (dma_malloc_freelist)
- kfree((char *) dma_malloc_freelist);
- dma_malloc_freelist = NULL;
- dma_sectors = 0;
- scsi_dma_free_sectors = 0;
- spin_unlock_irqrestore(&allocator_request_lock, flags);
- return;
- }
- /* Next, check to see if we need to extend the DMA buffer pool */
-
- new_dma_sectors = 2 * SECTORS_PER_PAGE; /* Base value we use */
-
- if (__pa(high_memory) - 1 > ISA_DMA_THRESHOLD)
- need_isa_bounce_buffers = 1;
- else
- need_isa_bounce_buffers = 0;
-
- if (scsi_devicelist)
- for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
- new_dma_sectors += SECTORS_PER_PAGE; /* Increment for each host */
-
- for (host = scsi_hostlist; host; host = host->next) {
- for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
- /*
- * sd and sr drivers allocate scatterlists.
- * sr drivers may allocate for each command 1x2048 or 2x1024 extra
- * buffers for 2k sector size and 1k fs.
- * sg driver allocates buffers < 4k.
- * st driver does not need buffers from the dma pool.
- * estimate 4k buffer/command for devices of unknown type (should panic).
- */
- if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
- SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
- int nents = host->sg_tablesize;
-#ifdef DMA_CHUNK_SIZE
- /* If the architecture does DMA sg merging, make sure
- we count with at least 64 entries even for HBAs
- which handle very few sg entries. */
- if (nents < 64) nents = 64;
-#endif
- new_dma_sectors += ((nents *
- sizeof(struct scatterlist) + 511) >> 9) *
- SDpnt->queue_depth;
- if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
- new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
- } else if (SDpnt->type == TYPE_SCANNER ||
- SDpnt->type == TYPE_PROCESSOR ||
- SDpnt->type == TYPE_COMM ||
- SDpnt->type == TYPE_MEDIUM_CHANGER ||
- SDpnt->type == TYPE_ENCLOSURE) {
- new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
- } else {
- if (SDpnt->type != TYPE_TAPE) {
- printk("resize_dma_pool: unknown device type %d\n", SDpnt->type);
- new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
- }
- }
-
- if (host->unchecked_isa_dma &&
- need_isa_bounce_buffers &&
- SDpnt->type != TYPE_TAPE) {
- new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
- SDpnt->queue_depth;
- new_need_isa_buffer++;
- }
- }
- }
-
-#ifdef DEBUG_INIT
- printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
-#endif
-
- /* limit DMA memory to 32MB: */
- new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
-
- /*
- * We never shrink the buffers - this leads to
- * race conditions that I would rather not even think
- * about right now.
- */
-#if 0 /* Why do this? No gain and risks out_of_space */
- if (new_dma_sectors < dma_sectors)
- new_dma_sectors = dma_sectors;
-#endif
- if (new_dma_sectors <= dma_sectors) {
- spin_unlock_irqrestore(&allocator_request_lock, flags);
- return; /* best to quit while we are in front */
- }
-
- for (k = 0; k < 20; ++k) { /* just in case */
- out_of_space = 0;
- size = (new_dma_sectors / SECTORS_PER_PAGE) *
- sizeof(FreeSectorBitmap);
- new_dma_malloc_freelist = (FreeSectorBitmap *)
- kmalloc(size, GFP_ATOMIC);
- if (new_dma_malloc_freelist) {
- memset(new_dma_malloc_freelist, 0, size);
- size = (new_dma_sectors / SECTORS_PER_PAGE) *
- sizeof(*new_dma_malloc_pages);
- new_dma_malloc_pages = (unsigned char **)
- kmalloc(size, GFP_ATOMIC);
- if (!new_dma_malloc_pages) {
- size = (new_dma_sectors / SECTORS_PER_PAGE) *
- sizeof(FreeSectorBitmap);
- kfree((char *) new_dma_malloc_freelist);
- out_of_space = 1;
- } else {
- memset(new_dma_malloc_pages, 0, size);
- }
- } else
- out_of_space = 1;
-
- if ((!out_of_space) && (new_dma_sectors > dma_sectors)) {
- for (i = dma_sectors / SECTORS_PER_PAGE;
- i < new_dma_sectors / SECTORS_PER_PAGE; i++) {
- new_dma_malloc_pages[i] = (unsigned char *)
- __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
- if (!new_dma_malloc_pages[i])
- break;
- }
- if (i != new_dma_sectors / SECTORS_PER_PAGE) { /* clean up */
- int k = i;
-
- out_of_space = 1;
- for (i = 0; i < k; ++i)
- free_pages((unsigned long) new_dma_malloc_pages[i], 0);
- }
- }
- if (out_of_space) { /* try scaling down new_dma_sectors request */
- printk("scsi::resize_dma_pool: WARNING, dma_sectors=%u, "
- "wanted=%u, scaling\n", dma_sectors, new_dma_sectors);
- if (new_dma_sectors < (8 * SECTORS_PER_PAGE))
- break; /* pretty well hopeless ... */
- new_dma_sectors = (new_dma_sectors * 3) / 4;
- new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
- if (new_dma_sectors <= dma_sectors)
- break; /* stick with what we have got */
- } else
- break; /* found space ... */
- } /* end of for loop */
- if (out_of_space) {
- spin_unlock_irqrestore(&allocator_request_lock, flags);
- scsi_need_isa_buffer = new_need_isa_buffer; /* some useful info */
- printk(" WARNING, not enough memory, pool not expanded\n");
- return;
- }
- /* When we dick with the actual DMA list, we need to
- * protect things
- */
- if (dma_malloc_freelist) {
- size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
- memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
- kfree((char *) dma_malloc_freelist);
- }
- dma_malloc_freelist = new_dma_malloc_freelist;
-
- if (dma_malloc_pages) {
- size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages);
- memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
- kfree((char *) dma_malloc_pages);
- }
- scsi_dma_free_sectors += new_dma_sectors - dma_sectors;
- dma_malloc_pages = new_dma_malloc_pages;
- dma_sectors = new_dma_sectors;
- scsi_need_isa_buffer = new_need_isa_buffer;
-
- spin_unlock_irqrestore(&allocator_request_lock, flags);
-
-#ifdef DEBUG_INIT
- printk("resize_dma_pool: dma free sectors = %d\n", scsi_dma_free_sectors);
- printk("resize_dma_pool: dma sectors = %d\n", dma_sectors);
- printk("resize_dma_pool: need isa buffers = %d\n", scsi_need_isa_buffer);
-#endif
-}
-
-/*
- * Function: scsi_init_minimal_dma_pool
- *
- * Purpose: Allocate a minimal (1-page) DMA pool.
- *
- * Arguments: None.
- *
- * Lock status: No locks assumed to be held. This function is SMP-safe.
- *
- * Returns: Nothing
- *
- * Notes:
- */
-int scsi_init_minimal_dma_pool(void)
-{
- unsigned long size;
- unsigned long flags;
- int has_space = 0;
-
- spin_lock_irqsave(&allocator_request_lock, flags);
-
- dma_sectors = PAGE_SIZE / SECTOR_SIZE;
- scsi_dma_free_sectors = dma_sectors;
- /*
- * Set up a minimal DMA buffer list - this will be used during scan_scsis
- * in some cases.
- */
-
- /* One bit per sector to indicate free/busy */
- size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
- dma_malloc_freelist = (FreeSectorBitmap *)
- kmalloc(size, GFP_ATOMIC);
- if (dma_malloc_freelist) {
- memset(dma_malloc_freelist, 0, size);
- /* One pointer per page for the page list */
- dma_malloc_pages = (unsigned char **) kmalloc(
- (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages),
- GFP_ATOMIC);
- if (dma_malloc_pages) {
- memset(dma_malloc_pages, 0, size);
- dma_malloc_pages[0] = (unsigned char *)
- __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
- if (dma_malloc_pages[0])
- has_space = 1;
- }
- }
- if (!has_space) {
- if (dma_malloc_freelist) {
- kfree((char *) dma_malloc_freelist);
- if (dma_malloc_pages)
- kfree((char *) dma_malloc_pages);
- }
- spin_unlock_irqrestore(&allocator_request_lock, flags);
- printk("scsi::init_module: failed, out of memory\n");
- return 1;
- }
-
- spin_unlock_irqrestore(&allocator_request_lock, flags);
- return 0;
-}
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index dc4681cd4d7c0..f64d200909232 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -78,8 +78,7 @@ static int ioctl_probe(struct Scsi_Host *host, void *buffer)
* *(char *) ((int *) arg)[2] the actual command byte.
*
* Note that if more than MAX_BUF bytes are requested to be transferred,
- * the ioctl will fail with error EINVAL. MAX_BUF can be increased in
- * the future by increasing the size that scsi_malloc will accept.
+ * the ioctl will fail with error EINVAL.
*
* This size *does not* include the initial lengths that were passed.
*
@@ -197,10 +196,14 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
unsigned int inlen, outlen, cmdlen;
unsigned int needed, buf_needed;
int timeout, retries, result;
- int data_direction;
+ int data_direction, gfp_mask = GFP_KERNEL;
if (!sic)
return -EINVAL;
+
+ if (dev->host->unchecked_isa_dma)
+ gfp_mask |= GFP_DMA;
+
/*
* Verify that we can read at least this much.
*/
@@ -232,7 +235,7 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
buf_needed = (buf_needed + 511) & ~511;
if (buf_needed > MAX_BUF)
buf_needed = MAX_BUF;
- buf = (char *) scsi_malloc(buf_needed);
+ buf = (char *) kmalloc(buf_needed, gfp_mask);
if (!buf)
return -ENOMEM;
memset(buf, 0, buf_needed);
@@ -341,7 +344,7 @@ int scsi_ioctl_send_command(Scsi_Device * dev, Scsi_Ioctl_Command * sic)
error:
if (buf)
- scsi_free(buf, buf_needed);
+ kfree(buf);
return result;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d7cc000bcdd2a..317f21858c1fa 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -82,7 +82,7 @@ static void __scsi_insert_special(request_queue_t *q, struct request *rq,
rq->special = data;
rq->q = NULL;
rq->bio = rq->biotail = NULL;
- rq->nr_segments = 0;
+ rq->nr_phys_segments = 0;
rq->elevator_sequence = 0;
/*
@@ -461,13 +461,13 @@ static void scsi_release_buffers(Scsi_Cmnd * SCpnt)
if (bbpnt) {
for (i = 0; i < SCpnt->use_sg; i++) {
if (bbpnt[i])
- scsi_free(sgpnt[i].address, sgpnt[i].length);
+ kfree(sgpnt[i].address);
}
}
- scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
+ scsi_free_sgtable(SCpnt->request_buffer, SCpnt->sglist_len);
} else {
if (SCpnt->request_buffer != req->buffer)
- scsi_free(SCpnt->request_buffer,SCpnt->request_bufflen);
+ kfree(SCpnt->request_buffer);
}
/*
@@ -541,11 +541,11 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
sgpnt[i].address,
sgpnt[i].length);
}
- scsi_free(sgpnt[i].address, sgpnt[i].length);
+ kfree(sgpnt[i].address);
}
}
}
- scsi_free(SCpnt->buffer, SCpnt->sglist_len);
+ scsi_free_sgtable(SCpnt->buffer, SCpnt->sglist_len);
} else {
if (SCpnt->buffer != req->buffer) {
if (rq_data_dir(req) == READ) {
@@ -555,7 +555,7 @@ void scsi_io_completion(Scsi_Cmnd * SCpnt, int good_sectors,
memcpy(to, SCpnt->buffer, SCpnt->bufflen);
bio_kunmap_irq(to, &flags);
}
- scsi_free(SCpnt->buffer, SCpnt->bufflen);
+ kfree(SCpnt->buffer);
}
}
@@ -922,15 +922,6 @@ void scsi_request_fn(request_queue_t * q)
*/
if (req->special) {
SCpnt = (Scsi_Cmnd *) req->special;
- /*
- * We need to recount the number of
- * scatter-gather segments here - the
- * normal case code assumes this to be
- * correct, as it would be a performance
- * loss to always recount. Handling
- * errors is always unusual, of course.
- */
- recount_segments(SCpnt);
} else {
SCpnt = scsi_allocate_device(SDpnt, FALSE, FALSE);
}
@@ -1003,7 +994,7 @@ void scsi_request_fn(request_queue_t * q)
* required). Hosts that need bounce buffers will also
* get those allocated here.
*/
- if (!SDpnt->scsi_init_io_fn(SCpnt)) {
+ if (!scsi_init_io(SCpnt)) {
SCpnt = __scsi_end_request(SCpnt, 0,
SCpnt->request.nr_sectors, 0, 0);
if( SCpnt != NULL )
diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c
index 89def7c84d79a..72ac525dbf30d 100644
--- a/drivers/scsi/scsi_merge.c
+++ b/drivers/scsi/scsi_merge.c
@@ -11,26 +11,8 @@
/*
* This file contains queue management functions that are used by SCSI.
- * Typically this is used for several purposes. First, we need to ensure
- * that commands do not grow so large that they cannot be handled all at
- * once by a host adapter. The various flavors of merge functions included
- * here serve this purpose.
- *
- * Note that it would be quite trivial to allow the low-level driver the
- * flexibility to define it's own queue handling functions. For the time
- * being, the hooks are not present. Right now we are just using the
- * data in the host template as an indicator of how we should be handling
- * queues, and we select routines that are optimized for that purpose.
- *
- * Some hosts do not impose any restrictions on the size of a request.
- * In such cases none of the merge functions in this file are called,
- * and we allow ll_rw_blk to merge requests in the default manner.
- * This isn't guaranteed to be optimal, but it should be pretty darned
- * good. If someone comes up with ideas of better ways of managing queues
- * to improve on the default behavior, then certainly fit it into this
- * scheme in whatever manner makes the most sense. Please note that
- * since each device has it's own queue, we have considerable flexibility
- * in queue management.
+ * We need to ensure that commands do not grow so large that they cannot
+ * be handled all at once by a host adapter.
*/
#define __NO_VERSION__
@@ -65,430 +47,28 @@
#include <scsi/scsi_ioctl.h>
/*
- * This means that bounce buffers cannot be allocated in chunks > PAGE_SIZE.
- * Ultimately we should get away from using a dedicated DMA bounce buffer
- * pool, and we should instead try and use kmalloc() instead. If we can
- * eliminate this pool, then this restriction would no longer be needed.
- */
-#define DMA_SEGMENT_SIZE_LIMITED
-
-static void dma_exhausted(Scsi_Cmnd * SCpnt, int i)
-{
- int jj;
- struct scatterlist *sgpnt;
- void **bbpnt;
- int consumed = 0;
-
- sgpnt = (struct scatterlist *) SCpnt->request_buffer;
- bbpnt = SCpnt->bounce_buffers;
-
- /*
- * Now print out a bunch of stats. First, start with the request
- * size.
- */
- printk("dma_free_sectors:%d\n", scsi_dma_free_sectors);
- printk("use_sg:%d\ti:%d\n", SCpnt->use_sg, i);
- printk("request_bufflen:%d\n", SCpnt->request_bufflen);
- /*
- * Now dump the scatter-gather table, up to the point of failure.
- */
- for(jj=0; jj < SCpnt->use_sg; jj++)
- {
- printk("[%d]\tlen:%d\taddr:%p\tbounce:%p\n",
- jj,
- sgpnt[jj].length,
- sgpnt[jj].address,
- (bbpnt ? bbpnt[jj] : NULL));
- if (bbpnt && bbpnt[jj])
- consumed += sgpnt[jj].length;
- }
- printk("Total %d sectors consumed\n", consumed);
- panic("DMA pool exhausted");
-}
-
-/*
- * This entire source file deals with the new queueing code.
- */
-
-/*
- * Function: __count_segments()
- *
- * Purpose: Prototype for queue merge function.
- *
- * Arguments: q - Queue for which we are merging request.
- * req - request into which we wish to merge.
- * dma_host - 1 if this host has ISA DMA issues (bus doesn't
- * expose all of the address lines, so that DMA cannot
- * be done from an arbitrary address).
- * remainder - used to track the residual size of the last
- * segment. Comes in handy when we want to limit the
- * size of bounce buffer segments to PAGE_SIZE.
- *
- * Returns: Count of the number of SG segments for the request.
- *
- * Lock status:
- *
- * Notes: This is only used for diagnostic purposes.
- */
-__inline static int __count_segments(struct request *req,
- int dma_host,
- int * remainder)
-{
- int ret = 1;
- int reqsize = 0;
- int i;
- struct bio *bio;
- struct bio_vec *bvec;
-
- if (remainder)
- reqsize = *remainder;
-
- /*
- * Add in the size increment for the first buffer.
- */
- bio = req->bio;
-#ifdef DMA_SEGMENT_SIZE_LIMITED
- if (reqsize + bio->bi_size > PAGE_SIZE)
- ret++;
-#endif
-
- rq_for_each_bio(bio, req) {
- bio_for_each_segment(bvec, bio, i)
- ret++;
-
- reqsize += bio->bi_size;
- }
-
- if (remainder)
- *remainder = reqsize;
-
- return ret;
-}
-
-/*
- * Function: recount_segments()
- *
- * Purpose: Recount the number of scatter-gather segments for this request.
- *
- * Arguments: req - request that needs recounting.
- *
- * Returns: Count of the number of SG segments for the request.
- *
- * Lock status: Irrelevant.
- *
- * Notes: This is only used when we have partially completed requests
- * and the bit that is leftover is of an indeterminate size.
- * This can come up if you get a MEDIUM_ERROR, for example,
- * as we will have "completed" all of the sectors up to and
- * including the bad sector, and the leftover bit is what
- * we have to do now. This tends to be a rare occurrence, so
- * we aren't busting our butts to instantiate separate versions
- * of this function for the 4 different flag values. We
- * probably should, however.
- */
-void
-recount_segments(Scsi_Cmnd * SCpnt)
-{
- struct request *req = &SCpnt->request;
- struct Scsi_Host *SHpnt = SCpnt->host;
-
- req->nr_segments = __count_segments(req, SHpnt->unchecked_isa_dma,NULL);
-}
-
-/*
- * IOMMU hackery for sparc64
- */
-#ifdef DMA_CHUNK_SIZE
-
-#define MERGEABLE_BUFFERS(X,Y) \
- ((((bvec_to_phys(__BVEC_END((X))) + __BVEC_END((X))->bv_len) | bio_to_phys((Y))) & (DMA_CHUNK_SIZE - 1)) == 0)
-
-static inline int scsi_new_mergeable(request_queue_t * q,
- struct request * req,
- struct bio *bio)
-{
- int nr_segs = bio_hw_segments(q, bio);
-
- /*
- * pci_map_sg will be able to merge these two
- * into a single hardware sg entry, check if
- * we'll have enough memory for the sg list.
- * scsi.c allocates for this purpose
- * min(64,sg_tablesize) entries.
- */
- if (req->nr_segments + nr_segs > q->max_segments)
- return 0;
-
- req->nr_segments += nr_segs;
- return 1;
-}
-
-static inline int scsi_new_segment(request_queue_t * q,
- struct request * req,
- struct bio *bio)
-{
- int nr_segs = bio_hw_segments(q, bio);
- /*
- * pci_map_sg won't be able to map these two
- * into a single hardware sg entry, so we have to
- * check if things fit into sg_tablesize.
- */
- if (req->nr_hw_segments + nr_segs > q->max_segments)
- return 0;
- else if (req->nr_segments + nr_segs > q->max_segments)
- return 0;
-
- req->nr_hw_segments += nr_segs;
- req->nr_segments += nr_segs;
- return 1;
-}
-
-#else /* DMA_CHUNK_SIZE */
-
-static inline int scsi_new_segment(request_queue_t * q,
- struct request * req,
- struct bio *bio)
-{
- int nr_segs = bio_hw_segments(q, bio);
-
- if (req->nr_segments + nr_segs > q->max_segments) {
- req->flags |= REQ_NOMERGE;
- return 0;
- }
-
- /*
- * This will form the start of a new segment. Bump the
- * counter.
- */
- req->nr_segments += nr_segs;
- return 1;
-}
-#endif /* DMA_CHUNK_SIZE */
-
-/*
- * Function: __scsi_merge_fn()
- *
- * Purpose: Prototype for queue merge function.
- *
- * Arguments: q - Queue for which we are merging request.
- * req - request into which we wish to merge.
- * bio - Block which we may wish to merge into request
- * dma_host - 1 if this host has ISA DMA issues (bus doesn't
- * expose all of the address lines, so that DMA cannot
- * be done from an arbitrary address).
- *
- * Returns: 1 if it is OK to merge the block into the request. 0
- * if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- * Notes: Some drivers have limited scatter-gather table sizes, and
- * thus they cannot queue an infinitely large command. This
- * function is called from ll_rw_blk before it attempts to merge
- * a new block into a request to make sure that the request will
- * not become too large.
- *
- * This function is not designed to be directly called. Instead
- * it should be referenced from other functions where the
- * dma_host parameter should be an integer constant. The
- * compiler should thus be able to properly optimize the code,
- * eliminating stuff that is irrelevant.
- * It is more maintainable to do this way with a single function
- * than to have 4 separate functions all doing roughly the
- * same thing.
- */
-__inline static int __scsi_back_merge_fn(request_queue_t * q,
- struct request *req,
- struct bio *bio)
-{
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
- req->flags |= REQ_NOMERGE;
- return 0;
- }
-
-#ifdef DMA_CHUNK_SIZE
- if (MERGEABLE_BUFFERS(req->biotail, bio))
- return scsi_new_mergeable(q, req, bio);
-#endif
-
- return scsi_new_segment(q, req, bio);
-}
-
-__inline static int __scsi_front_merge_fn(request_queue_t * q,
- struct request *req,
- struct bio *bio)
-{
- if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
- req->flags |= REQ_NOMERGE;
- return 0;
- }
-
-#ifdef DMA_CHUNK_SIZE
- if (MERGEABLE_BUFFERS(bio, req->bio))
- return scsi_new_mergeable(q, req, bio);
-#endif
- return scsi_new_segment(q, req, bio);
-}
-
-/*
- * Function: scsi_merge_fn_()
- *
- * Purpose: queue merge function.
+ * Function: scsi_init_io()
*
- * Arguments: q - Queue for which we are merging request.
- * req - request into which we wish to merge.
- * bio - Block which we may wish to merge into request
- *
- * Returns: 1 if it is OK to merge the block into the request. 0
- * if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- * Notes: Optimized for different cases depending upon whether
- * ISA DMA is in use and whether clustering should be used.
- */
-#define MERGEFCT(_FUNCTION, _BACK_FRONT) \
-static int _FUNCTION(request_queue_t * q, \
- struct request * req, \
- struct bio *bio) \
-{ \
- int ret; \
- ret = __scsi_ ## _BACK_FRONT ## _merge_fn(q, \
- req, \
- bio); \
- return ret; \
-}
-
-MERGEFCT(scsi_back_merge_fn, back)
-MERGEFCT(scsi_front_merge_fn, front)
-
-/*
- * Function: scsi_merge_requests_fn_()
- *
- * Purpose: queue merge function.
- *
- * Arguments: q - Queue for which we are merging request.
- * req - request into which we wish to merge.
- * next - Block which we may wish to merge into request
- *
- * Returns: 1 if it is OK to merge the block into the request. 0
- * if it is not OK.
- *
- * Lock status: queue lock is assumed to be held here.
- *
- */
-inline static int scsi_merge_requests_fn(request_queue_t * q,
- struct request *req,
- struct request *next)
-{
- int bio_segs;
-
- /*
- * First check if the either of the requests are re-queued
- * requests. Can't merge them if they are.
- */
- if (req->special || next->special)
- return 0;
-
- /*
- * will become to large?
- */
- if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
- return 0;
-
- bio_segs = req->nr_segments + next->nr_segments;
- if (blk_contig_segment(q, req->biotail, next->bio))
- bio_segs--;
-
- /*
- * exceeds our max allowed segments?
- */
- if (bio_segs > q->max_segments)
- return 0;
-
-#ifdef DMA_CHUNK_SIZE
- bio_segs = req->nr_hw_segments + next->nr_hw_segments;
- if (blk_contig_segment(q, req->biotail, next->bio))
- bio_segs--;
-
- /* If dynamic DMA mapping can merge last segment in req with
- * first segment in next, then the check for hw segments was
- * done above already, so we can always merge.
- */
- if (bio_segs > q->max_segments)
- return 0;
-
- req->nr_hw_segments = bio_segs;
-#endif
-
- /*
- * This will form the start of a new segment. Bump the
- * counter.
- */
- req->nr_segments = bio_segs;
- return 1;
-}
-
-/*
- * Function: __init_io()
- *
- * Purpose: Prototype for io initialize function.
+ * Purpose: SCSI I/O initialize function.
*
* Arguments: SCpnt - Command descriptor we wish to initialize
- * sg_count_valid - 1 if the sg count in the req is valid.
- * dma_host - 1 if this host has ISA DMA issues (bus doesn't
- * expose all of the address lines, so that DMA cannot
- * be done from an arbitrary address).
*
* Returns: 1 on success.
*
* Lock status:
- *
- * Notes: Only the SCpnt argument should be a non-constant variable.
- * This function is designed in such a way that it will be
- * invoked from a series of small stubs, each of which would
- * be optimized for specific circumstances.
- *
- * The advantage of this is that hosts that don't do DMA
- * get versions of the function that essentially don't have
- * any of the DMA code. Same goes for clustering - in the
- * case of hosts with no need for clustering, there is no point
- * in a whole bunch of overhead.
- *
- * Finally, in the event that a host has set can_queue to SG_ALL
- * implying that there is no limit to the length of a scatter
- * gather list, the sg count in the request won't be valid
- * (mainly because we don't need queue management functions
- * which keep the tally uptodate.
*/
-__inline static int __init_io(Scsi_Cmnd * SCpnt, int dma_host)
+int scsi_init_io(Scsi_Cmnd *SCpnt)
{
- struct bio * bio;
- char * buff;
- int count;
- int i;
- struct request * req;
- int sectors;
- struct scatterlist * sgpnt;
- int this_count;
- void ** bbpnt;
+ struct request *req;
+ struct scatterlist *sgpnt;
+ int count, gfp_mask;
req = &SCpnt->request;
/*
* First we need to know how many scatter gather segments are needed.
*/
- count = req->nr_segments;
-
- /*
- * If the dma pool is nearly empty, then queue a minimal request
- * with a single segment. Typically this will satisfy a single
- * buffer.
- */
- if (dma_host && scsi_dma_free_sectors <= 10) {
- this_count = req->current_nr_sectors;
- goto single_segment;
- }
+ count = req->nr_phys_segments;
/*
* we used to not use scatter-gather for single segment request,
@@ -497,50 +77,17 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt, int dma_host)
*/
SCpnt->use_sg = count;
- /*
- * Allocate the actual scatter-gather table itself.
- */
- SCpnt->sglist_len = (SCpnt->use_sg * sizeof(struct scatterlist));
+ gfp_mask = GFP_NOIO;
+ if (in_interrupt())
+ gfp_mask &= ~__GFP_WAIT;
- /* If we could potentially require ISA bounce buffers, allocate
- * space for this array here.
- */
- if (dma_host)
- SCpnt->sglist_len += (SCpnt->use_sg * sizeof(void *));
+ sgpnt = scsi_alloc_sgtable(SCpnt, gfp_mask);
+ BUG_ON(!sgpnt);
- /* scsi_malloc can only allocate in chunks of 512 bytes so
- * round it up.
- */
- SCpnt->sglist_len = (SCpnt->sglist_len + 511) & ~511;
-
- sgpnt = (struct scatterlist *) scsi_malloc(SCpnt->sglist_len);
-
- if (!sgpnt) {
- struct Scsi_Host *SHpnt = SCpnt->host;
-
- /*
- * If we cannot allocate the scatter-gather table, then
- * simply write the first buffer all by itself.
- */
- printk("Warning - running *really* short on DMA buffers\n");
- this_count = req->current_nr_sectors;
- printk("SCSI: depth is %d, # segs %d, # hw segs %d\n", SHpnt->host_busy, req->nr_segments, req->nr_hw_segments);
- goto single_segment;
- }
-
- memset(sgpnt, 0, SCpnt->sglist_len);
SCpnt->request_buffer = (char *) sgpnt;
SCpnt->request_bufflen = 0;
req->buffer = NULL;
- if (dma_host)
- bbpnt = (void **) ((char *)sgpnt +
- (SCpnt->use_sg * sizeof(struct scatterlist)));
- else
- bbpnt = NULL;
-
- SCpnt->bounce_buffers = bbpnt;
-
/*
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
@@ -549,183 +96,22 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt, int dma_host)
count = blk_rq_map_sg(req->q, req, SCpnt->request_buffer);
/*
- * Verify that the count is correct.
+ * mapped well, send it off
*/
- if (count > SCpnt->use_sg) {
- printk("Incorrect number of segments after building list\n");
- printk("counted %d, received %d\n", count, SCpnt->use_sg);
- printk("req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, req->current_nr_sectors);
- scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
- this_count = req->current_nr_sectors;
- goto single_segment;
- }
-
- SCpnt->use_sg = count;
-
- if (!dma_host)
+ if (count <= SCpnt->use_sg) {
+ SCpnt->use_sg = count;
return 1;
-
- /*
- * Now allocate bounce buffers, if needed.
- */
- SCpnt->request_bufflen = 0;
- for (i = 0; i < count; i++) {
- sectors = (sgpnt[i].length >> 9);
- SCpnt->request_bufflen += sgpnt[i].length;
- if (virt_to_phys(sgpnt[i].address) + sgpnt[i].length - 1 >
- ISA_DMA_THRESHOLD) {
- if( scsi_dma_free_sectors - sectors <= 10 ) {
- /*
- * If this would nearly drain the DMA
- * pool empty, then let's stop here.
- * Don't make this request any larger.
- * This is kind of a safety valve that
- * we use - we could get screwed later
- * on if we run out completely.
- */
- SCpnt->request_bufflen -= sgpnt[i].length;
- SCpnt->use_sg = i;
- if (i == 0) {
- goto big_trouble;
- }
- break;
- }
-
- /*
- * this is not a dma host, so it will never
- * be a highmem page
- */
- bbpnt[i] = page_address(sgpnt[i].page) +sgpnt[i].offset;
- sgpnt[i].address = (char *)scsi_malloc(sgpnt[i].length);
- /*
- * If we cannot allocate memory for this DMA bounce
- * buffer, then queue just what we have done so far.
- */
- if (sgpnt[i].address == NULL) {
- printk("Warning - running low on DMA memory\n");
- SCpnt->request_bufflen -= sgpnt[i].length;
- SCpnt->use_sg = i;
- if (i == 0) {
- goto big_trouble;
- }
- break;
- }
- if (rq_data_dir(req) == WRITE)
- memcpy(sgpnt[i].address, bbpnt[i],
- sgpnt[i].length);
- }
}
- return 1;
-
- big_trouble:
- /*
- * We come here in the event that we get one humongous
- * request, where we need a bounce buffer, and the buffer is
- * more than we can allocate in a single call to
- * scsi_malloc(). In addition, we only come here when it is
- * the 0th element of the scatter-gather table that gets us
- * into this trouble. As a fallback, we fall back to
- * non-scatter-gather, and ask for a single segment. We make
- * a half-hearted attempt to pick a reasonably large request
- * size mainly so that we don't thrash the thing with
- * iddy-biddy requests.
- */
-
- /*
- * The original number of sectors in the 0th element of the
- * scatter-gather table.
- */
- sectors = sgpnt[0].length >> 9;
- /*
- * Free up the original scatter-gather table. Note that since
- * it was the 0th element that got us here, we don't have to
- * go in and free up memory from the other slots.
- */
- SCpnt->request_bufflen = 0;
- SCpnt->use_sg = 0;
- scsi_free(SCpnt->request_buffer, SCpnt->sglist_len);
-
- /*
- * Make an attempt to pick up as much as we reasonably can.
- * Just keep adding sectors until the pool starts running kind of
- * low. The limit of 30 is somewhat arbitrary - the point is that
- * it would kind of suck if we dropped down and limited ourselves to
- * single-block requests if we had hundreds of free sectors.
- */
- if( scsi_dma_free_sectors > 30 ) {
- for (this_count = 0, bio = req->bio; bio; bio = bio->bi_next) {
- if( scsi_dma_free_sectors - this_count < 30
- || this_count == sectors )
- {
- break;
- }
- this_count += bio_sectors(bio);
- }
-
- } else {
- /*
- * Yow! Take the absolute minimum here.
- */
- this_count = req->current_nr_sectors;
- }
-
- /*
- * Now drop through into the single-segment case.
- */
-
- single_segment:
- /*
- * Come here if for any reason we choose to do this as a single
- * segment. Possibly the entire request, or possibly a small
- * chunk of the entire request.
- */
-
- bio = req->bio;
- buff = req->buffer = bio_data(bio);
-
- if (dma_host || PageHighMem(bio_page(bio))) {
- /*
- * Allocate a DMA bounce buffer. If the allocation fails, fall
- * back and allocate a really small one - enough to satisfy
- * the first buffer.
- */
- if (bio_to_phys(bio) + bio->bi_size - 1 > ISA_DMA_THRESHOLD) {
- buff = (char *) scsi_malloc(this_count << 9);
- if (!buff) {
- printk("Warning - running low on DMA memory\n");
- this_count = req->current_nr_sectors;
- buff = (char *) scsi_malloc(this_count << 9);
- if (!buff) {
- dma_exhausted(SCpnt, 0);
- return 0;
- }
- }
- if (rq_data_dir(req) == WRITE) {
- unsigned long flags;
- char *buf = bio_kmap_irq(bio, &flags);
- memcpy(buff, buf, this_count << 9);
- bio_kunmap_irq(buf, &flags);
- }
- }
- }
- SCpnt->request_bufflen = this_count << 9;
- SCpnt->request_buffer = buff;
- SCpnt->use_sg = 0;
- return 1;
+ printk("Incorrect number of segments after building list\n");
+ printk("counted %d, received %d\n", count, SCpnt->use_sg);
+ printk("req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors, req->current_nr_sectors);
+ BUG();
+ return 0; /* ahem */
}
-#define INITIO(_FUNCTION, _DMA) \
-static int _FUNCTION(Scsi_Cmnd * SCpnt) \
-{ \
- return __init_io(SCpnt, _DMA); \
-}
-
-INITIO(scsi_init_io_v, 0)
-INITIO(scsi_init_io_vd, 1)
-
/*
- * Function: initialize_merge_fn()
+ * Function: scsi_initialize_merge_fn()
*
* Purpose: Initialize merge function for a host
*
@@ -737,35 +123,15 @@ INITIO(scsi_init_io_vd, 1)
*
* Notes:
*/
-void initialize_merge_fn(Scsi_Device * SDpnt)
+void scsi_initialize_merge_fn(Scsi_Device * SDpnt)
{
struct Scsi_Host *SHpnt = SDpnt->host;
request_queue_t *q = &SDpnt->request_queue;
dma64_addr_t bounce_limit;
/*
- * If this host has an unlimited tablesize, then don't bother with a
- * merge manager. The whole point of the operation is to make sure
- * that requests don't grow too large, and this host isn't picky.
- *
- * Note that ll_rw_blk.c is effectively maintaining a segment
- * count which is only valid if clustering is used, and it obviously
- * doesn't handle the DMA case. In the end, it
- * is simply easier to do it ourselves with our own functions
- * rather than rely upon the default behavior of ll_rw_blk.
- */
- q->back_merge_fn = scsi_back_merge_fn;
- q->front_merge_fn = scsi_front_merge_fn;
- q->merge_requests_fn = scsi_merge_requests_fn;
-
- if (SHpnt->unchecked_isa_dma == 0) {
- SDpnt->scsi_init_io_fn = scsi_init_io_v;
- } else {
- SDpnt->scsi_init_io_fn = scsi_init_io_vd;
- }
-
- /*
- * now enable highmem I/O, if appropriate
+ * The generic merging functions work just fine for us.
+ * Enable highmem I/O, if appropriate.
*/
bounce_limit = BLK_BOUNCE_HIGH;
if (SHpnt->highmem_io && (SDpnt->type == TYPE_DISK)) {
@@ -777,6 +143,8 @@ void initialize_merge_fn(Scsi_Device * SDpnt)
else
bounce_limit = SHpnt->pci_dev->dma_mask;
}
+ if (SHpnt->unchecked_isa_dma)
+ bounce_limit = BLK_BOUNCE_ISA;
blk_queue_bounce_limit(q, bounce_limit);
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ad3e31af55b31..9cd871b0ceec0 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -320,7 +320,7 @@ void scan_scsis(struct Scsi_Host *shpnt,
SDpnt->host = shpnt;
SDpnt->online = TRUE;
- initialize_merge_fn(SDpnt);
+ scsi_initialize_merge_fn(SDpnt);
/*
* Initialize the object that we will use to wait for command blocks.
@@ -390,8 +390,6 @@ void scan_scsis(struct Scsi_Host *shpnt,
}
}
}
- scsi_resize_dma_pool();
-
for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) {
if (sdtpnt->finish && sdtpnt->nr_dev) {
(*sdtpnt->finish) ();
@@ -759,7 +757,7 @@ static int scan_scsis_single(unsigned int channel, unsigned int dev,
*/
scsi_initialize_queue(SDpnt, shpnt);
SDpnt->host = shpnt;
- initialize_merge_fn(SDpnt);
+ scsi_initialize_merge_fn(SDpnt);
/*
* Mark this device as online, or otherwise we won't be able to do much with it.
diff --git a/drivers/scsi/scsi_syms.c b/drivers/scsi/scsi_syms.c
index 7fb246998bffc..dbe14bacb5ff2 100644
--- a/drivers/scsi/scsi_syms.c
+++ b/drivers/scsi/scsi_syms.c
@@ -33,8 +33,6 @@
*/
EXPORT_SYMBOL(scsi_register_module);
EXPORT_SYMBOL(scsi_unregister_module);
-EXPORT_SYMBOL(scsi_free);
-EXPORT_SYMBOL(scsi_malloc);
EXPORT_SYMBOL(scsi_register);
EXPORT_SYMBOL(scsi_unregister);
EXPORT_SYMBOL(scsicam_bios_param);
@@ -48,9 +46,7 @@ EXPORT_SYMBOL(print_sense);
EXPORT_SYMBOL(print_req_sense);
EXPORT_SYMBOL(print_msg);
EXPORT_SYMBOL(print_status);
-EXPORT_SYMBOL(scsi_dma_free_sectors);
EXPORT_SYMBOL(kernel_scsi_ioctl);
-EXPORT_SYMBOL(scsi_need_isa_buffer);
EXPORT_SYMBOL(scsi_release_command);
EXPORT_SYMBOL(print_Scsi_Cmnd);
EXPORT_SYMBOL(scsi_block_when_processing_errors);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index eb93a833a086f..7bada9dd8e49b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -765,7 +765,7 @@ static int sd_init_onedisk(int i)
return i;
}
- buffer = (unsigned char *) scsi_malloc(512);
+ buffer = kmalloc(512, GFP_DMA);
if (!buffer) {
printk(KERN_WARNING "(sd_init_onedisk:) Memory allocation failure.\n");
scsi_release_request(SRpnt);
@@ -1042,7 +1042,7 @@ static int sd_init_onedisk(int i)
scsi_release_request(SRpnt);
SRpnt = NULL;
- scsi_free(buffer, 512);
+ kfree(buffer);
return i;
}
@@ -1111,7 +1111,7 @@ static int sd_init()
* commands if they know what they're doing and they ask for it
* explicitly via the SHpnt->max_sectors API.
*/
- sd_max_sectors[i] = MAX_SEGMENTS*8;
+ sd_max_sectors[i] = MAX_PHYS_SEGMENTS*8;
}
for (i = 0; i < N_USED_SD_MAJORS; i++) {
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8c637639dbc64..44a5075b83301 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2280,9 +2280,8 @@ static char * sg_low_malloc(int rqSz, int lowDma, int mem_src, int * retSzp)
rqSz = num_sect * SG_SECTOR_SZ;
}
while (num_sect > 0) {
- if ((num_sect <= sg_pool_secs_avail) &&
- (scsi_dma_free_sectors > (SG_LOW_POOL_THRESHHOLD + num_sect))) {
- resp = scsi_malloc(rqSz);
+ if ((num_sect <= sg_pool_secs_avail)) {
+ resp = kmalloc(rqSz, page_mask);
if (resp) {
if (retSzp) *retSzp = rqSz;
sg_pool_secs_avail -= num_sect;
@@ -2374,7 +2373,7 @@ static void sg_low_free(char * buff, int size, int mem_src)
{
int num_sect = size / SG_SECTOR_SZ;
- scsi_free(buff, size);
+ kfree(buff);
sg_pool_secs_avail += num_sect;
}
break;
@@ -2681,9 +2680,8 @@ static int sg_proc_debug_info(char * buffer, int * len, off_t * begin,
max_dev = sg_last_dev();
PRINT_PROC("dev_max(currently)=%d max_active_device=%d (origin 1)\n",
sg_template.dev_max, max_dev);
- PRINT_PROC(" scsi_dma_free_sectors=%u sg_pool_secs_aval=%d "
- "def_reserved_size=%d\n",
- scsi_dma_free_sectors, sg_pool_secs_avail, sg_big_buff);
+ PRINT_PROC(" sg_pool_secs_aval=%d def_reserved_size=%d\n",
+ sg_pool_secs_avail, sg_big_buff);
for (j = 0; j < max_dev; ++j) {
if ((sdp = sg_get_dev(j))) {
Sg_fd * fp;
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 1d1c2714149a6..530f893854284 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -258,112 +258,6 @@ static request_queue_t *sr_find_queue(kdev_t dev)
return &scsi_CDs[MINOR(dev)].device->request_queue;
}
-static int sr_scatter_pad(Scsi_Cmnd *SCpnt, int s_size)
-{
- struct scatterlist *sg, *old_sg = NULL;
- int i, fsize, bsize, sg_ent, sg_count;
- char *front, *back;
- void **bbpnt, **old_bbpnt = NULL;
-
- back = front = NULL;
- sg_ent = SCpnt->use_sg;
- bsize = 0; /* gcc... */
-
- /*
- * need front pad
- */
- if ((fsize = SCpnt->request.sector % (s_size >> 9))) {
- fsize <<= 9;
- sg_ent++;
- if ((front = scsi_malloc(fsize)) == NULL)
- goto no_mem;
- }
- /*
- * need a back pad too
- */
- if ((bsize = s_size - ((SCpnt->request_bufflen + fsize) % s_size))) {
- sg_ent++;
- if ((back = scsi_malloc(bsize)) == NULL)
- goto no_mem;
- }
-
- /*
- * extend or allocate new scatter-gather table
- */
- sg_count = SCpnt->use_sg;
- if (sg_count) {
- old_sg = (struct scatterlist *) SCpnt->request_buffer;
- old_bbpnt = SCpnt->bounce_buffers;
- } else {
- sg_count = 1;
- sg_ent++;
- }
-
- /* Get space for scatterlist and bounce buffer array. */
- i = sg_ent * sizeof(struct scatterlist);
- i += sg_ent * sizeof(void *);
- i = (i + 511) & ~511;
-
- if ((sg = scsi_malloc(i)) == NULL)
- goto no_mem;
-
- bbpnt = (void **)
- ((char *)sg + (sg_ent * sizeof(struct scatterlist)));
-
- /*
- * no more failing memory allocs possible, we can safely assign
- * SCpnt values now
- */
- SCpnt->sglist_len = i;
- SCpnt->use_sg = sg_count;
- memset(sg, 0, SCpnt->sglist_len);
-
- i = 0;
- if (fsize) {
- sg[0].address = bbpnt[0] = front;
- sg[0].length = fsize;
- i++;
- }
- if (old_sg) {
- memcpy(sg + i, old_sg, SCpnt->use_sg * sizeof(struct scatterlist));
- if (old_bbpnt)
- memcpy(bbpnt + i, old_bbpnt, SCpnt->use_sg * sizeof(void *));
- scsi_free(old_sg, (((SCpnt->use_sg * sizeof(struct scatterlist)) +
- (SCpnt->use_sg * sizeof(void *))) + 511) & ~511);
- } else {
- sg[i].address = NULL;
- sg[i].page = virt_to_page(SCpnt->request_buffer);
- sg[i].offset = (unsigned long) SCpnt->request_buffer&~PAGE_MASK;
- sg[i].length = SCpnt->request_bufflen;
- }
-
- SCpnt->request_bufflen += (fsize + bsize);
- SCpnt->request_buffer = sg;
- SCpnt->bounce_buffers = bbpnt;
- SCpnt->use_sg += i;
-
- if (bsize) {
- sg[SCpnt->use_sg].address = NULL;
- sg[SCpnt->use_sg].page = virt_to_page(back);
- sg[SCpnt->use_sg].offset = (unsigned long) back & ~PAGE_MASK;
- bbpnt[SCpnt->use_sg] = back;
- sg[SCpnt->use_sg].length = bsize;
- SCpnt->use_sg++;
- }
-
- return 0;
-
-no_mem:
- printk("sr: ran out of mem for scatter pad\n");
- if (front)
- scsi_free(front, fsize);
- if (back)
- scsi_free(back, bsize);
-
- return 1;
-}
-
-
static int sr_init_command(Scsi_Cmnd * SCpnt)
{
int dev, devm, block=0, this_count, s_size;
@@ -429,9 +323,10 @@ static int sr_init_command(Scsi_Cmnd * SCpnt)
/*
* request doesn't start on hw block boundary, add scatter pads
*/
- if ((SCpnt->request.sector % (s_size >> 9)) || (SCpnt->request_bufflen % s_size))
- if (sr_scatter_pad(SCpnt, s_size))
- return 0;
+ if ((SCpnt->request.sector % (s_size >> 9)) || (SCpnt->request_bufflen % s_size)) {
+ printk("sr: unaligned transfer\n");
+ return 0;
+ }
this_count = (SCpnt->request_bufflen >> 9) / (s_size >> 9);
@@ -583,7 +478,7 @@ void get_sectorsize(int i)
int sector_size;
Scsi_Request *SRpnt;
- buffer = (unsigned char *) scsi_malloc(512);
+ buffer = (unsigned char *) kmalloc(512, GFP_DMA);
SRpnt = scsi_allocate_request(scsi_CDs[i].device);
if(buffer == NULL || SRpnt == NULL)
@@ -592,7 +487,7 @@ void get_sectorsize(int i)
sector_size = 2048; /* A guess, just in case */
scsi_CDs[i].needs_sector_size = 1;
if(buffer)
- scsi_free(buffer, 512);
+ kfree(buffer);
if(SRpnt)
scsi_release_request(SRpnt);
return;
@@ -673,7 +568,7 @@ void get_sectorsize(int i)
sr_sizes[i] = scsi_CDs[i].capacity >> (BLOCK_SIZE_BITS - 9);
};
blk_queue_hardsect_size(blk_get_queue(MAJOR_NR), sector_size);
- scsi_free(buffer, 512);
+ kfree(buffer);
}
void get_capabilities(int i)
@@ -694,7 +589,7 @@ void get_capabilities(int i)
""
};
- buffer = (unsigned char *) scsi_malloc(512);
+ buffer = (unsigned char *) kmalloc(512, GFP_DMA);
if (!buffer)
{
printk(KERN_ERR "sr: out of memory.\n");
@@ -714,7 +609,7 @@ void get_capabilities(int i)
scsi_CDs[i].cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
CDC_DVD | CDC_DVD_RAM |
CDC_SELECT_DISC | CDC_SELECT_SPEED);
- scsi_free(buffer, 512);
+ kfree(buffer);
printk("sr%i: scsi-1 drive\n", i);
return;
}
@@ -767,7 +662,7 @@ void get_capabilities(int i)
/*else I don't think it can close its tray
scsi_CDs[i].cdi.mask |= CDC_CLOSE_TRAY; */
- scsi_free(buffer, 512);
+ kfree(buffer);
}
/*
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 3c3a53ad4e711..da3ec60fe77d2 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -95,7 +95,7 @@ int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflen
SRpnt->sr_request.buffer = buffer;
if (buffer && SRpnt->sr_host->unchecked_isa_dma &&
(virt_to_phys(buffer) + buflength - 1 > ISA_DMA_THRESHOLD)) {
- bounce_buffer = (char *) scsi_malloc((buflength + 511) & ~511);
+ bounce_buffer = (char *) kmalloc(buflength, GFP_DMA);
if (bounce_buffer == NULL) {
printk("SCSI DMA pool exhausted.");
return -ENOMEM;
@@ -114,7 +114,7 @@ int sr_do_ioctl(int target, unsigned char *sr_cmd, void *buffer, unsigned buflen
req = &SRpnt->sr_request;
if (SRpnt->sr_buffer && req->buffer && SRpnt->sr_buffer != req->buffer) {
memcpy(req->buffer, SRpnt->sr_buffer, SRpnt->sr_bufflen);
- scsi_free(SRpnt->sr_buffer, (SRpnt->sr_bufflen + 511) & ~511);
+ kfree(SRpnt->sr_buffer);
SRpnt->sr_buffer = req->buffer;
}
@@ -519,7 +519,7 @@ int sr_is_xa(int minor)
if (!xa_test)
return 0;
- raw_sector = (unsigned char *) scsi_malloc(2048 + 512);
+ raw_sector = (unsigned char *) kmalloc(2048, GFP_DMA | GFP_KERNEL);
if (!raw_sector)
return -ENOMEM;
if (0 == sr_read_sector(minor, scsi_CDs[minor].ms_offset + 16,
@@ -529,7 +529,7 @@ int sr_is_xa(int minor)
/* read a raw sector failed for some reason. */
is_xa = -1;
}
- scsi_free(raw_sector, 2048 + 512);
+ kfree(raw_sector);
#ifdef DEBUG
printk("sr%d: sr_is_xa: %d\n", minor, is_xa);
#endif
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 39bd3b6cb2264..a1d4a7db41a67 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -115,7 +115,7 @@ int sr_set_blocklength(int minor, int blocklength)
density = (blocklength > 2048) ? 0x81 : 0x83;
#endif
- buffer = (unsigned char *) scsi_malloc(512);
+ buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
return -ENOMEM;
@@ -142,7 +142,7 @@ int sr_set_blocklength(int minor, int blocklength)
printk("sr%d: switching blocklength to %d bytes failed\n",
minor, blocklength);
#endif
- scsi_free(buffer, 512);
+ kfree(buffer);
return rc;
}
@@ -162,7 +162,7 @@ int sr_cd_check(struct cdrom_device_info *cdi)
if (scsi_CDs[minor].cdi.mask & CDC_MULTI_SESSION)
return 0;
- buffer = (unsigned char *) scsi_malloc(512);
+ buffer = (unsigned char *) kmalloc(512, GFP_KERNEL | GFP_DMA);
if (!buffer)
return -ENOMEM;
@@ -306,6 +306,6 @@ int sr_cd_check(struct cdrom_device_info *cdi)
printk(KERN_DEBUG "sr%d: multisession offset=%lu\n",
minor, sector);
#endif
- scsi_free(buffer, 512);
+ kfree(buffer);
return rc;
}
diff --git a/drivers/scsi/sym53c8xx.c b/drivers/scsi/sym53c8xx.c
index 70d8a00df5fe8..bc030dcb4fcc7 100644
--- a/drivers/scsi/sym53c8xx.c
+++ b/drivers/scsi/sym53c8xx.c
@@ -1,7 +1,7 @@
/******************************************************************************
** High Performance device driver for the Symbios 53C896 controller.
**
-** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+** Copyright (C) 1998-2001 Gerard Roudier <groudier@free.fr>
**
** This driver also supports all the Symbios 53C8XX controller family,
** except 53C810 revisions < 16, 53C825 revisions < 16 and all
@@ -32,7 +32,7 @@
** The Linux port of the FreeBSD ncr driver has been achieved in
** november 1995 by:
**
-** Gerard Roudier <groudier@club-internet.fr>
+** Gerard Roudier <groudier@free.fr>
**
** Being given that this driver originates from the FreeBSD version, and
** in order to keep synergy on both, any suggested enhancements and corrections
@@ -12126,13 +12126,16 @@ static int ncr_scatter_896R1(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
if (!use_sg)
segn = ncr_scatter_no_sglist(np, cp, cmd);
- else if (use_sg > MAX_SCATTER)
- segn = -1;
else {
struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
struct scr_tblmove *data;
use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > MAX_SCATTER) {
+ unmap_scsi_data(np, cmd);
+ return -1;
+ }
+
data = &cp->phys.data[MAX_SCATTER - use_sg];
for (segn = 0; segn < use_sg; segn++) {
@@ -12165,13 +12168,15 @@ static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd)
if (!use_sg)
segment = ncr_scatter_no_sglist(np, cp, cmd);
- else if (use_sg > MAX_SCATTER)
- segment = -1;
else {
struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
struct scr_tblmove *data;
use_sg = map_scsi_sg_data(np, cmd);
+ if (use_sg > MAX_SCATTER) {
+ unmap_scsi_data(np, cmd);
+ return -1;
+ }
data = &cp->phys.data[MAX_SCATTER - use_sg];
for (segment = 0; segment < use_sg; segment++) {
diff --git a/drivers/scsi/sym53c8xx.h b/drivers/scsi/sym53c8xx.h
index 780a8df9b70ca..256d34b6461b9 100644
--- a/drivers/scsi/sym53c8xx.h
+++ b/drivers/scsi/sym53c8xx.h
@@ -1,7 +1,7 @@
/******************************************************************************
** High Performance device driver for the Symbios 53C896 controller.
**
-** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+** Copyright (C) 1998-2001 Gerard Roudier <groudier@free.fr>
**
** This driver also supports all the Symbios 53C8XX controller family,
** except 53C810 revisions < 16, 53C825 revisions < 16 and all
@@ -32,7 +32,7 @@
** The Linux port of the FreeBSD ncr driver has been achieved in
** november 1995 by:
**
-** Gerard Roudier <groudier@club-internet.fr>
+** Gerard Roudier <groudier@free.fr>
**
** Being given that this driver originates from the FreeBSD version, and
** in order to keep synergy on both, any suggested enhancements and corrections
@@ -96,7 +96,7 @@ int sym53c8xx_release(struct Scsi_Host *);
this_id: 7, \
sg_tablesize: SCSI_NCR_SG_TABLESIZE, \
cmd_per_lun: SCSI_NCR_CMD_PER_LUN, \
- max_sectors: MAX_SEGMENTS*8, \
+ max_sectors: MAX_HW_SEGMENTS*8, \
use_clustering: DISABLE_CLUSTERING, \
highmem_io: 1}
diff --git a/drivers/scsi/sym53c8xx_2/ChangeLog.txt b/drivers/scsi/sym53c8xx_2/ChangeLog.txt
index c020492c0c8dc..6a4a3f8550485 100644
--- a/drivers/scsi/sym53c8xx_2/ChangeLog.txt
+++ b/drivers/scsi/sym53c8xx_2/ChangeLog.txt
@@ -128,3 +128,21 @@ Sun Oct 28 15:00 2001 Gerard Roudier
* version sym-2.1.16-20011028
- Slightly simplify driver configuration.
- Prepare a new patch against linux-2.4.13.
+
+Sat Nov 17 10:00 2001 Gerard Roudier
+ * version sym-2.1.17
+ - Fix a couple of gcc/gcc3 warnings.
+ - Allocate separately from the HCB the array for CCBs hashed by DSA.
+ All driver memory allocations are now not greater than 1 PAGE
+ even on PPC64 / 4KB PAGE surprising setup.
+
+Sat Dec 01 18:00 2001 Gerard Roudier
+ * version sym-2.1.17a
+ - Use u_long instead of U32 for the IO base cookie. This is more
+ consistent with what archs are expecting.
+ - Use MMIO per default for Power PC instead of some fake normal IO,
+ as Paul Mackerras stated that MMIO works fine now on this arch.
+
+
+
+
diff --git a/drivers/scsi/sym53c8xx_2/sym53c8xx.h b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
index 9c7ef0284753b..0f6114bda6369 100644
--- a/drivers/scsi/sym53c8xx_2/sym53c8xx.h
+++ b/drivers/scsi/sym53c8xx_2/sym53c8xx.h
@@ -130,17 +130,17 @@ int sym53c8xx_release(struct Scsi_Host *);
#if !defined(HOSTS_C)
/*
- * Use normal IO if configured. Forced for alpha and powerpc.
- * Powerpc fails copying to on-chip RAM using memcpy_toio().
+ * Use normal IO if configured.
+ * Normal IO forced for alpha.
* Forced to MMIO for sparc.
*/
#if defined(__alpha__)
#define SYM_CONF_IOMAPPED
-#elif defined(__powerpc__)
-#define SYM_CONF_IOMAPPED
-#define SYM_OPT_NO_BUS_MEMORY_MAPPING
#elif defined(__sparc__)
#undef SYM_CONF_IOMAPPED
+/* #elif defined(__powerpc__) */
+/* #define SYM_CONF_IOMAPPED */
+/* #define SYM_OPT_NO_BUS_MEMORY_MAPPING */
#elif defined(CONFIG_SCSI_SYM53C8XX_IOMAPPED)
#define SYM_CONF_IOMAPPED
#endif
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 9c9c3bc4d122d..d8c43a9d5abb7 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -647,12 +647,15 @@ static int sym_scatter(hcb_p np, ccb_p cp, Scsi_Cmnd *cmd)
if (!use_sg)
segment = sym_scatter_no_sglist(np, cp, cmd);
- else if (use_sg > SYM_CONF_MAX_SG)
- segment = -1;
else if ((use_sg = map_scsi_sg_data(np, cmd)) > 0) {
struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
struct sym_tblmove *data;
+ if (use_sg > SYM_CONF_MAX_SG) {
+ unmap_scsi_data(np, cmd);
+ return -1;
+ }
+
data = &cp->phys.data[SYM_CONF_MAX_SG - use_sg];
for (segment = 0; segment < use_sg; segment++) {
@@ -2452,8 +2455,8 @@ sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, sym_device *device)
u_char pci_fix_up = SYM_SETUP_PCI_FIX_UP;
u_char revision;
u_int irq;
- u_long base, base_2, io_port;
- u_long base_c, base_2_c;
+ u_long base, base_2, base_io;
+ u_long base_c, base_2_c, io_port;
int i;
sym_chip *chip;
@@ -2470,7 +2473,7 @@ sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, sym_device *device)
device_id = PciDeviceId(pdev);
irq = PciIrqLine(pdev);
- i = pci_get_base_address(pdev, 0, &io_port);
+ i = pci_get_base_address(pdev, 0, &base_io);
io_port = pci_get_base_cookie(pdev, 0);
base_c = pci_get_base_cookie(pdev, i);
@@ -2488,9 +2491,9 @@ sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, sym_device *device)
/*
* If user excluded this chip, donnot initialize it.
*/
- if (io_port) {
+ if (base_io) {
for (i = 0 ; i < 8 ; i++) {
- if (sym_driver_setup.excludes[i] == io_port)
+ if (sym_driver_setup.excludes[i] == base_io)
return -1;
}
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h
index c89832f20463e..4db72ce33f4ed 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.h
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.h
@@ -77,7 +77,6 @@
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/string.h>
-#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/time.h>
@@ -463,7 +462,7 @@ struct sym_shcb {
vm_offset_t mmio_va; /* MMIO kernel virtual address */
vm_offset_t ram_va; /* RAM kernel virtual address */
- u32 io_port; /* IO port address */
+ u_long io_port; /* IO port address cookie */
u_short io_ws; /* IO window size */
int irq; /* IRQ number */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ce06b7222e34f..3ed0e9e531574 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -4689,8 +4689,9 @@ out_reject:
return;
out_clrack:
OUTL_DSP (SCRIPTA_BA (np, clrack));
+ return;
out_stuck:
- ;
+ return;
}
/*
@@ -5223,8 +5224,10 @@ static void sym_alloc_lcb_tags (hcb_p np, u_char tn, u_char ln)
* And accept tagged commands now.
*/
lp->head.itlq_tbl_sa = cpu_to_scr(vtobus(lp->itlq_tbl));
+
+ return;
fail:
- ;
+ return;
}
/*
@@ -5787,6 +5790,13 @@ int sym_hcb_attach(hcb_p np, struct sym_fw *fw)
goto attach_failed;
/*
+ * Allocate the array of lists of CCBs hashed by DSA.
+ */
+ np->ccbh = sym_calloc(sizeof(ccb_p *)*CCB_HASH_SIZE, "CCBH");
+ if (!np->ccbh)
+ goto attach_failed;
+
+ /*
* Initialyze the CCB free and busy queues.
*/
sym_que_init(&np->free_ccbq);
@@ -5977,6 +5987,8 @@ void sym_hcb_free(hcb_p np)
sym_mfree_dma(cp, sizeof(*cp), "CCB");
}
}
+ if (np->ccbh)
+ sym_mfree(np->ccbh, sizeof(ccb_p *)*CCB_HASH_SIZE, "CCBH");
if (np->badluntbl)
sym_mfree_dma(np->badluntbl, 256,"BADLUNTBL");
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.h b/drivers/scsi/sym53c8xx_2/sym_hipd.h
index 62530d4c451b5..cd8d7919e741d 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.h
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.h
@@ -1068,7 +1068,8 @@ struct sym_hcb {
/*
* CCB lists and queue.
*/
- ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */
+ ccb_p *ccbh; /* CCBs hashed by DSA value */
+ /* CCB_HASH_SIZE lists of CCBs */
SYM_QUEHEAD free_ccbq; /* Queue of available CCBs */
SYM_QUEHEAD busy_ccbq; /* Queue of busy CCBs */
diff --git a/drivers/scsi/sym53c8xx_comm.h b/drivers/scsi/sym53c8xx_comm.h
index 57934acc0a35d..ec4f1cc41c4cf 100644
--- a/drivers/scsi/sym53c8xx_comm.h
+++ b/drivers/scsi/sym53c8xx_comm.h
@@ -1,7 +1,7 @@
/******************************************************************************
** High Performance device driver for the Symbios 53C896 controller.
**
-** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+** Copyright (C) 1998-2001 Gerard Roudier <groudier@free.fr>
**
** This driver also supports all the Symbios 53C8XX controller family,
** except 53C810 revisions < 16, 53C825 revisions < 16 and all
@@ -32,7 +32,7 @@
** The Linux port of the FreeBSD ncr driver has been achieved in
** november 1995 by:
**
-** Gerard Roudier <groudier@club-internet.fr>
+** Gerard Roudier <groudier@free.fr>
**
** Being given that this driver originates from the FreeBSD version, and
** in order to keep synergy on both, any suggested enhancements and corrections
diff --git a/drivers/scsi/sym53c8xx_defs.h b/drivers/scsi/sym53c8xx_defs.h
index 52bd0eaab97fa..82f3f11195e34 100644
--- a/drivers/scsi/sym53c8xx_defs.h
+++ b/drivers/scsi/sym53c8xx_defs.h
@@ -1,7 +1,7 @@
/******************************************************************************
** High Performance device driver for the Symbios 53C896 controller.
**
-** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr>
+** Copyright (C) 1998-2001 Gerard Roudier <groudier@free.fr>
**
** This driver also supports all the Symbios 53C8XX controller family,
** except 53C810 revisions < 16, 53C825 revisions < 16 and all
@@ -32,7 +32,7 @@
** The Linux port of the FreeBSD ncr driver has been achieved in
** november 1995 by:
**
-** Gerard Roudier <groudier@club-internet.fr>
+** Gerard Roudier <groudier@free.fr>
**
** Being given that this driver originates from the FreeBSD version, and
** in order to keep synergy on both, any suggested enhancements and corrections
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index 30c400e5bcc6e..720b707a34e01 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -66,7 +66,7 @@ struct adfs_discmap {
/* Inode stuff */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
-int adfs_get_block(struct inode *inode, long block,
+int adfs_get_block(struct inode *inode, sector_t block,
struct buffer_head *bh, int create);
#else
int adfs_bmap(struct inode *inode, int block);
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index ef7715c4458da..66a0c36a74fbc 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -193,7 +193,7 @@ adfs_dir_read(struct super_block *sb, unsigned long object_id,
goto release_buffers;
}
- dir->bh[blk] = bread(sb->s_dev, phys, sb->s_blocksize);
+ dir->bh[blk] = sb_bread(sb, phys);
if (!dir->bh[blk])
goto release_buffers;
}
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index 329bbd5f99e92..71064bc55150c 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -35,7 +35,7 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
goto out;
}
- dir->bh[0] = bread(sb->s_dev, block, sb->s_blocksize);
+ dir->bh[0] = sb_bread(sb, block);
if (!dir->bh[0])
goto out;
dir->nr_buffers += 1;
@@ -60,7 +60,7 @@ adfs_fplus_read(struct super_block *sb, unsigned int id, unsigned int sz, struct
goto out;
}
- dir->bh[blk] = bread(sb->s_dev, block, sb->s_blocksize);
+ dir->bh[blk] = sb_bread(sb, block);
if (!dir->bh[blk])
goto out;
dir->nr_buffers = blk;
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index 22d9bfd250497..9e402bcd9a630 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -27,7 +27,7 @@
* not support creation of new blocks, so we return -EIO for this case.
*/
int
-adfs_get_block(struct inode *inode, long block, struct buffer_head *bh, int create)
+adfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh, int create)
{
if (block < 0)
goto abort_negative;
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 00be08b380826..f1af56308a1ed 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -263,7 +263,7 @@ static struct adfs_discmap *adfs_read_map(struct super_block *sb, struct adfs_di
dm[zone].dm_startbit = 0;
dm[zone].dm_endbit = zone_size;
dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
- dm[zone].dm_bh = bread(sb->s_dev, map_addr, sb->s_blocksize);
+ dm[zone].dm_bh = sb_bread(sb, map_addr);
if (!dm[zone].dm_bh) {
adfs_error(sb, "unable to read map");
@@ -319,8 +319,9 @@ struct super_block *adfs_read_super(struct super_block *sb, void *data, int sile
if (parse_options(sb, data))
goto error;
+ sb->s_blocksize = BLOCK_SIZE;
set_blocksize(dev, BLOCK_SIZE);
- if (!(bh = bread(dev, ADFS_DISCRECORD / BLOCK_SIZE, BLOCK_SIZE))) {
+ if (!(bh = sb_bread(sb, ADFS_DISCRECORD / BLOCK_SIZE))) {
adfs_error(sb, "unable to read superblock");
goto error;
}
@@ -354,7 +355,7 @@ struct super_block *adfs_read_super(struct super_block *sb, void *data, int sile
brelse(bh);
set_blocksize(dev, sb->s_blocksize);
- bh = bread(dev, ADFS_DISCRECORD / sb->s_blocksize, sb->s_blocksize);
+ bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
if (!bh) {
adfs_error(sb, "couldn't read superblock on "
"2nd try.");
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 8a168f71fe00f..a54289141ad2d 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -38,8 +38,6 @@ static int affs_grow_extcache(struct inode *inode, u32 lc_idx);
static struct buffer_head *affs_alloc_extblock(struct inode *inode, struct buffer_head *bh, u32 ext);
static inline struct buffer_head *affs_get_extblock(struct inode *inode, u32 ext);
static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext);
-static int affs_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create);
-
static ssize_t affs_file_write(struct file *filp, const char *buf, size_t count, loff_t *ppos);
static int affs_file_open(struct inode *inode, struct file *filp);
static int affs_file_release(struct inode *inode, struct file *filp);
@@ -332,7 +330,7 @@ err_bread:
}
static int
-affs_get_block(struct inode *inode, long block, struct buffer_head *bh_result, int create)
+affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
{
struct super_block *sb = inode->i_sb;
struct buffer_head *ext_bh;
diff --git a/fs/affs/super.c b/fs/affs/super.c
index a2a034fc2fe20..d65e44bb39a97 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -332,7 +332,7 @@ got_root:
blocksize == 2048 ? 11 : 12;
/* Find out which kind of FS we have */
- boot_bh = bread(sb->s_dev, 0, sb->s_blocksize);
+ boot_bh = sb_bread(sb, 0);
if (!boot_bh) {
printk(KERN_ERR "AFFS: Cannot read boot block\n");
goto out_error;
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index 5caf04a7b8d16..5a887489070b8 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -41,7 +41,7 @@ static int bfs_readdir(struct file * f, void * dirent, filldir_t filldir)
while (f->f_pos < dir->i_size) {
offset = f->f_pos & (BFS_BSIZE-1);
block = dir->iu_sblock + (f->f_pos >> BFS_BSIZE_BITS);
- bh = bread(dev, block, BFS_BSIZE);
+ bh = sb_bread(dir->i_sb, block);
if (!bh) {
f->f_pos += BFS_BSIZE - offset;
continue;
@@ -270,7 +270,7 @@ static int bfs_add_entry(struct inode * dir, const char * name, int namelen, int
sblock = dir->iu_sblock;
eblock = dir->iu_eblock;
for (block=sblock; block<=eblock; block++) {
- bh = bread(dev, block, BFS_BSIZE);
+ bh = sb_bread(dir->i_sb, block);
if(!bh)
return -ENOSPC;
for (off=0; off<BFS_BSIZE; off+=BFS_DIRENT_SIZE) {
@@ -319,7 +319,7 @@ static struct buffer_head * bfs_find_entry(struct inode * dir,
block = offset = 0;
while (block * BFS_BSIZE + offset < dir->i_size) {
if (!bh) {
- bh = bread(dir->i_dev, dir->iu_sblock + block, BFS_BSIZE);
+ bh = sb_bread(dir->i_sb, dir->iu_sblock + block);
if (!bh) {
block++;
continue;
diff --git a/fs/bfs/file.c b/fs/bfs/file.c
index d7a284392225b..bb301b44e4212 100644
--- a/fs/bfs/file.c
+++ b/fs/bfs/file.c
@@ -54,7 +54,7 @@ static int bfs_move_blocks(kdev_t dev, unsigned long start, unsigned long end,
return 0;
}
-static int bfs_get_block(struct inode * inode, long block,
+static int bfs_get_block(struct inode * inode, sector_t block,
struct buffer_head * bh_result, int create)
{
long phys;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 6a4a4c5487cf3..f83f13f562007 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -47,7 +47,7 @@ static void bfs_read_inode(struct inode * inode)
}
block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
- bh = bread(dev, block, BFS_BSIZE);
+ bh = sb_bread(inode->i_sb, block);
if (!bh) {
printf("Unable to read inode %s:%08lx\n", bdevname(dev), ino);
make_bad_inode(inode);
@@ -100,7 +100,7 @@ static void bfs_write_inode(struct inode * inode, int unused)
lock_kernel();
block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
- bh = bread(dev, block, BFS_BSIZE);
+ bh = sb_bread(inode->i_sb, block);
if (!bh) {
printf("Unable to read inode %s:%08lx\n", bdevname(dev), ino);
unlock_kernel();
@@ -153,7 +153,7 @@ static void bfs_delete_inode(struct inode * inode)
lock_kernel();
mark_inode_dirty(inode);
block = (ino - BFS_ROOT_INO)/BFS_INODES_PER_BLOCK + 1;
- bh = bread(dev, block, BFS_BSIZE);
+ bh = sb_bread(s, block);
if (!bh) {
printf("Unable to read inode %s:%08lx\n", bdevname(dev), ino);
unlock_kernel();
@@ -252,7 +252,7 @@ static struct super_block * bfs_read_super(struct super_block * s,
s->s_blocksize = BFS_BSIZE;
s->s_blocksize_bits = BFS_BSIZE_BITS;
- bh = bread(dev, 0, BFS_BSIZE);
+ bh = sb_bread(s, 0);
if(!bh)
goto out;
bfs_sb = (struct bfs_super_block *)bh->b_data;
diff --git a/fs/bio.c b/fs/bio.c
index 36fe91f4a636a..555b7ac144214 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -111,7 +111,8 @@ inline void bio_init(struct bio *bio)
bio->bi_rw = 0;
bio->bi_vcnt = 0;
bio->bi_idx = 0;
- bio->bi_hw_seg = 0;
+ bio->bi_phys_segments = 0;
+ bio->bi_hw_segments = 0;
bio->bi_size = 0;
bio->bi_end_io = NULL;
atomic_set(&bio->bi_cnt, 1);
@@ -166,12 +167,20 @@ void bio_put(struct bio *bio)
}
}
+inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+{
+ if (unlikely(!(bio->bi_flags & (1 << BIO_SEG_VALID))))
+ blk_recount_segments(q, bio);
+
+ return bio->bi_phys_segments;
+}
+
inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
{
- if (unlikely(!(bio->bi_flags & BIO_SEG_VALID)))
+ if (unlikely(!(bio->bi_flags & (1 << BIO_SEG_VALID))))
blk_recount_segments(q, bio);
- return bio->bi_hw_seg;
+ return bio->bi_hw_segments;
}
/**
@@ -199,7 +208,8 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
bio->bi_vcnt = bio_src->bi_vcnt;
bio->bi_idx = bio_src->bi_idx;
if (bio_src->bi_flags & (1 << BIO_SEG_VALID)) {
- bio->bi_hw_seg = bio_src->bi_hw_seg;
+ bio->bi_phys_segments = bio_src->bi_phys_segments;
+ bio->bi_hw_segments = bio_src->bi_hw_segments;
bio->bi_flags |= (1 << BIO_SEG_VALID);
}
bio->bi_size = bio_src->bi_size;
@@ -496,7 +506,7 @@ static int __init init_bio(void)
if (!bio_pool)
panic("bio: can't create mempool\n");
- printk("BIO: pool of %d setup, %uKb (%d bytes/bio)\n", BIO_POOL_SIZE, BIO_POOL_SIZE * sizeof(struct bio) >> 10, sizeof(struct bio));
+ printk("BIO: pool of %d setup, %ZuKb (%Zd bytes/bio)\n", BIO_POOL_SIZE, BIO_POOL_SIZE * sizeof(struct bio) >> 10, sizeof(struct bio));
biovec_init_pool();
@@ -513,4 +523,5 @@ EXPORT_SYMBOL(bio_init);
EXPORT_SYMBOL(bio_copy);
EXPORT_SYMBOL(__bio_clone);
EXPORT_SYMBOL(bio_clone);
+EXPORT_SYMBOL(bio_phys_segments);
EXPORT_SYMBOL(bio_hw_segments);
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 7582c9dd80a8c..7f4afb369346c 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -153,7 +153,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
bh = NULL;
if (blocknr + i < devsize) {
- bh = getblk(sb->s_dev, blocknr + i, PAGE_CACHE_SIZE);
+ bh = sb_getblk(sb, blocknr + i);
if (!buffer_uptodate(bh))
read_array[unread++] = bh;
}
diff --git a/fs/efs/dir.c b/fs/efs/dir.c
index 9bba7a2cbfa25..cc7df77a034ad 100644
--- a/fs/efs/dir.c
+++ b/fs/efs/dir.c
@@ -40,7 +40,7 @@ static int efs_readdir(struct file *filp, void *dirent, filldir_t filldir) {
/* look at all blocks */
while (block < inode->i_blocks) {
/* read the dir block */
- bh = bread(inode->i_dev, efs_bmap(inode, block), EFS_DIRBSIZE);
+ bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
if (!bh) {
printk(KERN_ERR "EFS: readdir(): failed to read dir block %d\n", block);
diff --git a/fs/efs/file.c b/fs/efs/file.c
index 67f58987ead05..faa5b9f2c6487 100644
--- a/fs/efs/file.c
+++ b/fs/efs/file.c
@@ -8,7 +8,7 @@
#include <linux/efs_fs.h>
-int efs_get_block(struct inode *inode, long iblock,
+int efs_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int error = -EROFS;
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index 39e503d3fbc8d..67d050f227839 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
-extern int efs_get_block(struct inode *, long, struct buffer_head *, int);
+extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
static int efs_readpage(struct file *file, struct page *page)
{
return block_read_full_page(page,efs_get_block);
@@ -77,7 +77,7 @@ void efs_read_inode(struct inode *inode) {
(EFS_BLOCKSIZE / sizeof(struct efs_dinode))) *
sizeof(struct efs_dinode);
- bh = bread(inode->i_dev, block, EFS_BLOCKSIZE);
+ bh = sb_bread(inode->i_sb, block);
if (!bh) {
printk(KERN_WARNING "EFS: bread() failed at block %d\n", block);
goto read_inode_error;
@@ -271,7 +271,7 @@ efs_block_t efs_map_block(struct inode *inode, efs_block_t block) {
if (first || lastblock != iblock) {
if (bh) brelse(bh);
- bh = bread(inode->i_dev, iblock, EFS_BLOCKSIZE);
+ bh = sb_bread(inode->i_sb, iblock);
if (!bh) {
printk(KERN_ERR "EFS: bread() failed at block %d\n", iblock);
return 0;
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index cc85f5d9a8691..cc06bc8cbab02 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -24,7 +24,7 @@ static efs_ino_t efs_find_entry(struct inode *inode, const char *name, int len)
for(block = 0; block < inode->i_blocks; block++) {
- bh = bread(inode->i_dev, efs_bmap(inode, block), EFS_DIRBSIZE);
+ bh = sb_bread(inode->i_sb, efs_bmap(inode, block));
if (!bh) {
printk(KERN_ERR "EFS: find_entry(): failed to read dir block %d\n", block);
return 0;
diff --git a/fs/efs/super.c b/fs/efs/super.c
index 79ee8d837a303..691f6df84fc39 100644
--- a/fs/efs/super.c
+++ b/fs/efs/super.c
@@ -137,11 +137,14 @@ struct super_block *efs_read_super(struct super_block *s, void *d, int silent) {
struct buffer_head *bh;
sb = SUPER_INFO(s);
-
+
+ s->s_magic = EFS_SUPER_MAGIC;
+ s->s_blocksize = EFS_BLOCKSIZE;
+ s->s_blocksize_bits = EFS_BLOCKSIZE_BITS;
set_blocksize(dev, EFS_BLOCKSIZE);
/* read the vh (volume header) block */
- bh = bread(dev, 0, EFS_BLOCKSIZE);
+ bh = sb_bread(s, 0);
if (!bh) {
printk(KERN_ERR "EFS: cannot read volume header\n");
@@ -160,7 +163,7 @@ struct super_block *efs_read_super(struct super_block *s, void *d, int silent) {
goto out_no_fs_ul;
}
- bh = bread(dev, sb->fs_start + EFS_SUPER, EFS_BLOCKSIZE);
+ bh = sb_bread(s, sb->fs_start + EFS_SUPER);
if (!bh) {
printk(KERN_ERR "EFS: cannot read superblock\n");
goto out_no_fs_ul;
@@ -174,10 +177,6 @@ struct super_block *efs_read_super(struct super_block *s, void *d, int silent) {
goto out_no_fs_ul;
}
brelse(bh);
-
- s->s_magic = EFS_SUPER_MAGIC;
- s->s_blocksize = EFS_BLOCKSIZE;
- s->s_blocksize_bits = EFS_BLOCKSIZE_BITS;
if (!(s->s_flags & MS_RDONLY)) {
#ifdef DEBUG
diff --git a/fs/efs/symlink.c b/fs/efs/symlink.c
index b5d17f3cc9544..5dd10f50edb04 100644
--- a/fs/efs/symlink.c
+++ b/fs/efs/symlink.c
@@ -26,13 +26,13 @@ static int efs_symlink_readpage(struct file *file, struct page *page)
lock_kernel();
/* read first 512 bytes of link target */
err = -EIO;
- bh = bread(inode->i_dev, efs_bmap(inode, 0), EFS_BLOCKSIZE);
+ bh = sb_bread(inode->i_sb, efs_bmap(inode, 0));
if (!bh)
goto fail;
memcpy(link, bh->b_data, (size > EFS_BLOCKSIZE) ? EFS_BLOCKSIZE : size);
brelse(bh);
if (size > EFS_BLOCKSIZE) {
- bh = bread(inode->i_dev, efs_bmap(inode, 1), EFS_BLOCKSIZE);
+ bh = sb_bread(inode->i_sb, efs_bmap(inode, 1));
if (!bh)
goto fail;
memcpy(link + EFS_BLOCKSIZE, bh->b_data, size - EFS_BLOCKSIZE);
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 3d799f7a0daa3..da07d94626a70 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -88,7 +88,7 @@ static int read_block_bitmap (struct super_block * sb,
if (!gdp)
goto error_out;
retval = 0;
- bh = bread (sb->s_dev, le32_to_cpu(gdp->bg_block_bitmap), sb->s_blocksize);
+ bh = sb_bread(sb, le32_to_cpu(gdp->bg_block_bitmap));
if (!bh) {
ext2_error (sb, "read_block_bitmap",
"Cannot read block bitmap - "
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index bcc088a374c70..46e7f2220f573 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -51,8 +51,7 @@ static struct buffer_head *read_inode_bitmap (struct super_block * sb,
if (!desc)
goto error_out;
- bh = bread(sb->s_dev, le32_to_cpu(desc->bg_inode_bitmap),
- sb->s_blocksize);
+ bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap));
if (!bh)
ext2_error (sb, "read_inode_bitmap",
"Cannot read inode bitmap - "
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 3665f5ef6a729..e96e1a014f5a0 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -239,8 +239,7 @@ static Indirect *ext2_get_branch(struct inode *inode,
Indirect chain[4],
int *err)
{
- kdev_t dev = inode->i_dev;
- int size = inode->i_sb->s_blocksize;
+ struct super_block *sb = inode->i_sb;
Indirect *p = chain;
struct buffer_head *bh;
@@ -250,7 +249,7 @@ static Indirect *ext2_get_branch(struct inode *inode,
if (!p->key)
goto no_block;
while (--depth) {
- bh = bread(dev, le32_to_cpu(p->key), size);
+ bh = sb_bread(sb, le32_to_cpu(p->key));
if (!bh)
goto failure;
/* Reader: pointers */
@@ -399,7 +398,7 @@ static int ext2_alloc_branch(struct inode *inode,
* Get buffer_head for parent block, zero it out and set
* the pointer to new one, then send parent to disk.
*/
- bh = getblk(inode->i_dev, parent, blocksize);
+ bh = sb_getblk(inode->i_sb, parent);
lock_buffer(bh);
memset(bh->b_data, 0, blocksize);
branch[n].bh = bh;
@@ -763,7 +762,7 @@ static void ext2_free_branches(struct inode *inode, u32 *p, u32 *q, int depth)
if (!nr)
continue;
*p = 0;
- bh = bread (inode->i_dev, nr, inode->i_sb->s_blocksize);
+ bh = sb_bread(inode->i_sb, nr);
/*
* A read failure? Report error and clear slot
* (should be rare).
@@ -921,7 +920,7 @@ void ext2_read_inode (struct inode * inode)
EXT2_INODE_SIZE(inode->i_sb);
block = le32_to_cpu(gdp[desc].bg_inode_table) +
(offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
- if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
+ if (!(bh = sb_bread(inode->i_sb, block))) {
ext2_error (inode->i_sb, "ext2_read_inode",
"unable to read inode block - "
"inode=%lu, block=%lu", inode->i_ino, block);
@@ -1063,7 +1062,7 @@ static int ext2_update_inode(struct inode * inode, int do_sync)
EXT2_INODE_SIZE(inode->i_sb);
block = le32_to_cpu(gdp[desc].bg_inode_table) +
(offset >> EXT2_BLOCK_SIZE_BITS(inode->i_sb));
- if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
+ if (!(bh = sb_bread(inode->i_sb, block))) {
ext2_error (inode->i_sb, "ext2_write_inode",
"unable to read inode block - "
"inode=%lu, block=%lu", inode->i_ino, block);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index ee386b01e0ec9..c9c544458311a 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -432,6 +432,7 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
printk ("EXT2-fs: unable to set blocksize %d\n", blocksize);
return NULL;
}
+ sb->s_blocksize = blocksize;
/*
* If the superblock doesn't start on a sector boundary,
@@ -443,7 +444,7 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
offset = (sb_block*BLOCK_SIZE) % blocksize;
}
- if (!(bh = bread (dev, logic_sb_block, blocksize))) {
+ if (!(bh = sb_bread(sb, logic_sb_block))) {
printk ("EXT2-fs: unable to read superblock\n");
return NULL;
}
@@ -502,7 +503,7 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
offset = (sb_block*BLOCK_SIZE) % blocksize;
- bh = bread (dev, logic_sb_block, blocksize);
+ bh = sb_bread(sb, logic_sb_block);
if(!bh) {
printk("EXT2-fs: Couldn't read superblock on "
"2nd try.\n");
@@ -606,8 +607,7 @@ struct super_block * ext2_read_super (struct super_block * sb, void * data,
goto failed_mount;
}
for (i = 0; i < db_count; i++) {
- sb->u.ext2_sb.s_group_desc[i] = bread (dev, logic_sb_block + i + 1,
- sb->s_blocksize);
+ sb->u.ext2_sb.s_group_desc[i] = sb_bread(sb, logic_sb_block + i + 1);
if (!sb->u.ext2_sb.s_group_desc[i]) {
for (j = 0; j < i; j++)
brelse (sb->u.ext2_sb.s_group_desc[j]);
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index db676c0057baa..f4f87da52c376 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -91,8 +91,7 @@ static int read_block_bitmap (struct super_block * sb,
if (!gdp)
goto error_out;
retval = 0;
- bh = bread (sb->s_dev,
- le32_to_cpu(gdp->bg_block_bitmap), sb->s_blocksize);
+ bh = sb_bread(sb, le32_to_cpu(gdp->bg_block_bitmap));
if (!bh) {
ext3_error (sb, "read_block_bitmap",
"Cannot read block bitmap - "
@@ -353,8 +352,7 @@ do_more:
#ifdef CONFIG_JBD_DEBUG
{
struct buffer_head *debug_bh;
- debug_bh = get_hash_table(sb->s_dev, block + i,
- sb->s_blocksize);
+ debug_bh = sb_get_hash_table(sb, block + i);
if (debug_bh) {
BUFFER_TRACE(debug_bh, "Deleted!");
if (!bh2jh(bitmap_bh)->b_committed_data)
@@ -702,7 +700,7 @@ got_block:
struct buffer_head *debug_bh;
/* Record bitmap buffer state in the newly allocated block */
- debug_bh = get_hash_table(sb->s_dev, tmp, sb->s_blocksize);
+ debug_bh = sb_get_hash_table(sb, tmp);
if (debug_bh) {
BUFFER_TRACE(debug_bh, "state when allocated");
BUFFER_TRACE2(debug_bh, bh, "bitmap state");
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 088f796731679..062ed9374aa5a 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -60,8 +60,7 @@ static int read_inode_bitmap (struct super_block * sb,
retval = -EIO;
goto error_out;
}
- bh = bread (sb->s_dev,
- le32_to_cpu(gdp->bg_inode_bitmap), sb->s_blocksize);
+ bh = sb_bread(sb, le32_to_cpu(gdp->bg_inode_bitmap));
if (!bh) {
ext3_error (sb, "read_inode_bitmap",
"Cannot read inode bitmap - "
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ca171a4c4d827..b3e997fd5c208 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -389,8 +389,7 @@ static int ext3_block_to_path(struct inode *inode, long i_block, int offsets[4])
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
Indirect chain[4], int *err)
{
- kdev_t dev = inode->i_dev;
- int blocksize = inode->i_sb->s_blocksize;
+ struct super_block *sb = inode->i_sb;
Indirect *p = chain;
struct buffer_head *bh;
@@ -400,7 +399,7 @@ static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
if (!p->key)
goto no_block;
while (--depth) {
- bh = bread(dev, le32_to_cpu(p->key), blocksize);
+ bh = sb_bread(sb, le32_to_cpu(p->key));
if (!bh)
goto failure;
/* Reader: pointers */
@@ -558,7 +557,7 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
* and set the pointer to new one, then send
* parent to disk.
*/
- bh = getblk(inode->i_dev, parent, blocksize);
+ bh = sb_getblk(inode->i_sb, parent);
branch[n].bh = bh;
lock_buffer(bh);
BUFFER_TRACE(bh, "call get_create_access");
@@ -854,8 +853,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
*errp = ext3_get_block_handle(handle, inode, block, &dummy, create);
if (!*errp && buffer_mapped(&dummy)) {
struct buffer_head *bh;
- bh = getblk(dummy.b_dev, dummy.b_blocknr,
- inode->i_sb->s_blocksize);
+ bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
if (buffer_new(&dummy)) {
J_ASSERT(create != 0);
J_ASSERT(handle != 0);
@@ -1549,9 +1547,6 @@ ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
u32 *first, u32 *last)
{
u32 *p;
- kdev_t dev = inode->i_sb->s_dev;
- unsigned long blocksize = inode->i_sb->s_blocksize;
-
if (try_to_extend_transaction(handle, inode)) {
if (bh) {
BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
@@ -1577,7 +1572,7 @@ ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
struct buffer_head *bh;
*p = 0;
- bh = get_hash_table(dev, nr, blocksize);
+ bh = sb_get_hash_table(inode->i_sb, nr);
ext3_forget(handle, 0, inode, bh, nr);
}
}
@@ -1690,7 +1685,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
continue; /* A hole */
/* Go read the buffer for the next level down */
- bh = bread(inode->i_dev, nr, inode->i_sb->s_blocksize);
+ bh = sb_bread(inode->i_sb, nr);
/*
* A read failure? Report error and clear slot
@@ -2003,7 +1998,7 @@ int ext3_get_inode_loc (struct inode *inode, struct ext3_iloc *iloc)
EXT3_INODE_SIZE(inode->i_sb);
block = le32_to_cpu(gdp[desc].bg_inode_table) +
(offset >> EXT3_BLOCK_SIZE_BITS(inode->i_sb));
- if (!(bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize))) {
+ if (!(bh = sb_bread(inode->i_sb, block))) {
ext3_error (inode->i_sb, "ext3_get_inode_loc",
"unable to read inode block - "
"inode=%lu, block=%lu", inode->i_ino, block);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 9a5e18950dc10..d7ebe39243f95 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -925,6 +925,7 @@ struct super_block * ext3_read_super (struct super_block * sb, void * data,
goto out_fail;
}
+ sb->s_blocksize = blocksize;
set_blocksize (dev, blocksize);
/*
@@ -936,7 +937,7 @@ struct super_block * ext3_read_super (struct super_block * sb, void * data,
offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
}
- if (!(bh = bread (dev, logic_sb_block, blocksize))) {
+ if (!(bh = sb_bread(sb, logic_sb_block))) {
printk (KERN_ERR "EXT3-fs: unable to read superblock\n");
goto out_fail;
}
@@ -1009,7 +1010,7 @@ struct super_block * ext3_read_super (struct super_block * sb, void * data,
set_blocksize (dev, sb->s_blocksize);
logic_sb_block = (sb_block * EXT3_MIN_BLOCK_SIZE) / blocksize;
offset = (sb_block * EXT3_MIN_BLOCK_SIZE) % blocksize;
- bh = bread (dev, logic_sb_block, blocksize);
+ bh = sb_bread(sb, logic_sb_block);
if (!bh) {
printk(KERN_ERR
"EXT3-fs: Can't read superblock on 2nd try.\n");
@@ -1093,8 +1094,7 @@ struct super_block * ext3_read_super (struct super_block * sb, void * data,
goto failed_mount;
}
for (i = 0; i < db_count; i++) {
- sbi->s_group_desc[i] = bread(dev, logic_sb_block + i + 1,
- blocksize);
+ sbi->s_group_desc[i] = sb_bread(sb, logic_sb_block + i + 1);
if (!sbi->s_group_desc[i]) {
printk (KERN_ERR "EXT3-fs: "
"can't read group descriptor %d\n", i);
diff --git a/fs/fat/buffer.c b/fs/fat/buffer.c
index d8a4d0bdae1d1..117d85b33ae7e 100644
--- a/fs/fat/buffer.c
+++ b/fs/fat/buffer.c
@@ -59,12 +59,12 @@ void fat_ll_rw_block (
struct buffer_head *default_fat_bread(struct super_block *sb, int block)
{
- return bread (sb->s_dev, block, sb->s_blocksize);
+ return sb_bread(sb, block);
}
struct buffer_head *default_fat_getblk(struct super_block *sb, int block)
{
- return getblk (sb->s_dev, block, sb->s_blocksize);
+ return sb_getblk(sb, block);
}
void default_fat_brelse(struct super_block *sb, struct buffer_head *bh)
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index bba65eff5898a..5bbebb08cc569 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -584,7 +584,7 @@ fat_read_super(struct super_block *sb, void *data, int silent,
sb->s_blocksize = hard_blksize;
set_blocksize(sb->s_dev, hard_blksize);
- bh = bread(sb->s_dev, 0, sb->s_blocksize);
+ bh = sb_bread(sb, 0);
if (bh == NULL) {
printk("FAT: unable to read boot sector\n");
goto out_fail;
@@ -656,7 +656,7 @@ fat_read_super(struct super_block *sb, void *data, int silent,
(sbi->fsinfo_sector * logical_sector_size) % hard_blksize;
fsinfo_bh = bh;
if (fsinfo_block != 0) {
- fsinfo_bh = bread(sb->s_dev, fsinfo_block, hard_blksize);
+ fsinfo_bh = sb_bread(sb, fsinfo_block);
if (fsinfo_bh == NULL) {
printk("FAT: bread failed, FSINFO block"
" (blocknr = %d)\n", fsinfo_block);
diff --git a/fs/freevxfs/vxfs_bmap.c b/fs/freevxfs/vxfs_bmap.c
index 979bb3718b49d..fb3eeeb9f32d9 100644
--- a/fs/freevxfs/vxfs_bmap.c
+++ b/fs/freevxfs/vxfs_bmap.c
@@ -137,9 +137,8 @@ vxfs_bmap_indir(struct inode *ip, long indir, int size, long block)
struct vxfs_typed *typ;
int64_t off;
- bp = bread(ip->i_dev,
- indir + (i / VXFS_TYPED_PER_BLOCK(ip->i_sb)),
- ip->i_sb->s_blocksize);
+ bp = sb_bread(ip->i_sb,
+ indir + (i / VXFS_TYPED_PER_BLOCK(ip->i_sb)));
if (!buffer_mapped(bp))
return 0;
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index a06f13f3b4a1c..363e1ac597276 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -104,7 +104,7 @@ vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino)
block = extent + ((ino * VXFS_ISIZE) / sbp->s_blocksize);
offset = ((ino % (sbp->s_blocksize / VXFS_ISIZE)) * VXFS_ISIZE);
- bp = bread(sbp->s_dev, block, sbp->s_blocksize);
+ bp = sb_bread(sbp, block);
if (buffer_mapped(bp)) {
struct vxfs_inode_info *vip;
diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c
index 7fe6688ec04be..341d5c1a7f4fd 100644
--- a/fs/freevxfs/vxfs_subr.c
+++ b/fs/freevxfs/vxfs_subr.c
@@ -114,7 +114,7 @@ vxfs_bread(struct inode *ip, int block)
daddr_t pblock;
pblock = vxfs_bmap1(ip, block);
- bp = bread(ip->i_dev, pblock, ip->i_sb->s_blocksize);
+ bp = sb_bread(ip->i_sb, pblock);
return (bp);
}
@@ -135,7 +135,7 @@ vxfs_bread(struct inode *ip, int block)
* Zero on success, else a negativ error code (-EIO).
*/
static int
-vxfs_getblk(struct inode *ip, long iblock,
+vxfs_getblk(struct inode *ip, sector_t iblock,
struct buffer_head *bp, int create)
{
daddr_t pblock;
diff --git a/fs/hfs/file.c b/fs/hfs/file.c
index fbfdc1b123964..d2043ae5137c1 100644
--- a/fs/hfs/file.c
+++ b/fs/hfs/file.c
@@ -61,7 +61,7 @@ struct inode_operations hfs_file_inode_operations = {
struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
{
int tmp;
- kdev_t dev = fork->entry->mdb->sys_mdb->s_dev;
+ struct super_block *sb = fork->entry->mdb->sys_mdb;
tmp = hfs_extent_map(fork, block, create);
@@ -71,7 +71,7 @@ struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
*/
if (tmp) {
hfs_cat_mark_dirty(fork->entry);
- return getblk(dev, tmp, HFS_SECTOR_SIZE);
+ return sb_getblk(sb, tmp);
}
return NULL;
} else {
@@ -80,8 +80,7 @@ struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
we waited on the I/O in getblk to complete.
*/
do {
- struct buffer_head *bh =
- getblk(dev, tmp, HFS_SECTOR_SIZE);
+ struct buffer_head *bh = sb_getblk(sb, tmp);
int tmp2 = hfs_extent_map(fork, block, 0);
if (tmp2 == tmp) {
@@ -107,7 +106,7 @@ struct buffer_head *hfs_getblk(struct hfs_fork *fork, int block, int create)
* block number. This function just calls hfs_extent_map() to do the
* real work and then stuffs the appropriate info into the buffer_head.
*/
-int hfs_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
+int hfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
unsigned long phys;
diff --git a/fs/hfs/hfs.h b/fs/hfs/hfs.h
index f266bc0c42135..e328a14aeed76 100644
--- a/fs/hfs/hfs.h
+++ b/fs/hfs/hfs.h
@@ -495,7 +495,7 @@ extern void hfs_extent_adj(struct hfs_fork *);
extern void hfs_extent_free(struct hfs_fork *);
/* file.c */
-extern int hfs_get_block(struct inode *, long, struct buffer_head *, int);
+extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
/* mdb.c */
extern struct hfs_mdb *hfs_mdb_get(hfs_sysmdb, int, hfs_s32);
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index a39710f098f72..6d69f71d34d23 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -402,6 +402,8 @@ struct super_block *hfs_read_super(struct super_block *s, void *data,
/* set the device driver to 512-byte blocks */
set_blocksize(dev, HFS_SECTOR_SIZE);
+ s->s_blocksize_bits = HFS_SECTOR_SIZE_BITS;
+ s->s_blocksize = HFS_SECTOR_SIZE;
#ifdef CONFIG_MAC_PARTITION
/* check to see if we're in a partition */
@@ -437,8 +439,6 @@ struct super_block *hfs_read_super(struct super_block *s, void *data,
}
s->s_magic = HFS_SUPER_MAGIC;
- s->s_blocksize_bits = HFS_SECTOR_SIZE_BITS;
- s->s_blocksize = HFS_SECTOR_SIZE;
s->s_op = &hfs_super_operations;
/* try to get the root inode */
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 4ce747a495ad2..c96107d5fcab2 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -41,9 +41,9 @@ hfs_buffer hfs_buffer_get(hfs_sysmdb sys_mdb, int block, int read) {
hfs_buffer tmp = HFS_BAD_BUFFER;
if (read) {
- tmp = bread(sys_mdb->s_dev, block, HFS_SECTOR_SIZE);
+ tmp = sb_bread(sys_mdb, block);
} else {
- tmp = getblk(sys_mdb->s_dev, block, HFS_SECTOR_SIZE);
+ tmp = sb_getblk(sys_mdb, block);
if (tmp) {
mark_buffer_uptodate(tmp, 1);
}
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index c7b63f358c29f..66067c2a599d7 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -122,12 +122,9 @@ void hpfs_unlock_3inodes(struct inode *i1, struct inode *i2, struct inode *i3)
void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
int ahead)
{
- kdev_t dev = s->s_dev;
struct buffer_head *bh;
- if (!ahead || secno + ahead >= s->s_hpfs_fs_size)
- *bhp = bh = bread(dev, secno, 512);
- else *bhp = bh = bread(dev, secno, 512);
+ *bhp = bh = sb_bread(s, secno);
if (bh != NULL)
return bh->b_data;
else {
@@ -143,7 +140,7 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head
struct buffer_head *bh;
/*return hpfs_map_sector(s, secno, bhp, 0);*/
- if ((*bhp = bh = getblk(s->s_dev, secno, 512)) != NULL) {
+ if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
if (!buffer_uptodate(bh)) wait_on_buffer(bh);
mark_buffer_uptodate(bh, 1);
return bh->b_data;
@@ -158,7 +155,6 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head
void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
int ahead)
{
- kdev_t dev = s->s_dev;
struct buffer_head *bh;
char *data;
@@ -173,24 +169,22 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe
goto bail;
}
- if (!ahead || secno + 4 + ahead > s->s_hpfs_fs_size)
- qbh->bh[0] = bh = bread(dev, secno, 512);
- else qbh->bh[0] = bh = bread(dev, secno, 512);
+ qbh->bh[0] = bh = sb_bread(s, secno);
if (!bh)
goto bail0;
memcpy(data, bh->b_data, 512);
- qbh->bh[1] = bh = bread(dev, secno + 1, 512);
+ qbh->bh[1] = bh = sb_bread(s, secno + 1);
if (!bh)
goto bail1;
memcpy(data + 512, bh->b_data, 512);
- qbh->bh[2] = bh = bread(dev, secno + 2, 512);
+ qbh->bh[2] = bh = sb_bread(s, secno + 2);
if (!bh)
goto bail2;
memcpy(data + 2 * 512, bh->b_data, 512);
- qbh->bh[3] = bh = bread(dev, secno + 3, 512);
+ qbh->bh[3] = bh = sb_bread(s, secno + 3);
if (!bh)
goto bail3;
memcpy(data + 3 * 512, bh->b_data, 512);
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index cefb5b3269770..bce5d5dac34a4 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -68,7 +68,7 @@ void hpfs_truncate(struct inode *i)
hpfs_write_inode(i);
}
-int hpfs_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
+int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
secno s;
s = hpfs_bmap(inode, iblock);
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index e30e6137e143a..7fa8f7413d541 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -259,7 +259,7 @@ int hpfs_open(struct inode *, struct file *);
int hpfs_file_fsync(struct file *, struct dentry *, int);
secno hpfs_bmap(struct inode *, unsigned);
void hpfs_truncate(struct inode *);
-int hpfs_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create);
+int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create);
ssize_t hpfs_file_write(struct file *file, const char *buf, size_t count, loff_t *ppos);
/* inode.c */
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index 48a358de8417f..29db6b6c01ee0 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -123,7 +123,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
int de_len;
if (!bh) {
- bh = isofs_bread(inode, bufsize, block);
+ bh = isofs_bread(inode, block);
if (!bh)
return 0;
}
@@ -158,7 +158,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
brelse(bh);
bh = NULL;
if (offset) {
- bh = isofs_bread(inode, bufsize, block);
+ bh = isofs_bread(inode, block);
if (!bh)
return 0;
memcpy((void *) tmpde + slop, bh->b_data, offset);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 305bf8fae0441..cbf4cabc39892 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -527,6 +527,7 @@ static struct super_block *isofs_read_super(struct super_block *s, void *data,
}
set_blocksize(dev, opt.blocksize);
+ s->s_blocksize = opt.blocksize;
s->u.isofs_sb.s_high_sierra = high_sierra = 0; /* default is iso9660 */
@@ -540,8 +541,8 @@ static struct super_block *isofs_read_super(struct super_block *s, void *data,
struct iso_volume_descriptor * vdp;
block = iso_blknum << (ISOFS_BLOCK_BITS-blocksize_bits);
- if (!(bh = bread(dev, block, opt.blocksize)))
- goto out_no_read;
+ if (!(bh = sb_bread(s, block)))
+ goto out_no_read;
vdp = (struct iso_volume_descriptor *)bh->b_data;
hdp = (struct hs_volume_descriptor *)bh->b_data;
@@ -896,7 +897,6 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock,
unsigned int firstext;
unsigned long nextino;
int section, rv;
- unsigned int blocksize = inode->i_sb->s_blocksize;
lock_kernel();
@@ -957,7 +957,7 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock,
(*bh_result)->b_blocknr = firstext + b_off - offset;
(*bh_result)->b_state |= (1UL << BH_Mapped);
} else {
- *bh_result = getblk(inode->i_dev, firstext+b_off-offset, blocksize);
+ *bh_result = sb_getblk(inode->i_sb, firstext+b_off-offset);
if ( !*bh_result )
goto abort;
}
@@ -1000,12 +1000,12 @@ static int isofs_bmap(struct inode *inode, int block)
return 0;
}
-struct buffer_head *isofs_bread(struct inode *inode, unsigned int bufsize, unsigned int block)
+struct buffer_head *isofs_bread(struct inode *inode, unsigned int block)
{
unsigned int blknr = isofs_bmap(inode, block);
if (!blknr)
return NULL;
- return bread(inode->i_dev, blknr, bufsize);
+ return sb_bread(inode->i_sb, blknr);
}
static int isofs_readpage(struct file *file, struct page *page)
@@ -1060,7 +1060,7 @@ static int isofs_read_level3_size(struct inode * inode)
unsigned int de_len;
if (!bh) {
- bh = bread(inode->i_dev, block, bufsize);
+ bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
}
@@ -1092,7 +1092,7 @@ static int isofs_read_level3_size(struct inode * inode)
brelse(bh);
bh = NULL;
if (offset) {
- bh = bread(inode->i_dev, block, bufsize);
+ bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
memcpy((void *) tmpde + slop, bh->b_data, offset);
@@ -1150,7 +1150,7 @@ static void isofs_read_inode(struct inode * inode)
unsigned long offset;
int volume_seq_no, i;
- bh = bread(inode->i_dev, block, bufsize);
+ bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_badread;
@@ -1168,7 +1168,7 @@ static void isofs_read_inode(struct inode * inode)
}
memcpy(tmpde, bh->b_data + offset, frag1);
brelse(bh);
- bh = bread(inode->i_dev, ++block, bufsize);
+ bh = sb_bread(inode->i_sb, ++block);
if (!bh)
goto out_badread;
memcpy((char *)tmpde+frag1, bh->b_data, de_len - frag1);
@@ -1345,7 +1345,7 @@ static void isofs_read_inode(struct inode * inode)
#ifdef LEAK_CHECK
#undef malloc
#undef free_s
-#undef bread
+#undef sb_bread
#undef brelse
void * leak_check_malloc(unsigned int size){
@@ -1360,9 +1360,9 @@ void leak_check_free_s(void * obj, int size){
return kfree(obj);
}
-struct buffer_head * leak_check_bread(int dev, int block, int size){
+struct buffer_head * leak_check_bread(struct super_block *sb, int block){
check_bread++;
- return bread(dev, block, size);
+ return sb_bread(sb, block);
}
void leak_check_brelse(struct buffer_head * bh){
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 87fe121f8bbc9..13e79d4d5193c 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -78,7 +78,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
char *dpnt;
if (!bh) {
- bh = isofs_bread(dir, bufsize, block);
+ bh = isofs_bread(dir, block);
if (!bh)
return 0;
}
@@ -108,7 +108,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
brelse(bh);
bh = NULL;
if (offset) {
- bh = isofs_bread(dir, bufsize, block);
+ bh = isofs_bread(dir, block);
if (!bh)
return 0;
memcpy((void *) tmpde + slop, bh->b_data, offset);
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index ee1413f7b5269..6906a8ebac095 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -69,7 +69,7 @@
block = cont_extent; \
offset = cont_offset; \
offset1 = 0; \
- pbh = bread(DEV->i_dev, block, ISOFS_BUFFER_SIZE(DEV)); \
+ pbh = sb_bread(DEV->i_sb, block); \
if(pbh){ \
memcpy(buffer + offset1, pbh->b_data + offset, cont_size - offset1); \
brelse(pbh); \
@@ -511,7 +511,7 @@ static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
block = inode->i_ino >> bufbits;
lock_kernel();
- bh = bread(inode->i_dev, block, bufsize);
+ bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c
index 491a7fd5d6b2a..6347bb16996d1 100644
--- a/fs/minix/bitmap.c
+++ b/fs/minix/bitmap.c
@@ -133,7 +133,7 @@ minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
ino--;
block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
ino / MINIX_INODES_PER_BLOCK;
- *bh = bread(sb->s_dev, block, BLOCK_SIZE);
+ *bh = sb_bread(sb, block);
if (!*bh) {
printk("unable to read i-node block\n");
return NULL;
@@ -158,7 +158,7 @@ minix_V2_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
ino--;
block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
ino / MINIX2_INODES_PER_BLOCK;
- *bh = bread(sb->s_dev, block, BLOCK_SIZE);
+ *bh = sb_bread(sb, block);
if (!*bh) {
printk("unable to read i-node block\n");
return NULL;
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index fe53b49917ba0..5525a48a9164d 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -143,15 +143,15 @@ static struct super_block *minix_read_super(struct super_block *s, void *data,
goto out_bad_hblock;
set_blocksize(dev, BLOCK_SIZE);
- if (!(bh = bread(dev,1,BLOCK_SIZE)))
+ s->s_blocksize = BLOCK_SIZE;
+ s->s_blocksize_bits = BLOCK_SIZE_BITS;
+ if (!(bh = sb_bread(s, 1)))
goto out_bad_sb;
ms = (struct minix_super_block *) bh->b_data;
sbi->s_ms = ms;
sbi->s_sbh = bh;
sbi->s_mount_state = ms->s_state;
- s->s_blocksize = BLOCK_SIZE;
- s->s_blocksize_bits = BLOCK_SIZE_BITS;
sbi->s_ninodes = ms->s_ninodes;
sbi->s_nzones = ms->s_nzones;
sbi->s_imap_blocks = ms->s_imap_blocks;
@@ -198,12 +198,12 @@ static struct super_block *minix_read_super(struct super_block *s, void *data,
block=2;
for (i=0 ; i < sbi->s_imap_blocks ; i++) {
- if (!(sbi->s_imap[i]=bread(dev,block,BLOCK_SIZE)))
+ if (!(sbi->s_imap[i]=sb_bread(s, block)))
goto out_no_bitmap;
block++;
}
for (i=0 ; i < sbi->s_zmap_blocks ; i++) {
- if (!(sbi->s_zmap[i]=bread(dev,block,BLOCK_SIZE)))
+ if (!(sbi->s_zmap[i]=sb_bread(s, block)))
goto out_no_bitmap;
block++;
}
diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c
index 0aee59b47f304..373df9896364f 100644
--- a/fs/minix/itree_common.c
+++ b/fs/minix/itree_common.c
@@ -30,7 +30,7 @@ static inline Indirect *get_branch(struct inode *inode,
Indirect chain[DEPTH],
int *err)
{
- kdev_t dev = inode->i_dev;
+ struct super_block *sb = inode->i_sb;
Indirect *p = chain;
struct buffer_head *bh;
@@ -40,7 +40,7 @@ static inline Indirect *get_branch(struct inode *inode,
if (!p->key)
goto no_block;
while (--depth) {
- bh = bread(dev, block_to_cpu(p->key), BLOCK_SIZE);
+ bh = sb_bread(sb, block_to_cpu(p->key));
if (!bh)
goto failure;
/* Reader: pointers */
@@ -79,7 +79,7 @@ static int alloc_branch(struct inode *inode,
if (!nr)
break;
branch[n].key = cpu_to_block(nr);
- bh = getblk(inode->i_dev, parent, BLOCK_SIZE);
+ bh = sb_getblk(inode->i_sb, parent);
lock_buffer(bh);
memset(bh->b_data, 0, BLOCK_SIZE);
branch[n].bh = bh;
@@ -277,7 +277,7 @@ static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth
if (!nr)
continue;
*p = 0;
- bh = bread (inode->i_dev, nr, BLOCK_SIZE);
+ bh = sb_bread(inode->i_sb, nr);
if (!bh)
continue;
free_branches(inode, (block_t*)bh->b_data,
diff --git a/fs/namespace.c b/fs/namespace.c
index d790be3672961..bbe12582a7f4e 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -517,9 +517,11 @@ static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
if (mnt) {
err = graft_tree(mnt, nd);
- if (err)
+ if (err) {
+ spin_lock(&dcache_lock);
umount_tree(mnt);
- else
+ spin_unlock(&dcache_lock);
+ } else
mntput(mnt);
}
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index 58f543cd6939b..b72f9100c8556 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -52,14 +52,6 @@ static void ncp_add_mem(struct ncp_server *server, const void *source, int size)
return;
}
-static void ncp_add_mem_fromfs(struct ncp_server *server, const char *source, int size)
-{
- assert_server_locked(server);
- copy_from_user(&(server->packet[server->current_size]), source, size);
- server->current_size += size;
- return;
-}
-
static void ncp_add_pstring(struct ncp_server *server, const char *s)
{
int len = strlen(s);
diff --git a/fs/ntfs/fs.c b/fs/ntfs/fs.c
index 4533c63dc9100..5076c1dac7423 100644
--- a/fs/ntfs/fs.c
+++ b/fs/ntfs/fs.c
@@ -1023,8 +1023,9 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
ntfs_error("Unable to set blocksize %d.\n", blocksize);
goto ntfs_read_super_vol;
}
+ sb->s_blocksize = blocksize;
/* Read the super block (boot block). */
- if (!(bh = bread(sb->s_dev, 0, blocksize))) {
+ if (!(bh = sb_bread(sb, 0))) {
ntfs_error("Reading super block failed\n");
goto ntfs_read_super_unl;
}
@@ -1071,8 +1072,7 @@ struct super_block *ntfs_read_super(struct super_block *sb, void *options,
if (to_read < 1)
to_read = 1;
for (i = 0; i < to_read; i++) {
- if (!(bh = bread(sb->s_dev, vol->mft_lcn + i,
- vol->cluster_size))) {
+ if (!(bh = sb_bread(sb, vol->mft_lcn + i))) {
ntfs_error("Could not read $Mft record 0\n");
goto ntfs_read_super_mft;
}
diff --git a/fs/ntfs/support.c b/fs/ntfs/support.c
index 2f290b4de09c5..d490f2553c639 100644
--- a/fs/ntfs/support.c
+++ b/fs/ntfs/support.c
@@ -169,7 +169,7 @@ int ntfs_getput_clusters(ntfs_volume *vol, int cluster, ntfs_size_t start_offs,
buf->do_read ? "get" : "put", cluster, start_offs, length);
to_copy = vol->cluster_size - start_offs;
while (length) {
- if (!(bh = bread(sb->s_dev, cluster, vol->cluster_size))) {
+ if (!(bh = sb_bread(sb, cluster))) {
ntfs_debug(DEBUG_OTHER, "%s failed\n",
buf->do_read ? "Reading" : "Writing");
error = -EIO;
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
index 09b94399aaab0..f544a436c822f 100644
--- a/fs/qnx4/bitmap.c
+++ b/fs/qnx4/bitmap.c
@@ -69,7 +69,7 @@ unsigned long qnx4_count_free_blocks(struct super_block *sb)
struct buffer_head *bh;
while (total < size) {
- if ((bh = bread(sb->s_dev, start + offset, QNX4_BLOCK_SIZE)) == NULL) {
+ if ((bh = sb_bread(sb, start + offset)) == NULL) {
printk("qnx4: I/O error in counting free blocks\n");
break;
}
@@ -96,7 +96,7 @@ int qnx4_is_free(struct super_block *sb, long block)
QNX4DEBUG(("qnx4: is_free requesting block [%lu], bitmap in block [%lu]\n",
(unsigned long) block, (unsigned long) start));
(void) size; /* CHECKME */
- bh = bread(sb->s_dev, start, QNX4_BLOCK_SIZE);
+ bh = sb_bread(sb, start);
if (bh == NULL) {
return -EIO;
}
@@ -124,7 +124,7 @@ int qnx4_set_bitmap(struct super_block *sb, long block, int busy)
QNX4DEBUG(("qnx4: set_bitmap requesting block [%lu], bitmap in block [%lu]\n",
(unsigned long) block, (unsigned long) start));
(void) size; /* CHECKME */
- bh = bread(sb->s_dev, start, QNX4_BLOCK_SIZE);
+ bh = sb_bread(sb, start);
if (bh == NULL) {
return -EIO;
}
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index ac5d09b91477e..49df47d8f3ee8 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -36,7 +36,7 @@ static int qnx4_readdir(struct file *filp, void *dirent, filldir_t filldir)
while (filp->f_pos < inode->i_size) {
blknum = qnx4_block_map( inode, filp->f_pos >> QNX4_BLOCK_SIZE_BITS );
- bh = bread(inode->i_dev, blknum, QNX4_BLOCK_SIZE);
+ bh = sb_bread(inode->i_sb, blknum);
if(bh==NULL) {
printk(KERN_ERR "qnx4_readdir: bread failed (%ld)\n", blknum);
break;
diff --git a/fs/qnx4/fsync.c b/fs/qnx4/fsync.c
index 26c8d77920141..4ef5de9554e16 100644
--- a/fs/qnx4/fsync.c
+++ b/fs/qnx4/fsync.c
@@ -24,8 +24,6 @@
#include <asm/segment.h>
#include <asm/system.h>
-#define blocksize QNX4_BLOCK_SIZE
-
/*
* The functions for qnx4 fs file synchronization.
*/
@@ -40,7 +38,7 @@ static int sync_block(struct inode *inode, unsigned short *block, int wait)
if (!*block)
return 0;
tmp = *block;
- bh = get_hash_table(inode->i_dev, *block, blocksize);
+ bh = sb_get_hash_table(inode->i_sb, *block);
if (!bh)
return 0;
if (*block != tmp) {
@@ -74,7 +72,7 @@ static int sync_iblock(struct inode *inode, unsigned short *iblock,
rc = sync_block(inode, iblock, wait);
if (rc)
return rc;
- *bh = bread(inode->i_dev, tmp, blocksize);
+ *bh = sb_bread(inode->i_sb, tmp);
if (tmp != *iblock) {
brelse(*bh);
*bh = NULL;
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 75ad8a8f5d03e..cfec8ede9c85e 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -95,7 +95,7 @@ static void qnx4_write_inode(struct inode *inode, int unused)
QNX4DEBUG(("qnx4: write inode 2.\n"));
block = ino / QNX4_INODES_PER_BLOCK;
lock_kernel();
- if (!(bh = bread(inode->i_dev, block, QNX4_BLOCK_SIZE))) {
+ if (!(bh = sb_bread(inode->i_sb, block))) {
printk("qnx4: major problem: unable to read inode from dev "
"%s\n", kdevname(inode->i_dev));
unlock_kernel();
@@ -162,7 +162,7 @@ struct buffer_head *qnx4_getblk(struct inode *inode, int nr,
if ( nr >= 0 )
nr = qnx4_block_map( inode, nr );
if (nr) {
- result = getblk(inode->i_dev, nr, QNX4_BLOCK_SIZE);
+ result = sb_getblk(inode->i_sb, nr);
return result;
}
if (!create) {
@@ -173,7 +173,7 @@ struct buffer_head *qnx4_getblk(struct inode *inode, int nr,
if (!tmp) {
return NULL;
}
- result = getblk(inode->i_dev, tmp, QNX4_BLOCK_SIZE);
+ result = sb_getblk(inode->i_sb, tmp);
if (tst) {
qnx4_free_block(inode->i_sb, tmp);
brelse(result);
@@ -204,7 +204,7 @@ struct buffer_head *qnx4_bread(struct inode *inode, int block, int create)
return NULL;
}
-int qnx4_get_block( struct inode *inode, long iblock, struct buffer_head *bh, int create )
+int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create )
{
unsigned long phys;
@@ -243,7 +243,7 @@ unsigned long qnx4_block_map( struct inode *inode, long iblock )
while ( --nxtnt > 0 ) {
if ( ix == 0 ) {
// read next xtnt block.
- bh = bread( inode->i_dev, i_xblk - 1, QNX4_BLOCK_SIZE );
+ bh = sb_bread(inode->i_sb, i_xblk - 1);
if ( !bh ) {
QNX4DEBUG(("qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
return -EIO;
@@ -307,7 +307,7 @@ static const char *qnx4_checkroot(struct super_block *sb)
rd = le32_to_cpu(sb->u.qnx4_sb.sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
rl = le32_to_cpu(sb->u.qnx4_sb.sb->RootDir.di_first_xtnt.xtnt_size);
for (j = 0; j < rl; j++) {
- bh = bread(sb->s_dev, rd + j, QNX4_BLOCK_SIZE); /* root dir, first block */
+ bh = sb_bread(sb, rd + j); /* root dir, first block */
if (bh == NULL) {
return "unable to read root entry.";
}
@@ -350,7 +350,7 @@ static struct super_block *qnx4_read_super(struct super_block *s,
/* Check the boot signature. Since the qnx4 code is
dangerous, we should leave as quickly as possible
if we don't belong here... */
- bh = bread(dev, 0, QNX4_BLOCK_SIZE);
+ bh = sb_bread(s, 0);
if (!bh) {
printk("qnx4: unable to read the boot sector\n");
goto outnobh;
@@ -362,7 +362,7 @@ static struct super_block *qnx4_read_super(struct super_block *s,
}
brelse(bh);
- bh = bread(dev, 1, QNX4_BLOCK_SIZE);
+ bh = sb_bread(s, 1);
if (!bh) {
printk("qnx4: unable to read the superblock\n");
goto outnobh;
@@ -457,7 +457,7 @@ static void qnx4_read_inode(struct inode *inode)
}
block = ino / QNX4_INODES_PER_BLOCK;
- if (!(bh = bread(inode->i_dev, block, QNX4_BLOCK_SIZE))) {
+ if (!(bh = sb_bread(inode->i_sb, block))) {
printk("qnx4: major problem: unable to read inode from dev "
"%s\n", kdevname(inode->i_dev));
return;
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 4c61dfdeab8ab..5f800a83b41b0 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -920,7 +920,7 @@ static int is_left_neighbor_in_cache(
/* Get left neighbor block number. */
n_left_neighbor_blocknr = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position);
/* Look for the left neighbor in the cache. */
- if ( (left = get_hash_table(p_s_sb->s_dev, n_left_neighbor_blocknr, p_s_sb->s_blocksize)) ) {
+ if ( (left = sb_get_hash_table(p_s_sb, n_left_neighbor_blocknr)) ) {
RFALSE( buffer_uptodate (left) && ! B_IS_IN_TREE(left),
"vs-8170: left neighbor (%b %z) is not in the tree", left, left);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 42148504961f3..65ac678891302 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1963,7 +1963,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
//
// this is exactly what 2.3.99-pre9's ext2_bmap is
//
-static int reiserfs_aop_bmap(struct address_space *as, sector_t block) {
+static int reiserfs_aop_bmap(struct address_space *as, long block) {
return generic_block_bmap(as, block, reiserfs_bmap) ;
}
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 2c71fd471adfe..3b70c989bd708 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -683,7 +683,7 @@ retry:
count = 0 ;
for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 && i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
bn = reiserfs_get_journal_block(s) + (jl->j_start+i) % JOURNAL_BLOCK_COUNT;
- tbh = get_hash_table(s->s_dev, bn, s->s_blocksize) ;
+ tbh = sb_get_hash_table(s, bn) ;
/* kill this sanity check */
if (count > (orig_commit_left + 2)) {
@@ -712,7 +712,7 @@ reiserfs_panic(s, "journal-539: flush_commit_list: BAD count(%d) > orig_commit_l
for (i = 0 ; atomic_read(&(jl->j_commit_left)) > 1 &&
i < (jl->j_len + 1) ; i++) { /* everything but commit_bh */
bn = reiserfs_get_journal_block(s) + (jl->j_start + i) % JOURNAL_BLOCK_COUNT ;
- tbh = get_hash_table(s->s_dev, bn, s->s_blocksize) ;
+ tbh = sb_get_hash_table(s, bn) ;
wait_on_buffer(tbh) ;
if (!buffer_uptodate(tbh)) {
@@ -1403,8 +1403,7 @@ static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffe
offset = d_bh->b_blocknr - reiserfs_get_journal_block(p_s_sb) ;
/* ok, we have a journal description block, lets see if the transaction was valid */
- c_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + ((offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT),
- p_s_sb->s_blocksize) ;
+ c_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + ((offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT)) ;
if (!c_bh)
return 0 ;
commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
@@ -1458,7 +1457,7 @@ static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cu
unsigned long trans_offset ;
int i;
- d_bh = bread(p_s_sb->s_dev, cur_dblock, p_s_sb->s_blocksize) ;
+ d_bh = sb_bread(p_s_sb, cur_dblock) ;
if (!d_bh)
return 1 ;
desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
@@ -1482,8 +1481,7 @@ static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cu
brelse(d_bh) ;
return 1 ;
}
- c_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT),
- p_s_sb->s_blocksize) ;
+ c_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + ((trans_offset + le32_to_cpu(desc->j_len) + 1) % JOURNAL_BLOCK_COUNT)) ;
if (!c_bh) {
brelse(d_bh) ;
return 1 ;
@@ -1512,11 +1510,11 @@ static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cu
}
/* get all the buffer heads */
for(i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
- log_blocks[i] = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + (trans_offset + 1 + i) % JOURNAL_BLOCK_COUNT, p_s_sb->s_blocksize);
+ log_blocks[i] = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + (trans_offset + 1 + i) % JOURNAL_BLOCK_COUNT);
if (i < JOURNAL_TRANS_HALF) {
- real_blocks[i] = getblk(p_s_sb->s_dev, le32_to_cpu(desc->j_realblock[i]), p_s_sb->s_blocksize) ;
+ real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
} else {
- real_blocks[i] = getblk(p_s_sb->s_dev, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF]), p_s_sb->s_blocksize) ;
+ real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - JOURNAL_TRANS_HALF])) ;
}
if (real_blocks[i]->b_blocknr >= reiserfs_get_journal_block(p_s_sb) &&
real_blocks[i]->b_blocknr < (reiserfs_get_journal_block(p_s_sb)+JOURNAL_BLOCK_COUNT)) {
@@ -1617,10 +1615,9 @@ static int journal_read(struct super_block *p_s_sb) {
** is the first unflushed, and if that transaction is not valid,
** replay is done
*/
- SB_JOURNAL(p_s_sb)->j_header_bh = bread(p_s_sb->s_dev,
+ SB_JOURNAL(p_s_sb)->j_header_bh = sb_bread(p_s_sb,
reiserfs_get_journal_block(p_s_sb) +
- JOURNAL_BLOCK_COUNT,
- p_s_sb->s_blocksize) ;
+ JOURNAL_BLOCK_COUNT) ;
if (!SB_JOURNAL(p_s_sb)->j_header_bh) {
return 1 ;
}
@@ -1641,7 +1638,7 @@ static int journal_read(struct super_block *p_s_sb) {
** there is nothing more we can do, and it makes no sense to read
** through the whole log.
*/
- d_bh = bread(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset), p_s_sb->s_blocksize) ;
+ d_bh = sb_bread(p_s_sb, reiserfs_get_journal_block(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
if (!ret) {
continue_replay = 0 ;
@@ -1661,7 +1658,7 @@ static int journal_read(struct super_block *p_s_sb) {
** all the valid transactions, and pick out the oldest.
*/
while(continue_replay && cur_dblock < (reiserfs_get_journal_block(p_s_sb) + JOURNAL_BLOCK_COUNT)) {
- d_bh = bread(p_s_sb->s_dev, cur_dblock, p_s_sb->s_blocksize) ;
+ d_bh = sb_bread(p_s_sb, cur_dblock) ;
ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
if (ret == 1) {
desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
@@ -2553,7 +2550,7 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_bloc
int cleaned = 0 ;
if (reiserfs_dont_log(th->t_super)) {
- bh = get_hash_table(p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+ bh = sb_get_hash_table(p_s_sb, blocknr) ;
if (bh && buffer_dirty (bh)) {
printk ("journal_mark_freed(dont_log): dirty buffer on hash list: %lx %ld\n", bh->b_state, blocknr);
BUG ();
@@ -2561,7 +2558,7 @@ int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_bloc
brelse (bh);
return 0 ;
}
- bh = get_hash_table(p_s_sb->s_dev, blocknr, p_s_sb->s_blocksize) ;
+ bh = sb_get_hash_table(p_s_sb, blocknr) ;
/* if it is journal new, we just remove it from this transaction */
if (bh && buffer_journal_new(bh)) {
mark_buffer_notjournal_new(bh) ;
@@ -2768,7 +2765,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
rs = SB_DISK_SUPER_BLOCK(p_s_sb) ;
/* setup description block */
- d_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start, p_s_sb->s_blocksize) ;
+ d_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ;
mark_buffer_uptodate(d_bh, 1) ;
desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
memset(desc, 0, sizeof(struct reiserfs_journal_desc)) ;
@@ -2776,9 +2773,8 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
desc->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
/* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
- c_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) +
- ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % JOURNAL_BLOCK_COUNT),
- p_s_sb->s_blocksize) ;
+ c_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) +
+ ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % JOURNAL_BLOCK_COUNT)) ;
commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
memset(commit, 0, sizeof(struct reiserfs_journal_commit)) ;
commit->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
@@ -2866,9 +2862,8 @@ printk("journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ;
/* copy all the real blocks into log area. dirty log blocks */
if (test_bit(BH_JDirty, &cn->bh->b_state)) {
struct buffer_head *tmp_bh ;
- tmp_bh = getblk(p_s_sb->s_dev, reiserfs_get_journal_block(p_s_sb) +
- ((cur_write_start + jindex) % JOURNAL_BLOCK_COUNT),
- p_s_sb->s_blocksize) ;
+ tmp_bh = sb_getblk(p_s_sb, reiserfs_get_journal_block(p_s_sb) +
+ ((cur_write_start + jindex) % JOURNAL_BLOCK_COUNT)) ;
mark_buffer_uptodate(tmp_bh, 1) ;
memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;
jindex++ ;
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 9fed2136995aa..ab7a31036afcb 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -39,7 +39,7 @@ int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
}
/* check the device size */
- bh = bread(s->s_dev, block_count_new - 1, s->s_blocksize);
+ bh = sb_bread(s, block_count_new - 1);
if (!bh) {
printk("reiserfs_resize: can\'t read last block\n");
return -EINVAL;
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 993e6fe1f3da7..327bd73117ca2 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1116,7 +1116,7 @@ static char prepare_for_delete_or_cut(
continue;
}
/* Search for the buffer in cache. */
- p_s_un_bh = get_hash_table(p_s_sb->s_dev, get_block_num(p_n_unfm_pointer,0), n_blk_size);
+ p_s_un_bh = sb_get_hash_table(p_s_sb, get_block_num(p_n_unfm_pointer,0));
if (p_s_un_bh) {
mark_buffer_clean(p_s_un_bh) ;
diff --git a/fs/romfs/inode.c b/fs/romfs/inode.c
index 477d2eb6872b9..9c791cbce0846 100644
--- a/fs/romfs/inode.c
+++ b/fs/romfs/inode.c
@@ -108,7 +108,7 @@ romfs_read_super(struct super_block *s, void *data, int silent)
s->u.generic_sbp = (void *) 0;
s->s_maxbytes = 0xFFFFFFFF;
- bh = bread(dev, 0, ROMBSIZE);
+ bh = sb_bread(s, 0);
if (!bh) {
/* XXX merge with other printk? */
printk ("romfs: unable to read superblock\n");
@@ -188,7 +188,7 @@ romfs_strnlen(struct inode *i, unsigned long offset, unsigned long count)
if (count > maxsize || offset+count > maxsize)
count = maxsize-offset;
- bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+ bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
if (!bh)
return -1; /* error */
@@ -203,7 +203,7 @@ romfs_strnlen(struct inode *i, unsigned long offset, unsigned long count)
while (res < count) {
offset += maxsize;
- bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+ bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
if (!bh)
return -1;
maxsize = min_t(unsigned long, count - res, ROMBSIZE);
@@ -226,7 +226,7 @@ romfs_copyfrom(struct inode *i, void *dest, unsigned long offset, unsigned long
if (offset >= maxsize || count > maxsize || offset+count>maxsize)
return -1;
- bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+ bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
if (!bh)
return -1; /* error */
@@ -241,7 +241,7 @@ romfs_copyfrom(struct inode *i, void *dest, unsigned long offset, unsigned long
offset += maxsize;
dest += maxsize;
- bh = bread(i->i_dev, offset>>ROMBSBITS, ROMBSIZE);
+ bh = sb_bread(i->i_sb, offset>>ROMBSBITS);
if (!bh)
return -1;
maxsize = min_t(unsigned long, count - res, ROMBSIZE);
diff --git a/fs/sysv/balloc.c b/fs/sysv/balloc.c
index 1d76bb94148ff..2f3df11dd2079 100644
--- a/fs/sysv/balloc.c
+++ b/fs/sysv/balloc.c
@@ -73,7 +73,7 @@ void sysv_free_block(struct super_block * sb, u32 nr)
*/
if (count == sb->sv_flc_size || count == 0) {
block += sb->sv_block_base;
- bh = getblk(sb->s_dev, block, sb->s_blocksize);
+ bh = sb_getblk(sb, block);
if (!bh) {
printk("sysv_free_block: getblk() failed\n");
unlock_super(sb);
@@ -125,7 +125,7 @@ u32 sysv_new_block(struct super_block * sb)
unsigned count;
block += sb->sv_block_base;
- if (!(bh = bread(sb->s_dev, block, sb->s_blocksize))) {
+ if (!(bh = sb_bread(sb, block))) {
printk("sysv_new_block: cannot read free-list block\n");
/* retry this same block next time */
*sb->sv_bcache_count = cpu_to_fs16(sb, 1);
@@ -196,7 +196,7 @@ unsigned long sysv_count_free_blocks(struct super_block * sb)
if (block < sb->sv_firstdatazone || block >= sb->sv_nzones)
goto Einval;
block += sb->sv_block_base;
- bh = bread(sb->s_dev, block, sb->s_blocksize);
+ bh = sb_bread(sb, block);
if (!bh)
goto Eio;
n = fs16_to_cpu(sb, *(u16*)bh->b_data);
diff --git a/fs/sysv/ialloc.c b/fs/sysv/ialloc.c
index a91224c578e5f..474e67ec6501b 100644
--- a/fs/sysv/ialloc.c
+++ b/fs/sysv/ialloc.c
@@ -55,7 +55,7 @@ sysv_raw_inode(struct super_block *sb, unsigned ino, struct buffer_head **bh)
struct sysv_inode *res;
int block = sb->sv_firstinodezone + sb->sv_block_base;
block += (ino-1) >> sb->sv_inodes_per_block_bits;
- *bh = bread(sb->s_dev, block, sb->s_blocksize);
+ *bh = sb_bread(sb, block);
if (!*bh)
return NULL;
res = (struct sysv_inode *) (*bh)->b_data;
diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c
index af27d22104eeb..5d4ef29793a81 100644
--- a/fs/sysv/itree.c
+++ b/fs/sysv/itree.c
@@ -86,8 +86,7 @@ static Indirect *get_branch(struct inode *inode,
Indirect chain[],
int *err)
{
- kdev_t dev = inode->i_dev;
- int size = inode->i_sb->s_blocksize;
+ struct super_block *sb = inode->i_sb;
Indirect *p = chain;
struct buffer_head *bh;
@@ -96,8 +95,8 @@ static Indirect *get_branch(struct inode *inode,
if (!p->key)
goto no_block;
while (--depth) {
- int block = block_to_cpu(inode->i_sb, p->key);
- bh = bread(dev, block, size);
+ int block = block_to_cpu(sb, p->key);
+ bh = sb_bread(sb, block);
if (!bh)
goto failure;
if (!verify_chain(chain, p))
@@ -139,7 +138,7 @@ static int alloc_branch(struct inode *inode,
* the pointer to new one, then send parent to disk.
*/
parent = block_to_cpu(inode->i_sb, branch[n-1].key);
- bh = getblk(inode->i_dev, parent, blocksize);
+ bh = sb_getblk(inode->i_sb, parent);
lock_buffer(bh);
memset(bh->b_data, 0, blocksize);
branch[n].bh = bh;
@@ -192,7 +191,7 @@ changed:
return -EAGAIN;
}
-static int get_block(struct inode *inode, long iblock, struct buffer_head *bh_result, int create)
+static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
{
int err = -EIO;
int offsets[DEPTH];
@@ -336,7 +335,7 @@ static void free_branches(struct inode *inode, u32 *p, u32 *q, int depth)
continue;
*p = 0;
block = block_to_cpu(sb, nr);
- bh = bread(inode->i_dev, block, sb->s_blocksize);
+ bh = sb_bread(sb, block);
if (!bh)
continue;
free_branches(inode, (u32*)bh->b_data,
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 983f0be5b925c..2a38452d7f1a6 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -362,11 +362,12 @@ static struct super_block *sysv_read_super(struct super_block *sb,
if (64 != sizeof (struct sysv_inode))
panic("sysv fs: bad i-node size");
set_blocksize(dev,BLOCK_SIZE);
+ sb->s_blocksize = BLOCK_SIZE;
sb->sv_block_base = 0;
for (i = 0; i < sizeof(flavours)/sizeof(flavours[0]) && !size; i++) {
brelse(bh);
- bh = bread(dev, flavours[i].block, BLOCK_SIZE);
+ bh = sb_bread(sb, flavours[i].block);
if (!bh)
continue;
size = flavours[i].test(sb, bh);
@@ -380,8 +381,9 @@ static struct super_block *sysv_read_super(struct super_block *sb,
blocknr = bh->b_blocknr << 1;
brelse(bh);
set_blocksize(dev, 512);
- bh1 = bread(dev, blocknr, 512);
- bh = bread(dev, blocknr + 1, 512);
+ sb->s_blocksize = 512;
+ bh1 = sb_bread(sb, blocknr);
+ bh = sb_bread(sb, blocknr + 1);
break;
case 2:
bh1 = bh;
@@ -390,7 +392,8 @@ static struct super_block *sysv_read_super(struct super_block *sb,
blocknr = bh->b_blocknr >> 1;
brelse(bh);
set_blocksize(dev, 2048);
- bh1 = bh = bread(dev, blocknr, 2048);
+ sb->s_blocksize = 2048;
+ bh1 = bh = sb_bread(sb, blocknr);
break;
default:
goto Ebadsize;
@@ -441,8 +444,9 @@ static struct super_block *v7_read_super(struct super_block *sb,void *data,
sb->sv_bytesex = BYTESEX_PDP;
set_blocksize(dev, 512);
+ sb->s_blocksize = 512;
- if ((bh = bread(dev, 1, 512)) == NULL) {
+ if ((bh = sb_bread(sb, 1)) == NULL) {
if (!silent)
printk("VFS: unable to read V7 FS superblock on "
"device %s.\n", bdevname(dev));
@@ -458,7 +462,7 @@ static struct super_block *v7_read_super(struct super_block *sb,void *data,
/* plausibility check on root inode: it is a directory,
with a nonzero size that is a multiple of 16 */
- if ((bh2 = bread(dev, 2, 512)) == NULL)
+ if ((bh2 = sb_bread(sb, 2)) == NULL)
goto failed;
v7i = (struct sysv_inode *)(bh2->b_data + 64);
if ((fs16_to_cpu(sb,v7i->i_mode) & ~0777) != S_IFDIR ||
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 982a7f808620b..8dc09bf960080 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -98,7 +98,7 @@ static int read_block_bitmap(struct super_block * sb,
loc.logicalBlockNum = bitmap->s_extPosition;
loc.partitionReferenceNum = UDF_SB_PARTITION(sb);
- bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block), sb->s_blocksize);
+ bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
if (!bh)
{
retval = -EIO;
@@ -463,7 +463,7 @@ static void udf_table_free_blocks(struct super_block * sb,
elen = 0;
obloc = nbloc = UDF_I_LOCATION(table);
- obh = nbh = udf_tread(sb, udf_get_lb_pblock(sb, nbloc, 0), sb->s_blocksize);
+ obh = nbh = udf_tread(sb, udf_get_lb_pblock(sb, nbloc, 0));
atomic_inc(&nbh->b_count);
while (count && (etype =
@@ -571,8 +571,7 @@ static void udf_table_free_blocks(struct super_block * sb,
elen -= sb->s_blocksize;
if (!(nbh = udf_tread(sb,
- udf_get_lb_pblock(sb, nbloc, 0),
- sb->s_blocksize)))
+ udf_get_lb_pblock(sb, nbloc, 0))))
{
udf_release_data(obh);
goto error_return;
@@ -689,7 +688,7 @@ static int udf_table_prealloc_blocks(struct super_block * sb,
extoffset = sizeof(struct UnallocatedSpaceEntry);
bloc = UDF_I_LOCATION(table);
- bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0), sb->s_blocksize);
+ bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
eloc.logicalBlockNum = 0xFFFFFFFF;
while (first_block != eloc.logicalBlockNum && (etype =
@@ -766,7 +765,7 @@ static int udf_table_new_block(struct super_block * sb,
extoffset = sizeof(struct UnallocatedSpaceEntry);
bloc = UDF_I_LOCATION(table);
- goal_bh = bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0), sb->s_blocksize);
+ goal_bh = bh = udf_tread(sb, udf_get_lb_pblock(sb, bloc, 0));
atomic_inc(&goal_bh->b_count);
while (spread && (etype =
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 7a13d861ccfdd..f1dd42b3c63af 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -146,7 +146,7 @@ do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *d
return -ENOENT;
}
- if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+ if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
{
udf_release_data(bh);
return -EIO;
@@ -160,7 +160,7 @@ do_udf_readdir(struct inode * dir, struct file *filp, filldir_t filldir, void *d
for (num=0; i>0; i--)
{
block = udf_get_lb_pblock(dir->i_sb, eloc, offset+i);
- tmp = udf_tgetblk(dir->i_sb, block, dir->i_sb->s_blocksize);
+ tmp = udf_tgetblk(dir->i_sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
else
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 33f5cf0b09c26..97ebc7e6d9600 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -60,7 +60,7 @@ Uint8 * udf_filead_read(struct inode *dir, Uint8 *tmpad, Uint8 ad_size,
block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
if (!block)
return NULL;
- if (!(*bh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+ if (!(*bh = udf_tread(dir->i_sb, block)))
return NULL;
}
else if (*offset > dir->i_sb->s_blocksize)
@@ -74,7 +74,7 @@ Uint8 * udf_filead_read(struct inode *dir, Uint8 *tmpad, Uint8 ad_size,
block = udf_get_lb_pblock(dir->i_sb, fe_loc, ++*pos);
if (!block)
return NULL;
- if (!((*bh) = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+ if (!((*bh) = udf_tread(dir->i_sb, block)))
return NULL;
memcpy((Uint8 *)ad + remainder, (*bh)->b_data, ad_size - remainder);
@@ -117,7 +117,7 @@ udf_fileident_read(struct inode *dir, loff_t *nf_pos,
*extoffset = lextoffset;
udf_release_data(fibh->sbh);
- if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+ if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
return NULL;
fibh->soffset = fibh->eoffset = 0;
@@ -129,7 +129,7 @@ udf_fileident_read(struct inode *dir, loff_t *nf_pos,
for (num=0; i>0; i--)
{
block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset+i);
- tmp = udf_tgetblk(dir->i_sb, block, dir->i_sb->s_blocksize);
+ tmp = udf_tgetblk(dir->i_sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
else
@@ -183,7 +183,7 @@ udf_fileident_read(struct inode *dir, loff_t *nf_pos,
fibh->soffset -= dir->i_sb->s_blocksize;
fibh->eoffset -= dir->i_sb->s_blocksize;
- if (!(fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+ if (!(fibh->ebh = udf_tread(dir->i_sb, block)))
return NULL;
if (sizeof(struct FileIdentDesc) > - fibh->soffset)
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 33ff393177dc8..72ce8252793f8 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -57,7 +57,7 @@ static int udf_adinicb_readpage(struct file *file, struct page * page)
kaddr = kmap(page);
memset(kaddr, 0, PAGE_CACHE_SIZE);
block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
- bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize);
+ bh = sb_bread(inode->i_sb, block);
memcpy(kaddr, bh->b_data + udf_ext0_offset(inode), inode->i_size);
brelse(bh);
flush_dcache_page(page);
@@ -80,7 +80,7 @@ static int udf_adinicb_writepage(struct page *page)
kaddr = kmap(page);
block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
- bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize);
+ bh = sb_bread(inode->i_sb, block);
memcpy(bh->b_data + udf_ext0_offset(inode), kaddr, inode->i_size);
mark_buffer_dirty(bh);
brelse(bh);
@@ -105,7 +105,7 @@ static int udf_adinicb_commit_write(struct file *file, struct page *page, unsign
char *kaddr = page_address(page);
block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
- bh = bread (inode->i_dev, block, inode->i_sb->s_blocksize);
+ bh = sb_bread(inode->i_sb, block);
memcpy(bh->b_data + udf_file_entry_alloc_offset(inode) + offset,
kaddr + offset, to-offset);
mark_buffer_dirty(bh);
@@ -246,8 +246,7 @@ int udf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
/* ok, we need to read the inode */
bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
- inode->i_sb->s_blocksize);
+ udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
if (!bh)
{
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 48431cec2bee8..7b95bf925ef8f 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -184,7 +184,7 @@ void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
}
block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
- bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
+ bh = udf_tread(inode->i_sb, block);
if (!bh)
return;
page = grab_cache_page(inode->i_mapping, 0);
@@ -251,10 +251,10 @@ struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int
UDF_I_LOCATION(inode).partitionReferenceNum, 0);
if (!newblock)
return NULL;
- sbh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+ sbh = udf_tread(inode->i_sb, inode->i_ino);
if (!sbh)
return NULL;
- dbh = udf_tgetblk(inode->i_sb, newblock, inode->i_sb->s_blocksize);
+ dbh = udf_tgetblk(inode->i_sb, newblock);
if (!dbh)
return NULL;
lock_buffer(dbh);
@@ -382,7 +382,7 @@ struct buffer_head * udf_getblk(struct inode * inode, long block,
if (!*err && buffer_mapped(&dummy))
{
struct buffer_head *bh;
- bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
+ bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
if (buffer_new(&dummy))
{
lock_buffer(bh);
@@ -886,8 +886,7 @@ void udf_truncate(struct inode * inode)
udf_file_entry_alloc_offset(inode);
if ((bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
- inode->i_sb->s_blocksize)))
+ udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0))))
{
memset(bh->b_data + offset, 0x00, inode->i_sb->s_blocksize - offset);
mark_buffer_dirty(bh);
@@ -1322,8 +1321,7 @@ udf_update_inode(struct inode *inode, int do_sync)
int err = 0;
bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0),
- inode->i_sb->s_blocksize);
+ udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
if (!bh)
{
@@ -1624,8 +1622,7 @@ Sint8 udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
if (!(*bh))
{
if (!(*bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, *bloc, 0),
- inode->i_sb->s_blocksize)))
+ udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
{
udf_debug("reading block %d failed!\n",
udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -1653,7 +1650,7 @@ Sint8 udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
return -1;
}
if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
- *bloc, 0), inode->i_sb->s_blocksize)))
+ *bloc, 0))))
{
return -1;
}
@@ -1759,8 +1756,7 @@ Sint8 udf_write_aext(struct inode *inode, lb_addr bloc, int *extoffset,
if (!(bh))
{
if (!(bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, bloc, 0),
- inode->i_sb->s_blocksize)))
+ udf_get_lb_pblock(inode->i_sb, bloc, 0))))
{
udf_debug("reading block %d failed!\n",
udf_get_lb_pblock(inode->i_sb, bloc, 0));
@@ -1828,8 +1824,7 @@ Sint8 udf_next_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
if (!(*bh))
{
if (!(*bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, *bloc, 0),
- inode->i_sb->s_blocksize)))
+ udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
{
udf_debug("reading block %d failed!\n",
udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -1951,8 +1946,7 @@ Sint8 udf_current_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
if (!(*bh))
{
if (!(*bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, *bloc, 0),
- inode->i_sb->s_blocksize)))
+ udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
{
udf_debug("reading block %d failed!\n",
udf_get_lb_pblock(inode->i_sb, *bloc, 0));
@@ -2033,8 +2027,7 @@ Sint8 udf_insert_aext(struct inode *inode, lb_addr bloc, int extoffset,
if (!bh)
{
if (!(bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, bloc, 0),
- inode->i_sb->s_blocksize)))
+ udf_get_lb_pblock(inode->i_sb, bloc, 0))))
{
udf_debug("reading block %d failed!\n",
udf_get_lb_pblock(inode->i_sb, bloc, 0));
@@ -2068,8 +2061,7 @@ Sint8 udf_delete_aext(struct inode *inode, lb_addr nbloc, int nextoffset,
if (!(nbh))
{
if (!(nbh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb, nbloc, 0),
- inode->i_sb->s_blocksize)))
+ udf_get_lb_pblock(inode->i_sb, nbloc, 0))))
{
udf_debug("reading block %d failed!\n",
udf_get_lb_pblock(inode->i_sb, nbloc, 0));
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index 4b9cb0ef1bd5e..3cbebf75fac7e 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -67,21 +67,21 @@ udf64_high32(Uint64 indat)
#if defined(__linux__) && defined(__KERNEL__)
extern struct buffer_head *
-udf_tgetblk(struct super_block *sb, int block, int size)
+udf_tgetblk(struct super_block *sb, int block)
{
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
- return getblk(sb->s_dev, udf_fixed_to_variable(block), size);
+ return sb_getblk(sb, udf_fixed_to_variable(block));
else
- return getblk(sb->s_dev, block, size);
+ return sb_getblk(sb, block);
}
extern struct buffer_head *
-udf_tread(struct super_block *sb, int block, int size)
+udf_tread(struct super_block *sb, int block)
{
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
- return bread(sb->s_dev, udf_fixed_to_variable(block), size);
+ return sb_bread(sb, udf_fixed_to_variable(block));
else
- return bread(sb->s_dev, block, size);
+ return sb_bread(sb, block);
}
extern struct GenericAttrFormat *
@@ -92,7 +92,7 @@ udf_add_extendedattr(struct inode * inode, Uint32 size, Uint32 type,
long_ad eaicb;
int offset;
- *bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+ *bh = udf_tread(inode->i_sb, inode->i_ino);
if (UDF_I_EXTENDED_FE(inode) == 0)
{
@@ -208,7 +208,7 @@ udf_get_extendedattr(struct inode * inode, Uint32 type, Uint8 subtype,
long_ad eaicb;
Uint32 offset;
- *bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+ *bh = udf_tread(inode->i_sb, inode->i_ino);
if (UDF_I_EXTENDED_FE(inode) == 0)
{
@@ -273,7 +273,7 @@ udf_read_untagged(struct super_block *sb, Uint32 block, Uint32 offset)
struct buffer_head *bh = NULL;
/* Read the block */
- bh = udf_tread(sb, block+offset, sb->s_blocksize);
+ bh = udf_tread(sb, block+offset);
if (!bh)
{
printk(KERN_ERR "udf: udf_read_untagged(%p,%d,%d) failed\n",
@@ -305,7 +305,7 @@ udf_read_tagged(struct super_block *sb, Uint32 block, Uint32 location, Uint16 *i
if (block == 0xFFFFFFFF)
return NULL;
- bh = udf_tread(sb, block, sb->s_blocksize);
+ bh = udf_tread(sb, block);
if (!bh)
{
udf_debug("block=%d, location=%d: read failed\n", block, location);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 7b6f0a6745781..b36093c816898 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -183,7 +183,7 @@ udf_find_entry(struct inode *dir, struct dentry *dentry,
return NULL;
}
- if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+ if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
{
udf_release_data(bh);
return NULL;
@@ -404,7 +404,7 @@ udf_add_entry(struct inode *dir, struct dentry *dentry,
else
offset = 0;
- if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+ if (!(fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block)))
{
udf_release_data(bh);
*err = -EIO;
@@ -488,7 +488,7 @@ udf_add_entry(struct inode *dir, struct dentry *dentry,
block = udf_get_lb_pblock(dir->i_sb, UDF_I_LOCATION(dir), 0);
if (UDF_I_ALLOCTYPE(dir) == ICB_FLAG_AD_IN_ICB)
{
- fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize);
+ fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
fibh->soffset = fibh->eoffset = udf_file_entry_alloc_offset(dir);
}
else
@@ -803,7 +803,7 @@ static int empty_dir(struct inode *dir)
return 0;
}
- if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block, dir->i_sb->s_blocksize)))
+ if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block)))
return 0;
while ( (f_pos < size) )
@@ -964,7 +964,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
block = udf_get_pblock(inode->i_sb, block,
UDF_I_LOCATION(inode).partitionReferenceNum, 0);
- bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
+ bh = udf_tread(inode->i_sb, block);
lock_buffer(bh);
memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
mark_buffer_uptodate(bh, 1);
@@ -974,7 +974,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
else
{
block = udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0);
- bh = udf_tread(inode->i_sb, block, inode->i_sb->s_blocksize);
+ bh = udf_tread(inode->i_sb, block);
}
ea = bh->b_data + udf_ext0_offset(inode);
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 933f2db760635..39382845c64fa 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -76,7 +76,7 @@ Uint32 udf_get_pblock_virt15(struct super_block *sb, Uint32 block, Uint16 partit
loc = udf_block_map(UDF_SB_VAT(sb), newblock);
- if (!(bh = bread(sb->s_dev, loc, sb->s_blocksize)))
+ if (!(bh = sb_bread(sb, loc)))
{
udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
sb, block, partition, loc, index);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 034064dfdf202..7afbe3af2dc3c 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -412,7 +412,7 @@ udf_vrs(struct super_block *sb, int silent)
for (;!nsr02 && !nsr03; sector += sectorsize)
{
/* Read a block */
- bh = udf_tread(sb, sector >> sb->s_blocksize_bits, sb->s_blocksize);
+ bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
if (!bh)
break;
@@ -525,7 +525,7 @@ udf_find_anchor(struct super_block *sb)
for (i=0; (!lastblock && i<sizeof(last)/sizeof(int)); i++)
{
- if (last[i] < 0 || !(bh = bread(sb->s_dev, last[i], sb->s_blocksize)))
+ if (last[i] < 0 || !(bh = sb_bread(sb, last[i])))
{
ident = location = 0;
}
@@ -560,7 +560,7 @@ udf_find_anchor(struct super_block *sb)
}
else
{
- if (last[i] < 256 || !(bh = bread(sb->s_dev, last[i] - 256, sb->s_blocksize)))
+ if (last[i] < 256 || !(bh = sb_bread(sb, last[i] - 256)))
{
ident = location = 0;
}
@@ -579,8 +579,7 @@ udf_find_anchor(struct super_block *sb)
}
else
{
- if (last[i] < 312 + UDF_SB_SESSION(sb) || !(bh = bread(sb->s_dev, last[i] - 312 - UDF_SB_SESSION(sb),
- sb->s_blocksize)))
+ if (last[i] < 312 + UDF_SB_SESSION(sb) || !(bh = sb_bread(sb, last[i] - 312 - UDF_SB_SESSION(sb))))
{
ident = location = 0;
}
@@ -606,7 +605,7 @@ udf_find_anchor(struct super_block *sb)
if (!lastblock)
{
/* We havn't found the lastblock. check 312 */
- if ((bh = bread(sb->s_dev, 312 + UDF_SB_SESSION(sb), sb->s_blocksize)))
+ if ((bh = sb_bread(sb, 312 + UDF_SB_SESSION(sb))))
{
ident = le16_to_cpu(((tag *)bh->b_data)->tagIdent);
location = le32_to_cpu(((tag *)bh->b_data)->tagLocation);
@@ -1258,7 +1257,7 @@ udf_load_partition(struct super_block *sb, lb_addr *fileset)
Uint32 pos;
pos = udf_block_map(UDF_SB_VAT(sb), 0);
- bh = bread(sb->s_dev, pos, sb->s_blocksize);
+ bh = sb_bread(sb, pos);
UDF_SB_TYPEVIRT(sb,i).s_start_offset =
le16_to_cpu(((struct VirtualAllocationTable20 *)bh->b_data + udf_ext0_offset(UDF_SB_VAT(sb)))->lengthHeader) +
udf_ext0_offset(UDF_SB_VAT(sb));
@@ -1728,7 +1727,7 @@ udf_count_free_bitmap(struct super_block *sb, struct udf_bitmap *bitmap)
{
udf_release_data(bh);
newblock = udf_get_lb_pblock(sb, loc, ++block);
- bh = udf_tread(sb, newblock, sb->s_blocksize);
+ bh = udf_tread(sb, newblock);
if (!bh)
{
udf_debug("read failed\n");
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 543e9b45ecfad..3254e530faf96 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -88,7 +88,7 @@ static int udf_symlink_filler(struct file *file, struct page *page)
lock_kernel();
if (UDF_I_ALLOCTYPE(inode) == ICB_FLAG_AD_IN_ICB)
{
- bh = udf_tread(inode->i_sb, inode->i_ino, inode->i_sb->s_blocksize);
+ bh = udf_tread(inode->i_sb, inode->i_ino);
if (!bh)
goto out;
@@ -97,8 +97,7 @@ static int udf_symlink_filler(struct file *file, struct page *page)
}
else
{
- bh = bread(inode->i_dev, udf_block_map(inode, 0),
- inode->i_sb->s_blocksize);
+ bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
if (!bh)
goto out;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 97dabb0ba6734..56d80d31ce079 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -139,8 +139,8 @@ extern void udf_discard_prealloc(struct inode *);
/* misc.c */
extern int udf_read_tagged_data(char *, int size, int fd, int block, int partref);
-extern struct buffer_head *udf_tgetblk(struct super_block *, int, int);
-extern struct buffer_head *udf_tread(struct super_block *, int, int);
+extern struct buffer_head *udf_tgetblk(struct super_block *, int);
+extern struct buffer_head *udf_tread(struct super_block *, int);
extern struct GenericAttrFormat *udf_add_extendedattr(struct inode *, Uint32, Uint32, Uint8, struct buffer_head **);
extern struct GenericAttrFormat *udf_get_extendedattr(struct inode *, Uint32, Uint8, struct buffer_head **);
extern struct buffer_head *udf_read_tagged(struct super_block *, Uint32, Uint32, Uint16 *);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 38083eab40fc1..31c1bdd0ed466 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -223,7 +223,7 @@ failed:
#define NULLIFY_FRAGMENTS \
for (i = oldcount; i < newcount; i++) { \
- bh = getblk (sb->s_dev, result + i, sb->s_blocksize); \
+ bh = sb_getblk(sb, result + i); \
memset (bh->b_data, 0, sb->s_blocksize); \
mark_buffer_uptodate(bh, 1); \
mark_buffer_dirty (bh); \
@@ -357,7 +357,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
result = ufs_alloc_fragments (inode, cgno, goal, request, err);
if (result) {
for (i = 0; i < oldcount; i++) {
- bh = bread (sb->s_dev, tmp + i, sb->s_blocksize);
+ bh = sb_bread(sb, tmp + i);
if(bh)
{
mark_buffer_clean (bh);
diff --git a/fs/ufs/cylinder.c b/fs/ufs/cylinder.c
index 16aa991954660..97391b4d66c02 100644
--- a/fs/ufs/cylinder.c
+++ b/fs/ufs/cylinder.c
@@ -54,7 +54,7 @@ static void ufs_read_cylinder (struct super_block * sb,
*/
UCPI_UBH->bh[0] = sb->u.ufs_sb.s_ucg[cgno];
for (i = 1; i < UCPI_UBH->count; i++)
- if (!(UCPI_UBH->bh[i] = bread (sb->s_dev, UCPI_UBH->fragment + i, sb->s_blocksize)))
+ if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i)))
goto failed;
sb->u.ufs_sb.s_cgno[bitmap_nr] = cgno;
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index f333e5a2f0ade..3dca14b360e94 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -74,7 +74,7 @@ ufs_readdir (struct file * filp, void * dirent, filldir_t filldir)
while (!error && !stored && filp->f_pos < inode->i_size) {
lblk = (filp->f_pos) >> sb->s_blocksize_bits;
blk = ufs_frag_map(inode, lblk);
- if (!blk || !(bh = bread (sb->s_dev, blk, sb->s_blocksize))) {
+ if (!blk || !(bh = sb_bread(sb, blk))) {
/* XXX - error - skip to the next block */
printk("ufs_readdir: "
"dir inode %lu has a hole at offset %lu\n",
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index cff561ab9b5fb..5c3bc8f231464 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -106,8 +106,7 @@ int ufs_frag_map(struct inode *inode, int frag)
struct buffer_head *bh;
int n = *p++;
- bh = bread(sb->s_dev, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift),
- sb->s_blocksize);
+ bh = sb_bread(sb, uspi->s_sbbase + fs32_to_cpu(sb, block)+(n>>shift));
if (!bh)
goto out;
block = ((u32*) bh->b_data)[n & mask];
@@ -147,8 +146,7 @@ repeat:
lastfrag = inode->u.ufs_i.i_lastfrag;
if (tmp && fragment < lastfrag) {
if (metadata) {
- result = getblk (sb->s_dev, uspi->s_sbbase + tmp + blockoff,
- sb->s_blocksize);
+ result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
if (tmp == fs32_to_cpu(sb, *p)) {
UFSD(("EXIT, result %u\n", tmp + blockoff))
return result;
@@ -216,7 +214,7 @@ repeat:
* now. -DaveM
*/
if (metadata) {
- result = getblk (inode->i_dev, tmp + blockoff, sb->s_blocksize);
+ result = sb_getblk(inode->i_sb, tmp + blockoff);
} else {
*phys = tmp;
result = NULL;
@@ -264,8 +262,7 @@ repeat:
tmp = fs32_to_cpu(sb, *p);
if (tmp) {
if (metadata) {
- result = getblk (bh->b_dev, uspi->s_sbbase + tmp + blockoff,
- sb->s_blocksize);
+ result = sb_getblk(sb, uspi->s_sbbase + tmp + blockoff);
if (tmp == fs32_to_cpu(sb, *p))
goto out;
brelse (result);
@@ -292,7 +289,7 @@ repeat:
* now. -DaveM
*/
if (metadata) {
- result = getblk (bh->b_dev, tmp + blockoff, sb->s_blocksize);
+ result = sb_getblk(sb, tmp + blockoff);
} else {
*phys = tmp;
*new = 1;
@@ -425,7 +422,7 @@ struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment,
*err = error;
if (!error && buffer_mapped(&dummy)) {
struct buffer_head *bh;
- bh = getblk(dummy.b_dev, dummy.b_blocknr, inode->i_sb->s_blocksize);
+ bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
if (buffer_new(&dummy)) {
memset(bh->b_data, 0, inode->i_sb->s_blocksize);
mark_buffer_uptodate(bh, 1);
@@ -500,7 +497,7 @@ void ufs_read_inode (struct inode * inode)
return;
}
- bh = bread (sb->s_dev, uspi->s_sbbase + ufs_inotofsba(inode->i_ino), sb->s_blocksize);
+ bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
if (!bh) {
ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
return;
@@ -591,7 +588,7 @@ static int ufs_update_inode(struct inode * inode, int do_sync)
return -1;
}
- bh = bread (sb->s_dev, ufs_inotofsba(inode->i_ino), sb->s_blocksize);
+ bh = sb_bread(sb, ufs_inotofsba(inode->i_ino));
if (!bh) {
ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
return -1;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 8cdb4c852fcc3..9421f59605243 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -339,7 +339,7 @@ int ufs_read_cylinder_structures (struct super_block * sb) {
size = uspi->s_bsize;
if (i + uspi->s_fpb > blks)
size = (blks - i) * uspi->s_fsize;
- ubh = ubh_bread(sb->s_dev, uspi->s_csaddr + i, size);
+ ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
if (!ubh)
goto failed;
ubh_ubhcpymem (space, ubh, size);
@@ -363,7 +363,7 @@ int ufs_read_cylinder_structures (struct super_block * sb) {
}
for (i = 0; i < uspi->s_ncg; i++) {
UFSD(("read cg %u\n", i))
- if (!(sb->u.ufs_sb.s_ucg[i] = bread (sb->s_dev, ufs_cgcmin(i), sb->s_blocksize)))
+ if (!(sb->u.ufs_sb.s_ucg[i] = sb_bread(sb, ufs_cgcmin(i))))
goto failed;
if (!ufs_cg_chkmagic (sb, (struct ufs_cylinder_group *) sb->u.ufs_sb.s_ucg[i]->b_data))
goto failed;
@@ -414,7 +414,7 @@ void ufs_put_cylinder_structures (struct super_block * sb) {
size = uspi->s_bsize;
if (i + uspi->s_fpb > blks)
size = (blks - i) * uspi->s_fsize;
- ubh = ubh_bread (sb->s_dev, uspi->s_csaddr + i, size);
+ ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
ubh_memcpyubh (ubh, space, size);
space += size;
ubh_mark_buffer_uptodate (ubh, 1);
@@ -597,11 +597,12 @@ struct super_block * ufs_read_super (struct super_block * sb, void * data,
again:
set_blocksize (sb->s_dev, block_size);
+ sb->s_blocksize = block_size;
/*
* read ufs super block from device
*/
- ubh = ubh_bread_uspi (uspi, sb->s_dev, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
+ ubh = ubh_bread_uspi (uspi, sb, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
if (!ubh)
goto failed;
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
index e90fa8f7b8ac1..fc4cb9c386c74 100644
--- a/fs/ufs/truncate.c
+++ b/fs/ufs/truncate.c
@@ -114,7 +114,7 @@ static int ufs_trunc_direct (struct inode * inode)
frag1 = ufs_fragnum (frag1);
frag2 = ufs_fragnum (frag2);
for (j = frag1; j < frag2; j++) {
- bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+ bh = sb_get_hash_table (sb, tmp + j);
if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
retry = 1;
brelse (bh);
@@ -137,7 +137,7 @@ next1:
if (!tmp)
continue;
for (j = 0; j < uspi->s_fpb; j++) {
- bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+ bh = sb_get_hash_table(sb, tmp + j);
if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
retry = 1;
brelse (bh);
@@ -176,7 +176,7 @@ next2:;
ufs_panic(sb, "ufs_truncate_direct", "internal error");
frag4 = ufs_fragnum (frag4);
for (j = 0; j < frag4; j++) {
- bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+ bh = sb_get_hash_table (sb, tmp + j);
if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
retry = 1;
brelse (bh);
@@ -218,7 +218,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, u32 * p)
tmp = fs32_to_cpu(sb, *p);
if (!tmp)
return 0;
- ind_ubh = ubh_bread (sb->s_dev, tmp, uspi->s_bsize);
+ ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
if (tmp != fs32_to_cpu(sb, *p)) {
ubh_brelse (ind_ubh);
return 1;
@@ -235,7 +235,7 @@ static int ufs_trunc_indirect (struct inode * inode, unsigned offset, u32 * p)
if (!tmp)
continue;
for (j = 0; j < uspi->s_fpb; j++) {
- bh = get_hash_table (sb->s_dev, tmp + j, uspi->s_fsize);
+ bh = sb_get_hash_table(sb, tmp + j);
if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *ind)) {
retry = 1;
brelse (bh);
@@ -312,7 +312,7 @@ static int ufs_trunc_dindirect (struct inode * inode, unsigned offset, u32 * p)
tmp = fs32_to_cpu(sb, *p);
if (!tmp)
return 0;
- dind_bh = ubh_bread (inode->i_dev, tmp, uspi->s_bsize);
+ dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
if (tmp != fs32_to_cpu(sb, *p)) {
ubh_brelse (dind_bh);
return 1;
@@ -378,7 +378,7 @@ static int ufs_trunc_tindirect (struct inode * inode)
p = inode->u.ufs_i.i_u1.i_data + UFS_TIND_BLOCK;
if (!(tmp = fs32_to_cpu(sb, *p)))
return 0;
- tind_bh = ubh_bread (sb->s_dev, tmp, uspi->s_bsize);
+ tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
if (tmp != fs32_to_cpu(sb, *p)) {
ubh_brelse (tind_bh);
return 1;
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index 06f2cb8d56f56..2d94ed5531d79 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -23,7 +23,7 @@
struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
- kdev_t dev, unsigned fragment, unsigned size)
+ struct super_block *sb, unsigned fragment, unsigned size)
{
struct ufs_buffer_head * ubh;
unsigned i, j, count;
@@ -39,7 +39,7 @@ struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
ubh->fragment = fragment;
ubh->count = count;
for (i = 0; i < count; i++)
- if (!(ubh->bh[i] = bread (dev, fragment + i, uspi->s_fsize)))
+ if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
goto failed;
for (; i < UFS_MAXFRAG; i++)
ubh->bh[i] = NULL;
@@ -51,7 +51,7 @@ failed:
}
struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
- kdev_t dev, unsigned fragment, unsigned size)
+ struct super_block *sb, unsigned fragment, unsigned size)
{
unsigned i, j, count;
if (size & ~uspi->s_fmask)
@@ -62,7 +62,7 @@ struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
USPI_UBH->fragment = fragment;
USPI_UBH->count = count;
for (i = 0; i < count; i++)
- if (!(USPI_UBH->bh[i] = bread (dev, fragment + i, uspi->s_fsize)))
+ if (!(USPI_UBH->bh[i] = sb_bread(sb, fragment + i)))
goto failed;
for (; i < UFS_MAXFRAG; i++)
USPI_UBH->bh[i] = NULL;
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 5ee0ecb3c1a10..2e5d476028294 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -226,9 +226,9 @@ ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
/*
* These functions manipulate ufs buffers
*/
-#define ubh_bread(dev,fragment,size) _ubh_bread_(uspi,dev,fragment,size)
-extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, kdev_t, unsigned, unsigned);
-extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, kdev_t, unsigned, unsigned);
+#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)
+extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, unsigned, unsigned);
+extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, unsigned, unsigned);
extern void ubh_brelse (struct ufs_buffer_head *);
extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index d8d68e8c296d3..a140326a59902 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -102,13 +102,6 @@ extern void iounmap(void *addr);
#define page_to_bus page_to_phys
/*
- * can the hardware map this into one segment or not, given no other
- * constraints.
- */
-#define BIOVEC_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
* readX/writeX() are used to access memory mapped devices. On some
* architectures the memory mapped IO stuff needs to be accessed
* differently. On the x86 architecture, we just read/write the
diff --git a/include/asm-m68k/machdep.h b/include/asm-m68k/machdep.h
index 82badf63fdfd0..b7c3f7a285e8b 100644
--- a/include/asm-m68k/machdep.h
+++ b/include/asm-m68k/machdep.h
@@ -38,7 +38,6 @@ extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
extern void (*mach_hd_setup)(char *, int *);
extern long mach_max_dma_address;
extern void (*mach_floppy_setup)(char *, int *);
-extern void (*mach_floppy_eject)(void);
extern void (*mach_heartbeat) (int);
extern void (*mach_l2_flush) (int);
extern int mach_sysrq_key;
diff --git a/include/asm-sparc64/dma.h b/include/asm-sparc64/dma.h
index fce01fcd21a94..b80f2f7ab8ce7 100644
--- a/include/asm-sparc64/dma.h
+++ b/include/asm-sparc64/dma.h
@@ -1,4 +1,4 @@
-/* $Id: dma.h,v 1.19 2000/01/28 13:43:14 jj Exp $
+/* $Id: dma.h,v 1.21 2001/12/13 04:16:52 davem Exp $
* include/asm-sparc64/dma.h
*
* Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu)
@@ -218,10 +218,4 @@ extern int isa_dma_bridge_buggy;
#define isa_dma_bridge_buggy (0)
#endif
-/* We support dynamic DMA remapping and adjacent SG entries
- * which have addresses modulo DMA_CHUNK_SIZE will be merged
- * by dma_prepare_sg().
- */
-#define DMA_CHUNK_SIZE 8192
-
#endif /* !(_ASM_SPARC64_DMA_H) */
diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h
index 258428abdae72..8b8c056caf029 100644
--- a/include/asm-sparc64/io.h
+++ b/include/asm-sparc64/io.h
@@ -1,4 +1,4 @@
-/* $Id: io.h,v 1.40 2001/11/10 09:24:56 davem Exp $ */
+/* $Id: io.h,v 1.46 2001/12/13 04:16:52 davem Exp $ */
#ifndef __SPARC64_IO_H
#define __SPARC64_IO_H
@@ -18,11 +18,10 @@ extern unsigned long virt_to_bus_not_defined_use_pci_map(volatile void *addr);
extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr);
#define bus_to_virt bus_to_virt_not_defined_use_pci_map
+/* BIO layer definitions. */
extern unsigned long phys_base;
#define page_to_phys(page) ((((page) - mem_map) << PAGE_SHIFT)+phys_base)
-
-#define BIOVEC_MERGEABLE(vec1, vec2) \
- ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (DMA_CHUNK_SIZE - 1)) == 0)
+#define BIO_VMERGE_BOUNDARY 8192
/* Different PCI controllers we support have their PCI MEM space
* mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area,
@@ -258,6 +257,7 @@ static __inline__ void _raw_writeq(u64 q, unsigned long addr)
#define __raw_readb(__addr) (_raw_readb((unsigned long)(__addr)))
#define __raw_readw(__addr) (_raw_readw((unsigned long)(__addr)))
#define __raw_readl(__addr) (_raw_readl((unsigned long)(__addr)))
+#define __raw_readq(__addr) (_raw_readq((unsigned long)(__addr)))
#define __raw_writeb(__b, __addr) (_raw_writeb((u8)(__b), (unsigned long)(__addr)))
#define __raw_writew(__w, __addr) (_raw_writew((u16)(__w), (unsigned long)(__addr)))
#define __raw_writel(__l, __addr) (_raw_writel((u32)(__l), (unsigned long)(__addr)))
@@ -415,7 +415,7 @@ out:
*/
#define ioremap(__offset, __size) ((void *)(__offset))
#define ioremap_nocache(X,Y) ioremap((X),(Y))
-#define iounmap(__addr) do { } while(0)
+#define iounmap(__addr) do { (void)(__addr); } while(0)
/* Similarly for SBUS. */
#define sbus_ioremap(__res, __offset, __size, __name) \
diff --git a/include/linux/amigaffs.h b/include/linux/amigaffs.h
index 3a264a7e9063a..39bbdbbe4f795 100644
--- a/include/linux/amigaffs.h
+++ b/include/linux/amigaffs.h
@@ -31,7 +31,7 @@ affs_bread(struct super_block *sb, int block)
{
pr_debug(KERN_DEBUG "affs_bread: %d\n", block);
if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size)
- return bread(sb->s_dev, block, sb->s_blocksize);
+ return sb_bread(sb, block);
return NULL;
}
static inline struct buffer_head *
@@ -39,7 +39,7 @@ affs_getblk(struct super_block *sb, int block)
{
pr_debug(KERN_DEBUG "affs_getblk: %d\n", block);
if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size)
- return getblk(sb->s_dev, block, sb->s_blocksize);
+ return sb_getblk(sb, block);
return NULL;
}
static inline struct buffer_head *
@@ -48,10 +48,11 @@ affs_getzeroblk(struct super_block *sb, int block)
struct buffer_head *bh;
pr_debug(KERN_DEBUG "affs_getzeroblk: %d\n", block);
if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size) {
- bh = getblk(sb->s_dev, block, sb->s_blocksize);
- wait_on_buffer(bh);
+ bh = sb_getblk(sb, block);
+ lock_buffer(bh);
memset(bh->b_data, 0 , sb->s_blocksize);
mark_buffer_uptodate(bh, 1);
+ unlock_buffer(bh);
return bh;
}
return NULL;
@@ -62,7 +63,7 @@ affs_getemptyblk(struct super_block *sb, int block)
struct buffer_head *bh;
pr_debug(KERN_DEBUG "affs_getemptyblk: %d\n", block);
if (block >= AFFS_SB->s_reserved && block < AFFS_SB->s_partition_size) {
- bh = getblk(sb->s_dev, block, sb->s_blocksize);
+ bh = sb_getblk(sb, block);
wait_on_buffer(bh);
mark_buffer_uptodate(bh, 1);
return bh;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index a7c0c25763819..8bbacfeebeeb7 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -20,6 +20,12 @@
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
+/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
+#include <asm/io.h>
+#ifndef BIO_VMERGE_BOUNDARY
+#define BIO_VMERGE_BOUNDARY 0
+#endif
+
#define BIO_DEBUG
#ifdef BIO_DEBUG
@@ -61,7 +67,17 @@ struct bio {
unsigned short bi_vcnt; /* how many bio_vec's */
unsigned short bi_idx; /* current index into bvl_vec */
- unsigned short bi_hw_seg; /* actual mapped segments */
+
+ /* Number of segments in this BIO after
+ * physical address coalescing is performed.
+ */
+ unsigned short bi_phys_segments;
+
+ /* Number of segments after physical and DMA remapping
+ * hardware coalescing is performed.
+ */
+ unsigned short bi_hw_segments;
+
unsigned int bi_size; /* residual I/O count */
unsigned int bi_max; /* max bvl_vecs we can hold,
used as index into pool */
@@ -128,10 +144,13 @@ struct bio {
/*
* merge helpers etc
*/
+
#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio) bio_iovec_idx((bio), 0)
-#define BIO_CONTIG(bio, nxt) \
- BIOVEC_MERGEABLE(__BVEC_END((bio)), __BVEC_START((nxt)))
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
+ ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
@@ -174,6 +193,7 @@ extern void bio_put(struct bio *);
extern int bio_endio(struct bio *, int, int);
struct request_queue;
+extern inline int bio_phys_segments(struct request_queue *, struct bio *);
extern inline int bio_hw_segments(struct request_queue *, struct bio *);
extern inline void __bio_clone(struct bio *, struct bio *);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fad87a308171c..620b149ec617a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -41,8 +41,19 @@ struct request {
* touch them
*/
unsigned long hard_nr_sectors;
- unsigned short nr_segments;
+
+ /* Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+ /* Number of scatter-gather addr+len pairs after
+ * physical and DMA remapping hardware coalescing is performed.
+ * This is the number of scatter-gather entries the driver
+ * will actually have to deal with after DMA mapping is done.
+ */
unsigned short nr_hw_segments;
+
unsigned int current_nr_sectors;
unsigned int hard_cur_sectors;
void *special;
@@ -146,6 +157,7 @@ struct request_queue
* queue needs bounce pages for pages above this limit
*/
unsigned long bounce_pfn;
+ int bounce_gfp;
/*
* This is used to remove the plug when tq_disk runs.
@@ -166,7 +178,8 @@ struct request_queue
* queue settings
*/
unsigned short max_sectors;
- unsigned short max_segments;
+ unsigned short max_phys_segments;
+ unsigned short max_hw_segments;
unsigned short hardsect_size;
unsigned int max_segment_size;
@@ -202,19 +215,22 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
#define BLK_BOUNCE_HIGH (blk_max_low_pfn << PAGE_SHIFT)
#define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
#ifdef CONFIG_HIGHMEM
-extern void create_bounce(unsigned long pfn, struct bio **bio_orig);
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
+extern void init_emergency_isa_pool(void);
extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
{
- create_bounce(q->bounce_pfn, bio);
+ create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
}
#else /* CONFIG_HIGHMEM */
#define blk_queue_bounce(q, bio) do { } while (0)
+#define init_emergency_isa_pool() do { } while (0)
#endif /* CONFIG_HIGHMEM */
@@ -257,7 +273,8 @@ extern struct request *blk_get_request(request_queue_t *, int, int);
extern void blk_put_request(struct request *);
extern void blk_plug_device(request_queue_t *);
extern void blk_recount_segments(request_queue_t *, struct bio *);
-extern inline int blk_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
extern int block_ioctl(kdev_t, unsigned int, unsigned long);
@@ -270,7 +287,8 @@ extern void blk_cleanup_queue(request_queue_t *);
extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
extern void blk_queue_bounce_limit(request_queue_t *, u64);
extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
-extern void blk_queue_max_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
@@ -284,7 +302,8 @@ extern int * blksize_size[MAX_BLKDEV];
extern int * max_readahead[MAX_BLKDEV];
-#define MAX_SEGMENTS 128
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
#define MAX_SECTORS 255
#define MAX_SEGMENT_SIZE 65536
diff --git a/include/linux/blkdev.h.orig b/include/linux/blkdev.h.orig
new file mode 100644
index 0000000000000..620b149ec617a
--- /dev/null
+++ b/include/linux/blkdev.h.orig
@@ -0,0 +1,371 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+#include <linux/tqueue.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+
+#include <asm/scatterlist.h>
+
+struct request_queue;
+typedef struct request_queue request_queue_t;
+struct elevator_s;
+typedef struct elevator_s elevator_t;
+
+struct request_list {
+ unsigned int count;
+ struct list_head free;
+ wait_queue_head_t wait;
+};
+
+struct request {
+ struct list_head queuelist; /* looking for ->queue? you must _not_
+ * access it directly, use
+ * blkdev_dequeue_request! */
+ int elevator_sequence;
+
+ unsigned char cmd[16];
+
+ unsigned long flags; /* see REQ_ bits below */
+
+ int rq_status; /* should split this into a few status bits */
+ kdev_t rq_dev;
+ int errors;
+ sector_t sector;
+ unsigned long nr_sectors;
+ unsigned long hard_sector; /* the hard_* are block layer
+ * internals, no driver should
+ * touch them
+ */
+ unsigned long hard_nr_sectors;
+
+ /* Number of scatter-gather DMA addr+len pairs after
+ * physical address coalescing is performed.
+ */
+ unsigned short nr_phys_segments;
+
+ /* Number of scatter-gather addr+len pairs after
+ * physical and DMA remapping hardware coalescing is performed.
+ * This is the number of scatter-gather entries the driver
+ * will actually have to deal with after DMA mapping is done.
+ */
+ unsigned short nr_hw_segments;
+
+ unsigned int current_nr_sectors;
+ unsigned int hard_cur_sectors;
+ void *special;
+ char *buffer;
+ struct completion *waiting;
+ struct bio *bio, *biotail;
+ request_queue_t *q;
+ struct request_list *rl;
+};
+
+/*
+ * first three bits match BIO_RW* bits, important
+ */
+enum rq_flag_bits {
+ __REQ_RW, /* not set, read. set, write */
+ __REQ_RW_AHEAD, /* READA */
+ __REQ_BARRIER, /* may not be passed */
+ __REQ_CMD, /* is a regular fs rw request */
+ __REQ_NOMERGE, /* don't touch this for merging */
+ __REQ_STARTED, /* drive already may have started this one */
+ __REQ_DONTPREP, /* don't call prep for this one */
+ /*
+ * for IDE
+ */
+ __REQ_DRIVE_CMD,
+ __REQ_DRIVE_TASK,
+
+ __REQ_PC, /* packet command (special) */
+ __REQ_BLOCK_PC, /* queued down pc from block layer */
+ __REQ_SENSE, /* sense retrival */
+
+ __REQ_SPECIAL, /* driver special command */
+
+ __REQ_NR_BITS, /* stops here */
+};
+
+#define REQ_RW (1 << __REQ_RW)
+#define REQ_RW_AHEAD (1 << __REQ_RW_AHEAD)
+#define REQ_BARRIER (1 << __REQ_BARRIER)
+#define REQ_CMD (1 << __REQ_CMD)
+#define REQ_NOMERGE (1 << __REQ_NOMERGE)
+#define REQ_STARTED (1 << __REQ_STARTED)
+#define REQ_DONTPREP (1 << __REQ_DONTPREP)
+#define REQ_DRIVE_CMD (1 << __REQ_DRIVE_CMD)
+#define REQ_DRIVE_TASK (1 << __REQ_DRIVE_TASK)
+#define REQ_PC (1 << __REQ_PC)
+#define REQ_SENSE (1 << __REQ_SENSE)
+#define REQ_BLOCK_PC (1 << __REQ_BLOCK_PC)
+#define REQ_SPECIAL (1 << __REQ_SPECIAL)
+
+#include <linux/elevator.h>
+
+typedef int (merge_request_fn) (request_queue_t *, struct request *,
+ struct bio *);
+typedef int (merge_requests_fn) (request_queue_t *, struct request *,
+ struct request *);
+typedef void (request_fn_proc) (request_queue_t *q);
+typedef request_queue_t * (queue_proc) (kdev_t dev);
+typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
+typedef int (prep_rq_fn) (request_queue_t *, struct request *);
+typedef void (unplug_device_fn) (void *q);
+
+enum blk_queue_state {
+ Queue_down,
+ Queue_up,
+};
+
+/*
+ * Default nr free requests per queue, ll_rw_blk will scale it down
+ * according to available RAM at init time
+ */
+#define QUEUE_NR_REQUESTS 8192
+
+struct request_queue
+{
+ /*
+ * the queue request freelist, one for reads and one for writes
+ */
+ struct request_list rq[2];
+
+ /*
+ * Together with queue_head for cacheline sharing
+ */
+ struct list_head queue_head;
+ elevator_t elevator;
+
+ request_fn_proc *request_fn;
+ merge_request_fn *back_merge_fn;
+ merge_request_fn *front_merge_fn;
+ merge_requests_fn *merge_requests_fn;
+ make_request_fn *make_request_fn;
+ prep_rq_fn *prep_rq_fn;
+
+ /*
+ * The queue owner gets to use this for whatever they like.
+ * ll_rw_blk doesn't touch it.
+ */
+ void *queuedata;
+
+ /*
+ * queue needs bounce pages for pages above this limit
+ */
+ unsigned long bounce_pfn;
+ int bounce_gfp;
+
+ /*
+ * This is used to remove the plug when tq_disk runs.
+ */
+ struct tq_struct plug_tq;
+
+ /*
+ * various queue flags, see QUEUE_* below
+ */
+ unsigned long queue_flags;
+
+ /*
+ * protects queue structures from reentrancy
+ */
+ spinlock_t *queue_lock;
+
+ /*
+ * queue settings
+ */
+ unsigned short max_sectors;
+ unsigned short max_phys_segments;
+ unsigned short max_hw_segments;
+ unsigned short hardsect_size;
+ unsigned int max_segment_size;
+
+ unsigned long seg_boundary_mask;
+
+ wait_queue_head_t queue_wait;
+};
+
+#define RQ_INACTIVE (-1)
+#define RQ_ACTIVE 1
+#define RQ_SCSI_BUSY 0xffff
+#define RQ_SCSI_DONE 0xfffe
+#define RQ_SCSI_DISCONNECTING 0xffe0
+
+#define QUEUE_FLAG_PLUGGED 0 /* queue is plugged */
+#define QUEUE_FLAG_NOSPLIT 1 /* can process bio over several goes */
+#define QUEUE_FLAG_CLUSTER 2 /* cluster several segments into 1 */
+
+#define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_mark_plugged(q) set_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
+#define blk_queue_empty(q) elv_queue_empty(q)
+#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
+
+#define rq_data_dir(rq) ((rq)->flags & 1)
+
+/*
+ * noop, requests are automagically marked as active/inactive by I/O
+ * scheduler -- see elv_next_request
+ */
+#define blk_queue_headactive(q, head_active)
+
+extern unsigned long blk_max_low_pfn, blk_max_pfn;
+
+#define BLK_BOUNCE_HIGH (blk_max_low_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ANY (blk_max_pfn << PAGE_SHIFT)
+#define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD)
+
+#ifdef CONFIG_HIGHMEM
+
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
+extern void init_emergency_isa_pool(void);
+
+extern inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+{
+ create_bounce(q->bounce_pfn, q->bounce_gfp, bio);
+}
+
+#else /* CONFIG_HIGHMEM */
+
+#define blk_queue_bounce(q, bio) do { } while (0)
+#define init_emergency_isa_pool() do { } while (0)
+
+#endif /* CONFIG_HIGHMEM */
+
+#define rq_for_each_bio(bio, rq) \
+ if ((rq->bio)) \
+ for (bio = (rq)->bio; bio; bio = bio->bi_next)
+
+struct blk_dev_struct {
+ /*
+ * queue_proc has to be atomic
+ */
+ request_queue_t request_queue;
+ queue_proc *queue;
+ void *data;
+};
+
+struct sec_size {
+ unsigned block_size;
+ unsigned block_size_bits;
+};
+
+/*
+ * Used to indicate the default queue for drivers that don't bother
+ * to implement multiple queues. We have this access macro here
+ * so as to eliminate the need for each and every block device
+ * driver to know about the internal structure of blk_dev[].
+ */
+#define BLK_DEFAULT_QUEUE(_MAJOR) &blk_dev[_MAJOR].request_queue
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern void grok_partitions(kdev_t dev, long size);
+extern int wipe_partitions(kdev_t dev);
+extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size);
+extern void generic_make_request(struct bio *bio);
+extern inline request_queue_t *blk_get_queue(kdev_t dev);
+extern void blkdev_release_request(struct request *);
+extern void blk_attempt_remerge(request_queue_t *, struct request *);
+extern struct request *blk_get_request(request_queue_t *, int, int);
+extern void blk_put_request(struct request *);
+extern void blk_plug_device(request_queue_t *);
+extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern inline int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern inline int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
+extern void blk_queue_assign_lock(request_queue_t *q, spinlock_t *);
+
+extern int block_ioctl(kdev_t, unsigned int, unsigned long);
+
+/*
+ * Access functions for manipulating queue properties
+ */
+extern int blk_init_queue(request_queue_t *, request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(request_queue_t *);
+extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
+extern void blk_queue_bounce_limit(request_queue_t *, u64);
+extern void blk_queue_max_sectors(request_queue_t *q, unsigned short);
+extern void blk_queue_max_phys_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_hw_segments(request_queue_t *q, unsigned short);
+extern void blk_queue_max_segment_size(request_queue_t *q, unsigned int);
+extern void blk_queue_hardsect_size(request_queue_t *q, unsigned short);
+extern void blk_queue_segment_boundary(request_queue_t *q, unsigned long);
+extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern void blk_dump_rq_flags(struct request *, char *);
+extern void generic_unplug_device(void *);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * max_readahead[MAX_BLKDEV];
+
+#define MAX_PHYS_SEGMENTS 128
+#define MAX_HW_SEGMENTS 128
+#define MAX_SECTORS 255
+
+#define MAX_SEGMENT_SIZE 65536
+
+/* read-ahead in pages.. */
+#define MAX_READAHEAD 31
+#define MIN_READAHEAD 3
+
+#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
+#define blkdev_entry_next_request(entry) blkdev_entry_to_request((entry)->next)
+#define blkdev_entry_prev_request(entry) blkdev_entry_to_request((entry)->prev)
+#define blkdev_next_request(req) blkdev_entry_to_request((req)->queuelist.next)
+#define blkdev_prev_request(req) blkdev_entry_to_request((req)->queuelist.prev)
+
+extern void drive_stat_acct(struct request *, int, int);
+
+extern inline void blk_clear(int major)
+{
+ blk_size[major] = NULL;
+#if 0
+ blk_size_in_bytes[major] = NULL;
+#endif
+ blksize_size[major] = NULL;
+ max_readahead[major] = NULL;
+ read_ahead[major] = 0;
+}
+
+extern inline int get_hardsect_size(kdev_t dev)
+{
+ request_queue_t *q = blk_get_queue(dev);
+ int retval = 512;
+
+ if (q && q->hardsect_size)
+ retval = q->hardsect_size;
+
+ return retval;
+}
+
+#define blk_finished_io(nsects) do { } while (0)
+#define blk_started_io(nsects) do { } while (0)
+
+extern inline unsigned int blksize_bits(unsigned int size)
+{
+ unsigned int bits = 8;
+ do {
+ bits++;
+ size >>= 1;
+ } while (size > 256);
+ return bits;
+}
+
+extern inline unsigned int block_size(kdev_t dev)
+{
+ int retval = BLOCK_SIZE;
+ int major = MAJOR(dev);
+
+ if (blksize_size[major]) {
+ int minor = MINOR(dev);
+ if (blksize_size[major][minor])
+ retval = blksize_size[major][minor];
+ }
+ return retval;
+}
+
+#endif
diff --git a/include/linux/fd.h b/include/linux/fd.h
index c0ed2792ba8b3..187785b83958c 100644
--- a/include/linux/fd.h
+++ b/include/linux/fd.h
@@ -369,10 +369,4 @@ struct floppy_raw_cmd {
#define FDEJECT _IO(2, 0x5a)
/* eject the disk */
-
-#ifdef __KERNEL__
-/* eject the boot floppy (if we need the drive for a different root floppy) */
-void floppy_eject(void);
-#endif
-
#endif
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7f52b46d619f8..b1e59c161107b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -21,7 +21,6 @@
#include <linux/cache.h>
#include <linux/stddef.h>
#include <linux/string.h>
-#include <linux/bio.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
@@ -1363,6 +1362,7 @@ extern struct buffer_head * get_hash_table(kdev_t, sector_t, int);
extern struct buffer_head * getblk(kdev_t, sector_t, int);
extern void ll_rw_block(int, int, struct buffer_head * bh[]);
extern int submit_bh(int, struct buffer_head *);
+struct bio;
extern int submit_bio(int, struct bio *);
extern int is_read_only(kdev_t);
extern void __brelse(struct buffer_head *);
@@ -1379,6 +1379,18 @@ static inline void bforget(struct buffer_head *buf)
}
extern int set_blocksize(kdev_t, int);
extern struct buffer_head * bread(kdev_t, int, int);
+static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
+{
+ return bread(sb->s_dev, block, sb->s_blocksize);
+}
+static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
+{
+ return getblk(sb->s_dev, block, sb->s_blocksize);
+}
+static inline struct buffer_head * sb_get_hash_table(struct super_block *sb, int block)
+{
+ return get_hash_table(sb->s_dev, block, sb->s_blocksize);
+}
extern void wakeup_bdflush(void);
extern void put_unused_buffer_head(struct buffer_head * bh);
extern struct buffer_head * get_unused_buffer_head(int async);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 157c3b62fc347..7aa92d2c257a4 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -2,6 +2,7 @@
#define _LINUX_HIGHMEM_H
#include <linux/config.h>
+#include <linux/bio.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_HIGHMEM
@@ -13,7 +14,7 @@ extern struct page *highmem_start_page;
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
-extern void create_bounce(unsigned long pfn, struct bio **bio_orig);
+extern void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig);
static inline char *bh_kmap(struct buffer_head *bh)
{
diff --git a/include/linux/iso_fs.h b/include/linux/iso_fs.h
index 82dde80812ae4..9cdfbaea416dd 100644
--- a/include/linux/iso_fs.h
+++ b/include/linux/iso_fs.h
@@ -219,7 +219,7 @@ int get_joliet_filename(struct iso_directory_record *, unsigned char *, struct i
int get_acorn_filename(struct iso_directory_record *, char *, struct inode *);
extern struct dentry *isofs_lookup(struct inode *, struct dentry *);
-extern struct buffer_head *isofs_bread(struct inode *, unsigned int, unsigned int);
+extern struct buffer_head *isofs_bread(struct inode *, unsigned int);
extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
extern struct inode_operations isofs_dir_inode_operations;
@@ -230,11 +230,11 @@ extern struct address_space_operations isofs_symlink_aops;
#ifdef LEAK_CHECK
#define free_s leak_check_free_s
#define malloc leak_check_malloc
-#define bread leak_check_bread
+#define sb_bread leak_check_bread
#define brelse leak_check_brelse
extern void * leak_check_malloc(unsigned int size);
extern void leak_check_free_s(void * obj, int size);
-extern struct buffer_head * leak_check_bread(int dev, int block, int size);
+extern struct buffer_head * leak_check_bread(struct super_block *sb, int block);
extern void leak_check_brelse(struct buffer_head * bh);
#endif /* LEAK_CHECK */
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h
index dd9b7cb6efb5b..55ba2f99d9a23 100644
--- a/include/linux/qnx4_fs.h
+++ b/include/linux/qnx4_fs.h
@@ -118,7 +118,7 @@ extern int qnx4_unlink(struct inode *dir, struct dentry *dentry);
extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry);
extern int qnx4_sync_file(struct file *file, struct dentry *dentry, int);
extern int qnx4_sync_inode(struct inode *inode);
-extern int qnx4_get_block(struct inode *inode, long iblock, struct buffer_head *bh, int create);
+extern int qnx4_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create);
#endif /* __KERNEL__ */
diff --git a/init/do_mounts.c b/init/do_mounts.c
index e6a94292c2b41..3aab59aec4fcb 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -351,23 +351,20 @@ static int __init create_dev(char *name, kdev_t dev, char *devfs_name)
return sys_symlink(path + n + 5, name);
}
-#ifdef CONFIG_MAC_FLOPPY
-int swim3_fd_eject(int devnum);
-#endif
static void __init change_floppy(char *fmt, ...)
{
extern void wait_for_keypress(void);
char buf[80];
+ int fd;
va_list args;
va_start(args, fmt);
vsprintf(buf, fmt, args);
va_end(args);
-#ifdef CONFIG_BLK_DEV_FD
- floppy_eject();
-#endif
-#ifdef CONFIG_MAC_FLOPPY
- swim3_fd_eject(MINOR(ROOT_DEV));
-#endif
+ fd = open("/dev/root", O_RDWR, 0);
+ if (fd >= 0) {
+ sys_ioctl(fd, FDEJECT, 0);
+ close(fd);
+ }
printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
wait_for_keypress();
}
diff --git a/kernel/ksyms.c b/kernel/ksyms.c
index bd626a165483c..55a53c02be176 100644
--- a/kernel/ksyms.c
+++ b/kernel/ksyms.c
@@ -60,7 +60,7 @@ extern void set_device_ro(kdev_t dev,int flag);
extern void *sys_call_table;
extern struct timezone sys_tz;
-extern int request_dma(unsigned int dmanr, char * deviceID);
+extern int request_dma(unsigned int dmanr, const char * deviceID);
extern void free_dma(unsigned int dmanr);
extern spinlock_t dma_spin_lock;
diff --git a/kernel/signal.c b/kernel/signal.c
index 44acecd851c7d..b6958912e4318 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -649,8 +649,10 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
/*
* kill_something_info() interprets pid in interesting ways just like kill(2).
*
- * POSIX specifies that kill(-1,sig) is unspecified, but what we have
- * is probably wrong. Should make it like BSD or SYSV.
+ * POSIX (2001) specifies "If pid is -1, sig shall be sent to all processes
+ * (excluding an unspecified set of system processes) for which the process
+ * has permission to send that signal."
+ * So, probably the process should also signal itself.
*/
static int kill_something_info(int sig, struct siginfo *info, int pid)
@@ -663,7 +665,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
read_lock(&tasklist_lock);
for_each_task(p) {
- if (p->pid > 1 && p != current) {
+ if (p->pid > 1) {
int err = send_sig_info(sig, info, p);
++count;
if (err != -EPERM)
diff --git a/mm/filemap.c b/mm/filemap.c
index bd53edf5452e7..0ae33bcc1a3d8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2847,7 +2847,7 @@ generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
loff_t pos;
struct page *page, *cached_page;
- unsigned long written;
+ ssize_t written;
long status = 0;
int err;
unsigned bytes;
diff --git a/mm/highmem.c b/mm/highmem.c
index efdc8b71bc8ca..72fd4e8c1b207 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -184,13 +184,14 @@ void kunmap_high(struct page *page)
wake_up(&pkmap_map_wait);
}
-#define POOL_SIZE 64
+#define POOL_SIZE 64
+#define ISA_POOL_SIZE 16
-static mempool_t *page_pool;
+static mempool_t *page_pool, *isa_page_pool;
-static void * page_pool_alloc(int gfp_mask, void *data)
+static void *page_pool_alloc(int gfp_mask, void *data)
{
- return alloc_page(gfp_mask & ~ __GFP_HIGHIO);
+ return alloc_page(gfp_mask);
}
static void page_pool_free(void *page, void *data)
@@ -215,6 +216,23 @@ static __init int init_emergency_pool(void)
return 0;
}
+/*
+ * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
+ * as the max address, so check if the pool has already been created.
+ */
+int init_emergency_isa_pool(void)
+{
+ if (isa_page_pool)
+ return 0;
+
+ isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
+ if (!isa_page_pool)
+ BUG();
+
+ printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
+ return 0;
+}
+
__initcall(init_emergency_pool);
/*
@@ -248,7 +266,7 @@ static inline void copy_to_high_bio_irq(struct bio *to, struct bio *from)
}
}
-static inline int bounce_end_io (struct bio *bio, int nr_sectors)
+static inline int bounce_end_io (struct bio *bio, int nr_sectors, mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, *org_vec;
@@ -267,7 +285,7 @@ static inline int bounce_end_io (struct bio *bio, int nr_sectors)
if (bvec->bv_page == org_vec->bv_page)
continue;
- mempool_free(bvec->bv_page, page_pool);
+ mempool_free(bvec->bv_page, pool);
}
out_eio:
@@ -279,28 +297,53 @@ out_eio:
static int bounce_end_io_write(struct bio *bio, int nr_sectors)
{
- return bounce_end_io(bio, nr_sectors);
+ return bounce_end_io(bio, nr_sectors, page_pool);
+}
+
+static int bounce_end_io_write_isa(struct bio *bio, int nr_sectors)
+{
+ return bounce_end_io(bio, nr_sectors, isa_page_pool);
}
-static int bounce_end_io_read (struct bio *bio, int nr_sectors)
+static inline int __bounce_end_io_read(struct bio *bio, int nr_sectors,
+ mempool_t *pool)
{
struct bio *bio_orig = bio->bi_private;
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
copy_to_high_bio_irq(bio_orig, bio);
- return bounce_end_io(bio, nr_sectors);
+ return bounce_end_io(bio, nr_sectors, pool);
+}
+
+static int bounce_end_io_read(struct bio *bio, int nr_sectors)
+{
+ return __bounce_end_io_read(bio, nr_sectors, page_pool);
}
-void create_bounce(unsigned long pfn, struct bio **bio_orig)
+static int bounce_end_io_read_isa(struct bio *bio, int nr_sectors)
+{
+ return __bounce_end_io_read(bio, nr_sectors, isa_page_pool);
+}
+
+void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)
{
struct page *page;
struct bio *bio = NULL;
- int i, rw = bio_data_dir(*bio_orig);
+ int i, rw = bio_data_dir(*bio_orig), bio_gfp;
struct bio_vec *to, *from;
+ mempool_t *pool;
BUG_ON((*bio_orig)->bi_idx);
+ if (!(gfp & GFP_DMA)) {
+ bio_gfp = GFP_NOHIGHIO;
+ pool = page_pool;
+ } else {
+ bio_gfp = GFP_NOIO;
+ pool = isa_page_pool;
+ }
+
bio_for_each_segment(from, *bio_orig, i) {
page = from->bv_page;
@@ -314,11 +357,11 @@ void create_bounce(unsigned long pfn, struct bio **bio_orig)
* irk, bounce it
*/
if (!bio)
- bio = bio_alloc(GFP_NOHIGHIO, (*bio_orig)->bi_vcnt);
+ bio = bio_alloc(bio_gfp, (*bio_orig)->bi_vcnt);
to = &bio->bi_io_vec[i];
- to->bv_page = mempool_alloc(page_pool, GFP_NOHIGHIO);
+ to->bv_page = mempool_alloc(pool, gfp);
to->bv_len = from->bv_len;
to->bv_offset = from->bv_offset;
@@ -359,10 +402,17 @@ void create_bounce(unsigned long pfn, struct bio **bio_orig)
bio->bi_idx = 0;
bio->bi_size = (*bio_orig)->bi_size;
- if (rw & WRITE)
- bio->bi_end_io = bounce_end_io_write;
- else
- bio->bi_end_io = bounce_end_io_read;
+ if (pool == page_pool) {
+ if (rw & WRITE)
+ bio->bi_end_io = bounce_end_io_write;
+ else
+ bio->bi_end_io = bounce_end_io_read;
+ } else {
+ if (rw & WRITE)
+ bio->bi_end_io = bounce_end_io_write_isa;
+ else
+ bio->bi_end_io = bounce_end_io_read_isa;
+ }
bio->bi_private = *bio_orig;
*bio_orig = bio;
diff --git a/mm/mempool.c b/mm/mempool.c
index 0c0bf99965ca1..ecf1acc80fea9 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -176,7 +176,8 @@ void mempool_destroy(mempool_t *pool)
*
* this function only sleeps if the alloc_fn function sleeps or
* returns NULL. Note that due to preallocation, this function
- * *never* fails.
+ * *never* fails when called from process contexts. (it might
+ * fail if called from an IRQ context.)
*/
void * mempool_alloc(mempool_t *pool, int gfp_mask)
{
@@ -185,7 +186,7 @@ void * mempool_alloc(mempool_t *pool, int gfp_mask)
struct list_head *tmp;
int curr_nr;
DECLARE_WAITQUEUE(wait, current);
- int gfp_nowait = gfp_mask & ~__GFP_WAIT;
+ int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
repeat_alloc:
element = pool->alloc(gfp_nowait, pool->pool_data);
@@ -196,15 +197,11 @@ repeat_alloc:
* If the pool is less than 50% full then try harder
* to allocate an element:
*/
- if (gfp_mask != gfp_nowait) {
- if (pool->curr_nr <= pool->min_nr/2) {
- element = pool->alloc(gfp_mask, pool->pool_data);
- if (likely(element != NULL))
- return element;
- }
- } else
- /* we must not sleep */
- return NULL;
+ if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
+ element = pool->alloc(gfp_mask, pool->pool_data);
+ if (likely(element != NULL))
+ return element;
+ }
/*
* Kick the VM at this point.
@@ -218,19 +215,25 @@ repeat_alloc:
element = tmp;
pool->curr_nr--;
spin_unlock_irqrestore(&pool->lock, flags);
-
return element;
}
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ /* We must not sleep in the GFP_ATOMIC case */
+ if (gfp_mask == gfp_nowait)
+ return NULL;
+
+ run_task_queue(&tq_disk);
+
add_wait_queue_exclusive(&pool->wait, &wait);
set_task_state(current, TASK_UNINTERRUPTIBLE);
+ spin_lock_irqsave(&pool->lock, flags);
curr_nr = pool->curr_nr;
spin_unlock_irqrestore(&pool->lock, flags);
- if (!curr_nr) {
- run_task_queue(&tq_disk);
+ if (!curr_nr)
schedule();
- }
current->state = TASK_RUNNING;
remove_wait_queue(&pool->wait, &wait);