diff options
author:    davem <davem>  2001-12-13 04:16:51 +0000
committer: davem <davem>  2001-12-13 04:16:51 +0000
commit:    e0e16db2ec7592cb1b9ed17a19f386ec87379863 (patch)
tree:      ee4ab2c301ffc7d4871043a6f1571b726527b33d
parent:    92485e4f1057297645b19aaeb8db114a43f31371 (diff)
download:  netdev-vger-cvs-e0e16db2ec7592cb1b9ed17a19f386ec87379863.tar.gz
Kill DMA_CHUNK_SIZE and related code.
Rename request nr_segments to nr_phys_segments.
-rw-r--r--  drivers/block/DAC960.c     |   2
-rw-r--r--  drivers/block/cciss.c      |   2
-rw-r--r--  drivers/block/cpqarray.c   |   2
-rw-r--r--  drivers/block/ll_rw_blk.c  |  24
-rw-r--r--  drivers/ide/ide-dma.c      |   4
-rw-r--r--  drivers/scsi/scsi.c        |   5
-rw-r--r--  drivers/scsi/scsi_dma.c    |   7
-rw-r--r--  drivers/scsi/scsi_lib.c    |   2
-rw-r--r--  drivers/scsi/scsi_merge.c  |  89
-rw-r--r--  include/asm-i386/io.h      |   7
-rw-r--r--  include/asm-sparc64/dma.h  |  10
-rw-r--r--  include/asm-sparc64/io.h   |  10
-rw-r--r--  include/linux/bio.h        |   2
-rw-r--r--  include/linux/blkdev.h     |  13
14 files changed, 37 insertions, 142 deletions
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 55d3fbe7e..eed7b446c 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -2889,7 +2889,7 @@ static boolean DAC960_ProcessRequest(DAC960_Controller_T *Controller, Command->LogicalDriveNumber = DAC960_LogicalDriveNumber(Request->rq_dev); Command->BlockNumber = Request->sector; Command->BlockCount = Request->nr_sectors; - Command->SegmentCount = Request->nr_segments; + Command->SegmentCount = Request->nr_phys_segments; Command->BufferHeader = Request->bio; Command->RequestBuffer = Request->buffer; blkdev_dequeue_request(Request); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 371755761..208a785c6 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1219,7 +1219,7 @@ queue: goto startio; creq = elv_next_request(q); - if (creq->nr_segments > MAXSGENTRIES) + if (creq->nr_phys_segments > MAXSGENTRIES) BUG(); if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR ) diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 4ff77277d..e34f7b47e 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -864,7 +864,7 @@ queue_next: goto startio; creq = elv_next_request(q); - if (creq->nr_segments > SG_MAX) + if (creq->nr_phys_segments > SG_MAX) BUG(); if (h->ctlr != MAJOR(creq->rq_dev)-MAJOR_NR || h->ctlr > nr_ctlr) diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c index 2eef6065a..dce810ac4 100644 --- a/drivers/block/ll_rw_blk.c +++ b/drivers/block/ll_rw_blk.c @@ -373,7 +373,7 @@ inline int blk_contig_segment(request_queue_t *q, struct bio *bio, /* * map a request to scatterlist, return number of sg entries setup. 
Caller - * must make sure sg can hold rq->nr_segments entries + * must make sure sg can hold rq->nr_phys_segments entries */ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg) { @@ -408,7 +408,7 @@ int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg } else { new_segment: if (nsegs >= q->max_segments) { - printk("map: %d >= %d, i %d, segs %d, size %ld\n", nsegs, q->max_segments, i, rq->nr_segments, rq->nr_sectors); + printk("map: %d >= %d, i %d, phys_segs %d, size %ld\n", nsegs, q->max_segments, i, rq->nr_phys_segments, rq->nr_sectors); BUG(); } @@ -435,8 +435,8 @@ static inline int ll_new_segment(request_queue_t *q, struct request *req, { int nr_segs = bio_hw_segments(q, bio); - if (req->nr_segments + nr_segs <= q->max_segments) { - req->nr_segments += nr_segs; + if (req->nr_phys_segments + nr_segs <= q->max_segments) { + req->nr_phys_segments += nr_segs; return 1; } @@ -469,7 +469,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req, static int ll_merge_requests_fn(request_queue_t *q, struct request *req, struct request *next) { - int total_segments = req->nr_segments + next->nr_segments; + int total_segments = req->nr_phys_segments + next->nr_phys_segments; if (blk_contig_segment(q, req->biotail, next->bio)) total_segments--; @@ -477,7 +477,7 @@ static int ll_merge_requests_fn(request_queue_t *q, struct request *req, if (total_segments > q->max_segments) return 0; - req->nr_segments = total_segments; + req->nr_phys_segments = total_segments; return 1; } @@ -967,16 +967,6 @@ static int __make_request(request_queue_t *q, struct bio *bio) cur_nr_sectors = bio_iovec(bio)->bv_len >> 9; rw = bio_data_dir(bio); -#if 1 /* XXX Makes no writes actually go to disk. 
- * XXX Use this on kernels you do not trust such - * XXX as current 2.5.1-preX :-) -DaveM - */ - if (rw & WRITE) { - set_bit(BIO_UPTODATE, &bio->bi_flags); - bio->bi_end_io(bio, nr_sectors); - return 0; - } -#endif /* * low level driver can indicate that it wants pages above a * certain limit bounced to low memory (ie for highmem, or even @@ -1111,7 +1101,7 @@ get_rq: req->hard_sector = req->sector = sector; req->hard_nr_sectors = req->nr_sectors = nr_sectors; req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors; - req->nr_segments = bio->bi_vcnt; + req->nr_phys_segments = bio->bi_vcnt; req->nr_hw_segments = bio_hw_segments(q, bio); req->buffer = bio_data(bio); /* see ->buffer comment above */ req->waiting = NULL; diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c index ecdcd85d5..fb638ca30 100644 --- a/drivers/ide/ide-dma.c +++ b/drivers/ide/ide-dma.c @@ -232,8 +232,8 @@ static int ide_build_sglist (ide_hwif_t *hwif, struct request *rq) nents = blk_rq_map_sg(q, rq, hwif->sg_table); - if (rq->q && nents > rq->nr_segments) - printk("ide-dma: received %d segments, build %d\n", rq->nr_segments, nents); + if (rq->q && nents > rq->nr_phys_segments) + printk("ide-dma: received %d phys segments, build %d\n", rq->nr_phys_segments, nents); if (rq_data_dir(rq) == READ) hwif->sg_dma_direction = PCI_DMA_FROMDEVICE; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3713c3284..6faaad0b7 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -186,11 +186,6 @@ void scsi_initialize_queue(Scsi_Device * SDpnt, struct Scsi_Host * SHpnt) blk_init_queue(q, scsi_request_fn); q->queuedata = (void *) SDpnt; -#ifdef DMA_CHUNK_SIZE - if (max_segments > 64) - max_segments = 64; -#endif - blk_queue_max_segments(q, max_segments); blk_queue_max_sectors(q, SHpnt->max_sectors); diff --git a/drivers/scsi/scsi_dma.c b/drivers/scsi/scsi_dma.c index 3de835130..76fbc3aaf 100644 --- a/drivers/scsi/scsi_dma.c +++ b/drivers/scsi/scsi_dma.c @@ -246,13 +246,8 @@ void 
scsi_resize_dma_pool(void) */ if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM || SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) { + /* XXX This needs fixing -DaveM */ int nents = host->sg_tablesize; -#ifdef DMA_CHUNK_SIZE - /* If the architecture does DMA sg merging, make sure - we count with at least 64 entries even for HBAs - which handle very few sg entries. */ - if (nents < 64) nents = 64; -#endif new_dma_sectors += ((nents * sizeof(struct scatterlist) + 511) >> 9) * SDpnt->queue_depth; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a723b3404..6e5cae494 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -82,7 +82,7 @@ static void __scsi_insert_special(request_queue_t *q, struct request *rq, rq->special = data; rq->q = NULL; rq->bio = rq->biotail = NULL; - rq->nr_segments = 0; + rq->nr_phys_segments = 0; rq->elevator_sequence = 0; /* diff --git a/drivers/scsi/scsi_merge.c b/drivers/scsi/scsi_merge.c index 140933cc2..23995ab3f 100644 --- a/drivers/scsi/scsi_merge.c +++ b/drivers/scsi/scsi_merge.c @@ -192,66 +192,16 @@ recount_segments(Scsi_Cmnd * SCpnt) struct request *req = &SCpnt->request; struct Scsi_Host *SHpnt = SCpnt->host; - req->nr_segments = __count_segments(req, SHpnt->unchecked_isa_dma,NULL); + req->nr_phys_segments = __count_segments(req, SHpnt->unchecked_isa_dma,NULL); } -/* - * IOMMU hackery for sparc64 - */ -#ifdef DMA_CHUNK_SIZE - -#define MERGEABLE_BUFFERS(X,Y) \ - ((((bvec_to_phys(__BVEC_END((X))) + __BVEC_END((X))->bv_len) | bio_to_phys((Y))) & (DMA_CHUNK_SIZE - 1)) == 0) - -static inline int scsi_new_mergeable(request_queue_t * q, - struct request * req, - struct bio *bio) -{ - int nr_segs = bio_hw_segments(q, bio); - - /* - * pci_map_sg will be able to merge these two - * into a single hardware sg entry, check if - * we'll have enough memory for the sg list. - * scsi.c allocates for this purpose - * min(64,sg_tablesize) entries. 
- */ - if (req->nr_segments + nr_segs > q->max_segments) - return 0; - - req->nr_segments += nr_segs; - return 1; -} - -static inline int scsi_new_segment(request_queue_t * q, - struct request * req, - struct bio *bio) -{ - int nr_segs = bio_hw_segments(q, bio); - /* - * pci_map_sg won't be able to map these two - * into a single hardware sg entry, so we have to - * check if things fit into sg_tablesize. - */ - if (req->nr_hw_segments + nr_segs > q->max_segments) - return 0; - else if (req->nr_segments + nr_segs > q->max_segments) - return 0; - - req->nr_hw_segments += nr_segs; - req->nr_segments += nr_segs; - return 1; -} - -#else /* DMA_CHUNK_SIZE */ - static inline int scsi_new_segment(request_queue_t * q, struct request * req, struct bio *bio) { int nr_segs = bio_hw_segments(q, bio); - if (req->nr_segments + nr_segs > q->max_segments) { + if (req->nr_phys_segments + nr_segs > q->max_segments) { req->flags |= REQ_NOMERGE; return 0; } @@ -260,10 +210,9 @@ static inline int scsi_new_segment(request_queue_t * q, * This will form the start of a new segment. Bump the * counter. 
*/ - req->nr_segments += nr_segs; + req->nr_phys_segments += nr_segs; return 1; } -#endif /* DMA_CHUNK_SIZE */ /* * Function: __scsi_merge_fn() @@ -306,11 +255,6 @@ __inline static int __scsi_back_merge_fn(request_queue_t * q, return 0; } -#ifdef DMA_CHUNK_SIZE - if (MERGEABLE_BUFFERS(req->biotail, bio)) - return scsi_new_mergeable(q, req, bio); -#endif - return scsi_new_segment(q, req, bio); } @@ -323,10 +267,6 @@ __inline static int __scsi_front_merge_fn(request_queue_t * q, return 0; } -#ifdef DMA_CHUNK_SIZE - if (MERGEABLE_BUFFERS(bio, req->bio)) - return scsi_new_mergeable(q, req, bio); -#endif return scsi_new_segment(q, req, bio); } @@ -396,7 +336,7 @@ inline static int scsi_merge_requests_fn(request_queue_t * q, if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) return 0; - bio_segs = req->nr_segments + next->nr_segments; + bio_segs = req->nr_phys_segments + next->nr_phys_segments; if (blk_contig_segment(q, req->biotail, next->bio)) bio_segs--; @@ -406,26 +346,11 @@ inline static int scsi_merge_requests_fn(request_queue_t * q, if (bio_segs > q->max_segments) return 0; -#ifdef DMA_CHUNK_SIZE - bio_segs = req->nr_hw_segments + next->nr_hw_segments; - if (blk_contig_segment(q, req->biotail, next->bio)) - bio_segs--; - - /* If dynamic DMA mapping can merge last segment in req with - * first segment in next, then the check for hw segments was - * done above already, so we can always merge. - */ - if (bio_segs > q->max_segments) - return 0; - - req->nr_hw_segments = bio_segs; -#endif - /* * This will form the start of a new segment. Bump the * counter. 
*/ - req->nr_segments = bio_segs; + req->nr_phys_segments = bio_segs; return 1; } @@ -483,7 +408,7 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt, if (!sg_count_valid) { count = __count_segments(req, dma_host, NULL); } else { - count = req->nr_segments; + count = req->nr_phys_segments; } /* @@ -530,7 +455,7 @@ __inline static int __init_io(Scsi_Cmnd * SCpnt, */ printk("Warning - running *really* short on DMA buffers\n"); this_count = req->current_nr_sectors; - printk("SCSI: depth is %d, # segs %d, # hw segs %d\n", SHpnt->host_busy, req->nr_segments, req->nr_hw_segments); + printk("SCSI: depth is %d, # phys segs %d, # hw segs %d\n", SHpnt->host_busy, req->nr_phys_segments, req->nr_hw_segments); goto single_segment; } diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h index 0c5e61d14..975f0bf61 100644 --- a/include/asm-i386/io.h +++ b/include/asm-i386/io.h @@ -105,13 +105,6 @@ extern void iounmap(void *addr); #define page_to_bus page_to_phys /* - * can the hardware map this into one segment or not, given no other - * constraints. - */ -#define BIOVEC_MERGEABLE(vec1, vec2) \ - ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) - -/* * readX/writeX() are used to access memory mapped devices. On some * architectures the memory mapped IO stuff needs to be accessed * differently. On the x86 architecture, we just read/write the diff --git a/include/asm-sparc64/dma.h b/include/asm-sparc64/dma.h index aeef575cc..cd8370e13 100644 --- a/include/asm-sparc64/dma.h +++ b/include/asm-sparc64/dma.h @@ -1,4 +1,4 @@ -/* $Id: dma.h,v 1.20 2001-12-11 11:39:57 davem Exp $ +/* $Id: dma.h,v 1.21 2001-12-13 04:16:52 davem Exp $ * include/asm-sparc64/dma.h * * Copyright 1996 (C) David S. Miller (davem@caip.rutgers.edu) @@ -218,12 +218,4 @@ extern int isa_dma_bridge_buggy; #define isa_dma_bridge_buggy (0) #endif -/* We support dynamic DMA remapping and adjacent SG entries - * which have addresses modulo DMA_CHUNK_SIZE will be merged - * by dma_prepare_sg(). 
- */ -#if 0 /* XXX Buggy in 2.5.x currently... */ -#define DMA_CHUNK_SIZE 8192 -#endif - #endif /* !(_ASM_SPARC64_DMA_H) */ diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h index 7d4480719..be6c5546b 100644 --- a/include/asm-sparc64/io.h +++ b/include/asm-sparc64/io.h @@ -1,4 +1,4 @@ -/* $Id: io.h,v 1.45 2001-12-12 00:13:56 davem Exp $ */ +/* $Id: io.h,v 1.46 2001-12-13 04:16:52 davem Exp $ */ #ifndef __SPARC64_IO_H #define __SPARC64_IO_H @@ -21,14 +21,6 @@ extern unsigned long bus_to_virt_not_defined_use_pci_map(volatile void *addr); extern unsigned long phys_base; #define page_to_phys(page) ((((page) - mem_map) << PAGE_SHIFT)+phys_base) -#ifdef DMA_CHUNK_SIZE -#define BIOVEC_MERGEABLE(vec1, vec2) \ - ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (DMA_CHUNK_SIZE - 1)) == 0) -#else -#define BIOVEC_MERGEABLE(vec1, vec2) \ - ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) -#endif - /* Different PCI controllers we support have their PCI MEM space * mapped to an either 2GB (Psycho) or 4GB (Sabre) aligned area, * so need to chop off the top 33 or 32 bits. 
diff --git a/include/linux/bio.h b/include/linux/bio.h index a7c0c2576..dca5debcf 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -130,6 +130,8 @@ struct bio { */ #define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1) #define __BVEC_START(bio) bio_iovec_idx((bio), 0) +#define BIOVEC_MERGEABLE(vec1, vec2) \ + ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) #define BIO_CONTIG(bio, nxt) \ BIOVEC_MERGEABLE(__BVEC_END((bio)), __BVEC_START((nxt))) #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 204ab9765..658f94e7e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -41,8 +41,19 @@ struct request { * touch them */ unsigned long hard_nr_sectors; - unsigned short nr_segments; + + /* Number of scatter-gather DMA addr+len pairs after + * physical address coalescing is performed. + */ + unsigned short nr_phys_segments; + + /* Number of scatter-gather addr+len pairs after + * physical and DMA remapping hardware coalescing is performed. + * This is the number of scatter-gather entries the driver + * will actually have to deal with after DMA mapping is done. + */ unsigned short nr_hw_segments; + unsigned int current_nr_sectors; unsigned int hard_cur_sectors; void *special; |