diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre6/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
--- /opt/kernel/linux-2.4.4-pre6/drivers/block/ll_rw_blk.c	Mon Apr 23 19:00:47 2001
+++ linux/drivers/block/ll_rw_blk.c	Mon Apr 23 16:47:23 2001
@@ -42,7 +42,7 @@
 /*
  * For the allocated request tables
  */
-static kmem_cache_t *request_cachep;
+kmem_cache_t *request_cachep;

 /*
  * The "disk" task queue is used to start the actual requests
@@ -456,7 +456,7 @@
 /*
  * No available requests for this queue, unplug the device.
  */
-static struct request *__get_request_wait(request_queue_t *q, int rw)
+static struct request *__get_request_wait(request_queue_t *q, int rw, kdev_t dev)
 {
         register struct request *rq;
         DECLARE_WAITQUEUE(wait, current);
@@ -470,14 +470,15 @@
                 if (rq)
                         break;
                 generic_unplug_device(q);
-                schedule();
+                if (!schedule_timeout(60*HZ))
+                        printk("%s: timeout rq, %d\n", kdevname(dev), rw);
         }
         remove_wait_queue(&q->wait_for_request, &wait);
         current->state = TASK_RUNNING;
         return rq;
 }

-static inline struct request *get_request_wait(request_queue_t *q, int rw)
+static inline struct request *get_request_wait(request_queue_t *q, int rw, kdev_t dev)
 {
         register struct request *rq;

@@ -486,7 +487,7 @@
         spin_unlock_irq(&io_request_lock);
         if (rq)
                 return rq;
-        return __get_request_wait(q, rw);
+        return __get_request_wait(q, rw, dev);
 }

 /* RO fail safe mechanism */
@@ -796,7 +797,7 @@
                 if (rw_ahead)
                         goto end_io;

-                freereq = __get_request_wait(q, rw);
+                freereq = __get_request_wait(q, rw, bh->b_rdev);
                 goto again;
         }

@@ -1086,6 +1087,23 @@
 extern int stram_device_init (void);
 #endif

+static void __dump_rq(struct request *rq)
+{
+        struct buffer_head *bh;
+        int sectors = 0;
+
+        printk("__dump_rq: buffer string error detected\n");
+
+        bh = rq->bh;
+        while (bh) {
+                printk("sector %lu\n", bh->b_rsector);
+                sectors += bh->b_size >> 9;
+                bh = bh->b_reqnext;
+        }
+
+        if (rq->hard_nr_sectors != sectors)
+                printk("nr_secs mismatch (%lu != %u)\n", rq->hard_nr_sectors, sectors);
+}

 /**
  * end_that_request_first - end I/O on one buffer.
@@ -1117,11 +1135,12 @@
                         kdevname(req->rq_dev), name, req->sector);

         if ((bh = req->bh) != NULL) {
+                struct buffer_head *nbh = bh->b_reqnext;
                 nsect = bh->b_size >> 9;
                 blk_finished_io(nsect);
+                if (nbh && (bh->b_rsector + (bh->b_size >> 9)) != nbh->b_rsector)
+                        __dump_rq(req);
                 req->bh = bh->b_reqnext;
-                if (req->bh && (bh->b_rsector + (bh->b_size >> 9)) != req->bh->b_rsector)
-                        printk("%s: %lu is followed by %lu\n", name, bh->b_rsector, req->bh->b_rsector);
                 bh->b_reqnext = NULL;
                 bh->b_end_io(bh, uptodate);
                 if ((bh = req->bh) != NULL) {
@@ -1330,3 +1349,4 @@
 EXPORT_SYMBOL(blkdev_release_request);
 EXPORT_SYMBOL(generic_unplug_device);
 EXPORT_SYMBOL(queued_sectors);
+EXPORT_SYMBOL(request_cachep);
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre6/drivers/block/pktcdvd.c linux/drivers/block/pktcdvd.c
--- /opt/kernel/linux-2.4.4-pre6/drivers/block/pktcdvd.c	Mon Apr 23 19:00:47 2001
+++ linux/drivers/block/pktcdvd.c	Mon Apr 23 18:23:12 2001
@@ -94,7 +94,7 @@
  *
  *************************************************************************/

-#define VERSION_CODE "v0.0.2i 21/04/2001 Jens Axboe (axboe@suse.de)"
+#define VERSION_CODE "v0.0.2i-pre3 23/04/2001 Jens Axboe (axboe@suse.de)"

 #include
 #include
@@ -125,7 +125,8 @@
  */
 #define PACKET_MAX_SIZE 32

-#define NEXT_BH(bh, nbh) (((bh)->b_rsector + ((bh)->b_size >> 9)) == (nbh)->b_rsector)
+#define NEXT_BH(bh, nbh) \
+        (((bh)->b_rsector + ((bh)->b_size >> 9)) == (nbh)->b_rsector)

 #define BH_IN_ORDER(b1, b2) ((b1)->b_rsector < (b2)->b_rsector)

@@ -154,33 +155,6 @@
         return NULL;
 }

-static void pkt_recheck_segments(struct request *rq)
-{
-        struct buffer_head *bh;
-        int nr_segments = 1, sectors;
-
-        bh = rq->bh;
-        sectors = bh->b_size >> 9;
-
-        while (bh->b_reqnext) {
-                if (!CONTIG_BH(bh, bh->b_reqnext))
-                        nr_segments++;
-                bh = bh->b_reqnext;
-                sectors += bh->b_size >> 9;
-        }
-
-        /*
-         * this is needed because it quickly gets impossible to check
-         * this at merge time due to the hole merges
-         */
-        rq->nr_segments = rq->nr_hw_segments = nr_segments;
-
-        if (sectors != rq->nr_sectors) {
-                printk("tell jens, %u != %lu\n", sectors, rq->nr_sectors);
-                BUG();
-        }
-}
-
 /*
  * The following three functions are the plugins to the ll_rw_blk
  * layer and decides whether a given request / buffer head can be
@@ -207,7 +181,10 @@
         void *ptr = q->queuedata;
         int ret;

-        if (rq->cmd == WRITE && ZONE(rq->sector, pd) != ZONE(bh->b_rsector, pd))
+        if (rq->cmd != WRITE)
+                BUG();
+
+        if (ZONE(rq->sector, pd) != ZONE(bh->b_rsector, pd))
                 return ELEVATOR_NO_MERGE;

         /*
@@ -334,17 +311,19 @@
 static inline struct buffer_head *pkt_get_hash(kdev_t dev, unsigned long block, int size)
 {
-        struct buffer_head *bh;
+        struct buffer_head *bh = NULL;

-        if ((bh = get_hash_table(dev, block, size))) {
-                if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+        bh = get_hash_table(dev, block, size);
+        if (bh) {
+                if (!test_and_set_bit(BH_Lock, &bh->b_state))
                         atomic_set_buffer_clean(bh);
-                        return bh;
+                else {
+                        brelse(bh);
+                        bh = NULL;
                 }
-                brelse(bh);
         }

-        return NULL;
+        return bh;
 }

 static void pkt_end_io_write(struct buffer_head *, int);

@@ -352,15 +331,13 @@
 static struct buffer_head *pkt_get_buffer(struct pktcdvd_device *pd, unsigned long sector, int size)
 {
+        unsigned long block = sector / (size >> 9);
         struct buffer_head *bh;

         VPRINTK("get_buffer: sector=%ld, size=%d\n", sector, size);

-        if ((bh = pkt_get_hash(pd->pkt_dev, sector / (size >> 9), size))) {
-                bh->b_private = pd;
-                bh->b_end_io = pkt_end_io_write;
-                goto out;
-        }
+        if ((bh = pkt_get_hash(pd->pkt_dev, block, size)))
+                goto got_it;

         /*
          * should not happen...
          */
@@ -393,8 +370,9 @@
         bh->b_list = PKT_BUF_LIST;
         bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req);

-out:
+got_it:
         blk_started_io(bh->b_size >> 9);
+        bh->b_blocknr = block;
         bh->b_rsector = sector;
         bh->b_dev = pd->pkt_dev;
         bh->b_rdev = pd->dev;
@@ -403,7 +381,7 @@
 static void pkt_put_buffer(struct buffer_head *bh)
 {
-        struct pktcdvd_device *pd = bh->b_private;
+        struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_dev)];
         unsigned long flags;

         if (bh->b_list != PKT_BUF_LIST)
@@ -412,7 +390,6 @@
         if (atomic_read(&bh->b_count))
                 printk("pktcdvd: put_buffer: busy buffer\n");

-        bh->b_private = NULL;
         bh->b_state = 0;
         bh->b_reqnext = NULL;

@@ -424,6 +401,86 @@
         atomic_dec(&pd->cdrw.pending_bh);
 }

+static void pkt_rq_end_io(struct request *rq)
+{
+        struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
+
+#if 0
+        if (rq->cmd == WRITE_PACKET)
+                rq->cmd = WRITE;
+
+        VPRINTK("pkt_rq_end_io: cmd=%d, q=%p\n", rq->cmd, rq->q);
+#endif
+
+        if (pd->rq == NULL)
+                printk("rq_end_io: no current rq\n");
+
+        atomic_dec(&pd->wrqcnt);
+        pd->rq = NULL;
+        rq->end_io = NULL;
+
+        if (!test_and_clear_bit(PACKET_BUSY, &pd->flags))
+                printk("rq_end_io: BUSY not set\n");
+
+        if (!test_and_clear_bit(PACKET_RQ, &pd->flags))
+                printk("rq_end_io: RQ not set\n");
+
+        wake_up(&pd->wqueue);
+}
+
+static inline void __pkt_inject_request(request_queue_t *q, struct request *req)
+{
+        struct list_head *head = &q->queue_head;
+
+        VPRINTK("__pkt_inject_request: list_empty == %d, size=%d, cmd=%d\n",
+                list_empty(&q->queue_head), req->bh->b_size >> 9, req->cmd);
+
+        if (list_empty(&q->queue_head))
+                q->plug_device_fn(q, req->rq_dev);
+        else if (q->head_active && !q->plugged)
+                head = head->next;
+
+        list_add(&req->queue, head);
+}
+
+static void pkt_inject_request(request_queue_t *q, struct request *rq)
+{
+        rq->end_io = pkt_rq_end_io;
+        spin_lock_irq(&io_request_lock);
+        __pkt_inject_request(q, rq);
+        spin_unlock_irq(&io_request_lock);
+}
+
+static int pkt_throttle_speed(struct pktcdvd_device *pd)
+{
+        struct request *rq;
+
+        if (pd->speed == 1)
+                return 1;
+
+        /*
+         * in theory we could just steal a request from our READ freelist
+         * in the queue, since they will never be used...
+         */
+        rq = kmem_cache_alloc(request_cachep, SLAB_ATOMIC);
+        if (!rq)
+                return 1;
+
+        memset(rq, 0, sizeof(*rq));
+        INIT_LIST_HEAD(&rq->queue);
+        INIT_LIST_HEAD(&rq->table);
+        rq->cmd = SPECIAL;
+        rq->sector = PKT_THROTTLE_SPEED;
+        rq->special = pd->rq;
+
+        pd->rq = NULL;
+        clear_bit(PACKET_RQ, &pd->flags);
+        clear_bit(PACKET_BUSY, &pd->flags);
+
+        __pkt_inject_request(&pd->cdrw.r_queue, rq);
+        return 0;
+}
+
 /*
  * we use this as our default b_end_io handler, since we need to take
  * the entire request off the list if just on of the clusters fail.
@@ -434,7 +491,8 @@
 static void pkt_end_io_write(struct buffer_head *bh, int uptodate)
 {
-        struct pktcdvd_device *pd = (struct pktcdvd_device *) bh->b_private;
+        struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];
+        unsigned long flags;

         VPRINTK("end_io_write: bh=%ld, uptodate=%d\n", bh->b_blocknr, uptodate);

@@ -443,39 +501,40 @@
         mark_buffer_uptodate(bh, uptodate);
         unlock_buffer(bh);
+        brelse(bh);

-        if (bh->b_list == PKT_BUF_LIST) {
-                brelse(bh);
+        if (bh->b_list == PKT_BUF_LIST)
                 pkt_put_buffer(bh);
-        }

         /*
          * obviously, more needs to be done here.
          */
         if (!uptodate) {
-                printk("pktcdvd: %s: write error\n", pd->name);
-                set_bit(PACKET_READONLY, &pd->flags);
+                spin_lock_irqsave(&io_request_lock, flags);
+                if (pkt_throttle_speed(pd)) {
+                        printk("pktcdvd: %s: write error\n", pd->name);
+                        set_bit(PACKET_READONLY, &pd->flags);
+                }
+                spin_unlock_irqrestore(&io_request_lock, flags);
         }

         pd->stats.bh_e++;
 }

-static void pkt_init_bh(struct pktcdvd_device *pd, struct request *rq)
+static void pkt_init_rq(struct pktcdvd_device *pd, struct request *rq)
 {
-        struct buffer_head *bh = rq->bh;
-        unsigned cnt = 0;
+        struct buffer_head *bh;
+        unsigned int cnt, nr_segments;

         VPRINTK("init_bh: cmd=%d, bh=%ld\n", rq->cmd, bh->b_blocknr);

+        cnt = 0;
+        nr_segments = 1;
+        bh = rq->bh;
         while (bh) {
-#if 1
-                if (bh->b_list == PKT_BUF_LIST) {
-                        bh->b_private = pd;
-                        bh->b_end_io = pkt_end_io_write;
-                }
-#else
+                struct buffer_head *nbh = bh->b_reqnext;
+
+                bh->b_rdev = pd->pkt_dev;
                 bh->b_end_io = pkt_end_io_write;
-                bh->b_private = pd;
-#endif

                 /*
                  * the buffer better be uptodate, mapped, and locked!
@@ -487,20 +546,26 @@
                 if (!buffer_mapped(bh))
                         printk("%lu not mapped\n", bh->b_rsector);

-                /*
-                 * if this happens, do report
-                 */
-                if (bh->b_reqnext) {
-                        if ((bh->b_rsector + (bh->b_size >> 9)) != bh->b_reqnext->b_rsector)
-                                printk("tell jens, %lu follows %lu\n", bh->b_reqnext->b_rsector, bh->b_rsector);
-                        if (bh->b_rsector >= bh->b_reqnext->b_rsector)
-                                printk("tell jens, order %lu >= %lu\n", bh->b_rsector, bh->b_reqnext->b_rsector);
+                if (nbh) {
+                        if (!CONTIG_BH(bh, nbh))
+                                nr_segments++;
+
+                        /*
+                         * if this happens, do report
+                         */
+                        if ((bh->b_rsector + (bh->b_size >> 9)) != nbh->b_rsector)
+                                printk("tell jens, %lu follows %lu\n",
+                                        nbh->b_rsector, bh->b_rsector);
+                        if (bh->b_rsector >= nbh->b_rsector)
+                                printk("tell jens, order %lu >= %lu\n",
+                                        bh->b_rsector, nbh->b_rsector);
                 }

-                bh = bh->b_reqnext;
-                cnt += rq->current_nr_sectors;
+                cnt += bh->b_size >> 9;
+                bh = nbh;
         }

+        rq->nr_segments = rq->nr_hw_segments = nr_segments;
+
         if (cnt != rq->nr_sectors) {
                 printk("botched request %u (%lu)\n", cnt, rq->nr_sectors);
                 BUG();
@@ -526,10 +591,13 @@
         if (!pd->settings.size)
                 return 0;

+        if (!(rq->cmd & WRITE))
+                return 1;
+
         return ZONE(rq->sector, pd) == ZONE(rq->sector + rq->nr_sectors - 1, pd);
 }

-#if defined(CDROM_CDROM_PKTCDVD_BLOCKFREE)
+#if defined(CONFIG_CDROM_PKTCDVD_BEMPTY)
 static void pkt_init_buffer(struct buffer_head *bh)
 {
         set_bit(BH_Uptodate, &bh->b_state);
@@ -539,28 +607,37 @@
 static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
 {
-        struct super_block *sb = get_super(pd->pkt_dev);
-        struct super_operations *sop = sb ? sb->s_op : NULL;
-        unsigned long packet = 0, blocknr = bh->b_blocknr;
-
-        if (sop && sop->block_empty) {
-                if (sop->block_empty(sb, blocknr, &packet)) {
-                        pkt_init_buffer(pd, bh);
-                        return 1;
-                }
+        struct super_block *sb;
+        struct super_operations *sop;
+        unsigned long packet;
+        int ret;
+
+        ret = 0;
+        if ((sb = get_super(pd->pkt_dev)) == NULL)
+                goto out;
+        if ((sop = sb->s_op) == NULL)
+                goto out;
+        if (sop->block_empty == NULL)
+                goto out;
+
+        packet = 0;
+        if (sop->block_empty(sb, bh->b_blocknr, &packet)) {
+                pkt_init_buffer(pd, bh);
+                ret = 1;
         }

-        return 0;
+out:
+        return ret;
 }

-#else /* defined(CDROM_CDROM_PKTCDVD_BLOCKFREE) */
+#else /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */

 static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
 {
         return 0;
 }

-#endif /* defined(CDROM_CDROM_PKTCDVD_BLOCKFREE) */
+#endif /* defined(CONFIG_CDROM_PKTCDVD_BEMPTY) */

 /*
  * basically just does a ll_rw_block for the bhs given to use, but we
@@ -617,13 +694,11 @@
         start_s = rq->sector - (rq->sector & (pd->settings.size - 1));
         end_s = start_s + pd->settings.size;

-#if 0
         VPRINTK("pkt_gather_data: cmd=%d\n", rq->cmd);
         VPRINTK("need %d sectors for %s\n", sectors, kdevname(pd->dev));
         VPRINTK("from %lu to %lu ", start_s, end_s);
         VPRINTK("(%lu - %lu)\n", rq->bh->b_rsector,
                 rq->bhtail->b_rsector + rq->current_nr_sectors);
-#endif

         if (blksize_size[MAJOR(pd->dev)]) {
                 if (rq->bh->b_size != blksize_size[MAJOR(pd->dev)][MINOR(pd->dev)]) {
@@ -639,11 +714,13 @@
         bh = rq->bh;
         while (bh) {
                 int secs = bh->b_size >> 9;
-                if (secs != 4)
+
+                if (bh->b_size != CD_FRAMESIZE)
                         BUG();

                 index = (bh->b_rsector & (pd->settings.size - 1)) / secs;
+                atomic_inc(&bh->b_count);
                 bhs[index] = bh;
                 bh = bh->b_reqnext;
         }
@@ -696,11 +773,12 @@
         rq->current_nr_sectors = rq->bh->b_size >> 9;
         rq->hard_nr_sectors = rq->nr_sectors;
         rq->sector = rq->hard_sector = start_s;
-        //rq->cmd = WRITE_PACKET;
+#if 0
+        rq->cmd = WRITE_PACKET;
+#endif

-//        VPRINTK("unlocked last %lu\n", rq->bhtail->b_rsector);
-        pkt_recheck_segments(rq);
-        pkt_init_bh(pd, rq);
+        VPRINTK("unlocked last %lu\n", rq->bhtail->b_rsector);
+        pkt_init_rq(pd, rq);

         pkt_account_rq(pd, sectors, rq->nr_sectors, rq->current_nr_sectors);

         /*
@@ -714,62 +792,6 @@
         return 0;
 }

-static void pkt_rq_end_io(struct request *rq)
-{
-        struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
-        unsigned long flags;
-
-#if 0
-        if (rq->cmd == WRITE_PACKET)
-                rq->cmd = WRITE;
-
-        VPRINTK("pkt_rq_end_io: cmd=%d, q=%p\n", rq->cmd, rq->q);
-#endif
-
-        spin_lock_irqsave(&io_request_lock, flags);
-
-        if (pd->rq == NULL)
-                printk("rq_end_io: no current rq\n");
-
-        atomic_dec(&pd->wrqcnt);
-        pd->rq = NULL;
-        rq->end_io = NULL;
-
-        if (!test_and_clear_bit(PACKET_BUSY, &pd->flags))
-                printk("rq_end_io: BUSY not set\n");
-
-        if (!test_and_clear_bit(PACKET_RQ, &pd->flags))
-                printk("rq_end_io: RQ not set\n");
-
-        spin_unlock_irqrestore(&io_request_lock, flags);
-        wake_up(&pd->wqueue);
-}
-
-static inline void __pkt_inject_request(request_queue_t *q, struct request *req)
-{
-        struct list_head *head = &q->queue_head;
-
-#if 0
-        VPRINTK("__pkt_inject_request: list_empty == %d, size=%d, cmd=%d\n",
-                list_empty(&q->queue_head), req->bh->b_size >> 9, req->cmd);
-#endif
-
-        if (list_empty(&q->queue_head))
-                q->plug_device_fn(q, req->rq_dev);
-        else if (q->head_active && !q->plugged)
-                head = head->next;
-
-        list_add(&req->queue, head);
-}
-
-static void pkt_inject_request(request_queue_t *q, struct request *rq)
-{
-        rq->end_io = pkt_rq_end_io;
-        spin_lock_irq(&io_request_lock);
-        __pkt_inject_request(q, rq);
-        spin_unlock_irq(&io_request_lock);
-}
-
 /*
  * Returns: 1, keep 'em coming -- 0, wait for wakeup
  */
@@ -778,9 +800,7 @@
 {
         int ret;

-#if 0
         VPRINTK("do_request: bh=%ld, nr_sectors=%ld, size=%d, cmd=%d\n",
                 rq->bh->b_blocknr, rq->nr_sectors, pd->settings.size, rq->cmd);
-#endif

         /*
          * perfect match. the merge_* functions have already made sure that
@@ -788,7 +808,9 @@
          * count matches it's good.
          */
         if (rq->nr_sectors == pd->settings.size) {
-                //rq->cmd = WRITE_PACKET;
+#if 0
+                rq->cmd = WRITE_PACKET;
+#endif
                 pkt_account_rq(pd, 0, rq->nr_sectors, rq->current_nr_sectors);
                 return 0;
         }
@@ -808,6 +830,35 @@
         return ret;
 }

+static int pkt_set_speed(struct pktcdvd_device *pd, unsigned speed);
+static int pkt_get_speed(struct pktcdvd_device *pd);
+
+static void pkt_do_special_rq(struct pktcdvd_device *pd, struct request *rq)
+{
+        switch (rq->sector) {
+                case PKT_THROTTLE_SPEED: {
+                        struct request *old_rq = rq->special;
+                        int speed = pd->speed / 2;
+
+                        (void) pkt_set_speed(pd, speed);
+                        (void) pkt_get_speed(pd);
+
+                        kmem_cache_free(request_cachep, rq);
+
+                        spin_lock_irq(&io_request_lock);
+                        pd->rq = NULL;
+                        clear_bit(PACKET_RQ, &pd->flags);
+                        clear_bit(PACKET_BUSY, &pd->flags);
+                        __pkt_inject_request(&pd->cdrw.r_queue, old_rq);
+                        spin_unlock_irq(&io_request_lock);
+                        printk("pktcdvd: speed throttled (R/W) %u/%u\n", (pd->speed * 3) / 2, pd->speed);
+                        break;
+                }
+                default:
+                        BUG();
+        }
+}
+
 /*
  * handle the requests that got queued for this writer
  *
@@ -819,9 +870,7 @@
         struct request *rq;
         int ret;

-#if 0
         VPRINTK("handle_queue\n");
-#endif

         /*
          * nothing for us to do
@@ -853,6 +902,11 @@
         if (rq->cmd == READ)
                 BUG();

+        if (rq->cmd == SPECIAL) {
+                pkt_do_special_rq(pd, rq);
+                goto out;
+        }
+
         if ((rq->current_nr_sectors << 9) != CD_FRAMESIZE) {
                 pkt_kill_request(pd, 0, "wrong size");
                 goto out;
@@ -929,10 +983,8 @@
                 /*
                  * got SIGKILL
                  */
-                if (signal_pending(current)) {
-                        printk("pktcdvd: thread got SIGKILL\n");
+                if (signal_pending(current))
                         break;
-                }
         }

         printk("pktcdvd: kernel thread %s stopped\n", pd->name);
@@ -958,7 +1010,6 @@
 {
         struct pktcdvd_device *pd = (struct pktcdvd_device *) q->queuedata;
         request_queue_t *pdq = NULL;
-        struct request *prq = NULL;

         if (list_empty(&q->queue_head))
                 return;
@@ -980,7 +1031,7 @@
                  * system, this is just to prevent accidents like that from
                  * happening again
                  */
-                if (test_bit(PACKET_READONLY, &pd->flags)) {
+                if ((rq->cmd & WRITE) && test_bit(PACKET_READONLY, &pd->flags)) {
                         blkdev_dequeue_request(rq);
                         __pkt_kill_request(rq, 0, pd->name);
                         continue;
@@ -1005,7 +1056,7 @@
                         break;
                 }

-                pd->rq = prq = rq;
+                pd->rq = rq;
                 blkdev_dequeue_request(rq);
         }
         VPRINTK("wake up wait queue\n");
@@ -1498,8 +1549,7 @@
                         return ret;
                 clear_bit(PACKET_READONLY, &pd->flags);
         } else {
-                if ((ret = pkt_adjust_speed(pd, 0xff)))
-                        return ret;
+                (void) pkt_adjust_speed(pd, 0xff);
                 set_bit(PACKET_READONLY, &pd->flags);
         }

@@ -1596,8 +1646,7 @@
  *
  * rules: always merge whenever possible, and support hole merges
  */
-static int pkt_hole_merge(struct pktcdvd_device *pd, struct request *rq,
-                          struct buffer_head *bh)
+static int pkt_hole_merge(struct request *rq, struct buffer_head *bh)
 {
         struct buffer_head *tbh, *nbh;
         int count = bh->b_size >> 9;
@@ -1618,10 +1667,10 @@
         } else {
                 nbh = NULL;
                 for (tbh = rq->bh; tbh->b_reqnext; tbh = tbh->b_reqnext) {
-                        if (tbh->b_rsector == sector)
+                        nbh = tbh->b_reqnext;
+                        if (tbh->b_rsector == sector || nbh->b_rsector == sector)
                                 return ELEVATOR_NO_MERGE;

-                        nbh = tbh->b_reqnext;
                         if (sector < nbh->b_rsector && sector > tbh->b_rsector)
                                 break;
                 }
@@ -1632,6 +1681,11 @@
                 bh->b_reqnext = nbh;
                 tbh->b_reqnext = bh;
+
+                if (nbh->b_rsector <= bh->b_rsector)
+                        printk("nbh %lu <= bh %lu\n", nbh->b_rsector, bh->b_rsector);
+                if (bh->b_rsector <= tbh->b_rsector)
+                        printk("bh %lu <= tbh %lu\n", bh->b_rsector, tbh->b_rsector);
         }

         rq->nr_sectors = rq->hard_nr_sectors += count;
@@ -1639,8 +1693,8 @@
         return ELEVATOR_PRIV_MERGE;
 }

-inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq,
-                            struct list_head *head)
+inline int pkt_bh_rq_ordered(struct buffer_head *bh, struct request *rq,
+                             struct list_head *head)
 {
         struct list_head *next;
         struct request *next_rq;
@@ -1649,31 +1703,16 @@
         if (next == head)
                 return 0;

-        /*
-         * if the device is different (usually on a different partition),
-         * just check if bh is after rq
-         */
         next_rq = blkdev_entry_to_request(next);
         if (next_rq->rq_dev != rq->rq_dev)
                 return bh->b_rsector > rq->sector;

-        /*
-         * ok, rq, next_rq and bh are on the same device. if bh is in between
-         * the two, this is the sweet spot
-         */
         if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
                 return 1;

-        /*
-         * next_rq is ordered wrt rq, but bh is not in between the two
-         */
         if (next_rq->sector > rq->sector)
                 return 0;

-        /*
-         * next_rq and rq not ordered, if we happen to be either before
-         * next_rq or after rq insert here anyway
-         */
         if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
                 return 1;

@@ -1697,7 +1736,7 @@
                         continue;
                 if (__rq->rq_dev != bh->b_rdev)
                         continue;
-                if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head))
+                if (!*req && pkt_bh_rq_ordered(bh, __rq, &q->queue_head))
                         *req = __rq;
                 if (__rq->cmd != rw)
                         continue;
@@ -1712,7 +1751,7 @@
                         *req = __rq;
                         break;
                 } else if (ZONE(__rq->sector, pd) == ZONE(bh->b_rsector, pd)) {
-                        ret = pkt_hole_merge(pd, __rq, bh);
+                        ret = pkt_hole_merge(__rq, bh);
                         if (ret == ELEVATOR_NO_MERGE)
                                 continue;
                         break;
@@ -1725,7 +1764,7 @@

 static int pkt_make_request(request_queue_t *q, int rw, struct buffer_head *bh)
 {
-        struct pktcdvd_device *pd = q->queuedata;
+        struct pktcdvd_device *pd = &pkt_devs[MINOR(bh->b_rdev)];

         /*
          * quick remap a READ
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre6/drivers/scsi/scsi_lib.c linux/drivers/scsi/scsi_lib.c
--- /opt/kernel/linux-2.4.4-pre6/drivers/scsi/scsi_lib.c	Mon Apr 23 19:00:47 2001
+++ linux/drivers/scsi/scsi_lib.c	Mon Apr 23 17:39:45 2001
@@ -330,6 +330,25 @@
         spin_unlock_irqrestore(&io_request_lock, flags);
 }

+static void __scsi_dump_rq(struct request *rq)
+{
+        struct buffer_head *bh;
+        int sectors = 0;
+
+        printk("__dump_rq: buffer string error detected\n");
+
+        bh = rq->bh;
+        while (bh) {
+                printk("sector %lu\n", bh->b_rsector);
+                sectors += bh->b_size >> 9;
+                bh = bh->b_reqnext;
+        }
+
+        if (rq->hard_nr_sectors != sectors)
+                printk("nr_secs mismatch (%lu != %u)\n", rq->hard_nr_sectors, sectors);
+}
+
+
 /*
  * Function: scsi_end_request()
  *
@@ -374,8 +393,11 @@
         }
         do {
                 if ((bh = req->bh) != NULL) {
+                        struct buffer_head *nbh = bh->b_reqnext;
                         nsect = bh->b_size >> 9;
                         blk_finished_io(nsect);
+                        if (nbh && (bh->b_rsector + (bh->b_size >> 9)) != nbh->b_rsector)
+                                __scsi_dump_rq(req);
                         req->bh = bh->b_reqnext;
                         req->nr_sectors -= nsect;
                         req->sector += nsect;
@@ -424,8 +446,13 @@
         }
         add_blkdev_randomness(MAJOR(req->rq_dev));

-        if (req->end_io)
+        if (req->end_io) {
+                unsigned long flags;
+
+                spin_lock_irqsave(&io_request_lock, flags);
                 req->end_io(req);
+                spin_unlock_irqrestore(&io_request_lock, flags);
+        }

         SDpnt = SCpnt->device;
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre6/fs/buffer.c linux/fs/buffer.c
--- /opt/kernel/linux-2.4.4-pre6/fs/buffer.c	Mon Apr 23 19:00:47 2001
+++ linux/fs/buffer.c	Mon Apr 23 14:26:15 2001
@@ -1068,7 +1068,10 @@

 static __inline__ void __mark_dirty(struct buffer_head *bh)
 {
-        bh->b_flushtime = jiffies + bdf_prm.b_un.age_buffer;
+        bh->b_flushtime = jiffies;
+        if (MAJOR(bh->b_rdev) != PACKET_MAJOR)
+                bh->b_flushtime += bdf_prm.b_un.age_buffer;
+
         refile_buffer(bh);
 }

diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre6/include/linux/blkdev.h linux/include/linux/blkdev.h
--- /opt/kernel/linux-2.4.4-pre6/include/linux/blkdev.h	Mon Apr 23 19:00:47 2001
+++ linux/include/linux/blkdev.h	Mon Apr 23 13:35:22 2001
@@ -6,6 +6,7 @@
 #include
 #include
 #include
+#include

 struct request_queue;
 typedef struct request_queue request_queue_t;
@@ -181,6 +182,8 @@
 extern int * max_segments[MAX_BLKDEV];

 extern atomic_t queued_sectors;
+
+extern kmem_cache_t *request_cachep;

 #define MAX_SEGMENTS 128
 #define MAX_SECTORS 255
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre6/include/linux/pktcdvd.h linux/include/linux/pktcdvd.h
--- /opt/kernel/linux-2.4.4-pre6/include/linux/pktcdvd.h	Mon Apr 23 19:00:47 2001
+++ linux/include/linux/pktcdvd.h	Mon Apr 23 14:43:16 2001
@@ -25,6 +25,7 @@
  * status as soon as the cdb is validated).
  */
 #if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
+#warning Enabling write caching, use at your own risk
 #define USE_WCACHING 1
 #else
 #define USE_WCACHING 0
@@ -94,6 +95,11 @@
 #define PACKET_MCN "4a656e734178626f65323030300000"

 #undef PACKET_USE_LS
+
+/*
+ * special requests
+ */
+#define PKT_THROTTLE_SPEED 1

 /*
  * Very crude stats for now
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre6/lib/rwsem.c linux/lib/rwsem.c
--- /opt/kernel/linux-2.4.4-pre6/lib/rwsem.c	Sun Apr 22 12:24:13 2001
+++ linux/lib/rwsem.c	Sun Apr 22 12:30:35 2001
@@ -54,13 +54,13 @@

         /* check the wait queue is populated */
         waiter = sem->wait_front;

-        if (__builtin_expect(!waiter,0)) {
+        if (!waiter) {
                 printk("__rwsem_do_wake(): wait_list unexpectedly empty\n");
                 BUG();
                 goto out;
         }

-        if (__builtin_expect(!waiter->flags,0)) {
+        if (!waiter->flags) {
                 printk("__rwsem_do_wake(): wait_list front apparently not waiting\n");
                 BUG();
                 goto out;
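
Two small user-space sketches follow, to illustrate what the patch checks; they are mocks, not kernel code. The cut-down struct definitions and the helper names (rq_chain_ok(), zone(), PACKET_SECTORS) are invented for the examples.

First, the invariant that __dump_rq() (and __scsi_dump_rq() in scsi_lib.c) test on a request's buffer chain: every buffer_head must start at the sector where the previous one ended, and the per-buffer sector counts must add up to the request's hard_nr_sectors. A minimal stand-alone version, with the structs reduced to just the fields the check reads:

	#include <stdio.h>

	/* cut-down mock of the 2.4 buffer_head/request pair (illustration only) */
	struct buffer_head {
		unsigned long b_rsector;        /* start sector on the real device */
		unsigned int b_size;            /* buffer size in bytes */
		struct buffer_head *b_reqnext;  /* next buffer in this request */
	};

	struct request {
		struct buffer_head *bh;
		unsigned long hard_nr_sectors;
	};

	/* same two checks __dump_rq makes: contiguity and total sector count */
	static int rq_chain_ok(const struct request *rq)
	{
		const struct buffer_head *bh = rq->bh;
		unsigned long sectors = 0;

		while (bh) {
			unsigned int nsect = bh->b_size >> 9;

			if (bh->b_reqnext &&
			    bh->b_rsector + nsect != bh->b_reqnext->b_rsector) {
				printf("hole: %lu is followed by %lu\n",
				       bh->b_rsector, bh->b_reqnext->b_rsector);
				return 0;
			}
			sectors += nsect;
			bh = bh->b_reqnext;
		}

		if (sectors != rq->hard_nr_sectors) {
			printf("nr_secs mismatch (%lu != %lu)\n",
			       rq->hard_nr_sectors, sectors);
			return 0;
		}
		return 1;
	}

	int main(void)
	{
		struct buffer_head b2 = { 8, 2048, NULL };      /* 4 sectors at 8 */
		struct buffer_head b1 = { 4, 2048, &b2 };       /* 4 sectors at 4 */
		struct request rq = { &b1, 8 };

		printf("chain %s\n", rq_chain_ok(&rq) ? "ok" : "broken");
		return 0;
	}

Changing b2.b_rsector to anything but 8 reproduces the "is followed by" hole that the in-kernel check flags.

Second, the zone arithmetic behind ZONE() and the start_s computation in pkt_gather_data: with a power-of-two packet size, masking off the low bits of a sector gives the packet (zone) it belongs to, and a request can go out as a single packet only if its first and last sectors land in the same zone. Again a mock; the packet size is hard-coded here, where the driver reads it from pd->settings.size:

	#include <stdio.h>
	#include <assert.h>

	/* packet size in sectors; must be a power of two (illustrative value) */
	#define PACKET_SECTORS 32

	/* first sector of the packet a given sector falls into */
	static unsigned long zone(unsigned long sector)
	{
		return sector & ~(unsigned long)(PACKET_SECTORS - 1);
	}

	int main(void)
	{
		unsigned long rq_sector = 70, nr_sectors = 8;

		/* same computation as start_s in pkt_gather_data */
		unsigned long start_s = rq_sector - (rq_sector & (PACKET_SECTORS - 1));

		assert(start_s == zone(rq_sector));     /* both are 64 */

		/* the write-ok test from the patch: must not cross a zone */
		if (zone(rq_sector) == zone(rq_sector + nr_sectors - 1))
			printf("rq at %lu (%lu sectors) fits in zone %lu\n",
			       rq_sector, nr_sectors, start_s);
		return 0;
	}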