diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre5/drivers/block/Config.in linux/drivers/block/Config.in
--- /opt/kernel/linux-2.4.4-pre5/drivers/block/Config.in	Sat Apr 21 18:03:32 2001
+++ linux/drivers/block/Config.in	Sat Apr 21 14:15:35 2001
@@ -40,6 +40,7 @@
 tristate 'Packet writing on CD/DVD media' CONFIG_CDROM_PKTCDVD
 if [ "$CONFIG_CDROM_PKTCDVD" != "n" ]; then
    int '  Free buffers for data gathering' CONFIG_CDROM_PKTCDVD_BUFFERS 256
+   bool '  Enable write caching' CONFIG_CDROM_PKTCDVD_WCACHE n
 fi
 
 tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
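A note on the new config option: CONFIG_CDROM_PKTCDVD_WCACHE defaults to n and
selects USE_WCACHING in pktcdvd.h (see the end of this patch), so the drive
write cache is only enabled if you explicitly ask for it:

	CONFIG_CDROM_PKTCDVD_WCACHE=y  ->  USE_WCACHING 1  ->  pkt_write_caching(pd, 1)

Leaving it off is the safe choice -- with caching on, the drive returns good
status as soon as the cdb is validated, so a write error only shows up later
and not all drives recover gracefully from that.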
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre5/drivers/block/elevator.c linux/drivers/block/elevator.c
--- /opt/kernel/linux-2.4.4-pre5/drivers/block/elevator.c	Sat Apr 21 18:03:32 2001
+++ linux/drivers/block/elevator.c	Sat Apr 21 01:49:17 2001
@@ -36,39 +36,39 @@
 inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq,
 			    struct list_head *head)
 {
-	struct list_head *prev;
-	struct request *prev_rq;
+	struct list_head *next;
+	struct request *next_rq;
 
-	prev = rq->queue.prev;
-	if (prev == head)
+	next = rq->queue.next;
+	if (next == head)
 		return 0;
 
 	/*
	 * if the device is different (usually on a different partition),
	 * just check if bh is after rq
	 */
-	prev_rq = blkdev_entry_to_request(prev);
-	if (prev_rq->rq_dev != rq->rq_dev)
-		return bh->b_rsector > prev_rq->sector;
+	next_rq = blkdev_entry_to_request(next);
+	if (next_rq->rq_dev != rq->rq_dev)
+		return bh->b_rsector > rq->sector;
 
 	/*
-	 * ok, rq, prev_rq and bh are on the same device. if bh is in between
+	 * ok, rq, next_rq and bh are on the same device. if bh is in between
	 * the two, this is the sweet spot
	 */
-	if (bh->b_rsector < rq->sector && bh->b_rsector > prev_rq->sector)
+	if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector)
 		return 1;
 
 	/*
-	 * prev_rq is ordered wrt rq, but bh is not in between the two
+	 * next_rq is ordered wrt rq, but bh is not in between the two
	 */
-	if (rq->sector > prev_rq->sector)
+	if (next_rq->sector > rq->sector)
 		return 0;
 
 	/*
-	 * prev_rq and rq not ordered, if we happen to be either before
-	 * prev_rq or after rq insert here anyway
+	 * next_rq and rq not ordered, if we happen to be either before
+	 * next_rq or after rq insert here anyway
	 */
-	if (bh->b_rsector > prev_rq->sector || bh->b_rsector < rq->sector)
+	if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector)
 		return 1;
 
 	return 0;
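The elevator.c change flips bh_rq_in_between() from looking at the request
*before* rq to the one *after* it: elevator_linus_merge() scans the queue from
the tail backwards and inserts bh after the request it settles on, so the
right question is whether bh sorts between rq and rq's successor. A small
userspace model of the fixed test (not kernel code, sector numbers made up):

#include <stdio.h>

/*
 * mirrors the fixed bh_rq_in_between(): should a buffer starting at
 * bh_sec be inserted between a request starting at rq_sec and its
 * successor starting at next_sec?
 */
static int in_between(unsigned long bh_sec, unsigned long rq_sec,
                      unsigned long next_sec)
{
	/* sweet spot: rq < bh < next */
	if (bh_sec < next_sec && bh_sec > rq_sec)
		return 1;

	/* rq and next are ordered, but bh falls outside them */
	if (next_sec > rq_sec)
		return 0;

	/* rq and next not ordered: before next or after rq is fine anyway */
	if (bh_sec > rq_sec || bh_sec < next_sec)
		return 1;

	return 0;
}

int main(void)
{
	printf("%d\n", in_between(150, 100, 200));	/* 1 - sweet spot */
	printf("%d\n", in_between(250, 100, 200));	/* 0 - ordered, outside */
	printf("%d\n", in_between(250, 200, 100));	/* 1 - wrap point */
	return 0;
}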
VPRINTK("get_buffer: sector=%ld, size=%d\n", sector, size); -#endif -#if 0 if ((bh = pkt_get_hash(pd->pkt_dev, sector / (size >> 9), size))) { bh->b_private = pd; bh->b_end_io = pkt_end_io_write; goto out; } -#endif /* * should not happen... @@ -420,10 +393,8 @@ bh->b_list = PKT_BUF_LIST; bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req); -#if 0 out: -#endif - bh->b_blocknr = sector / bh->b_size >> 9; + blk_started_io(bh->b_size >> 9); bh->b_rsector = sector; bh->b_dev = pd->pkt_dev; bh->b_rdev = pd->dev; @@ -435,10 +406,6 @@ struct pktcdvd_device *pd = bh->b_private; unsigned long flags; -#if 0 - VPRINTK("put_buffer: bh=%ld\n", bh->b_blocknr); -#endif - if (bh->b_list != PKT_BUF_LIST) BUG(); @@ -469,9 +436,7 @@ { struct pktcdvd_device *pd = (struct pktcdvd_device *) bh->b_private; -#if 0 VPRINTK("end_io_write: bh=%ld, uptodate=%d\n", bh->b_blocknr, uptodate); -#endif atomic_set_buffer_clean(bh); clear_bit(BH_Req, &bh->b_state); @@ -499,16 +464,13 @@ struct buffer_head *bh = rq->bh; unsigned cnt = 0; -#if 0 VPRINTK("init_bh: cmd=%d, bh=%ld\n", rq->cmd, bh->b_blocknr); -#endif while (bh) { #if 1 if (bh->b_list == PKT_BUF_LIST) { bh->b_private = pd; bh->b_end_io = pkt_end_io_write; - blk_started_io(bh->b_size >> 9); } #else bh->b_end_io = pkt_end_io_write; @@ -630,6 +592,7 @@ __pkt_kill_request(pd->rq, uptodate, pd->name); pd->rq = NULL; clear_bit(PACKET_RQ, &pd->flags); + clear_bit(PACKET_BUSY, &pd->flags); spin_unlock_irq(&io_request_lock); } @@ -839,11 +802,9 @@ } ret = pkt_gather_data(pd, rq); - if (ret) { - clear_bit(PACKET_RQ, &pd->flags); - clear_bit(PACKET_BUSY, &pd->flags); + if (ret) pkt_kill_request(pd, 0, "pkt_gather_data"); - } + return ret; } @@ -959,13 +920,8 @@ } while (1); set_current_state(TASK_INTERRUPTIBLE); -#if 0 - VPRINTK("before generic_unplug_device\n"); -#endif + generic_unplug_device(q); -#if 0 - VPRINTK("after generic_unplug_device\n"); -#endif schedule(); remove_wait_queue(&pd->wqueue, &wait); @@ -1002,13 +958,8 @@ { struct pktcdvd_device *pd = (struct pktcdvd_device *) q->queuedata; request_queue_t *pdq = NULL; - int max_segments = MAX_SEGMENTS; struct request *prq = NULL; -#if 0 - VPRINTK("pkt_request\n"); -#endif - if (list_empty(&q->queue_head)) return; @@ -1017,19 +968,12 @@ while (!list_empty(&q->queue_head)) { struct request *rq = blkdev_entry_next_request(&q->queue_head); - if (!rq) { - VPRINTK("rq=NULL\n"); - BUG(); - } VPRINTK("pkt_request: cmd=%d, rq=%p, rq->sector=%ld, rq->nr_sectors=%ld, rq->pd->rq=%p\n", rq->cmd, rq, rq->sector, rq->nr_sectors, pd->rq); rq->rq_dev = pd->dev; - if (rq->cmd == READ) { - blkdev_dequeue_request(rq); - __pkt_inject_request(pdq, rq); - continue; - } + if (rq->cmd == READ) + BUG(); /* * UDF had a bug, where it submitted a write to a ro file @@ -1056,36 +1000,8 @@ * rfn will be reinvoked once that is done */ if (test_and_set_bit(PACKET_RQ, &pd->flags)) { - if (pd->rq == NULL) { + if (pd->rq == NULL) VPRINTK("PACKET_RQ but pd->rq == NULL???\n"); - break; - } else if (prq == NULL) { - break; - } - -#if 0 - if (ZONE(prq->sector, pd) == ZONE(rq->sector, pd)) { - struct request *trq = rq; - - if (prq->sector > rq->sector) { - rq = prq; - prq = trq; - } - - if (q->merge_requests_fn(q, prq, rq, max_segments)) { - q->elevator.elevator_merge_req_fn(prq, rq); - prq->bhtail->b_reqnext = rq->bh; - prq->bhtail = rq->bhtail; - prq->nr_sectors = prq->hard_nr_sectors += rq->hard_nr_sectors; - prq->end_io = rq->end_io; - blkdev_dequeue_request(trq); - blkdev_release_request(rq); - continue; - } else - printk("merge failed: 
prq->sector=%ld, rq->sector=%ld\n", - prq->sector, rq->sector); - } -#endif break; } @@ -1387,7 +1303,7 @@ /* * the IMMED bit -- we default to not setting it, although that - * would allow a much faster close + * would allow a much faster close, this is safer */ #if 0 cgc.cmd[1] = 1 << 1; @@ -1451,7 +1367,7 @@ /* * Give me full power, Captain */ -static int pkt_max_speed(struct pktcdvd_device *pd) +static int pkt_adjust_speed(struct pktcdvd_device *pd, int speed) { disc_information di; int ret; @@ -1463,7 +1379,7 @@ * command (besides, we also use the old set speed command, * not the streaming feature). */ - if ((ret = pkt_set_speed(pd, 8))) + if ((ret = pkt_set_speed(pd, speed))) return ret; /* @@ -1554,7 +1470,7 @@ (void) pkt_write_caching(pd, USE_WCACHING); - if ((ret = pkt_max_speed(pd))) { + if ((ret = pkt_adjust_speed(pd, 8))) { DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name); return -EIO; } @@ -1582,13 +1498,13 @@ return ret; clear_bit(PACKET_READONLY, &pd->flags); } else { - if ((ret = pkt_max_speed(pd))) + if ((ret = pkt_adjust_speed(pd, 0xff))) return ret; set_bit(PACKET_READONLY, &pd->flags); } if (write) - printk("pktcdvd: %luKB available on disc\n", lba << 1); + printk("pktcdvd: %lukB available on disc\n", lba << 1); return 0; } @@ -1675,41 +1591,94 @@ return ret; } -inline struct request *bh_rq_same_zone(struct buffer_head *bh, - struct request *rq, struct list_head *head) +/* + * pktcdvd i/o elevator parts + * + * rules: always merge whenever possible, and support hole merges + */ +static int pkt_hole_merge(struct pktcdvd_device *pd, struct request *rq, + struct buffer_head *bh) { - struct list_head *prev; - struct request *prev_rq; + struct buffer_head *tbh, *nbh; + int count = bh->b_size >> 9; + unsigned long sector = bh->b_rsector; + + if (sector == rq->bh->b_rsector || sector == rq->bhtail->b_rsector) + return ELEVATOR_NO_MERGE; + + if (sector > rq->bhtail->b_rsector) { + rq->bhtail->b_reqnext = bh; + rq->bhtail = bh; + } else if (sector < rq->bh->b_rsector) { + bh->b_reqnext = rq->bh; + rq->bh = bh; + rq->buffer = bh->b_data; + rq->current_nr_sectors = count; + rq->sector = rq->hard_sector = sector; + } else { + nbh = NULL; + for (tbh = rq->bh; tbh->b_reqnext; tbh = tbh->b_reqnext) { + if (tbh->b_rsector == sector) + return ELEVATOR_NO_MERGE; + + nbh = tbh->b_reqnext; + if (sector < nbh->b_rsector && sector > tbh->b_rsector) + break; + } + if (!nbh) + BUG(); + if (!tbh) + BUG(); - prev = rq->queue.prev; - if (prev == head) { - if (rq->sector > bh->b_rsector) - return rq; - else - return NULL; + bh->b_reqnext = nbh; + tbh->b_reqnext = bh; } - prev_rq = blkdev_entry_to_request(prev); - if (prev_rq->rq_dev != rq->rq_dev) - return NULL; + rq->nr_sectors = rq->hard_nr_sectors += count; + blk_started_io(count); + return ELEVATOR_PRIV_MERGE; +} - if (bh->b_rsector < rq->sector && bh->b_rsector > prev_rq->sector) - return rq; +inline int bh_rq_in_between(struct buffer_head *bh, struct request *rq, + struct list_head *head) +{ + struct list_head *next; + struct request *next_rq; - if (rq->sector > prev_rq->sector) - return NULL; + next = rq->queue.next; + if (next == head) + return 0; - if (bh->b_rsector > prev_rq->sector || bh->b_rsector < rq->sector) - return rq; + /* + * if the device is different (usually on a different partition), + * just check if bh is after rq + */ + next_rq = blkdev_entry_to_request(next); + if (next_rq->rq_dev != rq->rq_dev) + return bh->b_rsector > rq->sector; - return NULL; -} + /* + * ok, rq, next_rq and bh are on the same device. 
if bh is in between + * the two, this is the sweet spot + */ + if (bh->b_rsector < next_rq->sector && bh->b_rsector > rq->sector) + return 1; -/* - * pktcdvd i/o elevator - * - * rules: always merge whenever possible, and support hole merges - */ + /* + * next_rq is ordered wrt rq, but bh is not in between the two + */ + if (next_rq->sector > rq->sector) + return 0; + + /* + * next_rq and rq not ordered, if we happen to be either before + * next_rq or after rq insert here anyway + */ + if (bh->b_rsector > rq->sector || bh->b_rsector < next_rq->sector) + return 1; + + return 0; +} static int pkt_elevator_merge(request_queue_t *q, struct request **req, struct list_head *head, @@ -1718,43 +1687,35 @@ { struct list_head *entry = &q->queue_head; unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE; - struct pktcdvd_device *pd = pkt_find_dev(bh->b_rdev); + struct pktcdvd_device *pd = q->queuedata; - VPRINTK("pkt_elevator_merge: rw=%d, ms=%d, bh=%ld (%lu), dev=%d, count=%d\n", rw, max_sectors, bh->b_blocknr, bh->b_rsector, bh->b_rdev, count); + VPRINTK("pkt_elevator_merge: rw=%d, ms=%d, bh=%lu, dev=%d\n", rw, max_sectors, bh->b_rsector, bh->b_rdev); while ((entry = entry->prev) != head) { struct request *__rq = blkdev_entry_to_request(entry); - VPRINTK("cmd=%d, bh=%ld (%lu/%lu), dev=%d, sem=%p, seq=%d, req=%p, ret=%d\n", - __rq->cmd, __rq->bh->b_blocknr, __rq->sector, __rq->nr_sectors, __rq->rq_dev, __rq->sem, __rq->elevator_sequence, *req, ret); - /* - * simply "aging" of requests in queue - */ - if (rw == READ && __rq->elevator_sequence-- <= 0) - break; - if (__rq->sem) continue; if (__rq->rq_dev != bh->b_rdev) continue; - if (!*req && ZONE(__rq->sector, pd) == ZONE(bh->b_rsector, pd)) - *req = bh_rq_same_zone(bh, __rq, &q->queue_head); + if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head)) + *req = __rq; if (__rq->cmd != rw) continue; if (__rq->nr_sectors + count > max_sectors) continue; - if (rw == READ && __rq->elevator_sequence < count) - continue; - if (rw == WRITE && ZONE(__rq->sector, pd) != ZONE(bh->b_rsector, pd)) - continue; if (__rq->sector + __rq->nr_sectors == bh->b_rsector) { ret = ELEVATOR_BACK_MERGE; *req = __rq; break; } else if (__rq->sector - count == bh->b_rsector) { ret = ELEVATOR_FRONT_MERGE; - __rq->elevator_sequence -= count; *req = __rq; break; + } else if (ZONE(__rq->sector, pd) == ZONE(bh->b_rsector, pd)) { + ret = pkt_hole_merge(pd, __rq, bh); + if (ret == ELEVATOR_NO_MERGE) + continue; + break; } } VPRINTK("*req=%p, ret=%d\n", *req, ret); @@ -1762,13 +1723,37 @@ return ret; } +static int pkt_make_request(request_queue_t *q, int rw, struct buffer_head *bh) +{ + struct pktcdvd_device *pd = q->queuedata; + + /* + * quick remap a READ + */ + if (rw == READ || rw == READA) { + bh->b_rdev = pd->dev; + return 1; + } + + if (!(rw & WRITE)) + BUG(); + + /* + * do remaps of blocks here in the future, yes thank the format that + * it's done here... 
+	 */
+	return pd->make_request_fn(q, rw, bh);
+}
+
 static void pkt_init_queue(struct pktcdvd_device *pd)
 {
 	request_queue_t *q = &pd->cdrw.r_queue;
 
 	blk_init_queue(q, pkt_request);
-	blk_queue_headactive(q, 0);
 	elevator_init(&q->elevator, ELEVATOR_PKTCDVD);
+	pd->make_request_fn = q->make_request_fn;
+	blk_queue_make_request(q, pkt_make_request);
+	blk_queue_headactive(q, 0);
 	q->front_merge_fn = pkt_front_merge_fn;
 	q->back_merge_fn = pkt_back_merge_fn;
 	q->merge_requests_fn = pkt_merge_requests_fn;
@@ -1813,6 +1798,7 @@
 		printk("pktcdvd: not enough memory for buffers\n");
 		return -ENOMEM;
 	}
+	set_blocksize(dev, CD_FRAMESIZE);
 
 	pd->cdi = cdi;
 	pd->dev = dev;
@@ -1962,7 +1948,9 @@
 		if (atomic_read(&pd->refcnt) != 1)
 			return -EBUSY;
 		return pkt_remove_dev(pd);
-
+	/*
+	 * really just for debug, not meant to be used
+	 */
 	case PACKET_WAKEUP:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
@@ -2013,7 +2001,7 @@
 	b += sprintf(b, "\nWriter %s (%s):\n", pd->name, kdevname(pd->dev));
 
 	b += sprintf(b, "\nSettings:\n");
-	b += sprintf(b, "\tpacket size:\t\t%dKB\n", pd->settings.size / 2);
+	b += sprintf(b, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
 
 	if (pd->settings.write_type == 0)
 		msg = "Packet";
@@ -2043,7 +2031,7 @@
 	b += sprintf(b, "\nMisc:\n");
 	b += sprintf(b, "\treference count:\t%d\n", atomic_read(&pd->refcnt));
 	b += sprintf(b, "\tflags:\t\t\t0x%lx\n", pd->flags);
-	b += sprintf(b, "\twrite speed:\t\t%uKB/sec\n", pd->speed * 150);
+	b += sprintf(b, "\twrite speed:\t\t%ukB/s\n", pd->speed * 150);
 	b += sprintf(b, "\tstart offset:\t\t%lu\n", pd->offset);
 	b += sprintf(b, "\tmode page offset:\t%u\n", pd->mode_offset);
@@ -2054,7 +2042,7 @@
 	spin_lock_irq(&io_request_lock);
 	i = 0;
-	list_for_each(foo, &pd->cdrw.r_queue.queue_head);
+	list_for_each(foo, &pd->cdrw.r_queue.queue_head)
 		i++;
 	spin_unlock_irq(&io_request_lock);
 	b += sprintf(b, "\tqueue requests:\t\t%u\n", i);
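The core of the pktcdvd.c changes is the hole-merge support above: writes
inside one packet zone can arrive with gaps between them, and instead of
refusing the merge, pkt_hole_merge() links the buffer into the request's bh
list at the correct sector position (back, front, or into a hole in the
middle), bumps the sector counts, and returns the new ELEVATOR_PRIV_MERGE so
the new case in __make_request() treats the buffer as already taken care of.
A stripped-down userspace model of just the list insertion (struct buf is a
simplified stand-in for struct buffer_head -- illustration only, not kernel
code):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	unsigned long sector;
	struct buf *next;
};

/* returns 0 if sector is already present, 1 if the buffer was inserted */
static int hole_merge(struct buf **head, struct buf **tail, struct buf *bh)
{
	struct buf *tbh, *nbh;

	if (bh->sector == (*head)->sector || bh->sector == (*tail)->sector)
		return 0;

	if (bh->sector > (*tail)->sector) {		/* back merge */
		(*tail)->next = bh;
		*tail = bh;
	} else if (bh->sector < (*head)->sector) {	/* front merge */
		bh->next = *head;
		*head = bh;
	} else {					/* fill a hole */
		for (tbh = *head; tbh->next; tbh = tbh->next) {
			if (tbh->sector == bh->sector)
				return 0;
			nbh = tbh->next;
			if (bh->sector > tbh->sector && bh->sector < nbh->sector)
				break;
		}
		bh->next = tbh->next;
		tbh->next = bh;
	}
	return 1;
}

int main(void)
{
	struct buf a = { 0 }, b = { 128 }, c = { 64 }, *h = &a, *t = &a;

	hole_merge(&h, &t, &b);		/* back merge:  0 -> 128 */
	hole_merge(&h, &t, &c);		/* hole merge:  0 -> 64 -> 128 */
	for (struct buf *p = h; p; p = p->next)
		printf("%lu\n", p->sector);
	return 0;
}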
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre5/drivers/char/sysrq.c linux/drivers/char/sysrq.c
--- /opt/kernel/linux-2.4.4-pre5/drivers/char/sysrq.c	Fri Feb  9 20:30:22 2001
+++ linux/drivers/char/sysrq.c	Sat Apr 21 15:51:57 2001
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include <linux/blkdev.h>
 
 #include
 
@@ -53,6 +54,28 @@
 	}
 }
 
+static void show_requests(request_queue_t *q)
+{
+	struct list_head *entry;
+
+	spin_lock_irq(&io_request_lock);
+
+	list_for_each(entry, &q->queue_head) {
+		struct request *rq = blkdev_entry_to_request(entry);
+		int zone = rq->sector & ~127;
+		int hole;
+
+		hole = 0;
+		if ((rq->sector + rq->nr_sectors - (rq->bhtail->b_size >> 9))
+		    != rq->bhtail->b_rsector)
+			hole = 1;
+
+		printk("rq: cmd %d, sector %lu (-> %lu), zone %u, hole %d, nr_sectors %lu\n", rq->cmd, rq->sector, rq->sector + rq->nr_sectors - 1, zone, hole, rq->nr_sectors);
+	}
+
+	spin_unlock_irq(&io_request_lock);
+}
+
 /*
  * This function is called by the keyboard handler when SysRq is pressed
  * and any other keycode arrives.
@@ -111,6 +134,12 @@
 		if (pt_regs)
 			show_regs(pt_regs);
 		break;
+	case 'q': {
+		request_queue_t *q = blk_get_queue(MKDEV(97, 0));
+		printk("Show requests\n");
+		show_requests(q);
+		break;
+	}
 	case 't':	/* T -- show task info */
 		printk("Show State\n");
 		show_state();
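For debugging, Alt+SysRq+Q now dumps the request queue of the first packet
device (major 97, minor 0) with per-request zone and hole information. Note
that rq->sector & ~127 hard-codes the default packet size: 32 frames of 2 kB
are 128 sectors of 512 bytes, so masking off the low 7 bits gives the zone
start. A 64-sector write starting at sector 3200 would print something like
(made-up example, format from the printk above):

	rq: cmd 1, sector 3200 (-> 3263), zone 3200, hole 0, nr_sectors 64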
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre5/fs/udf/super.c linux/fs/udf/super.c
--- /opt/kernel/linux-2.4.4-pre5/fs/udf/super.c	Fri Apr 20 00:11:32 2001
+++ linux/fs/udf/super.c	Sat Apr 21 03:27:12 2001
@@ -201,7 +201,7 @@
 	char *opt, *val;
 
 	uopt->novrs = 0;
-	uopt->blocksize = 512;
+	uopt->blocksize = 2048;
 	uopt->partition = 0xFFFF;
 	uopt->session = 0xFFFFFFFF;
 	uopt->lastblock = 0xFFFFFFFF;
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre5/include/linux/blkdev.h linux/include/linux/blkdev.h
--- /opt/kernel/linux-2.4.4-pre5/include/linux/blkdev.h	Sat Apr 21 18:03:32 2001
+++ linux/include/linux/blkdev.h	Sat Apr 21 17:59:39 2001
@@ -126,8 +126,6 @@
 	 * Tasks wait here for free request
	 */
 	wait_queue_head_t wait_for_request;
-
-	int total_requests;
 };
 
 struct blk_dev_struct {
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre5/include/linux/elevator.h linux/include/linux/elevator.h
--- /opt/kernel/linux-2.4.4-pre5/include/linux/elevator.h	Fri Feb 16 01:58:34 2001
+++ linux/include/linux/elevator.h	Sat Apr 21 16:01:15 2001
@@ -53,6 +53,7 @@
 #define ELEVATOR_NO_MERGE	0
 #define ELEVATOR_FRONT_MERGE	1
 #define ELEVATOR_BACK_MERGE	2
+#define ELEVATOR_PRIV_MERGE	3
 
 /*
  * This is used in the elevator algorithm. We don't prioritise reads
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre5/include/linux/fs.h linux/include/linux/fs.h
--- /opt/kernel/linux-2.4.4-pre5/include/linux/fs.h	Sat Apr 21 18:03:32 2001
+++ linux/include/linux/fs.h	Sat Apr 21 17:56:09 2001
@@ -213,7 +213,6 @@
 #define BH_Mapped	4	/* 1 if the buffer has a disk mapping */
 #define BH_New		5	/* 1 if the buffer is new and not yet written out */
 #define BH_Protected	6	/* 1 if the buffer is protected */
-#define BH_Packet	7	/* 1 if packet writing buffer */
 
 /*
  * Try to keep the most commonly used fields in single cache lines (16
@@ -1237,7 +1236,6 @@
 extern void file_moveto(struct file *new, struct file *old);
 extern struct buffer_head * get_hash_table(kdev_t, int, int);
 extern struct buffer_head * getblk(kdev_t, int, int);
-extern inline struct buffer_head *__getblk(kdev_t, int, int);
 extern void ll_rw_block(int, int, struct buffer_head * bh[]);
 extern void submit_bh(int, struct buffer_head *);
 extern int is_read_only(kdev_t);
diff -urN --exclude-from /home/axboe/exclude /opt/kernel/linux-2.4.4-pre5/include/linux/pktcdvd.h linux/include/linux/pktcdvd.h
--- /opt/kernel/linux-2.4.4-pre5/include/linux/pktcdvd.h	Sat Apr 21 18:03:32 2001
+++ linux/include/linux/pktcdvd.h	Sat Apr 21 17:15:06 2001
@@ -24,7 +24,11 @@
  * able to sucessfully recover with this option (drive will return good
 * status as soon as the cdb is validated).
 */
+#if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
+#define USE_WCACHING 1
+#else
 #define USE_WCACHING 0
+#endif
 
 /*
  * No user-servicable parts beyond this point ->
@@ -58,12 +62,10 @@
 #define PACKET_WRITEABLE	1	/* pd is writeable */
 #define PACKET_NWA_VALID	2	/* next writeable address valid */
 #define PACKET_LRA_VALID	3	/* last recorded address valid */
-#define PACKET_READY		4
-#define PACKET_READONLY		5	/* read only pd */
-#define PACKET_THREAD		6	/* kernel thread running */
-#define PACKET_RQ		7	/* current rq is set */
-#define PACKET_BUSY		8	/* current rq is being processed */
-#define PACKET_LOCK		9	/* pd is locked (wrt pd->rq) */
+#define PACKET_READONLY		4	/* read only pd */
+#define PACKET_THREAD		5	/* kernel thread running */
+#define PACKET_RQ		6	/* current rq is set */
+#define PACKET_BUSY		7	/* current rq is being processed */
 
 /*
  * Disc status -- from READ_DISC_INFO
@@ -166,6 +168,8 @@
 	wait_queue_head_t	lock_wait;
 	struct request		*rq;		/* current request */
 	atomic_t		wrqcnt;
+
+	make_request_fn		*make_request_fn;
 };
 
 /*
@@ -183,8 +187,8 @@
 
 #define ELEVATOR_PKTCDVD \
 ((elevator_t) { \
-	8192,			/* read_latency */ \
-	0,			/* write_latency */ \
+	0,			/* not used */ \
+	0,			/* not used */ \
 \
	pkt_elevator_merge,	/* elevator_merge_fn */ \
	pkt_elevator_cleanup, \