From: Peter Osterlund

Simplified the code in the pktcdvd driver that ensures that write requests
are not larger than the packet size.  This also limits the read request
size, but that doesn't seem to cause any measurable overhead, so it's
better to keep the code simple.

Signed-off-by: Peter Osterlund
Signed-off-by: Andrew Morton
---

 25-akpm/drivers/block/pktcdvd.c |   45 ++++++----------------------------------
 25-akpm/include/linux/pktcdvd.h |    2 -
 2 files changed, 7 insertions(+), 40 deletions(-)

diff -puN drivers/block/pktcdvd.c~simplified-request-size-handling-in-cdrw-packet-writing drivers/block/pktcdvd.c
--- 25/drivers/block/pktcdvd.c~simplified-request-size-handling-in-cdrw-packet-writing	Wed Aug  4 15:51:05 2004
+++ 25-akpm/drivers/block/pktcdvd.c	Wed Aug  4 15:51:05 2004
@@ -84,25 +84,6 @@ static struct pktcdvd_device *pkt_find_d
 	return NULL;
 }
 
-/*
- * The underlying block device is not allowed to merge write requests. Some
- * CDRW drives can not handle writes larger than one packet, even if the size
- * is a multiple of the packet size.
- */
-static int pkt_lowlevel_elv_merge_fn(request_queue_t *q, struct request **req, struct bio *bio)
-{
-	struct pktcdvd_device *pd = pkt_find_dev(q);
-	BUG_ON(!pd);
-
-	if (bio_data_dir(bio) == WRITE)
-		return ELEVATOR_NO_MERGE;
-
-	if (pd->cdrw.elv_merge_fn)
-		return pd->cdrw.elv_merge_fn(q, req, bio);
-
-	return ELEVATOR_NO_MERGE;
-}
-
 static void pkt_lowlevel_elv_completed_req_fn(request_queue_t *q, struct request *req)
 {
 	struct pktcdvd_device *pd = pkt_find_dev(q);
@@ -118,17 +99,6 @@ static void pkt_lowlevel_elv_completed_r
 	pd->cdrw.elv_completed_req_fn(q, req);
 }
 
-static int pkt_lowlevel_merge_requests_fn(request_queue_t *q, struct request *rq, struct request *next)
-{
-	struct pktcdvd_device *pd = pkt_find_dev(q);
-	BUG_ON(!pd);
-
-	if (rq_data_dir(rq) == WRITE)
-		return 0;
-
-	return pd->cdrw.merge_requests_fn(q, rq, next);
-}
-
 static void pkt_bio_init(struct bio *bio)
 {
 	bio->bi_next = NULL;
@@ -1932,17 +1902,20 @@ static int pkt_open_dev(struct pktcdvd_d
 		}
 	}
 	spin_lock_irq(q->queue_lock);
-	pd->cdrw.elv_merge_fn = q->elevator.elevator_merge_fn;
 	pd->cdrw.elv_completed_req_fn = q->elevator.elevator_completed_req_fn;
-	pd->cdrw.merge_requests_fn = q->merge_requests_fn;
-	q->elevator.elevator_merge_fn = pkt_lowlevel_elv_merge_fn;
 	q->elevator.elevator_completed_req_fn = pkt_lowlevel_elv_completed_req_fn;
-	q->merge_requests_fn = pkt_lowlevel_merge_requests_fn;
 	spin_unlock_irq(q->queue_lock);
 
 	if (write) {
 		if ((ret = pkt_open_write(pd)))
 			goto restore_queue;
+		/*
+		 * Some CDRW drives can not handle writes larger than one packet,
+		 * even if the size is a multiple of the packet size.
+		 */
+		spin_lock_irq(q->queue_lock);
+		blk_queue_max_sectors(q, pd->settings.size);
+		spin_unlock_irq(q->queue_lock);
 		set_bit(PACKET_WRITABLE, &pd->flags);
 	} else {
 		pkt_set_speed(pd, 0xffff, 0xffff);
@@ -1959,9 +1932,7 @@ static int pkt_open_dev(struct pktcdvd_d
 
 restore_queue:
 	spin_lock_irq(q->queue_lock);
-	q->elevator.elevator_merge_fn = pd->cdrw.elv_merge_fn;
 	q->elevator.elevator_completed_req_fn = pd->cdrw.elv_completed_req_fn;
-	q->merge_requests_fn = pd->cdrw.merge_requests_fn;
 	spin_unlock_irq(q->queue_lock);
 out_putdev:
 	blkdev_put(pd->bdev);
@@ -1985,9 +1956,7 @@ static void pkt_release_dev(struct pktcd
 	q = bdev_get_queue(pd->bdev);
 	pkt_set_speed(pd, 0xffff, 0xffff);
 	spin_lock_irq(q->queue_lock);
-	q->elevator.elevator_merge_fn = pd->cdrw.elv_merge_fn;
 	q->elevator.elevator_completed_req_fn = pd->cdrw.elv_completed_req_fn;
-	q->merge_requests_fn = pd->cdrw.merge_requests_fn;
 	spin_unlock_irq(q->queue_lock);
 	blkdev_put(pd->bdev);
 }
diff -puN include/linux/pktcdvd.h~simplified-request-size-handling-in-cdrw-packet-writing include/linux/pktcdvd.h
--- 25/include/linux/pktcdvd.h~simplified-request-size-handling-in-cdrw-packet-writing	Wed Aug  4 15:51:05 2004
+++ 25-akpm/include/linux/pktcdvd.h	Wed Aug  4 15:51:05 2004
@@ -139,9 +139,7 @@ struct packet_cdrw
 	struct list_head		pkt_active_list;
 	spinlock_t			active_list_lock; /* Serialize access to pkt_active_list */
 	struct task_struct		*thread;
-	elevator_merge_fn		*elv_merge_fn;
 	elevator_completed_req_fn	*elv_completed_req_fn;
-	merge_requests_fn		*merge_requests_fn;
 };
 
 /*
_
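
The net effect on the write path, condensed below into a hypothetical helper
purely for illustration (the helper name is made up; it assumes the 2.6-era
block API and that it lives in pktcdvd.c where pkt_open_write() is in scope;
error handling and the queue-restore path are omitted):

	#include <linux/blkdev.h>
	#include <linux/spinlock.h>
	#include <linux/pktcdvd.h>

	/*
	 * Hypothetical condensation of the pkt_open_dev() write path after
	 * this patch: rather than overriding the elevator merge callbacks,
	 * cap the underlying queue's maximum request size at one packet
	 * (pd->settings.size sectors), so the block layer can never build a
	 * write larger than the drive can handle.  Reads are capped too,
	 * which the changelog considers an acceptable trade-off.
	 */
	static int pkt_setup_write_queue(struct pktcdvd_device *pd)
	{
		request_queue_t *q = bdev_get_queue(pd->bdev);
		int ret;

		if ((ret = pkt_open_write(pd)))
			return ret;

		spin_lock_irq(q->queue_lock);
		blk_queue_max_sectors(q, pd->settings.size);	/* one packet max */
		spin_unlock_irq(q->queue_lock);

		set_bit(PACKET_WRITABLE, &pd->flags);
		return 0;
	}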