diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/block/DAC960.c linux/drivers/block/DAC960.c
--- /opt/kernel/linux-2.3.99-pre7-4/drivers/block/DAC960.c	Thu May  4 15:45:04 2000
+++ linux/drivers/block/DAC960.c	Thu May  4 14:27:12 2000
@@ -1363,9 +1363,8 @@
   Command->SegmentCount = Request->nr_segments;
   Command->BufferHeader = Request->bh;
   RequestBuffer = Request->buffer;
-  Request->rq_status = RQ_INACTIVE;
   blkdev_dequeue_request(Request);
-  wake_up(&wait_for_request);
+  blkdev_release_request(Request);
   if (Command->SegmentCount == 1)
     {
       DAC960_CommandMailbox_T *CommandMailbox = &Command->CommandMailbox;
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/block/elevator.c linux/drivers/block/elevator.c
--- /opt/kernel/linux-2.3.99-pre7-4/drivers/block/elevator.c	Mon Mar 13 04:32:57 2000
+++ linux/drivers/block/elevator.c	Wed May  3 03:42:37 2000
@@ -4,6 +4,16 @@
  *  Block device elevator/IO-scheduler.
  *
  *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *
+ * 30042000 Jens Axboe <axboe@suse.de> :
+ *
+ * Split the elevator a bit so that it is possible to choose a different
+ * one or even write a new "plug in". There are three pieces:
+ * - elevator_fn, inserts a new request in the queue list
+ * - elevator_merge_fn, decides whether a new buffer can be merged with
+ *   an existing request
+ * - elevator_dequeue_fn, called when a request is taken off the active list
+ *
  */

 #include <linux/fs.h>
@@ -12,9 +22,9 @@
 #include <linux/blk.h>

 #include <asm/uaccess.h>

-static void elevator_default(struct request * req, elevator_t * elevator,
-			     struct list_head * real_head,
-			     struct list_head * head, int orig_latency)
+void elevator_default(struct request *req, elevator_t * elevator,
+		      struct list_head * real_head,
+		      struct list_head * head, int orig_latency)
 {
 	struct list_head * entry = real_head, * point = NULL;
 	struct request * tmp;
@@ -22,6 +32,12 @@
 	int latency = orig_latency -= elevator->nr_segments, pass = 0;
 	int point_latency = 0xbeefbeef;

+	if (list_empty(real_head)) {
+		req->elevator_sequence = elevator_sequence(elevator, orig_latency);
+		list_add(&req->queue, real_head);
+		return;
+	}
+
 	while ((entry = entry->prev) != head) {
 		if (!point && latency >= 0) {
 			point = entry;
@@ -49,19 +65,130 @@
 	req->elevator_sequence = elevator_sequence(elevator, latency);
 }

+int elevator_default_merge(request_queue_t *q, struct request **req,
+			   struct buffer_head *bh, int rw,
+			   int *max_sectors, int *max_segments)
+{
+	struct list_head *entry, *head = &q->queue_head;
+	unsigned int count = bh->b_size >> 9;
+	elevator_t *elevator = &q->elevator;
+	int orig_latency, latency, sequence, action, starving = 0;
+
+	/*
+	 * Avoid write-bombs, so as not to hurt the interactivity of reads
+	 */
+	if (rw == WRITE)
+		*max_segments = elevator->max_bomb_segments;
+
+	latency = orig_latency = elevator_request_latency(elevator, rw);
+	sequence = elevator->sequence;
+
+	if (q->head_active && !q->plugged)
+		head = head->next;
+
+	entry = head;
+	while ((entry = entry->prev) != head && !starving) {
+		*req = blkdev_entry_to_request(entry);
+		latency += (*req)->nr_segments;
+		if (elevator_sequence_before((*req)->elevator_sequence, sequence))
+			starving = 1;
+		if (latency < 0)
+			continue;
+		if ((*req)->sem)
+			continue;
+		if ((*req)->cmd != rw)
+			continue;
+		if ((*req)->nr_sectors + count > *max_sectors)
+			continue;
+		if ((*req)->rq_dev != bh->b_rdev)
+			continue;
+		if ((*req)->sector + (*req)->nr_sectors == bh->b_rsector) {
+			if (latency - (*req)->nr_segments < 0)
+				break;
+			action = ELEVATOR_BACK_MERGE;
+		} else if ((*req)->sector - count == bh->b_rsector) {
+			if (starving)
+				break;
+			action = ELEVATOR_FRONT_MERGE;
+		} else {
+			continue;
+		}
+		q->elevator.sequence++;
+		return action;
+	}
+	return ELEVATOR_NO_MERGE;
+}
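
The back/front distinction above is pure sector arithmetic: a buffer is glued to the back of a queued request when it begins exactly where that request ends, and to the front when it ends exactly where the request begins. A stand-alone toy check of the rule, with made-up numbers (illustration only, not from the patch):

	/* toy illustration of the merge test; values are hypothetical */
	#include <stdio.h>

	struct toy_req { unsigned long sector, nr_sectors; };

	int main(void)
	{
		struct toy_req req = { 100, 8 };	/* queued: sectors 100..107 */
		unsigned long b_rsector = 108, count = 8;	/* new 4kB buffer */

		if (req.sector + req.nr_sectors == b_rsector)
			printf("ELEVATOR_BACK_MERGE\n");	/* 100 + 8 == 108 */
		else if (req.sector - count == b_rsector)
			printf("ELEVATOR_FRONT_MERGE\n");
		else
			printf("ELEVATOR_NO_MERGE\n");
		return 0;
	}
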
+
+inline void elevator_default_dequeue(struct request *req)
+{
+	if (req->cmd == READ)
+		req->e->read_pendings--;
+
+	req->e->nr_segments -= req->nr_segments;
+}
+
+/*
+ * No request sorting, just add it to the back of the list
+ */
+void elevator_noop(struct request *req, elevator_t *elevator,
+		   struct list_head *real_head, struct list_head *head,
+		   int orig_latency)
+{
+	list_add_tail(&req->queue, real_head);
+}
+
+/*
+ * See if we can find a request whose buffer this one can be coalesced with.
+ */
+int elevator_noop_merge(request_queue_t *q, struct request **req,
+			struct buffer_head *bh, int rw,
+			int *max_sectors, int *max_segments)
+{
+	struct list_head *entry, *head = &q->queue_head;
+	unsigned int count = bh->b_size >> 9;
+
+	if (q->head_active && !q->plugged)
+		head = head->next;
+
+	entry = head;
+	while ((entry = entry->prev) != head) {
+		*req = blkdev_entry_to_request(entry);
+		if ((*req)->sem)
+			continue;
+		if ((*req)->cmd != rw)
+			continue;
+		if ((*req)->nr_sectors + count > *max_sectors)
+			continue;
+		if ((*req)->rq_dev != bh->b_rdev)
+			continue;
+		if ((*req)->sector + (*req)->nr_sectors == bh->b_rsector)
+			return ELEVATOR_BACK_MERGE;
+		if ((*req)->sector - count == bh->b_rsector)
+			return ELEVATOR_FRONT_MERGE;
+	}
+	return ELEVATOR_NO_MERGE;
+}
+
+/*
+ * The noop "elevator" does not do any accounting
+ */
+void elevator_noop_dequeue(struct request *req) {}
+
 #ifdef ELEVATOR_DEBUG
-void elevator_debug(request_queue_t * q, kdev_t dev)
+void elevator_default_debug(request_queue_t * q, kdev_t dev)
 {
 	int read_pendings = 0, nr_segments = 0;
 	elevator_t * elevator = &q->elevator;
 	struct list_head * entry = &q->queue_head;
 	static int counter;

+	if (elevator->elevator_fn != elevator_default)
+		return;
+
 	if (counter++ % 100)
 		return;

-	while ((entry = entry->prev) != &q->queue_head)
-	{
+	while ((entry = entry->prev) != &q->queue_head) {
 		struct request * req;

 		req = blkdev_entry_to_request(entry);
@@ -81,16 +208,14 @@
 		nr_segments += req->nr_segments;
 	}

-	if (read_pendings != elevator->read_pendings)
-	{
+	if (read_pendings != elevator->read_pendings) {
 		printk(KERN_WARNING "%s: elevator read_pendings %d should be %d\n",
 		       kdevname(dev), elevator->read_pendings,
 		       read_pendings);
 		elevator->read_pendings = read_pendings;
 	}
-	if (nr_segments != elevator->nr_segments)
-	{
+	if (nr_segments != elevator->nr_segments) {
 		printk(KERN_WARNING "%s: elevator nr_segments %d should be %d\n",
 		       kdevname(dev), elevator->nr_segments,
@@ -102,49 +227,42 @@

 int blkelvget_ioctl(elevator_t * elevator, blkelv_ioctl_arg_t * arg)
 {
-	int ret;
 	blkelv_ioctl_arg_t output;

 	output.queue_ID = elevator;
 	output.read_latency = elevator->read_latency;
 	output.write_latency = elevator->write_latency;
 	output.max_bomb_segments = elevator->max_bomb_segments;
+	strcpy(output.elevator_name, elevator->elevator_name);

-	ret = -EFAULT;
 	if (copy_to_user(arg, &output, sizeof(blkelv_ioctl_arg_t)))
-		goto out;
-	ret = 0;
- out:
-	return ret;
+		return -EFAULT;
+
+	return 0;
 }

 int blkelvset_ioctl(elevator_t * elevator, const blkelv_ioctl_arg_t * arg)
 {
 	blkelv_ioctl_arg_t input;
-	int ret;

-	ret = -EFAULT;
 	if (copy_from_user(&input, arg, sizeof(blkelv_ioctl_arg_t)))
-		goto out;
+		return -EFAULT;

-	ret = -EINVAL;
 	if (input.read_latency < 0)
-		goto out;
+		return -EINVAL;
 	if (input.write_latency < 0)
-		goto out;
+		return -EINVAL;
 	if (input.max_bomb_segments <= 0)
-		goto out;
+		return -EINVAL;

 	elevator->read_latency = input.read_latency;
 	elevator->write_latency = input.write_latency;
 	elevator->max_bomb_segments = input.max_bomb_segments;

-	ret = 0;
- out:
-	return ret;
+	return 0;
 }

-void elevator_init(elevator_t * elevator)
+void elevator_init(elevator_t * elevator, elevator_t type)
 {
-	*elevator = ELEVATOR_DEFAULTS;
+	*elevator = type;
 }
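
With elevator_name now exported through BLKELVGET/BLKELVSET, the active elevator and its tunables are visible from userspace. A sketch of a query tool, assuming the updated <linux/elevator.h> is visible to userspace builds (error handling trimmed; not part of the patch):

	/* elvget.c - print a queue's elevator settings (sketch) */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/elevator.h>

	int main(int argc, char **argv)
	{
		blkelv_ioctl_arg_t arg;
		int fd;

		if (argc < 2) {
			fprintf(stderr, "usage: %s <blockdev>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);	/* e.g. /dev/hda */
		if (fd < 0 || ioctl(fd, BLKELVGET, &arg) < 0) {
			perror("BLKELVGET");
			return 1;
		}
		printf("%s: read_latency %d, write_latency %d, max_bomb_segments %d\n",
		       arg.elevator_name, arg.read_latency, arg.write_latency,
		       arg.max_bomb_segments);
		close(fd);
		return 0;
	}
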
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
--- /opt/kernel/linux-2.3.99-pre7-4/drivers/block/ll_rw_blk.c	Thu May  4 15:42:31 2000
+++ linux/drivers/block/ll_rw_blk.c	Thu May  4 16:24:22 2000
@@ -4,6 +4,7 @@
  * Copyright (C) 1991, 1992 Linus Torvalds
  * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
  * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
  */

 /*
@@ -37,14 +38,16 @@
 #endif

 /*
- * The request-struct contains all necessary data
- * to load a nr of sectors into memory
+ * For the allocated request tables
  */
-static struct request all_requests[NR_REQUEST];
+static kmem_cache_t *request_cachep;

 /*
  * The "disk" task queue is used to start the actual requests
- * after a plug
+ * after a plug.
+ *
+ * Nowadays, it is mainly used when the memory pressure gets too high. When
+ * we can, we fire individual queues instead.
  */
 DECLARE_TASK_QUEUE(tq_disk);

@@ -62,11 +65,6 @@
  */
 spinlock_t io_request_lock = SPIN_LOCK_UNLOCKED;

-/*
- * used to wait on when there are no free requests
- */
-DECLARE_WAIT_QUEUE_HEAD(wait_for_request);
-
 /* This specifies how many sectors to read ahead on the disk. */
 int read_ahead[MAX_BLKDEV];

@@ -148,8 +146,20 @@
 	return ret;
 }

+/*
+ * Hopefully the low level driver has finished any outstanding requests
+ * first...
+ */
 void blk_cleanup_queue(request_queue_t * q)
 {
+	struct list_head *entry, *head;
+	struct request *rq;
+
+	entry = head = &q->request_freelist;
+	while ((entry = entry->next) != head) {
+		rq = list_entry(entry, struct request, table);
+		kmem_cache_free(request_cachep, rq);
+	}
 	memset(q, 0, sizeof(*q));
 }
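
Since requests now live on a per-queue freelist carved out of a slab cache, every blk_init_queue() needs a matching blk_cleanup_queue() on the teardown path or the requests leak; the loop.c hunk below adds exactly that call. A sketch of the pattern for a hypothetical driver (MYDRV_MAJOR and mydrv_request are placeholders, not from the patch):

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/blk.h>

	#define MYDRV_MAJOR 240			/* hypothetical major number */

	static void mydrv_request(request_queue_t *q);	/* hypothetical */

	static int __init mydrv_init(void)
	{
		/* allocates the per-queue request table from the slab cache */
		blk_init_queue(BLK_DEFAULT_QUEUE(MYDRV_MAJOR), mydrv_request);
		return 0;
	}

	static void __exit mydrv_exit(void)
	{
		/* frees the request table again; forgetting this now leaks */
		blk_cleanup_queue(BLK_DEFAULT_QUEUE(MYDRV_MAJOR));
	}

	module_init(mydrv_init);
	module_exit(mydrv_exit);
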
@@ -237,10 +247,33 @@
 	queue_task(&q->plug_tq, &tq_disk);
 }

+static void blk_init_free_list(request_queue_t *q)
+{
+	struct request *rq;
+	int i;
+
+	/*
+	 * Divide requests in half between read and write. This used to
+	 * be a 2/3 advantage for reads, but now reads can steal from
+	 * the write free list.
+	 */
+	for (i = 0; i < QUEUE_NR_REQUESTS; i++) {
+		rq = kmem_cache_alloc(request_cachep, SLAB_KERNEL);
+		rq->rq_status = RQ_INACTIVE;
+		list_add(&rq->table, &q->request_freelist);
+	}
+
+	q->queue_requests = 0;
+	init_waitqueue_head(&q->wait_for_request);
+	spin_lock_init(&q->request_lock);
+}
+
 void blk_init_queue(request_queue_t * q, request_fn_proc * rfn)
 {
 	INIT_LIST_HEAD(&q->queue_head);
-	elevator_init(&q->elevator);
+	INIT_LIST_HEAD(&q->request_freelist);
+	elevator_init(&q->elevator, ELEVATOR_DEFAULT);
+	blk_init_free_list(q);
 	q->request_fn = rfn;
 	q->back_merge_fn = ll_back_merge_fn;
 	q->front_merge_fn = ll_front_merge_fn;
@@ -268,84 +301,73 @@
 	request_queue_t * q = (request_queue_t *) data;
 	unsigned long flags;

-	spin_lock_irqsave(&io_request_lock,flags);
+	spin_lock_irqsave(&io_request_lock, flags);
 	if (q->plugged) {
 		q->plugged = 0;
 		if (!list_empty(&q->queue_head))
 			(q->request_fn)(q);
 	}
-	spin_unlock_irqrestore(&io_request_lock,flags);
+	spin_unlock_irqrestore(&io_request_lock, flags);
 }

+#define blkdev_free_rq(list) list_entry((list)->next, struct request, table);
+
 /*
- * look for a free request in the first N entries.
- * NOTE: interrupts must be disabled on the way in (on SMP the request queue
- * spinlock has to be aquired), and will still be disabled on the way out.
+ * Get a free request. io_request_lock must be held and interrupts
+ * disabled on the way in.
  */
-static inline struct request * get_request(int n, kdev_t dev)
+static inline struct request *get_request(request_queue_t *q, int rw)
 {
-	static struct request *prev_found = NULL, *prev_limit = NULL;
-	register struct request *req, *limit;
+	struct request *rq;

-	if (n <= 0)
-		panic("get_request(%d): impossible!\n", n);
-
-	limit = all_requests + n;
-	if (limit != prev_limit) {
-		prev_limit = limit;
-		prev_found = all_requests;
-	}
-	req = prev_found;
-	for (;;) {
-		req = ((req > all_requests) ? req : limit) - 1;
-		if (req->rq_status == RQ_INACTIVE)
-			break;
-		if (req == prev_found)
-			return NULL;
-	}
-	prev_found = req;
-	req->rq_status = RQ_ACTIVE;
-	req->rq_dev = dev;
-	req->special = NULL;
-	return req;
+	if (list_empty(&q->request_freelist))
+		return NULL;
+
+	if ((q->queue_requests > QUEUE_WRITES_MAX) && (rw == WRITE))
+		return NULL;
+
+	rq = blkdev_free_rq(&q->request_freelist);
+	list_del(&rq->table);
+	rq->rq_status = RQ_ACTIVE;
+	rq->special = NULL;
+	rq->q = q;
+	q->queue_requests++;
+	return rq;
 }
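
The write cap in get_request() above keeps roughly a third of the pool in reserve for reads. A throwaway check of the arithmetic, using the defaults this patch puts in blkdev.h:

	#include <stdio.h>

	/* the defaults this patch adds to include/linux/blkdev.h */
	#define QUEUE_NR_REQUESTS	512
	#define QUEUE_WRITES_MAX	((2 * QUEUE_NR_REQUESTS) / 3)

	int main(void)
	{
		/* a WRITE is refused once more than this many requests are in flight */
		printf("writes capped at %d of %d requests (%d kept for reads)\n",
		       QUEUE_WRITES_MAX, QUEUE_NR_REQUESTS,
		       QUEUE_NR_REQUESTS - QUEUE_WRITES_MAX);
		return 0;
	}
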

 /*
- * wait until a free request in the first N entries is available.
+ * No available requests for this queue, unplug the device.
  */
-static struct request * __get_request_wait(int n, kdev_t dev)
+static struct request *__get_request_wait(request_queue_t *q, int rw)
 {
-	register struct request *req;
+	register struct request *rq;
 	DECLARE_WAITQUEUE(wait, current);
-	unsigned long flags;

-	add_wait_queue_exclusive(&wait_for_request, &wait);
+	add_wait_queue_exclusive(&q->wait_for_request, &wait);
 	for (;;) {
-		__set_current_state(TASK_UNINTERRUPTIBLE|TASK_EXCLUSIVE);
-		spin_lock_irqsave(&io_request_lock,flags);
-		req = get_request(n, dev);
-		spin_unlock_irqrestore(&io_request_lock,flags);
-		if (req)
+		__set_current_state(TASK_UNINTERRUPTIBLE | TASK_EXCLUSIVE);
+		spin_lock_irq(&io_request_lock);
+		rq = get_request(q, rw);
+		spin_unlock_irq(&io_request_lock);
+		if (rq)
 			break;
-		run_task_queue(&tq_disk);
+		generic_unplug_device(q);
 		schedule();
 	}
-	remove_wait_queue(&wait_for_request, &wait);
+	remove_wait_queue(&q->wait_for_request, &wait);
 	current->state = TASK_RUNNING;
-	return req;
+	return rq;
 }

-static inline struct request * get_request_wait(int n, kdev_t dev)
+static inline struct request *get_request_wait(request_queue_t *q, int rw)
 {
-	register struct request *req;
-	unsigned long flags;
+	register struct request *rq;

-	spin_lock_irqsave(&io_request_lock,flags);
-	req = get_request(n, dev);
-	spin_unlock_irqrestore(&io_request_lock,flags);
-	if (req)
-		return req;
-	return __get_request_wait(n, dev);
+	spin_lock_irq(&io_request_lock);
+	rq = get_request(q, rw);
+	spin_unlock_irq(&io_request_lock);
+	if (rq)
+		return rq;
+	return __get_request_wait(q, rw);
 }

 /* RO fail safe mechanism */
@@ -422,35 +444,41 @@
  */
 static inline void add_request(request_queue_t * q, struct request * req,
-			       struct list_head * head, int latency)
+			       struct list_head *head, int lat)
 {
 	int major;

 	drive_stat_acct(req, req->nr_sectors, 1);
-
-	if (list_empty(head)) {
-		req->elevator_sequence = elevator_sequence(&q->elevator, latency);
-		list_add(&req->queue, &q->queue_head);
-		return;
-	}
-	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, latency);
-
 	/*
+	 * let the selected elevator insert the request
+	 */
+	q->elevator.elevator_fn(req, &q->elevator, &q->queue_head, head, lat);
+
+	/*
 	 * FIXME(eric) I don't understand why there is a need for this
 	 * special case code.  It clearly doesn't fit any more with
 	 * the new queueing architecture, and it got added in 2.3.10.
 	 * I am leaving this in here until I hear back from the COMPAQ
	 * people.
- */ + */ major = MAJOR(req->rq_dev); if (major >= COMPAQ_SMART2_MAJOR+0 && major <= COMPAQ_SMART2_MAJOR+7) - { (q->request_fn)(q); - } - if (major >= DAC960_MAJOR+0 && major <= DAC960_MAJOR+7) - { (q->request_fn)(q); +} + +void inline blkdev_release_request(struct request *req) +{ + req->rq_status = RQ_INACTIVE; + + /* + * Request may not have originated from ll_rw_blk + */ + if (req->q) { + list_add(&req->table, &req->q->request_freelist); + req->q->queue_requests--; + wake_up(&req->q->wait_for_request); } } @@ -478,13 +506,12 @@ if(!(q->merge_requests_fn)(q, req, next, max_segments)) return; - elevator_merge_requests(&q->elevator, req, next); + elevator_merge_requests(req, next); req->bhtail->b_reqnext = next->bh; req->bhtail = next->bhtail; req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; - next->rq_status = RQ_INACTIVE; list_del(&next->queue); - wake_up (&wait_for_request); + blkdev_release_request(next); } static inline void attempt_back_merge(request_queue_t * q, @@ -512,18 +539,16 @@ } static inline void __make_request(request_queue_t * q, int rw, - struct buffer_head * bh) + struct buffer_head * bh) { int major = MAJOR(bh->b_rdev); unsigned int sector, count; int max_segments = MAX_SEGMENTS; - struct request * req; - int rw_ahead, max_req, max_sectors; - unsigned long flags; - - int orig_latency, latency, starving, sequence; - struct list_head * entry, * head = &q->queue_head; - elevator_t * elevator; + struct request * req = NULL; + int rw_ahead, max_sectors, el_ret; + struct list_head *head = &q->queue_head; + int latency; + elevator_t *elevator = &q->elevator; count = bh->b_size >> 9; sector = bh->b_rsector; @@ -557,7 +582,6 @@ if (buffer_uptodate(bh)) /* Hmmph! Already have it */ goto end_io; kstat.pgpgin++; - max_req = NR_REQUEST; /* reads take precedence */ break; case WRITERAW: rw = WRITE; @@ -574,7 +598,6 @@ * requests are only for reads. */ kstat.pgpgout++; - max_req = (NR_REQUEST * 2) / 3; break; default: BUG(); @@ -599,153 +622,80 @@ /* look for a free request. */ /* - * Loop uses two requests, 1 for loop and 1 for the real device. - * Cut max_req in half to avoid running out and deadlocking. - */ - if ((major == LOOP_MAJOR) || (major == NBD_MAJOR)) - max_req >>= 1; - - /* * Try to coalesce the new request with old requests */ max_sectors = get_max_sectors(bh->b_rdev); - elevator = &q->elevator; - orig_latency = elevator_request_latency(elevator, rw); + latency = elevator_request_latency(elevator, rw); /* * Now we acquire the request spinlock, we have to be mega careful * not to schedule or do something nonatomic */ - spin_lock_irqsave(&io_request_lock,flags); - elevator_debug(q, bh->b_rdev); + spin_lock_irq(&io_request_lock); + elevator_default_debug(q, bh->b_rdev); if (list_empty(head)) { q->plug_device_fn(q, bh->b_rdev); /* is atomic */ goto get_rq; } - /* avoid write-bombs to not hurt iteractiveness of reads */ - if (rw != READ && elevator->read_pendings) - max_segments = elevator->max_bomb_segments; - - sequence = elevator->sequence; - latency = orig_latency - elevator->nr_segments; - starving = 0; - entry = head; - - /* - * The scsi disk and cdrom drivers completely remove the request - * from the queue when they start processing an entry. For this - * reason it is safe to continue to add links to the top entry - * for those devices. - * - * All other drivers need to jump over the first entry, as that - * entry may be busy being processed and we thus can't change - * it. 
-	 */
-	if (q->head_active && !q->plugged)
-		head = head->next;
-
-	while ((entry = entry->prev) != head && !starving) {
-		req = blkdev_entry_to_request(entry);
-		if (!req->q)
-			break;
-		latency += req->nr_segments;
-		if (elevator_sequence_before(req->elevator_sequence, sequence))
-			starving = 1;
-		if (latency < 0)
-			continue;
+	el_ret = elevator->elevator_merge_fn(q, &req, bh, rw, &max_sectors, &max_segments);
+	switch (el_ret) {

-		if (req->sem)
-			continue;
-		if (req->cmd != rw)
-			continue;
-		if (req->nr_sectors + count > max_sectors)
-			continue;
-		if (req->rq_dev != bh->b_rdev)
-			continue;
-		/* Can we add it to the end of this request? */
-		if (req->sector + req->nr_sectors == sector) {
-			if (latency - req->nr_segments < 0)
-				break;
-			/*
-			 * The merge_fn is a more advanced way
-			 * of accomplishing the same task.  Instead
-			 * of applying a fixed limit of some sort
-			 * we instead define a function which can
-			 * determine whether or not it is safe to
-			 * merge the request or not.
-			 *
-			 * See if this queue has rules that
-			 * may suggest that we shouldn't merge
-			 * this
-			 */
-			if(!(q->back_merge_fn)(q, req, bh, max_segments))
+		case ELEVATOR_BACK_MERGE:
+			if (!q->back_merge_fn(q, req, bh, max_segments))
 				break;
 			req->bhtail->b_reqnext = bh;
 			req->bhtail = bh;
-			req->nr_sectors = req->hard_nr_sectors += count;
+			req->nr_sectors = req->hard_nr_sectors += count;
+			req->e = elevator;
 			drive_stat_acct(req, count, 0);
-
-			elevator_merge_after(elevator, req, latency);
-
-			/* Can we now merge this req with the next? */
 			attempt_back_merge(q, req, max_sectors, max_segments);
-		/* or to the beginning? */
-		} else if (req->sector - count == sector) {
-			if (starving)
-				break;
-			/*
-			 * The merge_fn is a more advanced way
-			 * of accomplishing the same task.  Instead
-			 * of applying a fixed limit of some sort
-			 * we instead define a function which can
-			 * determine whether or not it is safe to
-			 * merge the request or not.
-			 *
-			 * See if this queue has rules that
-			 * may suggest that we shouldn't merge
-			 * this
-			 */
-			if(!(q->front_merge_fn)(q, req, bh, max_segments))
+			goto out;
+
+		case ELEVATOR_FRONT_MERGE:
+			if (!q->front_merge_fn(q, req, bh, max_segments))
 				break;
-			bh->b_reqnext = req->bh;
-			req->bh = bh;
-			req->buffer = bh->b_data;
-			req->current_nr_sectors = count;
-			req->sector = req->hard_sector = sector;
-			req->nr_sectors = req->hard_nr_sectors += count;
+			bh->b_reqnext = req->bh;
+			req->bh = bh;
+			req->buffer = bh->b_data;
+			req->current_nr_sectors = count;
+			req->sector = req->hard_sector = sector;
+			req->nr_sectors = req->hard_nr_sectors += count;
+			req->e = elevator;
 			drive_stat_acct(req, count, 0);
-
-			elevator_merge_before(elevator, req, latency);
-
 			attempt_front_merge(q, head, req, max_sectors, max_segments);
-		} else
-			continue;
-
-		q->elevator.sequence++;
-		spin_unlock_irqrestore(&io_request_lock,flags);
-		return;
+			goto out;
+		/*
+		 * elevator says don't/can't merge. get new request
+		 */
+		case ELEVATOR_NO_MERGE:
+			break;
+		default:
+			printk("elevator returned crap (%d)\n", el_ret);
+			BUG();
 	}
-
-/* find an unused request. */
-get_rq:
-	req = get_request(max_req, bh->b_rdev);
-
+
 	/*
-	 * if no request available: if rw_ahead, forget it,
-	 * otherwise try again blocking..
+	 * Grab a free request from the freelist. Reads first try their
+	 * own queue - if that is empty, we steal from the write list.
+	 * Writes must block if the write list is empty, and read aheads
	 * are not crucial.
*/ - if (!req) { - spin_unlock_irqrestore(&io_request_lock,flags); +get_rq: + if ((req = get_request(q, rw)) == NULL) { + spin_unlock_irq(&io_request_lock); if (rw_ahead) goto end_io; - req = __get_request_wait(max_req, bh->b_rdev); - spin_lock_irqsave(&io_request_lock,flags); - /* revalidate elevator */ + req = __get_request_wait(q, rw); + spin_lock_irq(&io_request_lock); + + /* + * revalidate elevator, queue request_lock was dropped + */ head = &q->queue_head; if (q->head_active && !q->plugged) head = head->next; @@ -763,13 +713,13 @@ req->sem = NULL; req->bh = bh; req->bhtail = bh; - req->q = q; - add_request(q, req, head, orig_latency); - elevator_account_request(elevator, req); - - spin_unlock_irqrestore(&io_request_lock, flags); + req->rq_dev = bh->b_rdev; + req->e = elevator; + add_request(q, req, head, latency); + elevator_account_request(req); +out: + spin_unlock_irq(&io_request_lock); return; - end_io: bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state)); } @@ -785,7 +735,6 @@ int generic_make_request (request_queue_t *q, int rw, struct buffer_head * bh) { - unsigned long flags; int ret; /* @@ -793,7 +742,6 @@ * still free to implement/resolve their own stacking * by explicitly returning 0) */ - while (q->make_request_fn) { ret = q->make_request_fn(q, rw, bh); if (ret > 0) { @@ -807,10 +755,10 @@ * the IO request? (normal case) */ __make_request(q, rw, bh); - spin_lock_irqsave(&io_request_lock,flags); + spin_lock_irq(&io_request_lock); if (q && !q->plugged) (q->request_fn)(q); - spin_unlock_irqrestore(&io_request_lock,flags); + spin_unlock_irq(&io_request_lock); return 0; } @@ -949,31 +897,31 @@ void end_that_request_last(struct request *req) { - if (req->q) + if (req->e) { + printk("end_that_request_last called with non-dequeued req\n"); BUG(); + } if (req->sem != NULL) up(req->sem); - req->rq_status = RQ_INACTIVE; - wake_up(&wait_for_request); + + blkdev_release_request(req); } int __init blk_dev_init(void) { - struct request * req; struct blk_dev_struct *dev; - for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) { + request_cachep = kmem_cache_create("blkdev_requests", + sizeof(struct request), + 0, SLAB_HWCACHE_ALIGN, NULL, NULL); + + for (dev = blk_dev + MAX_BLKDEV; dev-- != blk_dev;) dev->queue = NULL; - blk_init_queue(&dev->request_queue, NULL); - } - req = all_requests + NR_REQUEST; - while (--req >= all_requests) { - req->rq_status = RQ_INACTIVE; - } memset(ro_bits,0,sizeof(ro_bits)); memset(max_readahead, 0, sizeof(max_readahead)); memset(max_sectors, 0, sizeof(max_sectors)); + #ifdef CONFIG_AMIGA_Z2RAM z2_init(); #endif @@ -1095,3 +1043,4 @@ EXPORT_SYMBOL(blk_queue_pluggable); EXPORT_SYMBOL(blk_queue_make_request); EXPORT_SYMBOL(generic_make_request); +EXPORT_SYMBOL(blkdev_release_request); diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/block/loop.c linux/drivers/block/loop.c --- /opt/kernel/linux-2.3.99-pre7-4/drivers/block/loop.c Thu May 4 15:45:04 2000 +++ linux/drivers/block/loop.c Thu May 4 16:34:16 2000 @@ -818,6 +818,7 @@ if (devfs_unregister_blkdev(MAJOR_NR, "loop") != 0) printk(KERN_WARNING "loop: cannot unregister blkdev\n"); + blk_cleanup_queue(BLK_DEFAULT_QUEUE(MAJOR_NR)); kfree (loop_dev); kfree (loop_sizes); kfree (loop_blksizes); diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/cdrom/cdrom.c linux/drivers/cdrom/cdrom.c --- /opt/kernel/linux-2.3.99-pre7-4/drivers/cdrom/cdrom.c Thu May 4 15:42:31 2000 +++ linux/drivers/cdrom/cdrom.c Wed May 3 14:23:36 2000 @@ -199,11 
+199,22 @@ home now. -- Clear header length in mode_select unconditionally. -- Removed the register_disk() that was added, not needed here. + + 3.08 May 1, 2000 - Jens Axboe + -- Fix direction flag in setup_send_key and setup_report_key. This + gave some SCSI adapters problems. + -- Always return -EROFS for write opens + -- Convert to module_init/module_exit style init and remove some + of the #ifdef MODULE stuff + -- Fix several dvd errors - DVD_LU_SEND_ASF should pass agid, + DVD_HOST_SEND_RPC_STATE did not set buffer size in cdb, and + dvd_do_auth passed uninitialized data to drive because init_cdrom_command + did not clear a 0 sized buffer. -------------------------------------------------------------------------*/ -#define REVISION "Revision: 3.07" -#define VERSION "Id: cdrom.c 3.07 2000/02/02" +#define REVISION "Revision: 3.08" +#define VERSION "Id: cdrom.c 3.08 2000/05/01" /* I use an error-log mask to give fine grain control over the type of messages dumped to the system logs. The available masks include: */ @@ -432,17 +443,6 @@ while (cdi != NULL && cdi->dev != dev) cdi = cdi->next; - /* we need to find the device this way when IDE devices such - * as /dev/hdc2 are opened. SCSI drives will be found above and - * so will /dev/hdc, for instance. - */ - if (cdi == NULL) { - kdev_t cd_dev = MKDEV(MAJOR(dev), MINOR(dev) | CD_PART_MASK); - cdi = topCdromPtr; - while (cdi != NULL && cdi->dev != cd_dev) - cdi = cdi->next; - } - return cdi; } @@ -834,7 +834,7 @@ /* This talks to the VFS, which doesn't like errors - just 1 or 0. * Returning "0" is always safe (media hasn't been changed). Do that * if the low-level cdrom driver dosn't support media changed. */ - if (cdi->ops->media_changed == NULL) + if (cdi == NULL || cdi->ops->media_changed == NULL) return 0; if (!CDROM_CAN(CDC_MEDIA_CHANGED)) return 0; @@ -994,6 +994,7 @@ struct cdrom_generic_command cgc; struct cdrom_device_ops *cdo = cdi->ops; + memset(buf, 0, sizeof(buf)); init_cdrom_command(&cgc, buf, 0, CGC_DATA_READ); switch (ai->type) { @@ -1052,7 +1053,7 @@ case DVD_LU_SEND_ASF: cdinfo(CD_DVD, "entering DVD_LU_SEND_ASF\n"); - setup_report_key(&cgc, ai->lsasf.asf, 5); + setup_report_key(&cgc, ai->lsasf.agid, 5); if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; @@ -1113,6 +1114,7 @@ case DVD_HOST_SEND_RPC_STATE: cdinfo(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n"); setup_send_key(&cgc, 0, 6); + buf[1] = 6; buf[4] = ai->hrpcs.pdrc; if ((ret = cdo->generic_packet(cdi, &cgc))) diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/ide/ide-cd.c linux/drivers/ide/ide-cd.c --- /opt/kernel/linux-2.3.99-pre7-4/drivers/ide/ide-cd.c Thu May 4 15:42:32 2000 +++ linux/drivers/ide/ide-cd.c Thu May 4 16:52:22 2000 @@ -282,9 +282,12 @@ * - cdrom_read_capacity returns one frame too little. * - Fix real capacity reporting. * + * 4.58 May 1, 2000 - Clean up ACER50 stuff. 
+ *			- Fix small problem with ide_cdrom_capacity
+ *
 *************************************************************************/

-#define IDECD_VERSION "4.57"
+#define IDECD_VERSION "4.58"

 #include <linux/config.h>
 #include <linux/module.h>
@@ -2159,9 +2162,9 @@
 {
 	struct cdrom_info *info = drive->driver_data;
 	struct cdrom_device_info *devinfo = &info->devinfo;
-	int minor = (drive->select.b.unit)<<PARTN_BITS;
+	int minor = (drive->select.b.unit) << PARTN_BITS;

-	devinfo->dev = MKDEV (HWIF(drive)->major, minor | CD_PART_MASK);
+	devinfo->dev = MKDEV (HWIF(drive)->major, minor);
 	devinfo->ops = &ide_cdrom_dops;
 	devinfo->mask = 0;
 	*(int *)&devinfo->speed = CDROM_STATE_FLAGS (drive)->current_speed;
@@ -2195,22 +2198,20 @@
 	return register_cdrom(devinfo);
 }

-/*
- * the buffer struct used by ide_cdrom_get_capabilities()
- */
-struct get_capabilities_buf {
-	char pad[8];
-	struct atapi_capabilities_page cap;
-	char extra_cap[4];
-};
-
 static
 int ide_cdrom_get_capabilities(ide_drive_t *drive, struct atapi_capabilities_page *cap)
 {
 	struct cdrom_info *info = drive->driver_data;
 	struct cdrom_device_info *cdi = &info->devinfo;
 	struct cdrom_generic_command cgc;
-	int stat, attempts = 3;
+	int stat, attempts = 3, size = sizeof(*cap);
+
+	/*
+	 * ACER50 (and others?) require the full spec length mode sense
+	 * page capabilities size, but older drives break.
+	 */
+	if (!(drive->id && !strcmp(drive->id->model,"ATAPI CD ROM DRIVE 50X MAX")))
+		size -= 4;

 	/* we have to cheat a little here. the packet will eventually
	 * be queued with ide_cdrom_packet(), which extracts the
	 */
 	cdi->handle = (ide_drive_t *) drive;
 	cdi->ops = &ide_cdrom_dops;
-	init_cdrom_command(&cgc, cap, sizeof(*cap), CGC_DATA_UNKNOWN);
+	init_cdrom_command(&cgc, cap, size, CGC_DATA_UNKNOWN);
 	do { /* we seem to get stat=0x01,err=0x00 the first time (??) */
 		stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
 		if (!stat)
@@ -2513,9 +2514,8 @@
 static
 int ide_cdrom_check_media_change (ide_drive_t *drive)
 {
-	return cdrom_fops.check_media_change
-		(MKDEV (HWIF (drive)->major,
-			(drive->select.b.unit)<<PARTN_BITS));
+	return cdrom_fops.check_media_change (MKDEV (HWIF (drive)->major,
+			(drive->select.b.unit) << PARTN_BITS));
 }

 static
@@ -2545,8 +2545,7 @@
 {
 	unsigned capacity;

-	capacity = cdrom_read_capacity(drive, &capacity, NULL);
-	return capacity ? 0 : capacity * SECTORS_PER_FRAME;
+	return cdrom_read_capacity(drive, &capacity, NULL) ? 0 : capacity * SECTORS_PER_FRAME;
 }

 static
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/ide/ide-cd.h linux/drivers/ide/ide-cd.h
--- /opt/kernel/linux-2.3.99-pre7-4/drivers/ide/ide-cd.h	Thu May  4 15:42:32 2000
+++ linux/drivers/ide/ide-cd.h	Thu May  4 14:34:46 2000
@@ -10,15 +10,6 @@
 #include <linux/cdrom.h>
 #include <asm/byteorder.h>

-/*
- * Apparently older drives have problems with filling out the entire
- * mode_sense capability structure. Define this to 1 if your drive isn't
- * probed correctly.
- */
-#ifndef BROKEN_CAP_PAGE
-#define BROKEN_CAP_PAGE 0
-#endif
-
 /* Turn this on to have the driver print out the meanings of the
    ATAPI error codes.  This will use up additional kernel-space
    memory, though. */
@@ -410,9 +401,7 @@
 	unsigned short buffer_size;

	/* Current speed (in kB/s).
*/ unsigned short curspeed; -#if !BROKEN_CAP_PAGE char pad[4]; -#endif }; diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/ide/ide-disk.c linux/drivers/ide/ide-disk.c --- /opt/kernel/linux-2.3.99-pre7-4/drivers/ide/ide-disk.c Thu May 4 15:42:32 2000 +++ linux/drivers/ide/ide-disk.c Thu May 4 16:32:18 2000 @@ -688,13 +688,12 @@ static int set_nowerr(ide_drive_t *drive, int arg) { - unsigned long flags; - - if (ide_spin_wait_hwgroup(drive, &flags)) + if (ide_spin_wait_hwgroup(drive)) return -EBUSY; + drive->nowerr = arg; drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT; - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irq(&io_request_lock); return 0; } diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/ide/ide.c linux/drivers/ide/ide.c --- /opt/kernel/linux-2.3.99-pre7-4/drivers/ide/ide.c Thu May 4 15:45:05 2000 +++ linux/drivers/ide/ide.c Thu May 4 16:38:12 2000 @@ -771,7 +771,7 @@ spin_lock_irqsave(&io_request_lock, flags); blkdev_dequeue_request(rq); HWGROUP(drive)->rq = NULL; - rq->rq_status = RQ_INACTIVE; + blkdev_release_request(rq); spin_unlock_irqrestore(&io_request_lock, flags); if (rq->sem != NULL) up(rq->sem); /* inform originator that rq has been serviced */ @@ -1656,16 +1656,8 @@ */ void ide_init_drive_cmd (struct request *rq) { - rq->buffer = NULL; + memset(rq, 0, sizeof(*rq)); rq->cmd = IDE_DRIVE_CMD; - rq->sector = 0; - rq->nr_sectors = 0; - rq->nr_segments = 0; - rq->current_nr_sectors = 0; - rq->sem = NULL; - rq->bh = NULL; - rq->bhtail = NULL; - rq->q = NULL; } /* @@ -2304,24 +2296,24 @@ return val; } -int ide_spin_wait_hwgroup (ide_drive_t *drive, unsigned long *flags) +int ide_spin_wait_hwgroup (ide_drive_t *drive) { ide_hwgroup_t *hwgroup = HWGROUP(drive); unsigned long timeout = jiffies + (3 * HZ); - spin_lock_irqsave(&io_request_lock, *flags); + spin_lock_irq(&io_request_lock); while (hwgroup->busy) { - unsigned long lflags; - spin_unlock_irqrestore(&io_request_lock, *flags); - __save_flags(lflags); /* local CPU only */ + unsigned long flags; + spin_unlock_irq(&io_request_lock); + __save_flags(flags); /* local CPU only */ __sti(); /* local CPU only; needed for jiffies */ if (0 < (signed long)(jiffies - timeout)) { - __restore_flags(lflags); /* local CPU only */ + __restore_flags(flags); printk("%s: channel busy\n", drive->name); return -EBUSY; } - __restore_flags(lflags); /* local CPU only */ - spin_lock_irqsave(&io_request_lock, *flags); + __restore_flags(flags); /* local CPU only */ + spin_lock_irq(&io_request_lock); } return 0; } @@ -2333,7 +2325,6 @@ */ int ide_write_setting (ide_drive_t *drive, ide_settings_t *setting, int val) { - unsigned long flags; int i; u32 *p; @@ -2345,7 +2336,7 @@ return -EINVAL; if (setting->set) return setting->set(drive, val); - if (ide_spin_wait_hwgroup(drive, &flags)) + if (ide_spin_wait_hwgroup(drive)) return -EBUSY; switch (setting->data_type) { case TYPE_BYTE: @@ -2363,7 +2354,7 @@ *p = val; break; } - spin_unlock_irqrestore(&io_request_lock, flags); + spin_unlock_irq(&io_request_lock); return 0; } diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/scsi/scsi.c linux/drivers/scsi/scsi.c --- /opt/kernel/linux-2.3.99-pre7-4/drivers/scsi/scsi.c Thu May 4 15:42:36 2000 +++ linux/drivers/scsi/scsi.c Wed May 3 03:42:37 2000 @@ -2561,7 +2561,6 @@ } } } - printk("wait_for_request = %p\n", &wait_for_request); #endif /* CONFIG_SCSI_LOGGING */ /* } */ } #endif /* CONFIG_PROC_FS */ diff -ur --exclude-from 
/home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/drivers/scsi/scsi_lib.c linux/drivers/scsi/scsi_lib.c --- /opt/kernel/linux-2.3.99-pre7-4/drivers/scsi/scsi_lib.c Thu May 4 15:42:36 2000 +++ linux/drivers/scsi/scsi_lib.c Wed May 3 03:42:37 2000 @@ -1019,8 +1019,7 @@ * We have copied the data out of the request block - it is now in * a field in SCpnt. Release the request block. */ - req->rq_status = RQ_INACTIVE; - wake_up(&wait_for_request); + blkdev_release_request(req); } /* * Now it is finally safe to release the lock. We are diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/include/linux/blk.h linux/include/linux/blk.h --- /opt/kernel/linux-2.3.99-pre7-4/include/linux/blk.h Thu May 4 15:42:39 2000 +++ linux/include/linux/blk.h Thu May 4 14:27:51 2000 @@ -14,13 +14,6 @@ extern spinlock_t io_request_lock; /* - * NR_REQUEST is the number of entries in the request-queue. - * NOTE that writes may use only the low 2/3 of these: reads - * take precedence. - */ -#define NR_REQUEST 256 - -/* * Initialization functions. */ extern int isp16_init(void); @@ -94,12 +87,9 @@ extern inline void blkdev_dequeue_request(struct request * req) { - if (req->q) - { - if (req->cmd == READ) - req->q->elevator.read_pendings--; - req->q->elevator.nr_segments -= req->nr_segments; - req->q = NULL; + if (req->e) { + req->e->dequeue_fn(req); + req->e = NULL; } list_del(&req->queue); } diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/include/linux/blkdev.h linux/include/linux/blkdev.h --- /opt/kernel/linux-2.3.99-pre7-4/include/linux/blkdev.h Wed Apr 12 02:31:49 2000 +++ linux/include/linux/blkdev.h Thu May 4 14:27:51 2000 @@ -9,6 +9,8 @@ struct request_queue; typedef struct request_queue request_queue_t; +struct elevator_s; +typedef struct elevator_s elevator_t; /* * Ok, this is an expanded form so that we can use the same @@ -19,7 +21,11 @@ struct request { struct list_head queue; int elevator_sequence; + struct list_head table; + /* + * queue free list belongs to + */ volatile int rq_status; /* should split this into a few status bits */ #define RQ_INACTIVE (-1) #define RQ_ACTIVE 1 @@ -41,7 +47,8 @@ struct semaphore * sem; struct buffer_head * bh; struct buffer_head * bhtail; - request_queue_t * q; + request_queue_t *q; + elevator_t *e; }; #include @@ -60,11 +67,25 @@ typedef void (plug_device_fn) (request_queue_t *q, kdev_t device); typedef void (unplug_device_fn) (void *q); +/* + * Default nr free requests per queue + */ +#define QUEUE_NR_REQUESTS 512 +#define QUEUE_WRITES_MAX ((2 * QUEUE_NR_REQUESTS) / 3) + struct request_queue { - struct list_head queue_head; - /* together with queue_head for cacheline sharing */ - elevator_t elevator; + /* + * the queue request freelist, one for reads and one for writes + */ + struct list_head request_freelist; + int queue_requests; + + /* + * Together with queue_head for cacheline sharing + */ + struct list_head queue_head; + elevator_t elevator; request_fn_proc * request_fn; merge_request_fn * back_merge_fn; @@ -76,22 +97,33 @@ * The queue owner gets to use this for whatever they like. * ll_rw_blk doesn't touch it. */ - void * queuedata; + void * queuedata; /* * This is used to remove the plug when tq_disk runs. */ - struct tq_struct plug_tq; + struct tq_struct plug_tq; /* * Boolean that indicates whether this queue is plugged or not. */ - char plugged; + char plugged; /* * Boolean that indicates whether current_request is active or * not. 
*/ - char head_active; + char head_active; + + /* + * Is meant to protect the queue in the future instead of + * io_request_lock + */ + spinlock_t request_lock; + + /* + * Tasks wait here for free request + */ + wait_queue_head_t wait_for_request; }; struct blk_dev_struct { @@ -118,13 +150,13 @@ extern struct sec_size * blk_sec[MAX_BLKDEV]; extern struct blk_dev_struct blk_dev[MAX_BLKDEV]; -extern wait_queue_head_t wait_for_request; extern void grok_partitions(struct gendisk *dev, int drive, unsigned minors, long size); extern void register_disk(struct gendisk *dev, kdev_t first, unsigned minors, struct block_device_operations *ops, long size); extern void generic_unplug_device(void * data); extern int generic_make_request(request_queue_t *q, int rw, struct buffer_head * bh); -extern request_queue_t * blk_get_queue(kdev_t dev); +extern request_queue_t *blk_get_queue(kdev_t dev); +extern void blkdev_release_request(struct request *); /* * Access functions for manipulating queue properties diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/include/linux/elevator.h linux/include/linux/elevator.h --- /opt/kernel/linux-2.3.99-pre7-4/include/linux/elevator.h Mon Mar 13 04:32:58 2000 +++ linux/include/linux/elevator.h Wed May 3 03:42:37 2000 @@ -3,13 +3,15 @@ #define ELEVATOR_DEBUG -struct elevator_s; -typedef struct elevator_s elevator_t; - typedef void (elevator_fn) (struct request *, elevator_t *, struct list_head *, struct list_head *, int); +typedef int (elevator_merge_fn) (request_queue_t *, struct request **, + struct buffer_head *, int, int *, int *); + +typedef void (elevator_dequeue_fn) (struct request *); + struct elevator_s { int sequence; @@ -21,29 +23,26 @@ unsigned int nr_segments; int read_pendings; + char elevator_name[16]; + elevator_fn * elevator_fn; + elevator_merge_fn *elevator_merge_fn; + elevator_dequeue_fn *dequeue_fn; }; -#define ELEVATOR_DEFAULTS \ -((elevator_t) { \ - 0, /* sequence */ \ - \ - 128, /* read_latency */ \ - 8192, /* write_latency */ \ - 4, /* max_bomb_segments */ \ - \ - 0, /* nr_segments */ \ - 0, /* read_pendings */ \ - \ - elevator_default, /* elevator_fn */ \ - }) - +void elevator_default(struct request *, elevator_t *, struct list_head *, struct list_head *, int); +int elevator_default_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *); +void elevator_default_dequeue(struct request *); +void elevator_noop(struct request *, elevator_t *, struct list_head *, struct list_head *, int); +int elevator_noop_merge(request_queue_t *, struct request **, struct buffer_head *, int, int *, int *); +void elevator_noop_dequeue(struct request *); typedef struct blkelv_ioctl_arg_s { void * queue_ID; int read_latency; int write_latency; int max_bomb_segments; + char elevator_name[16]; } blkelv_ioctl_arg_t; #define BLKELVGET _IO(0x12,106) @@ -52,13 +51,12 @@ extern int blkelvget_ioctl(elevator_t *, blkelv_ioctl_arg_t *); extern int blkelvset_ioctl(elevator_t *, const blkelv_ioctl_arg_t *); - -extern void elevator_init(elevator_t *); +extern void elevator_init(elevator_t *, elevator_t); #ifdef ELEVATOR_DEBUG -extern void elevator_debug(request_queue_t *, kdev_t); +extern void elevator_default_debug(request_queue_t *, kdev_t); #else -#define elevator_debug(a,b) do { } while(0) +#define elevator_default_debug(a,b) do { } while(0) #endif #define elevator_sequence_after(a,b) ((int)((b)-(a)) < 0) @@ -67,6 +65,13 @@ #define elevator_sequence_before_eq(a,b) elevator_sequence_after_eq(b,a) /* + * Return values from 
elevator merger
+ */
+#define ELEVATOR_NO_MERGE	0
+#define ELEVATOR_FRONT_MERGE	1
+#define ELEVATOR_BACK_MERGE	2
+
+/*
  * This is used in the elevator algorithm.  We don't prioritise reads
  * over writes any more --- although reads are more time-critical than
  * writes, by treating them equally we increase filesystem throughput.
@@ -77,12 +82,12 @@
 			(s1)->sector < (s2)->sector)) ||	\
 	(s1)->rq_dev < (s2)->rq_dev)

-static inline void elevator_merge_requests(elevator_t * e, struct request * req, struct request * next)
+static inline void elevator_merge_requests(struct request * req, struct request * next)
 {
 	if (elevator_sequence_before(next->elevator_sequence, req->elevator_sequence))
 		req->elevator_sequence = next->elevator_sequence;
 	if (req->cmd == READ)
-		e->read_pendings--;
+		req->e->read_pendings--;
 }

@@ -91,23 +96,23 @@
 	return latency + e->sequence;
 }

-#define elevator_merge_before(q, req, lat)	__elevator_merge((q), (req), (lat), 0)
-#define elevator_merge_after(q, req, lat)	__elevator_merge((q), (req), (lat), 1)
-static inline void __elevator_merge(elevator_t * elevator, struct request * req, int latency, int after)
+#define elevator_merge_before(req, lat)	__elevator_merge((req), (lat), 0)
+#define elevator_merge_after(req, lat)	__elevator_merge((req), (lat), 1)
+static inline void __elevator_merge(struct request * req, int latency, int after)
 {
-	int sequence = elevator_sequence(elevator, latency);
+	int sequence = elevator_sequence(req->e, latency);
 	if (after)
 		sequence -= req->nr_segments;
 	if (elevator_sequence_before(sequence, req->elevator_sequence))
 		req->elevator_sequence = sequence;
 }

-static inline void elevator_account_request(elevator_t * elevator, struct request * req)
+static inline void elevator_account_request(struct request * req)
 {
-	elevator->sequence++;
+	req->e->sequence++;
 	if (req->cmd == READ)
-		elevator->read_pendings++;
-	elevator->nr_segments++;
+		req->e->read_pendings++;
+	req->e->nr_segments++;
 }

 static inline int elevator_request_latency(elevator_t * elevator, int rw)
@@ -120,5 +125,41 @@

 	return latency;
 }
+
+#define ELEVATOR_DEFAULT					\
+((elevator_t) {							\
+	0,				/* sequence */		\
+								\
+	100000,				/* read_latency */	\
+	100000,				/* write_latency */	\
+	128,				/* max_bomb_segments */	\
+								\
+	0,				/* nr_segments */	\
+	0,				/* read_pendings */	\
+								\
+	"default",			/* elevator_name */	\
+								\
+	elevator_default,		/* elevator_fn */	\
+	elevator_default_merge,		/* elevator_merge_fn */	\
+	elevator_default_dequeue,	/* dequeue_fn */	\
+	})
+
+#define ELEVATOR_NOOP						\
+((elevator_t) {							\
+	0,				/* sequence */		\
+								\
+	0,				/* read_latency */	\
+	0,				/* write_latency */	\
+	0,				/* max_bomb_segments */	\
+								\
+	0,				/* nr_segments */	\
+	0,				/* read_pendings */	\
+								\
+	"noop",				/* elevator_name */	\
+								\
+	elevator_noop,			/* elevator_fn */	\
+	elevator_noop_merge,		/* elevator_merge_fn */	\
+	elevator_noop_dequeue,		/* dequeue_fn */	\
+	})

 #endif
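
Since elevator_init() now just copies the supplied table, a driver that wants FIFO behaviour (say, for a device that schedules internally) can override the default right after setting up its queue. A sketch, assuming hypothetical driver code with access to its request_queue_t (mydrv_request is a placeholder):

	#include <linux/blkdev.h>
	#include <linux/elevator.h>

	static void mydrv_request(request_queue_t *q);	/* hypothetical */

	static void mydrv_setup_queue(request_queue_t *q)
	{
		blk_init_queue(q, mydrv_request);	/* installs ELEVATOR_DEFAULT */
		elevator_init(&q->elevator, ELEVATOR_NOOP);	/* switch to noop */
	}
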
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/include/linux/ide.h linux/include/linux/ide.h
--- /opt/kernel/linux-2.3.99-pre7-4/include/linux/ide.h	Thu May  4 15:42:39 2000
+++ linux/include/linux/ide.h	Thu May  4 14:29:25 2000
@@ -769,7 +769,7 @@
  */
 int drive_is_flashcard (ide_drive_t *drive);

-int ide_spin_wait_hwgroup(ide_drive_t *drive, unsigned long *flags);
+int ide_spin_wait_hwgroup(ide_drive_t *drive);
 void ide_timer_expiry (unsigned long data);
 void ide_intr (int irq, void *dev_id, struct pt_regs *regs);
 void do_ide0_request (request_queue_t * q);
diff -ur --exclude-from /home/axboe/cdrom/exclude /opt/kernel/linux-2.3.99-pre7-4/kernel/ksyms.c linux/kernel/ksyms.c
--- /opt/kernel/linux-2.3.99-pre7-4/kernel/ksyms.c	Thu May  4 15:45:06 2000
+++ linux/kernel/ksyms.c	Thu May  4 14:27:14 2000
@@ -269,7 +269,6 @@
 /* block device driver support */
 EXPORT_SYMBOL(block_read);
 EXPORT_SYMBOL(block_write);
-EXPORT_SYMBOL(wait_for_request);
 EXPORT_SYMBOL(blksize_size);
 EXPORT_SYMBOL(hardsect_size);
 EXPORT_SYMBOL(blk_size);
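
To recap the driver-visible contract the DAC960, ide and scsi_lib hunks all converge on: a request is taken off the queue with blkdev_dequeue_request(), which runs the elevator's dequeue_fn, and handed back with blkdev_release_request() instead of poking rq_status and wait_for_request by hand. A condensed sketch of the pattern (hypothetical driver; io_request_lock assumed held, as in a normal request_fn):

	/* sketch, not from the patch: how a driver consumes requests now */
	static void mydrv_request_fn(request_queue_t *q)
	{
		struct request *rq;

		while (!list_empty(&q->queue_head)) {
			rq = blkdev_entry_to_request(q->queue_head.next);
			blkdev_dequeue_request(rq);	/* undoes elevator accounting via rq->e */
			/* ... program the hardware from rq, and on completion: */
			blkdev_release_request(rq);	/* RQ_INACTIVE + back on q's freelist */
		}
	}
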