From: Nick Piggin Following patch separates elevator noop, and allows it to be treated like the other schedulers. drivers/block/Kconfig.iosched | 21 +++++++++ drivers/block/Makefile | 1 drivers/block/elevator.c | 76 ----------------------------------- drivers/block/ll_rw_blk.c | 13 ++++-- drivers/block/noop-iosched.c | 89 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 121 insertions(+), 79 deletions(-) diff -puN drivers/block/elevator.c~standalone-elevator-noop drivers/block/elevator.c --- 25/drivers/block/elevator.c~standalone-elevator-noop 2003-08-06 19:57:26.000000000 -0700 +++ 25-akpm/drivers/block/elevator.c 2003-08-06 19:57:26.000000000 -0700 @@ -87,72 +87,6 @@ inline int elv_try_last_merge(request_qu } /* - * elevator noop - * - * See if we can find a request that this buffer can be coalesced with. - */ -int elevator_noop_merge(request_queue_t *q, struct list_head **insert, - struct bio *bio) -{ - struct list_head *entry = &q->queue_head; - struct request *__rq; - int ret; - - if ((ret = elv_try_last_merge(q, bio))) { - *insert = q->last_merge; - return ret; - } - - while ((entry = entry->prev) != &q->queue_head) { - __rq = list_entry_rq(entry); - - if (__rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) - break; - else if (__rq->flags & REQ_STARTED) - break; - - if (!blk_fs_request(__rq)) - continue; - - if ((ret = elv_try_merge(__rq, bio))) { - *insert = &__rq->queuelist; - q->last_merge = &__rq->queuelist; - return ret; - } - } - - return ELEVATOR_NO_MERGE; -} - -void elevator_noop_merge_requests(request_queue_t *q, struct request *req, - struct request *next) -{ - list_del_init(&next->queuelist); -} - -void elevator_noop_add_request(request_queue_t *q, struct request *rq, - struct list_head *insert_here) -{ - list_add_tail(&rq->queuelist, &q->queue_head); - - /* - * new merges must not precede this barrier - */ - if (rq->flags & REQ_HARDBARRIER) - q->last_merge = NULL; - else if (!q->last_merge) - q->last_merge = &rq->queuelist; -} - -struct 
request *elevator_noop_next_request(request_queue_t *q) -{ - if (!list_empty(&q->queue_head)) - return list_entry_rq(q->queue_head.next); - - return NULL; -} - -/* * general block -> elevator interface starts here */ int elevator_init(request_queue_t *q, elevator_t *type) @@ -415,18 +349,8 @@ void elv_unregister_queue(struct request } } -elevator_t elevator_noop = { - .elevator_merge_fn = elevator_noop_merge, - .elevator_merge_req_fn = elevator_noop_merge_requests, - .elevator_next_req_fn = elevator_noop_next_request, - .elevator_add_req_fn = elevator_noop_add_request, - .elevator_name = "noop", -}; - module_init(elevator_global_init); -EXPORT_SYMBOL(elevator_noop); - EXPORT_SYMBOL(elv_add_request); EXPORT_SYMBOL(__elv_add_request); EXPORT_SYMBOL(elv_requeue_request); diff -puN drivers/block/Kconfig.iosched~standalone-elevator-noop drivers/block/Kconfig.iosched --- 25/drivers/block/Kconfig.iosched~standalone-elevator-noop 2003-08-06 19:57:26.000000000 -0700 +++ 25-akpm/drivers/block/Kconfig.iosched 2003-08-06 19:57:26.000000000 -0700 @@ -1,8 +1,29 @@ +config IOSCHED_NOOP + bool "No-op I/O scheduler" if EMBEDDED + default y + ---help--- + The no-op I/O scheduler is a minimal scheduler that does basic merging + and sorting. Its main uses include non-disk based block devices like + memory devices, and specialised software or hardware environments + that do their own scheduling and require only minimal assistance from + the kernel. + config IOSCHED_AS bool "Anticipatory I/O scheduler" if EMBEDDED default y + ---help--- + The anticipatory I/O scheduler is the default disk scheduler. It is + generally a good choice for most environments, but is quite large and + complex when compared to the deadline I/O scheduler, it can also be + slower in some cases especially some database loads. 
config IOSCHED_DEADLINE bool "Deadline I/O scheduler" if EMBEDDED default y + ---help--- + The deadline I/O scheduler is simple and compact, and is often as + good as the anticipatory I/O scheduler, and in some database + workloads, better. In the case of a single process performing I/O to + a disk at any one time, its behaviour is almost identical to the + anticipatory I/O scheduler and so is a good choice. diff -puN drivers/block/ll_rw_blk.c~standalone-elevator-noop drivers/block/ll_rw_blk.c --- 25/drivers/block/ll_rw_blk.c~standalone-elevator-noop 2003-08-06 19:57:26.000000000 -0700 +++ 25-akpm/drivers/block/ll_rw_blk.c 2003-08-06 19:57:26.000000000 -0700 @@ -1227,11 +1227,14 @@ static elevator_t *chosen_elevator = &iosched_as; #elif defined(CONFIG_IOSCHED_DEADLINE) &iosched_deadline; -#else +#elif defined(CONFIG_IOSCHED_NOOP) &elevator_noop; +#else + NULL; +#error "You must have at least 1 I/O scheduler selected" #endif -#if defined(CONFIG_IOSCHED_AS) || defined(CONFIG_IOSCHED_DEADLINE) +#if defined(CONFIG_IOSCHED_AS) || defined(CONFIG_IOSCHED_DEADLINE) || defined (CONFIG_IOSCHED_NOOP) static int __init elevator_setup(char *str) { #ifdef CONFIG_IOSCHED_DEADLINE @@ -1242,11 +1245,15 @@ static int __init elevator_setup(char *s if (!strcmp(str, "as")) chosen_elevator = &iosched_as; #endif +#ifdef CONFIG_IOSCHED_NOOP + if (!strcmp(str, "noop")) + chosen_elevator = &elevator_noop; +#endif return 1; } __setup("elevator=", elevator_setup); -#endif /* CONFIG_IOSCHED_AS || CONFIG_IOSCHED_DEADLINE */ +#endif /* CONFIG_IOSCHED_AS || CONFIG_IOSCHED_DEADLINE || CONFIG_IOSCHED_NOOP */ /** * blk_init_queue - prepare a request queue for use with a block device diff -puN drivers/block/Makefile~standalone-elevator-noop drivers/block/Makefile --- 25/drivers/block/Makefile~standalone-elevator-noop 2003-08-06 19:57:26.000000000 -0700 +++ 25-akpm/drivers/block/Makefile 2003-08-06 19:57:26.000000000 -0700 @@ -15,6 +15,7 @@ obj-y := elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o 
+obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o obj-$(CONFIG_IOSCHED_AS) += as-iosched.o obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o obj-$(CONFIG_MAC_FLOPPY) += swim3.o diff -puN /dev/null drivers/block/noop-iosched.c --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25-akpm/drivers/block/noop-iosched.c 2003-08-06 19:57:26.000000000 -0700 @@ -0,0 +1,89 @@ +/* + * elevator noop + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * See if we can find a request that this buffer can be coalesced with. + */ +int elevator_noop_merge(request_queue_t *q, struct list_head **insert, + struct bio *bio) +{ + struct list_head *entry = &q->queue_head; + struct request *__rq; + int ret; + + if ((ret = elv_try_last_merge(q, bio))) { + *insert = q->last_merge; + return ret; + } + + while ((entry = entry->prev) != &q->queue_head) { + __rq = list_entry_rq(entry); + + if (__rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) + break; + else if (__rq->flags & REQ_STARTED) + break; + + if (!blk_fs_request(__rq)) + continue; + + if ((ret = elv_try_merge(__rq, bio))) { + *insert = &__rq->queuelist; + q->last_merge = &__rq->queuelist; + return ret; + } + } + + return ELEVATOR_NO_MERGE; +} + +void elevator_noop_merge_requests(request_queue_t *q, struct request *req, + struct request *next) +{ + list_del_init(&next->queuelist); +} + +void elevator_noop_add_request(request_queue_t *q, struct request *rq, + struct list_head *insert_here) +{ + list_add_tail(&rq->queuelist, &q->queue_head); + + /* + * new merges must not precede this barrier + */ + if (rq->flags & REQ_HARDBARRIER) + q->last_merge = NULL; + else if (!q->last_merge) + q->last_merge = &rq->queuelist; +} + +struct request *elevator_noop_next_request(request_queue_t *q) +{ + if (!list_empty(&q->queue_head)) + return list_entry_rq(q->queue_head.next); + + return NULL; +} + +elevator_t elevator_noop = { + .elevator_merge_fn = elevator_noop_merge, + 
.elevator_merge_req_fn = elevator_noop_merge_requests, + .elevator_next_req_fn = elevator_noop_next_request, + .elevator_add_req_fn = elevator_noop_add_request, + .elevator_name = "noop", +}; + +EXPORT_SYMBOL(elevator_noop); _