From: Nick Piggin With the rbtree-based sortlist the CPU cost of doing frontmerges wasn't measurable even with 8192 request slots. Cache-wise I'd say it wouldn't make much difference either because if we don't get a merge we still have to traverse the same rbtree path on insert anyway. drivers/block/as-iosched.c | 27 +++++++-------------------- 1 files changed, 7 insertions(+), 20 deletions(-) diff -puN drivers/block/as-iosched.c~as-remove-frontmerge drivers/block/as-iosched.c --- 25/drivers/block/as-iosched.c~as-remove-frontmerge 2003-03-19 22:34:22.000000000 -0800 +++ 25-akpm/drivers/block/as-iosched.c 2003-03-19 22:34:28.000000000 -0800 @@ -152,7 +152,6 @@ struct as_data { */ unsigned long fifo_expire[2]; unsigned long batch_expire[2]; - unsigned long front_merges; unsigned long antic_expire; }; @@ -1379,6 +1378,7 @@ static int as_merge(request_queue_t *q, struct list_head **insert, struct bio *bio) { struct as_data *ad = q->elevator.elevator_data; + sector_t rb_key = bio->bi_sector + bio_sectors(bio); struct request *__rq; int ret; @@ -1407,17 +1407,13 @@ as_merge(request_queue_t *q, struct list /* * check for front merge */ - if (ad->front_merges) { - sector_t rb_key = bio->bi_sector + bio_sectors(bio); + __rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio)); + if (__rq) { + BUG_ON(rb_key != rq_rb_key(__rq)); - __rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio)); - if (__rq) { - BUG_ON(rb_key != rq_rb_key(__rq)); - - if (elv_rq_merge_ok(__rq, bio)) { - ret = ELEVATOR_FRONT_MERGE; - goto out; - } + if (elv_rq_merge_ok(__rq, bio)) { + ret = ELEVATOR_FRONT_MERGE; + goto out; } } @@ -1600,7 +1596,6 @@ static int as_init(request_queue_t *q, e ad->fifo_expire[READ] = read_expire; ad->fifo_expire[WRITE] = write_expire; ad->hash_valid_count = 1; - ad->front_merges = 1; ad->antic_expire = antic_expire; ad->batch_expire[READ] = read_batch_expire; ad->batch_expire[WRITE] = write_batch_expire; @@ -1667,7 +1662,6 @@ static ssize_t __FUNC(struct as_data *ad } 
SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[READ]); SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[WRITE]); -SHOW_FUNCTION(as_frontmerges_show, ad->front_merges); SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire); SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[READ]); SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[WRITE]); @@ -1685,7 +1679,6 @@ static ssize_t __FUNC(struct as_data *ad } STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[READ], 0, INT_MAX); STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[WRITE], 0, INT_MAX); -STORE_FUNCTION(as_frontmerges_store, &ad->front_merges, 0, 1); STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX); STORE_FUNCTION(as_read_batchexpire_store, &ad->batch_expire[READ], 0, INT_MAX); @@ -1703,11 +1696,6 @@ static struct as_fs_entry as_writeexpire .show = as_writeexpire_show, .store = as_writeexpire_store, }; -static struct as_fs_entry as_frontmerges_entry = { - .attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR }, - .show = as_frontmerges_show, - .store = as_frontmerges_store, -}; static struct as_fs_entry as_anticexpire_entry = { .attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR }, .show = as_anticexpire_show, @@ -1727,7 +1715,6 @@ static struct as_fs_entry as_write_batch static struct attribute *default_attrs[] = { &as_readexpire_entry.attr, &as_writeexpire_entry.attr, - &as_frontmerges_entry.attr, &as_anticexpire_entry.attr, &as_read_batchexpire_entry.attr, &as_write_batchexpire_entry.attr, _