Fixes up a significant read-vs-write regression which has crept in since
2.5.57.

- Allow separate tuning of read batch expiry and write batch expiry.

 drivers/block/deadline-iosched.c |   71 +++++++++++++++++++++++++++++++++--------------
 1 files changed, 50 insertions(+), 21 deletions(-)

diff -puN drivers/block/deadline-iosched.c~batch-tuning drivers/block/deadline-iosched.c
--- 25/drivers/block/deadline-iosched.c~batch-tuning	2003-01-28 21:35:15.000000000 -0800
+++ 25-akpm/drivers/block/deadline-iosched.c	2003-01-28 21:35:15.000000000 -0800
@@ -22,10 +22,30 @@
 /*
  * See Documentation/deadline-iosched.txt
  */
-static int read_expire = HZ / 2;	/* max time before a read is submitted. */
-static int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
-static int batch_expire = HZ / 4;	/* Interval between recalculation of
-					   writes_starved */
+
+/*
+ * max time before a read is submitted.
+ */
+static int read_expire = HZ / 2;
+
+/*
+ * ditto for writes, these limits are not hard, even
+ * if the disk is capable of satisfying them.
+ */
+static int write_expire = 5 * HZ;
+
+/*
+ * read_batch_expire describes how long we will allow a stream of reads to
+ * persist before looking to see whether it is time to switch over to writes.
+ */
+static int read_batch_expire = HZ / 20;
+
+/*
+ * write_batch_expire describes how long we will allow a stream of writes to
+ * persist before looking to see whether it is time to switch over to reads.
+ */
+static int write_batch_expire = HZ / 40;
+
 static int writes_starved = 2;	/* max times reads can starve a write */
 
 static const int deadline_hash_shift = 10;
@@ -58,7 +78,7 @@ struct deadline_data {
 	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
 	unsigned long hash_valid_count;	/* barrier hash count */
-	unsigned int batch_expires;	/* time until this batch expires */
+	unsigned int current_batch_expires;
 	int batch_data_dir;		/* current/last batch READ or WRITE */
 	unsigned int starved;		/* times reads have starved writes */
 
@@ -66,8 +86,8 @@ struct deadline_data {
 	 * settings that change how the i/o scheduler behaves
 	 */
 	int fifo_expire[2];
-	int batch_expire;
 	int writes_starved;
+	int batch_expire[2];
 	int front_merges;
 };
 
@@ -500,12 +520,9 @@ static inline int deadline_check_fifo(st
  * deadline_check_batch returns 0 if the current batch has not expired,
  * 1 otherwise.
  */
-static inline int deadline_check_batch(struct deadline_data *dd)
+static inline int deadline_batch_expired(struct deadline_data *dd)
 {
-	if (time_after(jiffies, dd->batch_expires))
-		return 1;
-
-	return 0;
+	return time_after(jiffies, dd->current_batch_expires);
 }
 
 /*
@@ -524,7 +541,7 @@ static int deadline_dispatch_requests(st
 	 */
 	drq = dd->next_drq[dd->batch_data_dir];
 
-	if (drq && !deadline_check_batch(dd))
+	if (drq && !deadline_batch_expired(dd))
 		/* we have a "next request" and our batch is still running */
 		goto dispatch_request;
 
@@ -533,7 +550,7 @@ static int deadline_dispatch_requests(st
 	 * data direction (read / write)
 	 */
 
-	dd->batch_expires = jiffies + dd->batch_expire; /* start a new batch */
+	dd->current_batch_expires = jiffies + dd->batch_expire[WRITE];
 
 	if (reads) {
 		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
@@ -542,6 +559,7 @@ static int deadline_dispatch_requests(st
 			goto dispatch_writes;
 
 		data_dir = dd->batch_data_dir = READ;
+		dd->current_batch_expires = jiffies + dd->batch_expire[READ];
 		other_dir = WRITE;
 
 		goto dispatch_find_request;
@@ -739,7 +757,8 @@ static int deadline_init(request_queue_t
 	dd->hash_valid_count = 1;
 	dd->writes_starved = writes_starved;
 	dd->front_merges = 1;
-	dd->batch_expire = batch_expire;
+	dd->batch_expire[READ] = read_batch_expire;
+	dd->batch_expire[WRITE] = write_batch_expire;
 	e->elevator_data = dd;
 
 	for (i = READ; i <= WRITE; i++) {
@@ -803,7 +822,8 @@ SHOW_FUNCTION(deadline_readexpire_show, 
 SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE]);
 SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved);
 SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges);
-SHOW_FUNCTION(deadline_batchexpire_show, dd->batch_expire);
+SHOW_FUNCTION(deadline_read_batchexpire_show, dd->batch_expire[READ]);
+SHOW_FUNCTION(deadline_write_batchexpire_show, dd->batch_expire[WRITE]);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
@@ -820,7 +840,10 @@ STORE_FUNCTION(deadline_readexpire_store
 STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX);
 STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1);
-STORE_FUNCTION(deadline_batchexpire_store, &dd->batch_expire, 0, INT_MAX);
+STORE_FUNCTION(deadline_read_batchexpire_store,
+			&dd->batch_expire[READ], 0, INT_MAX);
+STORE_FUNCTION(deadline_write_batchexpire_store,
+			&dd->batch_expire[WRITE], 0, INT_MAX);
 #undef STORE_FUNCTION
 
 static struct deadline_fs_entry deadline_readexpire_entry = {
@@ -843,10 +866,15 @@ static struct deadline_fs_entry deadline
 	.show = deadline_frontmerges_show,
 	.store = deadline_frontmerges_store,
 };
-static struct deadline_fs_entry deadline_batchexpire_entry = {
-	.attr = {.name = "batch_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_batchexpire_show,
-	.store = deadline_batchexpire_store,
+static struct deadline_fs_entry deadline_read_batchexpire_entry = {
+	.attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = deadline_read_batchexpire_show,
+	.store = deadline_read_batchexpire_store,
+};
+static struct deadline_fs_entry deadline_write_batchexpire_entry = {
+	.attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = deadline_write_batchexpire_show,
+	.store = deadline_write_batchexpire_store,
 };
 
 static struct attribute *default_attrs[] = {
@@ -854,7 +882,8 @@ static struct attribute *default_attrs[]
 	&deadline_writeexpire_entry.attr,
 	&deadline_writesstarved_entry.attr,
 	&deadline_frontmerges_entry.attr,
-	&deadline_batchexpire_entry.attr,
+	&deadline_read_batchexpire_entry.attr,
+	&deadline_write_batchexpire_entry.attr,
 	NULL,
 };
 
_
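
For reference, the mechanism the patch introduces is small: each batch carries
a single expiry deadline, and that deadline is loaded from a per-direction
batch_expire[READ]/batch_expire[WRITE] array when a batch is started.  The
sketch below is a stand-alone user-space illustration of that scheme, not part
of the patch; the names (batch_state, now_ticks, ticks_after, batch_expired,
start_batch) are made up for the example, and a millisecond counter stands in
for jiffies/time_after().

/*
 * Illustrative user-space sketch (not kernel code) of the direction-indexed
 * batch expiry scheme: one expiry deadline per batch, with separate read and
 * write batch lengths looked up by data direction.
 */
#include <stdio.h>
#include <time.h>

#define READ	0
#define WRITE	1

struct batch_state {
	unsigned long current_batch_expires;	/* tick at which this batch ends */
	int batch_data_dir;			/* READ or WRITE */
	unsigned long batch_expire[2];		/* ticks: [READ], [WRITE] */
};

/* Millisecond tick counter standing in for the kernel's jiffies. */
static unsigned long now_ticks(void)
{
	return (unsigned long)(clock() * 1000 / CLOCKS_PER_SEC);
}

/* Rough analogue of time_after(): signed compare so wraparound is safe. */
static int ticks_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

/* Counterpart of deadline_batch_expired() in the patch. */
static int batch_expired(const struct batch_state *bs)
{
	return ticks_after(now_ticks(), bs->current_batch_expires);
}

/* Start a new batch in the given direction, using that direction's expiry. */
static void start_batch(struct batch_state *bs, int data_dir)
{
	bs->batch_data_dir = data_dir;
	bs->current_batch_expires = now_ticks() + bs->batch_expire[data_dir];
}

int main(void)
{
	/* Roughly the patch defaults in milliseconds, assuming HZ=1000. */
	struct batch_state bs = {
		.batch_expire = { [READ] = 50, [WRITE] = 25 },
	};

	start_batch(&bs, READ);
	printf("read batch expired yet?  %d\n", batch_expired(&bs));

	start_batch(&bs, WRITE);
	printf("write batch expired yet? %d\n", batch_expired(&bs));
	return 0;
}

With the defaults chosen in the patch (HZ/20 for reads versus HZ/40 for
writes) a read batch is allowed to run for twice as long as a write batch
before the scheduler considers switching direction, which appears to be where
the restored read preference comes from.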