From: "Chen, Kenneth W" It's kind of redundant that queue_congestion_on/off_threshold gets calculated on every I/O and they produce the same number over and over again unless q->nr_requests gets changed (which is probably a very rare event). We can cache those values in the request_queue structure. --- 25-akpm/drivers/block/ll_rw_blk.c | 29 ++++++++++++++++------------- 25-akpm/include/linux/blkdev.h | 2 ++ 2 files changed, 18 insertions(+), 13 deletions(-) diff -puN drivers/block/ll_rw_blk.c~cache-queue_congestion_on-off_threshold drivers/block/ll_rw_blk.c --- 25/drivers/block/ll_rw_blk.c~cache-queue_congestion_on-off_threshold 2004-05-05 23:18:01.057452016 -0700 +++ 25-akpm/drivers/block/ll_rw_blk.c 2004-05-05 23:31:43.632401616 -0700 @@ -70,14 +70,7 @@ EXPORT_SYMBOL(blk_max_pfn); */ static inline int queue_congestion_on_threshold(struct request_queue *q) { - int ret; - - ret = q->nr_requests - (q->nr_requests / 8) + 1; - - if (ret > q->nr_requests) - ret = q->nr_requests; - - return ret; + return q->nr_congestion_on; } /* @@ -85,14 +78,22 @@ static inline int queue_congestion_on_th */ static inline int queue_congestion_off_threshold(struct request_queue *q) { - int ret; + return q->nr_congestion_off; +} - ret = q->nr_requests - (q->nr_requests / 8) - 1; +static void blk_queue_congestion_threshold(struct request_queue *q) +{ + int nr; - if (ret < 1) - ret = 1; + nr = q->nr_requests - (q->nr_requests / 8) + 1; + if (nr > q->nr_requests) + nr = q->nr_requests; + q->nr_congestion_on = nr; - return ret; + nr = q->nr_requests - (q->nr_requests / 8) - 1; + if (nr < 1) + nr = 1; + q->nr_congestion_off = nr; } void clear_backing_dev_congested(struct backing_dev_info *bdi, int rw) @@ -235,6 +236,7 @@ void blk_queue_make_request(request_queu blk_queue_max_sectors(q, MAX_SECTORS); blk_queue_hardsect_size(q, 512); blk_queue_dma_alignment(q, 511); + blk_queue_congestion_threshold(q); q->unplug_thresh = 4; /* hmm */ q->unplug_delay = (3 * HZ) / 1000; /* 3 milliseconds */ @@ -2953,6 +2955,7 @@ queue_requests_store(struct request_queu int ret = queue_var_store(&q->nr_requests, page, count); if (q->nr_requests < BLKDEV_MIN_RQ) q->nr_requests = BLKDEV_MIN_RQ; + blk_queue_congestion_threshold(q); if (rl->count[READ] >= queue_congestion_on_threshold(q)) set_queue_congested(q, READ); diff -puN include/linux/blkdev.h~cache-queue_congestion_on-off_threshold include/linux/blkdev.h --- 25/include/linux/blkdev.h~cache-queue_congestion_on-off_threshold 2004-05-05 23:18:01.059451712 -0700 +++ 25-akpm/include/linux/blkdev.h 2004-05-05 23:18:01.066450648 -0700 @@ -334,6 +334,8 @@ struct request_queue * queue settings */ unsigned long nr_requests; /* Max # of requests */ + unsigned int nr_congestion_on; + unsigned int nr_congestion_off; unsigned short max_sectors; unsigned short max_phys_segments; _