Async block read

Add __bread_wq(), a wait-queue aware variant of __bread() for the AIO retry
path: instead of sleeping on a locked or not-uptodate buffer it returns
ERR_PTR(-EIOCBRETRY) so the caller can back out and be retried once the
buffer becomes available.  Also add the sb_bread_wq() wrapper and make
brelse()/bforget() tolerate ERR_PTR values.


 fs/buffer.c                 |   31 +++++++++++++++++++++++++-----
 include/linux/buffer_head.h |   13 ++++++++++---
 2 files changed, 36 insertions(+), 8 deletions(-)

diff -puN fs/buffer.c~aio-06-bread_wq fs/buffer.c
--- 25/fs/buffer.c~aio-06-bread_wq	2003-10-02 00:50:25.000000000 -0700
+++ 25-akpm/fs/buffer.c	2003-10-02 00:50:25.000000000 -0700
@@ -1315,9 +1315,12 @@ void __bforget(struct buffer_head *bh)
 	__brelse(bh);
 }
 
-static struct buffer_head *__bread_slow(struct buffer_head *bh)
+static struct buffer_head *__bread_slow_wq(struct buffer_head *bh,
+			wait_queue_t *wait)
 {
-	lock_buffer(bh);
+	if (-EIOCBRETRY == lock_buffer_wq(bh, wait))
+		return ERR_PTR(-EIOCBRETRY);
+
 	if (buffer_uptodate(bh)) {
 		unlock_buffer(bh);
 		return bh;
@@ -1327,7 +1330,8 @@ static struct buffer_head *__bread_slow(
 		get_bh(bh);
 		bh->b_end_io = end_buffer_read_sync;
 		submit_bh(READ, bh);
-		wait_on_buffer(bh);
+		if (-EIOCBRETRY == wait_on_buffer_wq(bh, wait))
+			return ERR_PTR(-EIOCBRETRY);
 		if (buffer_uptodate(bh))
 			return bh;
 	}
@@ -1335,6 +1339,11 @@ static struct buffer_head *__bread_slow(
 	return NULL;
 }
 
+static inline struct buffer_head *__bread_slow(struct buffer_head *bh)
+{
+	return __bread_slow_wq(bh, NULL);
+}
+
 /*
  * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
  * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
@@ -1522,6 +1531,18 @@ __bread(struct block_device *bdev, secto
 }
 EXPORT_SYMBOL(__bread);
 
+struct buffer_head *
+__bread_wq(struct block_device *bdev, sector_t block, int size,
+		wait_queue_t *wait)
+{
+	struct buffer_head *bh = __getblk(bdev, block, size);
+
+	if (!buffer_uptodate(bh))
+		bh = __bread_slow_wq(bh, wait);
+	return bh;
+}
+EXPORT_SYMBOL(__bread_wq);
+
 /*
  * invalidate_bh_lrus() is called rarely - at unmount. Because it is only for
  * unmount it only needs to ensure that all buffers from the target device are
@@ -1999,8 +2020,8 @@ static int __block_prepare_write(struct
 	/*
 	 * If we issued read requests - let them complete.
 	 */
-	while(wait_bh > wait) {
-		if (err = wait_on_buffer_wq(*--wait_bh, current->io_wait))
+	while (wait_bh > wait) {
+		if ((err = wait_on_buffer_wq(*--wait_bh, current->io_wait)))
 			return err;
 		if (!buffer_uptodate(*wait_bh))
 			return -EIO;
diff -puN include/linux/buffer_head.h~aio-06-bread_wq include/linux/buffer_head.h
--- 25/include/linux/buffer_head.h~aio-06-bread_wq	2003-10-02 00:50:25.000000000 -0700
+++ 25-akpm/include/linux/buffer_head.h	2003-10-02 00:50:25.000000000 -0700
@@ -174,6 +174,8 @@ void __brelse(struct buffer_head *);
 void __bforget(struct buffer_head *);
 void __breadahead(struct block_device *, sector_t block, int size);
 struct buffer_head *__bread(struct block_device *, sector_t block, int size);
+struct buffer_head *__bread_wq(struct block_device *, sector_t block,
+			int size, wait_queue_t *wait);
 struct buffer_head *alloc_buffer_head(int gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void FASTCALL(unlock_buffer(struct buffer_head *bh));
@@ -231,13 +233,13 @@ static inline void put_bh(struct buffer_
 
 static inline void brelse(struct buffer_head *bh)
 {
-	if (bh)
+	if (bh && !IS_ERR(bh))
 		__brelse(bh);
 }
 
 static inline void bforget(struct buffer_head *bh)
 {
-	if (bh)
+	if (bh && !IS_ERR(bh))
 		__bforget(bh);
 }
 
@@ -254,7 +256,12 @@ sb_breadahead(struct super_block *sb, se
 }
 
 static inline struct buffer_head *
-sb_getblk(struct super_block *sb, sector_t block)
+sb_bread_wq(struct super_block *sb, sector_t block, wait_queue_t *wait)
+{
+	return __bread_wq(sb->s_bdev, block, sb->s_blocksize, wait);
+}
+
+static inline struct buffer_head *sb_getblk(struct super_block *sb, sector_t block)
 {
 	return __getblk(sb->s_bdev, block, sb->s_blocksize);
 }
_
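
A minimal usage sketch (not part of the patch) of the new interface: a
hypothetical filesystem read helper calls sb_bread_wq() and propagates
-EIOCBRETRY back up the AIO retry path.  The helper name and surrounding
context are invented for illustration; passing current->io_wait mirrors the
__block_prepare_write() hunk above, while passing NULL gives the old
synchronous sb_bread() behaviour.

	#include <linux/fs.h>
	#include <linux/buffer_head.h>
	#include <linux/err.h>
	#include <linux/sched.h>

	static int example_read_block(struct super_block *sb, sector_t block)
	{
		struct buffer_head *bh;

		/*
		 * Returns ERR_PTR(-EIOCBRETRY) instead of sleeping when the
		 * buffer is locked or must first be read in from disk.
		 */
		bh = sb_bread_wq(sb, block, current->io_wait);
		if (IS_ERR(bh))
			return PTR_ERR(bh);	/* caller is retried later */
		if (!bh)
			return -EIO;		/* real I/O error */

		/* ... use bh->b_data ... */

		brelse(bh);
		return 0;
	}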