diff -urpN -X /home/fletch/.diff.exclude 715-fs_aio_5_wrdown_wq/fs/buffer.c 716-fs_aio_6_bread_wq/fs/buffer.c
--- 715-fs_aio_5_wrdown_wq/fs/buffer.c	Sat Jun 14 20:42:00 2003
+++ 716-fs_aio_6_bread_wq/fs/buffer.c	Sat Jun 14 20:44:25 2003
@@ -119,23 +119,35 @@ void unlock_buffer(struct buffer_head *b
  * from becoming locked again - you have to lock it yourself
  * if you want to preserve its state.
  */
-void __wait_on_buffer(struct buffer_head * bh)
+int __wait_on_buffer_wq(struct buffer_head * bh, wait_queue_t *wait)
 {
 	wait_queue_head_t *wqh = bh_waitq_head(bh);
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT(local_wait);
+
+	if (!wait)
+		wait = &local_wait;
 
 	if (atomic_read(&bh->b_count) == 0 &&
 			(!bh->b_page || !PageLocked(bh->b_page)))
 		buffer_error();
 
 	do {
-		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(wqh, wait, TASK_UNINTERRUPTIBLE);
 		if (buffer_locked(bh)) {
 			blk_run_queues();
+			if (!is_sync_wait(wait)) {
+				return -EIOCBRETRY;
+			}
 			io_schedule();
 		}
 	} while (buffer_locked(bh));
-	finish_wait(wqh, &wait);
+	finish_wait(wqh, wait);
+	return 0;
+}
+
+void __wait_on_buffer(struct buffer_head * bh)
+{
+	__wait_on_buffer_wq(bh, NULL);
 }
 
 static void
@@ -1273,9 +1285,12 @@ void __bforget(struct buffer_head *bh)
 	__brelse(bh);
 }
 
-static struct buffer_head *__bread_slow(struct buffer_head *bh)
+static struct buffer_head *__bread_slow_wq(struct buffer_head *bh,
+					wait_queue_t *wait)
 {
-	lock_buffer(bh);
+	if (-EIOCBRETRY == lock_buffer_wq(bh, wait))
+		return ERR_PTR(-EIOCBRETRY);
+
 	if (buffer_uptodate(bh)) {
 		unlock_buffer(bh);
 		return bh;
@@ -1285,7 +1300,8 @@ static struct buffer_head *__bread_slow(
 		get_bh(bh);
 		bh->b_end_io = end_buffer_io_sync;
 		submit_bh(READ, bh);
-		wait_on_buffer(bh);
+		if (-EIOCBRETRY == wait_on_buffer_wq(bh, wait))
+			return ERR_PTR(-EIOCBRETRY);
 		if (buffer_uptodate(bh))
 			return bh;
 	}
@@ -1293,6 +1309,11 @@ static struct buffer_head *__bread_slow(
 	return NULL;
 }
 
+static inline struct buffer_head *__bread_slow(struct buffer_head *bh)
+{
+	return __bread_slow_wq(bh, NULL);
+}
+
 /*
  * Per-cpu buffer LRU implementation. To reduce the cost of __find_get_block().
  * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
@@ -1467,6 +1488,18 @@ __bread(struct block_device *bdev, secto
 		bh = __bread_slow(bh);
 	return bh;
 }
+
+
+struct buffer_head *
+__bread_wq(struct block_device *bdev, sector_t block, int size,
+			wait_queue_t *wait)
+{
+	struct buffer_head *bh = __getblk(bdev, block, size);
+
+	if (!buffer_uptodate(bh))
+		bh = __bread_slow_wq(bh, wait);
+	return bh;
+}
 EXPORT_SYMBOL(__bread);
 
 /*
@@ -1905,8 +1938,11 @@ static int __block_prepare_write(struct
 			clear_buffer_new(bh);
 		if (!buffer_mapped(bh)) {
 			err = get_block(inode, block, bh, 1);
-			if (err)
+			if (err) {
+				if (-EIOCBRETRY == err)
+					pr_debug("get_block queued\n");
 				goto out;
+			}
 			if (buffer_new(bh)) {
 				clear_buffer_new(bh);
 				unmap_underlying_metadata(bh->b_bdev,
@@ -1948,6 +1984,8 @@ static int __block_prepare_write(struct
 	 * If we issued read requests - let them complete.
 	 */
 	while(wait_bh > wait) {
+		if (!is_sync_wait(current->io_wait))
+			printk("block_prepare_write: wait on buffer\n");
 		wait_on_buffer(*--wait_bh);
 		if (!buffer_uptodate(*wait_bh))
 			return -EIO;
diff -urpN -X /home/fletch/.diff.exclude 715-fs_aio_5_wrdown_wq/include/linux/buffer_head.h 716-fs_aio_6_bread_wq/include/linux/buffer_head.h
--- 715-fs_aio_5_wrdown_wq/include/linux/buffer_head.h	Sat May 10 18:35:03 2003
+++ 716-fs_aio_6_bread_wq/include/linux/buffer_head.h	Sat Jun 14 20:44:25 2003
@@ -158,6 +158,7 @@ void mark_buffer_async_write(struct buff
 void invalidate_bdev(struct block_device *, int);
 int sync_blockdev(struct block_device *bdev);
 void __wait_on_buffer(struct buffer_head *);
+int __wait_on_buffer_wq(struct buffer_head *, wait_queue_t *wait);
 wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
 void wake_up_buffer(struct buffer_head *bh);
 int fsync_bdev(struct block_device *);
@@ -168,6 +169,8 @@ struct buffer_head * __getblk(struct blo
 void __brelse(struct buffer_head *);
 void __bforget(struct buffer_head *);
 struct buffer_head *__bread(struct block_device *, sector_t block, int size);
+struct buffer_head *__bread_wq(struct block_device *, sector_t block,
+				int size, wait_queue_t *wait);
 struct buffer_head *alloc_buffer_head(int gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void FASTCALL(unlock_buffer(struct buffer_head *bh));
@@ -225,13 +228,13 @@ static inline void put_bh(struct buffer_
 
 static inline void brelse(struct buffer_head *bh)
 {
-	if (bh)
+	if (bh && !IS_ERR(bh))
 		__brelse(bh);
 }
 
 static inline void bforget(struct buffer_head *bh)
 {
-	if (bh)
+	if (bh && !IS_ERR(bh))
 		__bforget(bh);
 }
 
@@ -242,7 +245,12 @@ sb_bread(struct super_block *sb, sector_
 }
 
 static inline struct buffer_head *
-sb_getblk(struct super_block *sb, sector_t block)
+sb_bread_wq(struct super_block *sb, sector_t block, wait_queue_t *wait)
+{
+	return __bread_wq(sb->s_bdev, block, sb->s_blocksize, wait);
+}
+
+static inline struct buffer_head *sb_getblk(struct super_block *sb, sector_t block)
 {
 	return __getblk(sb->s_bdev, block, sb->s_blocksize);
 }
@@ -272,10 +280,26 @@ static inline void wait_on_buffer(struct
 		__wait_on_buffer(bh);
 }
 
+static inline int wait_on_buffer_wq(struct buffer_head *bh, wait_queue_t *wait)
+{
+	if (buffer_locked(bh))
+		return __wait_on_buffer_wq(bh, wait);
+
+	return 0;
+}
+
 static inline void lock_buffer(struct buffer_head *bh)
 {
 	while (test_set_buffer_locked(bh))
 		__wait_on_buffer(bh);
+}
+
+static inline int lock_buffer_wq(struct buffer_head *bh, wait_queue_t *wait)
+{
+	if (test_set_buffer_locked(bh))
+		return __wait_on_buffer_wq(bh, wait);
+
+	return 0;
 }
 
 #endif /* _LINUX_BUFFER_HEAD_H */
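
Usage sketch (illustrative only, not part of the patch): the point of the _wq variants is that a retry-based AIO caller can pass its iocb's wait queue entry down the call chain instead of sleeping in TASK_UNINTERRUPTIBLE. When the buffer is locked and the wait entry is not a synchronous one, __wait_on_buffer_wq() leaves the entry queued and returns -EIOCBRETRY; the later unlock_buffer() wakeup fires the entry's callback and the operation is retried from the top. The helper below, my_read_block(), is a hypothetical caller invented for this note; only sb_bread_wq(), brelse(), and the ERR_PTR/IS_ERR convention come from the patch itself.

	/*
	 * Hypothetical caller (not in the patch): read one block,
	 * propagating -EIOCBRETRY instead of blocking when the
	 * caller supplied a non-sync wait queue entry.
	 */
	static int my_read_block(struct super_block *sb, sector_t block,
				 wait_queue_t *wait)
	{
		struct buffer_head *bh = sb_bread_wq(sb, block, wait);

		if (IS_ERR(bh))
			return PTR_ERR(bh);	/* -EIOCBRETRY: retried on wakeup */
		if (!bh)
			return -EIO;		/* the read itself failed */

		/* ... consume bh->b_data here ... */

		brelse(bh);			/* release the buffer */
		return 0;
	}

Note the error convention this patch introduces: __bread_wq() can now return ERR_PTR(-EIOCBRETRY) in addition to NULL, which is why brelse() and bforget() grow the !IS_ERR(bh) check, so callers that release unconditionally stay safe. A synchronous caller that passes wait == NULL falls back to a stack-local DEFINE_WAIT and gets the old blocking behaviour unchanged.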