diff -urpN -X /home/fletch/.diff.exclude 712-fs_aio_2_read/drivers/block/ll_rw_blk.c 713-fs_aio_3_write/drivers/block/ll_rw_blk.c
--- 712-fs_aio_2_read/drivers/block/ll_rw_blk.c	Sat Jun 14 18:37:27 2003
+++ 713-fs_aio_3_write/drivers/block/ll_rw_blk.c	Sat Jun 14 20:44:20 2003
@@ -1531,16 +1531,29 @@ void blk_put_request(struct request *req
  * If no queues are congested then just wait for the next request to be
  * returned.
  */
-void blk_congestion_wait(int rw, long timeout)
+int blk_congestion_wait_wq(int rw, long timeout, wait_queue_t *wait)
 {
-	DEFINE_WAIT(wait);
 	wait_queue_head_t *wqh = &congestion_wqh[rw];
+	DEFINE_WAIT(local_wait);
+
+	if (!wait)
+		wait = &local_wait;
 
 	blk_run_queues();
-	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+	prepare_to_wait(wqh, wait, TASK_UNINTERRUPTIBLE);
+	if (!is_sync_wait(wait))
+		return -EIOCBRETRY;
+
 	io_schedule_timeout(timeout);
-	finish_wait(wqh, &wait);
+	finish_wait(wqh, wait);
+	return 0;
+}
+
+void blk_congestion_wait(int rw, long timeout)
+{
+	blk_congestion_wait_wq(rw, timeout, NULL);
 }
+
 
 /*
  * Has to be called with the request spinlock acquired
diff -urpN -X /home/fletch/.diff.exclude 712-fs_aio_2_read/include/linux/blkdev.h 713-fs_aio_3_write/include/linux/blkdev.h
--- 712-fs_aio_2_read/include/linux/blkdev.h	Sat Jun 14 18:37:37 2003
+++ 713-fs_aio_3_write/include/linux/blkdev.h	Sat Jun 14 20:44:20 2003
@@ -455,6 +455,7 @@ extern void blk_queue_free_tags(request_
 extern int blk_queue_resize_tags(request_queue_t *, int);
 extern void blk_queue_invalidate_tags(request_queue_t *);
 extern void blk_congestion_wait(int rw, long timeout);
+extern int blk_congestion_wait_wq(int rw, long timeout, wait_queue_t *wait);
 extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
 extern void blk_rq_prep_restart(struct request *);
 
diff -urpN -X /home/fletch/.diff.exclude 712-fs_aio_2_read/include/linux/writeback.h 713-fs_aio_3_write/include/linux/writeback.h
--- 712-fs_aio_2_read/include/linux/writeback.h	Sat Jun 14 18:37:38 2003
+++ 713-fs_aio_3_write/include/linux/writeback.h	Sat Jun 14 20:44:20 2003
@@ -84,8 +84,8 @@ int dirty_writeback_centisecs_handler(st
 		void *, size_t *);
 
 void page_writeback_init(void);
-void balance_dirty_pages(struct address_space *mapping);
-void balance_dirty_pages_ratelimited(struct address_space *mapping);
+int balance_dirty_pages(struct address_space *mapping);
+int balance_dirty_pages_ratelimited(struct address_space *mapping);
 int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0);
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
 
diff -urpN -X /home/fletch/.diff.exclude 712-fs_aio_2_read/mm/filemap.c 713-fs_aio_3_write/mm/filemap.c
--- 712-fs_aio_2_read/mm/filemap.c	Sat Jun 14 20:44:20 2003
+++ 713-fs_aio_3_write/mm/filemap.c	Sat Jun 14 20:44:20 2003
@@ -423,8 +423,8 @@ struct page *find_trylock_page(struct ad
  *
  * Returns zero if the page was not present. find_lock_page() may sleep.
  */
-struct page *find_lock_page(struct address_space *mapping,
-				unsigned long offset)
+struct page *find_lock_page_wq(struct address_space *mapping,
+				unsigned long offset, wait_queue_t *wait)
 {
 	struct page *page;
 
@@ -435,7 +435,10 @@ repeat:
 		page_cache_get(page);
 		if (TestSetPageLocked(page)) {
 			spin_unlock(&mapping->page_lock);
-			lock_page(page);
+			if (-EIOCBRETRY == lock_page_wq(page, wait)) {
+				page_cache_release(page);
+				return ERR_PTR(-EIOCBRETRY);
+			}
 			spin_lock(&mapping->page_lock);
 			/* Has the page been truncated while we slept?
 			 */
@@ -450,6 +453,12 @@ repeat:
 	return page;
 }
 
+struct page *find_lock_page(struct address_space *mapping,
+				unsigned long offset)
+{
+	return find_lock_page_wq(mapping, offset, NULL);
+}
+
 /**
  * find_or_create_page - locate or add a pagecache page
  *
@@ -1416,7 +1425,9 @@ __grab_cache_page(struct address_space *
 	int err;
 	struct page *page;
 repeat:
-	page = find_lock_page(mapping, index);
+	page = find_lock_page_wq(mapping, index, current->io_wait);
+	if (IS_ERR(page))
+		return page;
 	if (!page) {
 		if (!*cached_page) {
 			*cached_page = page_cache_alloc(mapping);
@@ -1755,6 +1766,10 @@ generic_file_aio_write_nolock(struct kio
 		fault_in_pages_readable(buf, bytes);
 
 		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
+		if (IS_ERR(page)) {
+			status = PTR_ERR(page);
+			break;
+		}
 		if (!page) {
 			status = -ENOMEM;
 			break;
@@ -1762,6 +1777,8 @@ generic_file_aio_write_nolock(struct kio
 
 		status = a_ops->prepare_write(file, page, offset, offset+bytes);
 		if (unlikely(status)) {
+			if (-EIOCBRETRY == status)
+				pr_debug("queued prepare_write\n");
 			/*
 			 * prepare_write() may have instantiated a few blocks
 			 * outside i_size.  Trim these off again.
@@ -1804,7 +1821,11 @@ generic_file_aio_write_nolock(struct kio
 		page_cache_release(page);
 		if (status < 0)
 			break;
-		balance_dirty_pages_ratelimited(mapping);
+		status = balance_dirty_pages_ratelimited(mapping);
+		if (status < 0) {
+			pr_debug("async balance_dirty_pages\n");
+			break;
+		}
 		cond_resched();
 	} while (count);
 	*ppos = pos;
diff -urpN -X /home/fletch/.diff.exclude 712-fs_aio_2_read/mm/page-writeback.c 713-fs_aio_3_write/mm/page-writeback.c
--- 712-fs_aio_2_read/mm/page-writeback.c	Sat Jun 14 18:37:39 2003
+++ 713-fs_aio_3_write/mm/page-writeback.c	Sat Jun 14 20:44:20 2003
@@ -144,7 +144,7 @@ get_dirty_limits(struct page_state *ps,
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
-void balance_dirty_pages(struct address_space *mapping)
+int balance_dirty_pages(struct address_space *mapping)
 {
 	struct page_state ps;
 	long nr_reclaimable;
@@ -161,6 +161,7 @@ void balance_dirty_pages(struct address_
 			.sync_mode	= WB_SYNC_NONE,
 			.older_than_this = NULL,
 			.nr_to_write	= write_chunk,
+			.nonblocking	= !is_sync_wait(current->io_wait)
 		};
 
 		get_dirty_limits(&ps, &background_thresh, &dirty_thresh);
@@ -187,7 +188,11 @@ void balance_dirty_pages(struct address_
 			if (pages_written >= write_chunk)
 				break;		/* We've done our duty */
 		}
-		blk_congestion_wait(WRITE, HZ/10);
+		if (-EIOCBRETRY == blk_congestion_wait_wq(WRITE, HZ/10,
+				current->io_wait)) {
+			pr_debug("async blk congestion wait\n");
+			return -EIOCBRETRY;
+		}
 	}
 
 	if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
@@ -195,6 +200,8 @@ void balance_dirty_pages(struct address_
 
 	if (!writeback_in_progress(bdi) && nr_reclaimable > background_thresh)
 		pdflush_operation(background_writeout, 0);
+
+	return 0;
 }
 
 /**
@@ -210,7 +217,7 @@ void balance_dirty_pages(struct address_
 * decrease the ratelimiting by a lot, to prevent individual processes from
 * overshooting the limit by (ratelimit_pages) each.
 */
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+int balance_dirty_pages_ratelimited(struct address_space *mapping)
 {
 	static DEFINE_PER_CPU(int, ratelimits) = 0;
 	int cpu;
@@ -224,10 +231,10 @@ void balance_dirty_pages_ratelimited(str
 	if (per_cpu(ratelimits, cpu)++ >= ratelimit) {
 		per_cpu(ratelimits, cpu) = 0;
 		put_cpu();
-		balance_dirty_pages(mapping);
-		return;
+		return balance_dirty_pages(mapping);
 	}
 	put_cpu();
+	return 0;
 }
 
 /*
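
A note for readers following the patch: the convention it threads through the write path is that each primitive which might block takes an optional wait-queue entry. Synchronous callers pass NULL and sleep exactly as before; AIO callers pass current->io_wait, get parked on the wait queue, and receive -EIOCBRETRY so the iocb can be re-driven when the wakeup fires. The sketch below is a minimal user-space C model of that convention, not kernel code; every name in it (demo_wait, demo_congestion_wait, EIOCBRETRY_SIM, the congestion countdown) is invented for illustration.

#include <stdio.h>

#define EIOCBRETRY_SIM 530	/* stand-in for the kernel's EIOCBRETRY code */

struct demo_wait { const char *owner; };	/* stand-in for wait_queue_t */

static int congested = 3;	/* pretend the queue stays congested 3 times */

/* Models blk_congestion_wait_wq(): sleep if synchronous, retry if not. */
static int demo_congestion_wait(struct demo_wait *wait)
{
	if (congested-- > 0) {
		if (wait)		/* async caller: never sleep here */
			return -EIOCBRETRY_SIM;
		congested = 0;		/* sync caller: pretend we slept it off */
	}
	return 0;
}

int main(void)
{
	struct demo_wait w = { .owner = "iocb-1" };
	int status;

	/* The aio core re-drives the iocb on wakeup; model that as a loop. */
	while ((status = demo_congestion_wait(&w)) == -EIOCBRETRY_SIM)
		printf("%s: got -EIOCBRETRY, retrying after wakeup\n", w.owner);

	printf("congestion cleared, write path proceeds (status=%d)\n", status);
	return 0;
}

The same shape explains the filemap.c and page-writeback.c changes above: find_lock_page_wq() and the new int return of balance_dirty_pages() simply propagate the retry code up to generic_file_aio_write_nolock(), which breaks out of its copy loop instead of blocking.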