From: Suparna Bhattacharya, Hugh Dickins. Actually lock the buffer: lock_buffer_wq() now loops, retrying test_set_buffer_locked() after each wait (previously it waited once and returned without reacquiring the lock); the non-wq helpers (wait_on_buffer, lock_buffer, and the pagemap.h lock/wait inlines) become thin wrappers around their _wq counterparts called with a NULL wait descriptor; AIO fput completion work is queued on its own dedicated workqueue (aio_fput_wq) rather than aio_wq; and the now-unused wait_on_page_bit() is removed from mm/filemap.c. fs/aio.c | 4 +++- include/linux/buffer_head.h | 24 +++++++++++++----------- include/linux/pagemap.h | 35 ++++++++++++++++------------------- mm/filemap.c | 6 ------ 4 files changed, 32 insertions(+), 37 deletions(-) diff -puN fs/aio.c~lock_buffer_wq-fix fs/aio.c --- 25/fs/aio.c~lock_buffer_wq-fix 2003-08-20 23:30:30.000000000 -0700 +++ 25-akpm/fs/aio.c 2003-08-20 23:30:30.000000000 -0700 @@ -53,6 +53,7 @@ static kmem_cache_t *kiocb_cachep; static kmem_cache_t *kioctx_cachep; static struct workqueue_struct *aio_wq; +static struct workqueue_struct *aio_fput_wq; /* Used for rare fput completion. */ static void aio_fput_routine(void *); @@ -80,6 +81,7 @@ static int __init aio_setup(void) panic("unable to create kioctx cache"); aio_wq = create_workqueue("aio"); + aio_fput_wq = create_workqueue("aio_fput"); pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page)); @@ -507,7 +509,7 @@ static int __aio_put_req(struct kioctx * spin_lock(&fput_lock); list_add(&req->ki_list, &fput_head); spin_unlock(&fput_lock); - queue_work(aio_wq, &fput_work); + queue_work(aio_fput_wq, &fput_work); } else really_put_req(ctx, req); return 1; diff -puN include/linux/buffer_head.h~lock_buffer_wq-fix include/linux/buffer_head.h --- 25/include/linux/buffer_head.h~lock_buffer_wq-fix 2003-08-20 23:30:30.000000000 -0700 +++ 25-akpm/include/linux/buffer_head.h 2003-08-20 23:30:30.000000000 -0700 @@ -280,32 +280,34 @@ map_bh(struct buffer_head *bh, struct su * __wait_on_buffer() just to trip a debug check. Because debug code in inline * functions is bloaty. 
*/ -static inline void wait_on_buffer(struct buffer_head *bh) -{ - if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0) - __wait_on_buffer(bh); -} static inline int wait_on_buffer_wq(struct buffer_head *bh, wait_queue_t *wait) { - if (buffer_locked(bh)) + if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0) return __wait_on_buffer_wq(bh, wait); return 0; } -static inline void lock_buffer(struct buffer_head *bh) +static inline void wait_on_buffer(struct buffer_head *bh) { - while (test_set_buffer_locked(bh)) - __wait_on_buffer(bh); + wait_on_buffer_wq(bh, NULL); } static inline int lock_buffer_wq(struct buffer_head *bh, wait_queue_t *wait) { - if (test_set_buffer_locked(bh)) - return __wait_on_buffer_wq(bh, wait); + while (test_set_buffer_locked(bh)) { + int ret = __wait_on_buffer_wq(bh, wait); + if (ret) + return ret; + } return 0; } +static inline void lock_buffer(struct buffer_head *bh) +{ + lock_buffer_wq(bh, NULL); +} + #endif /* _LINUX_BUFFER_HEAD_H */ diff -puN include/linux/pagemap.h~lock_buffer_wq-fix include/linux/pagemap.h --- 25/include/linux/pagemap.h~lock_buffer_wq-fix 2003-08-20 23:30:30.000000000 -0700 +++ 25-akpm/include/linux/pagemap.h 2003-08-20 23:30:30.000000000 -0700 @@ -153,11 +153,6 @@ static inline void ___add_to_page_cache( extern void FASTCALL(__lock_page(struct page *page)); extern void FASTCALL(unlock_page(struct page *page)); -static inline void lock_page(struct page *page) -{ - if (TestSetPageLocked(page)) - __lock_page(page); -} extern int FASTCALL(__lock_page_wq(struct page *page, wait_queue_t *wait)); static inline int lock_page_wq(struct page *page, wait_queue_t *wait) @@ -168,12 +163,17 @@ static inline int lock_page_wq(struct pa return 0; } +static inline void lock_page(struct page *page) +{ + lock_page_wq(page, NULL); +} /* * This is exported only for wait_on_page_locked/wait_on_page_writeback. * Never use this directly! 
*/ -extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr)); +extern int FASTCALL(wait_on_page_bit_wq(struct page *page, int bit_nr, + wait_queue_t *wait)); /* * Wait for a page to be unlocked. @@ -182,14 +182,6 @@ extern void FASTCALL(wait_on_page_bit(st * ie with increased "page->count" so that the page won't * go away during the wait.. */ -static inline void wait_on_page_locked(struct page *page) -{ - if (PageLocked(page)) - wait_on_page_bit(page, PG_locked); -} - -extern int FASTCALL(wait_on_page_bit_wq(struct page *page, int bit_nr, - wait_queue_t *wait)); static inline int wait_on_page_locked_wq(struct page *page, wait_queue_t *wait) { if (PageLocked(page)) @@ -197,14 +189,14 @@ static inline int wait_on_page_locked_wq return 0; } +static inline void wait_on_page_locked(struct page *page) +{ + wait_on_page_locked_wq(page, NULL); +} + /* * Wait for a page to complete writeback */ -static inline void wait_on_page_writeback(struct page *page) -{ - if (PageWriteback(page)) - wait_on_page_bit(page, PG_writeback); -} static inline int wait_on_page_writeback_wq(struct page *page, wait_queue_t *wait) @@ -214,6 +206,11 @@ static inline int wait_on_page_writeback return 0; } +static inline void wait_on_page_writeback(struct page *page) +{ + wait_on_page_writeback_wq(page, NULL); +} + extern void end_page_writeback(struct page *page); /* diff -puN mm/filemap.c~lock_buffer_wq-fix mm/filemap.c --- 25/mm/filemap.c~lock_buffer_wq-fix 2003-08-20 23:30:30.000000000 -0700 +++ 25-akpm/mm/filemap.c 2003-08-20 23:30:30.000000000 -0700 @@ -318,12 +318,6 @@ int wait_on_page_bit_wq(struct page *pag } EXPORT_SYMBOL(wait_on_page_bit_wq); -void wait_on_page_bit(struct page *page, int bit_nr) -{ - wait_on_page_bit_wq(page, bit_nr, NULL); -} -EXPORT_SYMBOL(wait_on_page_bit); - /** * unlock_page() - unlock a locked page * _