diff -urN 2.4.7pre4/fs/buffer.c bh_async/fs/buffer.c
--- 2.4.7pre4/fs/buffer.c	Tue Jul 10 00:03:05 2001
+++ bh_async/fs/buffer.c	Tue Jul 10 03:48:10 2001
@@ -823,10 +823,11 @@
 	 * that unlock the page..
 	 */
 	spin_lock_irqsave(&page_uptodate_lock, flags);
+	mark_buffer_async(bh, 0);
 	__unlock_buffer(bh);
 	tmp = bh->b_this_page;
 	while (tmp != bh) {
-		if (tmp->b_end_io == end_buffer_io_async && buffer_locked(tmp))
+		if (buffer_async(tmp) && buffer_locked(tmp))
 			goto still_busy;
 		tmp = tmp->b_this_page;
 	}
@@ -858,8 +859,9 @@
 	return;
 }
 
-void set_buffer_async_io(struct buffer_head *bh) {
+inline void set_buffer_async_io(struct buffer_head *bh) {
     bh->b_end_io = end_buffer_io_async ;
+    mark_buffer_async(bh, 1);
 }
 
 /*
@@ -1549,7 +1551,7 @@
 	/* Stage 2: lock the buffers, mark them clean */
 	do {
 		lock_buffer(bh);
-		bh->b_end_io = end_buffer_io_async;
+		set_buffer_async_io(bh);
 		get_bh(bh);
 		set_bit(BH_Uptodate, &bh->b_state);
 		clear_bit(BH_Dirty, &bh->b_state);
@@ -1751,7 +1753,7 @@
 	for (i = 0; i < nr; i++) {
 		struct buffer_head * bh = arr[i];
 		lock_buffer(bh);
-		bh->b_end_io = end_buffer_io_async;
+		set_buffer_async_io(bh);
 		get_bh(bh);
 	}
 
@@ -2198,7 +2200,7 @@
 		lock_buffer(bh);
 		bh->b_blocknr = *(b++);
 		set_bit(BH_Mapped, &bh->b_state);
-		bh->b_end_io = end_buffer_io_async;
+		set_buffer_async_io(bh);
 		get_bh(bh);
 		bh = bh->b_this_page;
 	} while (bh != head);
diff -urN 2.4.7pre4/include/linux/fs.h bh_async/include/linux/fs.h
--- 2.4.7pre4/include/linux/fs.h	Mon Jul  9 20:25:17 2001
+++ bh_async/include/linux/fs.h	Tue Jul 10 03:46:27 2001
@@ -215,6 +215,7 @@
 	BH_Mapped,	/* 1 if the buffer has a disk mapping */
 	BH_New,		/* 1 if the buffer is new and not yet written out */
 	BH_Protected,	/* 1 if the buffer is protected */
+	BH_Async,	/* 1 if the buffer is under end_buffer_io_async I/O */
 
 	BH_PrivateStart,/* not a state bit, but the first bit available
 			 * for private allocation by other entities
@@ -275,6 +276,7 @@
 #define buffer_mapped(bh)	__buffer_state(bh,Mapped)
 #define buffer_new(bh)		__buffer_state(bh,New)
 #define buffer_protected(bh)	__buffer_state(bh,Protected)
+#define buffer_async(bh)	__buffer_state(bh,Async)
 
 #define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)
 
@@ -1109,6 +1111,14 @@
 extern void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
 
 #define atomic_set_buffer_dirty(bh)	test_and_set_bit(BH_Dirty, &(bh)->b_state)
+
+static inline void mark_buffer_async(struct buffer_head * bh, int on)
+{
+	if (on)
+		set_bit(BH_Async, &bh->b_state);
+	else
+		clear_bit(BH_Async, &bh->b_state);
+}
 
 /*
  * If an error happens during the make_request, this function
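
The idea above: end_buffer_io_async() used to recognize sibling buffers
still under page I/O by comparing b_end_io against end_buffer_io_async.
The patch makes that state an explicit BH_Async bit, set in
set_buffer_async_io() and cleared under page_uptodate_lock on
completion, so the "is the whole page done" walk no longer depends on
the b_end_io pointer.

A minimal sketch of the resulting submission pattern (illustration
only; submit_page_buffers is a made-up helper name, and the body just
mirrors the call sites the patch converts in fs/buffer.c):

	/*
	 * Mark every buffer of the page async, then start the I/O.
	 * end_buffer_io_async() clears BH_Async under page_uptodate_lock
	 * and unlocks the page once the last async buffer completes.
	 */
	static void submit_page_buffers(struct buffer_head *head, int rw)
	{
		struct buffer_head *bh = head;

		/* Stage 1: lock the buffers and flag async completion */
		do {
			lock_buffer(bh);
			set_buffer_async_io(bh);	/* sets b_end_io and BH_Async together */
			get_bh(bh);
			bh = bh->b_this_page;
		} while (bh != head);

		/* Stage 2: start the I/O on each buffer */
		do {
			struct buffer_head *next = bh->b_this_page;
			submit_bh(rw, bh);
			bh = next;
		} while (bh != head);
	}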