--- 2.4.0-test5/fs/buffer.c	Fri Jul 28 07:24:13 2000
+++ 2.4.0-test5-1/fs/buffer.c	Wed Aug  2 23:03:18 2000
@@ -482,16 +482,12 @@
 	__remove_from_lru_list(bh, bh->b_list);
 }
 
-static void insert_into_queues(struct buffer_head *bh)
+static void __insert_into_queues(struct buffer_head *bh)
 {
 	struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
 
-	spin_lock(&lru_list_lock);
-	write_lock(&hash_table_lock);
 	__hash_link(bh, head);
 	__insert_into_lru_list(bh, bh->b_list);
-	write_unlock(&hash_table_lock);
-	spin_unlock(&lru_list_lock);
 }
 
 /* This function must only run if there are no other
@@ -524,19 +520,27 @@
  * will force it bad). This shouldn't really happen currently, but
  * the code is ready.
  */
-struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
+static inline struct buffer_head * __get_hash_table(kdev_t dev, int block, int size)
 {
-	struct buffer_head **head = &hash(dev, block);
-	struct buffer_head *bh;
+	struct buffer_head *bh = hash(dev, block);
 
-	read_lock(&hash_table_lock);
-	for(bh = *head; bh; bh = bh->b_next)
+	for (; bh; bh = bh->b_next)
 		if (bh->b_blocknr == block && bh->b_size == size && bh->b_dev == dev)
 			break;
 	if (bh)
 		atomic_inc(&bh->b_count);
+
+	return bh;
+}
+
+struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
+{
+	struct buffer_head *bh;
+
+	read_lock(&hash_table_lock);
+	bh = __get_hash_table(dev, block, size);
 	read_unlock(&hash_table_lock);
 
 	return bh;
 }
 
@@ -804,7 +808,9 @@
 	int isize;
 
 repeat:
-	bh = get_hash_table(dev, block, size);
+	spin_lock(&lru_list_lock);
+	write_lock(&hash_table_lock);
+	bh = __get_hash_table(dev, block, size);
 	if (bh)
 		goto out;
 
@@ -829,9 +835,10 @@
 		bh->b_state = 1 << BH_Mapped;
 
 		/* Insert the buffer into the regular lists */
-		insert_into_queues(bh);
+		__insert_into_queues(bh);
 	out:
-		touch_buffer(bh);
+		write_unlock(&hash_table_lock);
+		spin_unlock(&lru_list_lock);
 		return bh;
 	}
 
@@ -839,7 +846,11 @@
 	 * If we block while refilling the free list, somebody may
 	 * create the buffer first ... search the hashes again.
 	 */
+	write_unlock(&hash_table_lock);
+	spin_unlock(&lru_list_lock);
 	refill_freelist(size);
+	spin_lock(&lru_list_lock);
+	write_lock(&hash_table_lock);
 	goto repeat;
 }
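
What the patch does, in short: previously getblk() called get_hash_table()
(taking and dropping hash_table_lock) and then insert_into_queues() (taking
and dropping lru_list_lock and hash_table_lock) as two separate critical
sections, so two callers could both miss the lookup and then both insert a
buffer for the same block. The fix exposes lock-free variants
(__get_hash_table(), __insert_into_queues()) and has getblk() hold
lru_list_lock and hash_table_lock across both the lookup and the insertion,
dropping them only around the blocking refill_freelist() call and retrying
the lookup afterwards.

Below is a minimal user-space sketch of the same drop-the-locks-and-retry
pattern, using a single pthread mutex in place of the kernel's
lru_list_lock/hash_table_lock pair; every name in it (getbuf, lookup,
refill_free_list, struct buf) is illustrative, not the kernel API:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the kernel's hash chains and free list. */
struct buf {
	int key;
	struct buf *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *table;	/* protected by table_lock */
static struct buf *free_list;	/* protected by table_lock */

/* Like __get_hash_table(): caller must hold table_lock. */
static struct buf *lookup(int key)
{
	struct buf *b;

	for (b = table; b; b = b->next)
		if (b->key == key)
			break;
	return b;
}

/* Like refill_freelist(): may block, so callers drop table_lock first. */
static void refill_free_list(void)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		abort();
	pthread_mutex_lock(&table_lock);
	b->next = free_list;
	free_list = b;
	pthread_mutex_unlock(&table_lock);
}

/* Like the patched getblk(): lookup and insert in one critical section. */
struct buf *getbuf(int key)
{
	struct buf *b;

	pthread_mutex_lock(&table_lock);
repeat:
	b = lookup(key);
	if (b)
		goto out;
	if (!free_list) {
		/*
		 * Drop the lock around the blocking call, then retry the
		 * lookup: another thread may have inserted `key` meanwhile.
		 */
		pthread_mutex_unlock(&table_lock);
		refill_free_list();
		pthread_mutex_lock(&table_lock);
		goto repeat;
	}
	b = free_list;			/* take a free buffer ...       */
	free_list = b->next;
	b->key = key;
	b->next = table;		/* ... and insert it atomically  */
	table = b;			/* with the failed lookup above  */
out:
	pthread_mutex_unlock(&table_lock);
	return b;
}

The sketch makes the same point as the patch: the lookup miss and the
insertion must sit inside one lock-held region, and anything that can sleep
has to run with the locks released, paying for that with a goto repeat.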