diff -urNp --exclude CVS --exclude BitKeeper --exclude {arch} --exclude .arch-ids x-ref/fs/inode.c x/fs/inode.c
--- x-ref/fs/inode.c	2003-12-04 20:00:58.000000000 +0100
+++ x/fs/inode.c	2003-12-04 20:09:38.000000000 +0100
@@ -796,33 +796,86 @@ void prune_icache(int goal)
 {
 	LIST_HEAD(list);
 	struct list_head *entry, *freeable = &list;
-	int count;
+	int count, pass;
 	struct inode * inode;
 
-	spin_lock(&inode_lock);
+	count = pass = 0;
+	entry = &inode_unused;
 
-	count = 0;
-	entry = inode_unused.prev;
-	while (entry != &inode_unused)
-	{
-		struct list_head *tmp = entry;
-
-		entry = entry->prev;
-		inode = INODE(tmp);
-		if (inode->i_state & (I_FREEING|I_CLEAR|I_LOCK))
-			continue;
-		if (!CAN_UNUSE(inode))
-			continue;
-		if (atomic_read(&inode->i_count))
-			continue;
-		list_del(tmp);
-		list_add(tmp, freeable);
-		inode->i_state |= I_FREEING;
-		count++;
-		if (!--goal)
-			break;
+	spin_lock(&inode_lock);
+	while (goal && pass++ < 2) {
+		entry = inode_unused.prev;
+		while (entry != &inode_unused)
+		{
+			struct list_head *tmp = entry;
+
+			entry = entry->prev;
+			inode = INODE(tmp);
+			if (inode->i_state & (I_FREEING|I_CLEAR|I_LOCK))
+				continue;
+			if (atomic_read(&inode->i_count))
+				continue;
+			if (pass == 2 && !inode->i_state && !CAN_UNUSE(inode)) {
+				if (inode_has_buffers(inode))
+					/*
+					 * If the inode has dirty buffers
+					 * pending, start flushing out bdflush.ndirty
+					 * worth of data even if there's no dirty-memory
+					 * pressure. Do nothing else in this
+					 * case: until all dirty buffers are gone
+					 * we can do nothing about the inode other than
+					 * to keep flushing dirty stuff. We could also
+					 * flush only the dirty buffers in the inode,
+					 * but there's no API to do it asynchronously
+					 * and this simpler approach to deal with the
+					 * dirty payload shouldn't make much difference
+					 * in practice. Also keep in mind that if somebody
+					 * keeps overwriting data in a flood we'd
+					 * never manage to drop the inode anyway,
+					 * and we really shouldn't do that because
+					 * it's a heavily used one.
+					 */
+					wakeup_bdflush();
+				else if (inode->i_data.nrpages)
+					/*
+					 * If we're here it means the only reason
+					 * we cannot drop the inode is its
+					 * pagecache, so go ahead and trim it
+					 * hard. If it doesn't go away it means
+					 * they're dirty or dirty/pinned pages, ala
+					 * ramfs.
+					 *
+					 * invalidate_inode_pages() is a non-
+					 * blocking operation, but we introduce
+					 * a dependency order between the
+					 * inode_lock and the pagemap_lru_lock:
+					 * the inode_lock must always be taken
+					 * first from now on.
+					 */
+					invalidate_inode_pages(inode);
+			}
+			if (!CAN_UNUSE(inode))
+				continue;
+			list_del(tmp);
+			list_add(tmp, freeable);
+			inode->i_state |= I_FREEING;
+			count++;
+			if (!--goal)
+				break;
+		}
 	}
 	inodes_stat.nr_unused -= count;
+
+	/*
+	 * the unused list is hardly an LRU, so it makes
+	 * more sense to rotate it so we don't always
+	 * bang on the same inodes in case they're
+	 * unfreeable for whatever reason.
+	 */
+	if (entry != &inode_unused) {
+		list_del(&inode_unused);
+		list_add(&inode_unused, entry);
+	}
 	spin_unlock(&inode_lock);
 
 	dispose_list(freeable);
diff -urNp --exclude CVS --exclude BitKeeper --exclude {arch} --exclude .arch-ids x-ref/mm/filemap.c x/mm/filemap.c
--- x-ref/mm/filemap.c	2003-12-04 20:00:59.000000000 +0100
+++ x/mm/filemap.c	2003-12-04 20:01:09.000000000 +0100
@@ -202,7 +202,7 @@ void invalidate_inode_pages(struct inode
 		if (TryLockPage(page))
 			continue;
 
-		if (page->buffers && !try_to_free_buffers(page, 0))
+		if (page->buffers && !try_to_release_page(page, 0))
 			goto unlock;
 
 		if (page_count(page) != 1)