From: Maneesh Soni

This patch corrects the dentry_stat.nr_unused calculation.  In
select_parent() and shrink_dcache_anon() we were not making any
adjustments to the nr_unused count after manipulating the dentry_unused
list.  Now the nr_unused count is decremented when a dentry that is on
the dentry_unused list is removed from it.  Further, in the same
routines, the nr_unused count is incremented again when a dentry is
moved (via its d_lru link) to the end of the dentry_unused list for
pruning.


 fs/dcache.c |   22 ++++++++++++++++------
 1 files changed, 16 insertions(+), 6 deletions(-)

diff -puN fs/dcache.c~dentry_stat-accounting-fix fs/dcache.c
--- 25/fs/dcache.c~dentry_stat-accounting-fix   2003-04-14 02:06:31.000000000 -0700
+++ 25-akpm/fs/dcache.c 2003-04-14 02:06:31.000000000 -0700
@@ -538,13 +538,18 @@ resume:
                 struct list_head *tmp = next;
                 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
                 next = tmp->next;
-                list_del_init(&dentry->d_lru);
-                /* don't add non zero d_count dentries
-                 * back to d_lru list
+                if (!list_empty(&dentry->d_lru)) {
+                        dentry_stat.nr_unused--;
+                        list_del_init(&dentry->d_lru);
+                }
+                /*
+                 * move only zero ref count dentries to the end
+                 * of the unused list for prune_dcache
                  */
                 if (!atomic_read(&dentry->d_count)) {
                         list_add(&dentry->d_lru, dentry_unused.prev);
+                        dentry_stat.nr_unused++;
                         found++;
                 }
                 /*
@@ -609,13 +614,18 @@ void shrink_dcache_anon(struct hlist_hea
                 spin_lock(&dcache_lock);
                 hlist_for_each(lp, head) {
                         struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
-                        list_del(&this->d_lru);
+                        if (!list_empty(&this->d_lru)) {
+                                dentry_stat.nr_unused--;
+                                list_del(&this->d_lru);
+                        }
 
-                        /* don't add non zero d_count dentries
-                         * back to d_lru list
+                        /*
+                         * move only zero ref count dentries to the end
+                         * of the unused list for prune_dcache
                          */
                         if (!atomic_read(&this->d_count)) {
                                 list_add_tail(&this->d_lru, &dentry_unused);
+                                dentry_stat.nr_unused++;
                                 found++;
                         }
                 }
_
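
As a side note, the accounting rule the patch enforces is easy to state:
nr_unused must always equal the number of dentries on the dentry_unused
list, so the counter is decremented whenever a dentry is unlinked from
that list and incremented when a zero ref count dentry is put back on its
tail.  Below is a small userspace sketch of that invariant; the types and
the select_entry() helper are hypothetical stand-ins, not the real dcache
structures or locking.

/*
 * Userspace sketch of the nr_unused accounting rule (simplified,
 * hypothetical structures -- not the real dcache code).  Invariant:
 * nr_unused always equals the number of entries on the unused list.
 */
#include <assert.h>
#include <stdio.h>

struct entry {
	int refcount;
	struct entry *prev, *next;	/* links on the unused list, or self-linked */
};

static struct entry unused = { 0, &unused, &unused };	/* list head */
static int nr_unused;

static int on_list(struct entry *e)
{
	return e->next != e;		/* mirrors !list_empty(&dentry->d_lru) */
}

static void unlink_entry(struct entry *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;		/* mirrors list_del_init() */
}

static void add_tail(struct entry *e)
{
	e->prev = unused.prev;
	e->next = &unused;
	unused.prev->next = e;
	unused.prev = e;
}

/* The pattern select_parent()/shrink_dcache_anon() now follow. */
static void select_entry(struct entry *e)
{
	if (on_list(e)) {
		nr_unused--;		/* leaving the unused list */
		unlink_entry(e);
	}
	if (e->refcount == 0) {
		add_tail(e);		/* queue it for pruning */
		nr_unused++;		/* back on the unused list */
	}
}

int main(void)
{
	struct entry a = { 0, &a, &a }, b = { 1, &b, &b };

	select_entry(&a);	/* unreferenced: goes on the list, count becomes 1 */
	select_entry(&b);	/* referenced: stays off the list, count unchanged */
	select_entry(&a);	/* re-selected: removed and re-added, count still 1 */

	printf("nr_unused = %d\n", nr_unused);
	assert(nr_unused == 1);
	return 0;
}

Running it prints nr_unused = 1, matching the single zero ref count entry
left queued for pruning.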