From: David Gibson

add_to_page_cache() locks the given page if and only if it succeeds.  The
hugepage code (on every arch), however, does an unlock_page() after
add_to_page_cache() but before checking the return code, which could trip
the BUG() in unlock_page() if add_to_page_cache() failed.

In practice we've never hit this bug, because the only ways
add_to_page_cache() can fail are when we fail to allocate a radix-tree
node (very rare), or when there is already a page at that offset in the
radix tree - which, obviously, never happens during prefault.  We should
fix it anyway, though.

The analogous bug in some of the patches floating around for
demand-allocation of hugepages is more of a problem, because there
multiple processes can race to instantiate a particular page in the radix
tree - that has been hit at least once (which is how I found this).
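For clarity, here is the corrected pattern in isolation - a minimal
sketch, not the full hugetlb_prefault() body (the surrounding loop and
the out: label are elided).  The key point is that unlock_page() runs
only on the success path, where add_to_page_cache() actually took the
page lock:

	ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
	if (! ret) {
		/* Success: add_to_page_cache() locked the page, so
		 * dropping the lock here is safe. */
		unlock_page(page);
	} else {
		/* Failure: the page was never locked, so calling
		 * unlock_page() here would trip its BUG().  Undo the
		 * quota charge and free the hugepage instead. */
		hugetlb_put_quota(mapping);
		free_huge_page(page);
		goto out;
	}

A demand-allocation path would additionally need to treat an
"already present" failure from add_to_page_cache() (the radix tree's
-EEXIST) as "another process won the race to instantiate this page"
and back off, rather than treating it as a hard error.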
---

 25-akpm/arch/i386/mm/hugetlbpage.c    |    5 +++--
 25-akpm/arch/ia64/mm/hugetlbpage.c    |    5 +++--
 25-akpm/arch/ppc64/mm/hugetlbpage.c   |    5 +++--
 25-akpm/arch/sh/mm/hugetlbpage.c      |    5 +++--
 25-akpm/arch/sparc64/mm/hugetlbpage.c |    5 +++--
 5 files changed, 15 insertions(+), 10 deletions(-)

diff -puN arch/i386/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix arch/i386/mm/hugetlbpage.c
--- 25/arch/i386/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix	2004-05-08 21:49:07.791842160 -0700
+++ 25-akpm/arch/i386/mm/hugetlbpage.c	2004-05-08 21:49:07.801840640 -0700
@@ -263,8 +263,9 @@ int hugetlb_prefault(struct address_spac
 				goto out;
 			}
 			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			unlock_page(page);
-			if (ret) {
+			if (! ret) {
+				unlock_page(page);
+			} else {
 				hugetlb_put_quota(mapping);
 				free_huge_page(page);
 				goto out;
diff -puN arch/ia64/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix arch/ia64/mm/hugetlbpage.c
--- 25/arch/ia64/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix	2004-05-08 21:49:07.793841856 -0700
+++ 25-akpm/arch/ia64/mm/hugetlbpage.c	2004-05-08 21:49:07.801840640 -0700
@@ -293,8 +293,9 @@ int hugetlb_prefault(struct address_spac
 				goto out;
 			}
 			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			unlock_page(page);
-			if (ret) {
+			if (! ret) {
+				unlock_page(page);
+			} else {
 				hugetlb_put_quota(mapping);
 				free_huge_page(page);
 				goto out;
diff -puN arch/ppc64/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix arch/ppc64/mm/hugetlbpage.c
--- 25/arch/ppc64/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix	2004-05-08 21:49:07.795841552 -0700
+++ 25-akpm/arch/ppc64/mm/hugetlbpage.c	2004-05-08 21:49:07.802840488 -0700
@@ -451,8 +451,9 @@ int hugetlb_prefault(struct address_spac
 				goto out;
 			}
 			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			unlock_page(page);
-			if (ret) {
+			if (! ret) {
+				unlock_page(page);
+			} else {
 				hugetlb_put_quota(mapping);
 				free_huge_page(page);
 				goto out;
diff -puN arch/sh/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix arch/sh/mm/hugetlbpage.c
--- 25/arch/sh/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix	2004-05-08 21:49:07.796841400 -0700
+++ 25-akpm/arch/sh/mm/hugetlbpage.c	2004-05-08 21:49:07.802840488 -0700
@@ -248,8 +248,9 @@ int hugetlb_prefault(struct address_spac
 				goto out;
 			}
 			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			unlock_page(page);
-			if (ret) {
+			if (! ret) {
+				unlock_page(page);
+			} else {
 				hugetlb_put_quota(mapping);
 				free_huge_page(page);
 				goto out;
diff -puN arch/sparc64/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix arch/sparc64/mm/hugetlbpage.c
--- 25/arch/sparc64/mm/hugetlbpage.c~hugepage-add_to_page_cache-fix	2004-05-08 21:49:07.798841096 -0700
+++ 25-akpm/arch/sparc64/mm/hugetlbpage.c	2004-05-08 21:49:07.803840336 -0700
@@ -245,8 +245,9 @@ int hugetlb_prefault(struct address_spac
 				goto out;
 			}
 			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			unlock_page(page);
-			if (ret) {
+			if (! ret) {
+				unlock_page(page);
+			} else {
 				hugetlb_put_quota(mapping);
 				free_huge_page(page);
 				goto out;
_