From: Hugh Dickins

Strict mbind's check that pages already mapped are on the right node
has been using pte_page without first checking pfn_valid, and without
page_table_lock to prevent spurious failures when try_to_unmap_one
intervenes between the pte_present and the pte_page.

Signed-off-by: Hugh Dickins
Signed-off-by: Andrew Morton
---

 mm/mempolicy.c |   19 ++++++++++++++-----
 1 files changed, 14 insertions(+), 5 deletions(-)

diff -puN mm/mempolicy.c~mbind-fix-verify_pages-pte_page mm/mempolicy.c
--- 25/mm/mempolicy.c~mbind-fix-verify_pages-pte_page	Mon Jun  6 15:23:30 2005
+++ 25-akpm/mm/mempolicy.c	Mon Jun  6 15:23:30 2005
@@ -242,6 +242,9 @@ static int
 verify_pages(struct mm_struct *mm,
 	     unsigned long addr, unsigned long end, unsigned long *nodes)
 {
+	int err = 0;
+
+	spin_lock(&mm->page_table_lock);
 	while (addr < end) {
 		struct page *p;
 		pte_t *pte;
@@ -268,17 +271,23 @@ verify_pages(struct mm_struct *mm,
 		}
 		p = NULL;
 		pte = pte_offset_map(pmd, addr);
-		if (pte_present(*pte))
-			p = pte_page(*pte);
+		if (pte_present(*pte)) {
+			unsigned long pfn = pte_pfn(*pte);
+			if (pfn_valid(pfn))
+				p = pfn_to_page(pfn);
+		}
 		pte_unmap(pte);
 		if (p) {
 			unsigned nid = page_to_nid(p);
-			if (!test_bit(nid, nodes))
-				return -EIO;
+			if (!test_bit(nid, nodes)) {
+				err = -EIO;
+				break;
+			}
 		}
 		addr += PAGE_SIZE;
 	}
-	return 0;
+	spin_unlock(&mm->page_table_lock);
+	return err;
 }
 
 /* Step 1: check the range */
_
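
For reference, here is a sketch of how verify_pages() reads once the
hunks above are applied.  The pgd/pud/pmd descent that produces pmd
sits in context lines the second hunk does not show, so it is elided
here as a comment; treat this as an illustration of the locking and
pfn_valid pattern, not the verbatim post-patch file.

/*
 * Sketch of verify_pages() after this patch (page-table descent
 * elided -- it lies outside the hunks shown above).
 */
static int
verify_pages(struct mm_struct *mm,
	     unsigned long addr, unsigned long end, unsigned long *nodes)
{
	int err = 0;

	/*
	 * Hold page_table_lock so try_to_unmap_one cannot clear the
	 * pte between the pte_present test and the pfn lookup.
	 */
	spin_lock(&mm->page_table_lock);
	while (addr < end) {
		struct page *p;
		pte_t *pte;

		/* ... pgd/pud/pmd lookup yielding pmd elided ... */

		p = NULL;
		pte = pte_offset_map(pmd, addr);
		if (pte_present(*pte)) {
			unsigned long pfn = pte_pfn(*pte);
			/*
			 * A present pte can still carry a pfn with no
			 * struct page behind it (e.g. an I/O mapping),
			 * so check pfn_valid before pfn_to_page.
			 */
			if (pfn_valid(pfn))
				p = pfn_to_page(pfn);
		}
		pte_unmap(pte);
		if (p) {
			unsigned nid = page_to_nid(p);
			if (!test_bit(nid, nodes)) {
				err = -EIO;
				break;
			}
		}
		addr += PAGE_SIZE;
	}
	spin_unlock(&mm->page_table_lock);
	return err;
}

Note that the error path now uses break rather than return, so the
function cannot leave with page_table_lock still held.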