From: David Mosberger, "Sharma, Arun"

The truncate race fix assumed that a non-zero vma->vm_ops->nopage
implies a non-zero vma->vm_file.  The ia64 x86 emulation code breaks
this assumption, so teach do_no_page() to handle it.


 mm/memory.c |   13 ++++++++-----
 1 files changed, 8 insertions(+), 5 deletions(-)

diff -puN mm/memory.c~nopage-fix mm/memory.c
--- 25/mm/memory.c~nopage-fix	2003-08-26 02:24:53.000000000 -0700
+++ 25-akpm/mm/memory.c	2003-08-26 02:25:00.000000000 -0700
@@ -1385,10 +1385,10 @@ do_no_page(struct mm_struct *mm, struct
 	unsigned long address, int write_access, pte_t *page_table, pmd_t *pmd)
 {
 	struct page * new_page;
-	struct address_space *mapping;
+	struct address_space *mapping = NULL;
 	pte_t entry;
 	struct pte_chain *pte_chain;
-	int sequence;
+	int sequence = 0;
 	int ret;
 
 	if (!vma->vm_ops || !vma->vm_ops->nopage)
@@ -1397,8 +1397,10 @@ do_no_page(struct mm_struct *mm, struct
 	pte_unmap(page_table);
 	spin_unlock(&mm->page_table_lock);
 
-	mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
-	sequence = atomic_read(&mapping->truncate_count);
+	if (vma->vm_file) {
+		mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
+		sequence = atomic_read(&mapping->truncate_count);
+	}
 	smp_rmb(); /* Prevent CPU from reordering lock-free ->nopage() */
 retry:
 	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, 0);
@@ -1434,7 +1436,8 @@ retry:
 	 * invalidated this page.  If invalidate_mmap_range got called,
 	 * retry getting the page.
 	 */
-	if (unlikely(sequence != atomic_read(&mapping->truncate_count))) {
+	if (mapping &&
+	    (unlikely(sequence != atomic_read(&mapping->truncate_count)))) {
 		sequence = atomic_read(&mapping->truncate_count);
 		spin_unlock(&mm->page_table_lock);
 		page_cache_release(new_page);
_
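
For readers unfamiliar with the failure mode, the following is a minimal
illustrative sketch (not part of the patch) of the kind of mapping the
ia64 ia32 emulation layer sets up: ->nopage is populated while ->vm_file
stays NULL, so the old unconditional vma->vm_file->f_dentry dereference
in do_no_page() would oops.  The identifiers ia32_gate_page, ia32_nopage
and ia32_vm_ops are hypothetical, chosen only for the example; the
three-argument nopage() signature matches the 2.6.0-test kernels this
patch targets.

#include <linux/mm.h>

/* Hypothetical kernel-owned page, allocated once during setup. */
static struct page *ia32_gate_page;

/*
 * nopage handler with no backing file: it hands back a reference to a
 * kernel-owned page.  No struct file, inode or address_space (and hence
 * no truncate_count) stands behind this mapping.
 */
static struct page *ia32_nopage(struct vm_area_struct *vma,
				unsigned long address, int unused)
{
	get_page(ia32_gate_page);
	return ia32_gate_page;
}

static struct vm_operations_struct ia32_vm_ops = {
	.nopage	= ia32_nopage,
};

/*
 * A vma with vma->vm_ops = &ia32_vm_ops has a non-NULL ->nopage but a
 * NULL ->vm_file, which is exactly the combination the pre-patch line
 *
 *	mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
 *
 * in do_no_page() could not survive.  With the patch applied, such vmas
 * simply skip the truncate_count check, which is safe because a mapping
 * with no address_space can never race with truncate.
 */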