From: Hugh Dickins <hugh@veritas.com>

Dissatisfied with the earlier fix for the race in which swapoff sneaks a page
into the tmpfs page cache after truncate_inode_pages has cleaned it: calling
truncate_inode_pages a second time can be too heavy.  Instead, fix
shmem_unuse_inode to check i_size before moving the page into the page cache.
(Actually, one part of this fix is in the previous patch: shmem_file_write
now has a hold on the page when it raises i_size.)
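
For readers who want the arithmetic spelled out, here is a minimal
user-space sketch (not part of the patch; size_to_limit, the 4K
PAGE_CACHE_SHIFT and the sample values are illustrative assumptions) of
how i_size is rounded up to the page-index limit that the swapped-in
index is compared against, with an inode under I_FREEING treated as
limit 0:

/*
 * Stand-alone illustration of the i_size check shmem_unuse_inode now
 * makes before moving a page back from the swap cache: the inode's
 * byte size is rounded up to a page-index limit, and any index at or
 * beyond that limit is left out of the page cache.
 */
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12				/* assume 4K pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)

static unsigned long size_to_limit(unsigned long long i_size, int freeing)
{
	/* An inode being freed (I_FREEING) admits no pages at all */
	if (freeing)
		return 0;
	/* Round i_size up to whole pages: the first index NOT covered */
	return (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}

int main(void)
{
	unsigned long long i_size = 10000;	/* hypothetical file size */
	unsigned long limit = size_to_limit(i_size, 0);
	unsigned long idx;

	printf("i_size=%llu bytes -> limit=%lu page(s)\n", i_size, limit);
	for (idx = 2; idx <= 3; idx++)
		printf("idx=%lu: %s\n", idx, idx < limit ?
			"within i_size, move into page cache" :
			"beyond i_size, leave in swap cache");
	return 0;
}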



 25-akpm/mm/shmem.c |   24 +++++++++++-------------
 1 files changed, 11 insertions(+), 13 deletions(-)

diff -puN mm/shmem.c~tmpfs-02-swapoff-truncate-race-refix mm/shmem.c
--- 25/mm/shmem.c~tmpfs-02-swapoff-truncate-race-refix	Tue Jun 10 13:00:11 2003
+++ 25-akpm/mm/shmem.c	Tue Jun 10 13:00:11 2003
@@ -488,16 +488,6 @@ done1:
 	}
 done2:
 	BUG_ON(info->swapped > info->next_index);
-	if (inode->i_mapping->nrpages) {
-		/*
-		 * Call truncate_inode_pages again: racing shmem_unuse_inode
-		 * may have swizzled a page in from swap since vmtruncate or
-		 * generic_delete_inode did it, before we lowered next_index.
-		 */
-		spin_unlock(&info->lock);
-		truncate_inode_pages(inode->i_mapping, inode->i_size);
-		spin_lock(&info->lock);
-	}
 	shmem_recalc_inode(inode);
 	spin_unlock(&info->lock);
 }
@@ -579,6 +569,7 @@ static inline int shmem_find_swp(swp_ent
 
 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 {
+	struct inode *inode;
 	unsigned long idx;
 	unsigned long size;
 	unsigned long limit;
@@ -643,8 +634,15 @@ lost2:
 	spin_unlock(&info->lock);
 	return 0;
 found:
-	if (move_from_swap_cache(page, idx + offset,
-			info->vfs_inode.i_mapping) == 0)
+	idx += offset;
+	inode = &info->vfs_inode;
+
+	/* Racing against delete or truncate? Must leave out of page cache */
+	limit = (inode->i_state & I_FREEING)? 0:
+		(inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+
+	if (idx >= limit ||
+	    move_from_swap_cache(page, idx, inode->i_mapping) == 0)
 		shmem_swp_set(info, ptr + offset, 0);
 	shmem_swp_unmap(ptr);
 	spin_unlock(&info->lock);
@@ -653,7 +651,7 @@ found:
 	 * try_to_unuse will skip over mms, then reincrement count.
 	 */
 	swap_free(entry);
-	return 1;
+	return idx < limit;
 }
 
 /*
