author     Hugh Dickins <hugh@veritas.com>          2005-01-07 21:58:02 -0800
committer  Linus Torvalds <torvalds@evo.osdl.org>  2005-01-07 21:58:02 -0800
commit     b37e39b03bcd49397cac7b937d9b180157705e08 (patch)
tree       bff70e85ba5286bb0c150b260eb1ace00242a6da /mm
parent     de146a08f93f5c4ab71503a79a7c5e300065b39a (diff)
[PATCH] vmtrunc: truncate_count not atomic
Why is mapping->truncate_count atomic?  It's incremented inside
i_mmap_lock (and i_sem), and the reads don't need it to be atomic.

And why smp_rmb() before call to ->nopage?  The compiler cannot reorder
the initial assignment of sequence after the call to ->nopage, and no
cpu (yet!) can read from the future, which is all that matters there.

And delete totally bogus reset of truncate_count from blkmtd add_device.
truncate_count is all about detecting i_size changes: i_size does not
change there; and if it did, the count should be incremented, not reset.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
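For illustration only, not part of the patch: a minimal userspace C sketch of
the sequence-count pattern the message describes, using hypothetical names
(map_lock, invalidate_mapping, fault_in_page) and a pthread mutex standing in
for i_mmap_lock/page_table_lock. Every writer bumps the counter under the
lock, so a plain unsigned int suffices; the reader samples it locklessly, does
its slow work, and rechecks under the lock before committing.

#include <pthread.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int truncate_count;	/* plain int: every writer holds map_lock */

/* Writer side (cf. unmap_mapping_range): increment under the lock. */
void invalidate_mapping(void)
{
	pthread_mutex_lock(&map_lock);
	truncate_count++;	/* no atomic_inc() needed: writers serialized */
	/* ... unmap the affected pages ... */
	pthread_mutex_unlock(&map_lock);
}

/* Reader side (cf. do_no_page): sample, work locklessly, recheck. */
void fault_in_page(void)
{
	unsigned int sequence = truncate_count;	/* plain read, no smp_rmb() */
retry:
	/* ... find or allocate the new page, without holding map_lock ... */
	pthread_mutex_lock(&map_lock);
	if (sequence != truncate_count) {	/* truncation ran meanwhile */
		sequence = truncate_count;	/* re-sample under the lock */
		pthread_mutex_unlock(&map_lock);
		goto retry;
	}
	/* ... install the page: no truncation raced with this fault ... */
	pthread_mutex_unlock(&map_lock);
}

The goto retry without re-sampling at the top mirrors the post-patch
do_no_page structure: sequence is deliberately refreshed only while the lock
is held, which is what makes the recheck trustworthy.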
Diffstat (limited to 'mm')
-rw-r--r--  mm/memory.c  12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 6224cb1933c145..5c6a4db990acfa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1424,7 +1424,7 @@ void unmap_mapping_range(struct address_space *mapping,
 	spin_lock(&mapping->i_mmap_lock);
 	/* Protect against page fault */
-	atomic_inc(&mapping->truncate_count);
+	mapping->truncate_count++;
 	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
 		unmap_mapping_range_list(&mapping->i_mmap, &details);
@@ -1726,7 +1726,7 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page * new_page;
 	struct address_space *mapping = NULL;
 	pte_t entry;
-	int sequence = 0;
+	unsigned int sequence = 0;
 	int ret = VM_FAULT_MINOR;
 	int anon = 0;
@@ -1738,9 +1738,8 @@ do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
-		sequence = atomic_read(&mapping->truncate_count);
+		sequence = mapping->truncate_count;
 	}
-	smp_rmb(); /* Prevent CPU from reordering lock-free ->nopage() */
 retry:
 	cond_resched();
 	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
@@ -1774,9 +1773,8 @@ retry:
 	 * invalidated this page.  If unmap_mapping_range got called,
 	 * retry getting the page.
 	 */
-	if (mapping &&
-	    (unlikely(sequence != atomic_read(&mapping->truncate_count)))) {
-		sequence = atomic_read(&mapping->truncate_count);
+	if (mapping && unlikely(sequence != mapping->truncate_count)) {
+		sequence = mapping->truncate_count;
 		spin_unlock(&mm->page_table_lock);
 		page_cache_release(new_page);
 		goto retry;
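A side note on the do_no_page hunks: sequence becomes unsigned int, presumably
to match the new non-atomic declaration of truncate_count itself (that change,
in include/linux/fs.h, falls outside this mm-limited diffstat). For a counter
that only ever increments, unsigned is also the safe choice in C: unsigned
wraparound is well-defined where signed overflow is undefined behavior, and
inequality still detects a change across the wrap. A hypothetical standalone
check:

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned int count = UINT_MAX;	/* counter about to wrap */
	unsigned int sequence = count;	/* reader's earlier sample */

	count++;	/* wraps to 0: well-defined for unsigned types */
	assert(sequence != count);	/* the change is still detected */
	return 0;
}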