author	Hugh Dickins <hugh@veritas.com>	2005-01-07 21:58:36 -0800
committer	Linus Torvalds <torvalds@evo.osdl.org>	2005-01-07 21:58:36 -0800
commit	25f5906cfbf8bf8603599edb5bce4577de0d7085 (patch)
tree	0a0df2793ee6283c0cef9ca1090fab2fbd0817e1 /mm
parent	84c496cfadc2c9471340a9ccdf7f82be6f627a5b (diff)
[PATCH] vmtrunc: unmap_mapping_range_tree
Move unmap_mapping_range's nonlinear vma handling out into its own inline helper, parallel to the prio_tree handling; unmap_mapping_range_list is the better name for the nonlinear-list helper, so rename the existing prio_tree one to unmap_mapping_range_tree.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	42
1 file changed, 23 insertions(+), 19 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index cb073d049868c5..fd761e2ab80791 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1354,9 +1354,9 @@ no_new_page:
 }
 
 /*
- * Helper function for unmap_mapping_range().
+ * Helper functions for unmap_mapping_range().
  */
-static inline void unmap_mapping_range_list(struct prio_tree_root *root,
+static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
 					    struct zap_details *details)
 {
 	struct vm_area_struct *vma;
@@ -1380,6 +1380,24 @@ static inline void unmap_mapping_range_list(struct prio_tree_root *root,
 	}
 }
 
+static inline void unmap_mapping_range_list(struct list_head *head,
+					    struct zap_details *details)
+{
+	struct vm_area_struct *vma;
+
+	/*
+	 * In nonlinear VMAs there is no correspondence between virtual address
+	 * offset and file offset. So we must perform an exhaustive search
+	 * across *all* the pages in each nonlinear VMA, not just the pages
+	 * whose virtual address lies outside the file truncation point.
+	 */
+	list_for_each_entry(vma, head, shared.vm_set.list) {
+		details->nonlinear_vma = vma;
+		zap_page_range(vma, vma->vm_start,
+			vma->vm_end - vma->vm_start, details);
+	}
+}
+
 /**
  * unmap_mapping_range - unmap the portion of all mmaps
  * in the specified address_space corresponding to the specified
@@ -1424,23 +1442,9 @@ void unmap_mapping_range(struct address_space *mapping,
 	mapping->truncate_count++;
 	if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
-		unmap_mapping_range_list(&mapping->i_mmap, &details);
-
-	/*
-	 * In nonlinear VMAs there is no correspondence between virtual address
-	 * offset and file offset. So we must perform an exhaustive search
-	 * across *all* the pages in each nonlinear VMA, not just the pages
-	 * whose virtual address lies outside the file truncation point.
-	 */
-	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) {
-		struct vm_area_struct *vma;
-		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
-				shared.vm_set.list) {
-			details.nonlinear_vma = vma;
-			zap_page_range(vma, vma->vm_start,
-				vma->vm_end - vma->vm_start, &details);
-		}
-	}
+		unmap_mapping_range_tree(&mapping->i_mmap, &details);
+	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
+		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
 	spin_unlock(&mapping->i_mmap_lock);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
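
To place the diff in context, here is a minimal caller-side sketch of unmap_mapping_range(). The wrapper name truncate_unmap_sketch and the explicit page rounding are illustrative assumptions, not code from this patch; the argument meanings (a holelen of 0 means "to the end of the file", even_cows of 1 also zaps private COW copies) follow the function's kernel-doc.

#include <linux/fs.h>	/* struct address_space */
#include <linux/mm.h>	/* unmap_mapping_range(), PAGE_SIZE */

/*
 * Illustrative sketch only -- not part of this patch.  A truncate path
 * asks unmap_mapping_range() to drop every mapping of the file beyond
 * the new size; inside that call the work now fans out to
 * unmap_mapping_range_tree() for linear VMAs on the i_mmap prio_tree
 * and unmap_mapping_range_list() for nonlinear VMAs on i_mmap_nonlinear.
 */
static void truncate_unmap_sketch(struct address_space *mapping, loff_t newsize)
{
	/* First whole page at or above the new size. */
	loff_t holebegin = (newsize + PAGE_SIZE - 1) & ~((loff_t)PAGE_SIZE - 1);

	/*
	 * holelen == 0 means "to the end of the file"; even_cows == 1
	 * also zaps private COW copies of the truncated pages.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
}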
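
The exhaustive per-VMA zap in unmap_mapping_range_list() is needed because a nonlinear VMA, set up from user space with remap_file_pages(), no longer has a single linear relation between virtual addresses and file offsets, so no sub-range of the VMA can be computed from the truncation point. The small user-space program below is a hypothetical illustration of how such a mapping comes about; the file name and page numbers are made up.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pgsz = sysconf(_SC_PAGESIZE);
	int fd = open("datafile", O_RDWR);	/* hypothetical file */
	if (fd < 0)
		return 1;

	/* Shared file mapping covering the first four pages of the file. */
	char *p = mmap(NULL, 4 * pgsz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/*
	 * Rebind the first page-sized window of the VMA to file page 3.
	 * The VMA's virtual-to-file-offset relation is now nonlinear, so
	 * it moves to the mapping's i_mmap_nonlinear list and truncation
	 * has to scan the whole VMA.
	 */
	remap_file_pages(p, pgsz, 0, 3, 0);
	return 0;
}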