author     Andrew Morton <akpm@osdl.org>              2004-05-22 08:06:10 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2004-05-22 08:06:10 -0700
commit     068258f78adf5e3af492fb433b5a8bc2b2ba8f2f (patch)
tree       0e28a85c6df6b1cf81e3a8f03408ace51ca7b169 /mm
parent     2fe9c14c0e6932f1f2d18f8888a49078933dfaf6 (diff)
[PATCH] rmap 18: i_mmap_nonlinear
From: Hugh Dickins <hugh@veritas.com>

The prio_tree is of no use to nonlinear vmas: currently we're having to search the tree in the most inefficient way to find all its nonlinears. At the very least we need an indication of the unlikely case when there are some nonlinears; but really, we'd do best to take them out of the prio_tree altogether, into a list of their own - i_mmap_nonlinear.
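A minimal sketch of the migration this patch performs, assuming the fields and helpers named in the hunks below (i_mmap_lock, i_mmap_shared, vma_prio_tree_remove, vma_prio_tree_init, shared.vm_set.list). The i_mmap_nonlinear list head itself is added to struct address_space outside this mm-only diffstat, and the helper name here is hypothetical: the patch open-codes this logic in sys_remap_file_pages().

	/*
	 * Hypothetical helper, not the patch's verbatim code: the first time
	 * a shared vma goes nonlinear, it leaves the i_mmap_shared prio_tree
	 * and joins the mapping's plain i_mmap_nonlinear list instead.
	 */
	static void vma_make_nonlinear(struct vm_area_struct *vma)
	{
		struct address_space *mapping = vma->vm_file->f_mapping;

		spin_lock(&mapping->i_mmap_lock);
		vma->vm_flags |= VM_NONLINEAR;
		/* pgoff-based prio_tree searches cannot find nonlinears anyway */
		vma_prio_tree_remove(vma, &mapping->i_mmap_shared);
		vma_prio_tree_init(vma);
		/* shared.vm_set.list is reused as the linkage into the new list */
		list_add_tail(&vma->shared.vm_set.list,
				&mapping->i_mmap_nonlinear);
		spin_unlock(&mapping->i_mmap_lock);
	}

Readers such as unmap_mapping_range() and try_to_unmap_file() then walk the list directly with list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list), instead of scanning the whole prio_tree over the 0..ULONG_MAX range as before.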
Diffstat (limited to 'mm')
-rw-r--r--  mm/fremap.c     12
-rw-r--r--  mm/memory.c     31
-rw-r--r--  mm/mmap.c       15
-rw-r--r--  mm/prio_tree.c   1
-rw-r--r--  mm/rmap.c       35
5 files changed, 46 insertions(+), 48 deletions(-)
diff --git a/mm/fremap.c b/mm/fremap.c
index fee3f821c64d71..68af37d169f5db 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -157,6 +157,7 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
+ struct address_space *mapping;
unsigned long end = start + size;
struct vm_area_struct *vma;
int err = -EINVAL;
@@ -197,8 +198,17 @@ asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
end <= vma->vm_end) {
/* Must set VM_NONLINEAR before any pages are populated. */
- if (pgoff != linear_page_index(vma, start))
+ if (pgoff != linear_page_index(vma, start) &&
+ !(vma->vm_flags & VM_NONLINEAR)) {
+ mapping = vma->vm_file->f_mapping;
+ spin_lock(&mapping->i_mmap_lock);
vma->vm_flags |= VM_NONLINEAR;
+ vma_prio_tree_remove(vma, &mapping->i_mmap_shared);
+ vma_prio_tree_init(vma);
+ list_add_tail(&vma->shared.vm_set.list,
+ &mapping->i_mmap_nonlinear);
+ spin_unlock(&mapping->i_mmap_lock);
+ }
/* ->populate can take a long time, so downgrade the lock. */
downgrade_write(&mm->mmap_sem);
diff --git a/mm/memory.c b/mm/memory.c
index 91206b09b540d8..cf7f4624f80892 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1116,8 +1116,6 @@ static void unmap_mapping_range_list(struct prio_tree_root *root,
while ((vma = vma_prio_tree_next(vma, root, &iter,
details->first_index, details->last_index)) != NULL) {
- if (unlikely(vma->vm_flags & VM_NONLINEAR))
- continue;
vba = vma->vm_pgoff;
vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
@@ -1133,22 +1131,6 @@ static void unmap_mapping_range_list(struct prio_tree_root *root,
}
}
-static void unmap_nonlinear_range_list(struct prio_tree_root *root,
- struct zap_details *details)
-{
- struct vm_area_struct *vma = NULL;
- struct prio_tree_iter iter;
-
- while ((vma = vma_prio_tree_next(vma, root, &iter,
- 0, ULONG_MAX)) != NULL) {
- if (!(vma->vm_flags & VM_NONLINEAR))
- continue;
- details->nonlinear_vma = vma;
- zap_page_range(vma, vma->vm_start,
- vma->vm_end - vma->vm_start, details);
- }
-}
-
/**
* unmap_mapping_range - unmap the portion of all mmaps
* in the specified address_space corresponding to the specified
@@ -1198,11 +1180,18 @@ void unmap_mapping_range(struct address_space *mapping,
/* Don't waste time to check mapping on fully shared vmas */
details.check_mapping = NULL;
- if (unlikely(!prio_tree_empty(&mapping->i_mmap_shared))) {
+ if (unlikely(!prio_tree_empty(&mapping->i_mmap_shared)))
unmap_mapping_range_list(&mapping->i_mmap_shared, &details);
- unmap_nonlinear_range_list(&mapping->i_mmap_shared, &details);
- }
+ if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) {
+ struct vm_area_struct *vma;
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
+ shared.vm_set.list) {
+ details.nonlinear_vma = vma;
+ zap_page_range(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start, &details);
+ }
+ }
spin_unlock(&mapping->i_mmap_lock);
}
EXPORT_SYMBOL(unmap_mapping_range);
diff --git a/mm/mmap.c b/mm/mmap.c
index 70347b11a347e9..9435c6aa10c63a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -72,7 +72,9 @@ static inline void __remove_shared_vm_struct(struct vm_area_struct *vma,
if (vma->vm_flags & VM_DENYWRITE)
atomic_inc(&file->f_dentry->d_inode->i_writecount);
- if (vma->vm_flags & VM_SHARED)
+ if (unlikely(vma->vm_flags & VM_NONLINEAR))
+ list_del_init(&vma->shared.vm_set.list);
+ else if (vma->vm_flags & VM_SHARED)
vma_prio_tree_remove(vma, &mapping->i_mmap_shared);
else
vma_prio_tree_remove(vma, &mapping->i_mmap);
@@ -262,7 +264,10 @@ static inline void __vma_link_file(struct vm_area_struct *vma)
if (vma->vm_flags & VM_DENYWRITE)
atomic_dec(&file->f_dentry->d_inode->i_writecount);
- if (vma->vm_flags & VM_SHARED)
+ if (unlikely(vma->vm_flags & VM_NONLINEAR))
+ list_add_tail(&vma->shared.vm_set.list,
+ &mapping->i_mmap_nonlinear);
+ else if (vma->vm_flags & VM_SHARED)
vma_prio_tree_insert(vma, &mapping->i_mmap_shared);
else
vma_prio_tree_insert(vma, &mapping->i_mmap);
@@ -339,10 +344,10 @@ void vma_adjust(struct vm_area_struct *vma, unsigned long start,
if (file) {
mapping = file->f_mapping;
- if (vma->vm_flags & VM_SHARED)
- root = &mapping->i_mmap_shared;
- else
+ if (!(vma->vm_flags & VM_SHARED))
root = &mapping->i_mmap;
+ else if (!(vma->vm_flags & VM_NONLINEAR))
+ root = &mapping->i_mmap_shared;
spin_lock(&mapping->i_mmap_lock);
}
spin_lock(&mm->page_table_lock);
diff --git a/mm/prio_tree.c b/mm/prio_tree.c
index eb1855004a2369..2bf914699f458d 100644
--- a/mm/prio_tree.c
+++ b/mm/prio_tree.c
@@ -530,6 +530,7 @@ repeat:
/*
* Add a new vma known to map the same set of pages as the old vma:
* useful for fork's dup_mmap as well as vma_prio_tree_insert below.
+ * Note that it just happens to work correctly on i_mmap_nonlinear too.
*/
void vma_prio_tree_add(struct vm_area_struct *vma, struct vm_area_struct *old)
{
diff --git a/mm/rmap.c b/mm/rmap.c
index f9a5deca8ad8da..32e76a18a6cb59 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -335,10 +335,6 @@ static inline int page_referenced_file(struct page *page)
while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
&iter, pgoff, pgoff)) != NULL) {
- if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
- failed++;
- continue;
- }
if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) {
referenced++;
goto out;
@@ -352,8 +348,8 @@ static inline int page_referenced_file(struct page *page)
}
}
- /* Hmm, but what of the nonlinears which pgoff,pgoff skipped? */
- WARN_ON(!failed);
+ if (list_empty(&mapping->i_mmap_nonlinear))
+ WARN_ON(!failed);
out:
spin_unlock(&mapping->i_mmap_lock);
return referenced;
@@ -757,8 +753,6 @@ static inline int try_to_unmap_file(struct page *page)
while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
&iter, pgoff, pgoff)) != NULL) {
- if (unlikely(vma->vm_flags & VM_NONLINEAR))
- continue;
if (vma->vm_mm->rss) {
address = vma_address(vma, pgoff);
ret = try_to_unmap_one(page,
@@ -768,10 +762,12 @@ static inline int try_to_unmap_file(struct page *page)
}
}
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
- &iter, 0, ULONG_MAX)) != NULL) {
- if (VM_NONLINEAR != (vma->vm_flags &
- (VM_NONLINEAR|VM_LOCKED|VM_RESERVED)))
+ if (list_empty(&mapping->i_mmap_nonlinear))
+ goto out;
+
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
+ shared.vm_set.list) {
+ if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
continue;
cursor = (unsigned long) vma->vm_private_data;
if (cursor > max_nl_cursor)
@@ -799,10 +795,9 @@ static inline int try_to_unmap_file(struct page *page)
max_nl_cursor = CLUSTER_SIZE;
do {
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
- &iter, 0, ULONG_MAX)) != NULL) {
- if (VM_NONLINEAR != (vma->vm_flags &
- (VM_NONLINEAR|VM_LOCKED|VM_RESERVED)))
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
+ shared.vm_set.list) {
+ if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
continue;
cursor = (unsigned long) vma->vm_private_data;
while (vma->vm_mm->rss &&
@@ -831,11 +826,9 @@ static inline int try_to_unmap_file(struct page *page)
* in locked vmas). Reset cursor on all unreserved nonlinear
* vmas, now forgetting on which ones it had fallen behind.
*/
- vma = NULL; /* it is already, but above loop might change */
- while ((vma = vma_prio_tree_next(vma, &mapping->i_mmap_shared,
- &iter, 0, ULONG_MAX)) != NULL) {
- if ((vma->vm_flags & (VM_NONLINEAR|VM_RESERVED)) ==
- VM_NONLINEAR)
+ list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
+ shared.vm_set.list) {
+ if (!(vma->vm_flags & VM_RESERVED))
vma->vm_private_data = 0;
}
relock: