diff options
author | Hugh Dickins <hugh@veritas.com> | 2004-06-04 20:52:28 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2004-06-04 20:52:28 -0700 |
commit | a888f1f5a259fc9ee58d207560af30eb81dc220b (patch) | |
tree | 14d60889a2a89438553f3820061ec0ada533b564 /mm | |
parent | e495dd356078eacdc49998ec147e3df227c11ad8 (diff) | |
download | history-a888f1f5a259fc9ee58d207560af30eb81dc220b.tar.gz |
[PATCH] mm: vma_adjust insert file earlier
For those arches (arm and parisc) which use the i_mmap tree to implement
flush_dcache_page, during split_vma there's a small window in vma_adjust when
flush_dcache_mmap_lock is dropped, and pages in the split-off part of the vma
might for an instant be invisible to __flush_dcache_page.
Though we're more solid there than ever before, I guess it's a bad idea to
leave that window: so (with regret, it was structurally nicer before) take
__vma_link_file (and vma_prio_tree_init) out of __vma_link.
vma_prio_tree_init (which NULLs a few fields) is actually only needed when
copying a vma, not when a new one has just been memset to 0.
__insert_vm_struct is used by nothing but vma_adjust's split_vma case:
comment it accordingly, remove its mark_mm_hugetlb (it can never create
a new kind of vma) and its validate_mm (another follows immediately).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/mmap.c | 24 |
1 files changed, 17 insertions, 7 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index c77fec7b00c4e8..d6fd2fe133197d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -293,10 +293,8 @@ __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct vm_area_struct *prev, struct rb_node **rb_link,
 	struct rb_node *rb_parent)
 {
-	vma_prio_tree_init(vma);
 	__vma_link_list(mm, vma, prev, rb_parent);
 	__vma_link_rb(mm, vma, rb_link, rb_parent);
-	__vma_link_file(vma);
 	__anon_vma_link(vma);
 }
 
@@ -312,7 +310,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (mapping)
 		spin_lock(&mapping->i_mmap_lock);
 	anon_vma_lock(vma);
+
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
+	__vma_link_file(vma);
+
 	anon_vma_unlock(vma);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
@@ -323,9 +324,9 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /*
- * Insert vm structure into process list sorted by address and into the
- * inode's i_mmap tree. The caller should hold mm->mmap_sem and
- * ->f_mappping->i_mmap_lock if vm_file is non-NULL.
+ * Helper for vma_adjust in the split_vma insert case:
+ * insert vm structure into list and rbtree and anon_vma,
+ * but it has already been inserted into prio_tree earlier.
  */
 static void
 __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
@@ -337,9 +338,7 @@ __insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 	if (__vma && __vma->vm_start < vma->vm_end)
 		BUG();
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
-	mark_mm_hugetlb(mm, vma);
 	mm->map_count++;
-	validate_mm(mm);
 }
 
 static inline void
@@ -403,6 +402,15 @@ again:			remove_next = 1 + (end > next->vm_end);
 		if (!(vma->vm_flags & VM_NONLINEAR))
 			root = &mapping->i_mmap;
 		spin_lock(&mapping->i_mmap_lock);
+		if (insert) {
+			/*
+			 * Put into prio_tree now, so instantiated pages
+			 * are visible to arm/parisc __flush_dcache_page
+			 * throughout; but we cannot insert into address
+			 * space until vma start or end is updated.
+			 */
+			__vma_link_file(insert);
+		}
 	}
 
 	/*
@@ -1463,6 +1471,7 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
 
 	/* most fields are the same, copy all, and then fixup */
 	*new = *vma;
+	vma_prio_tree_init(new);
 
 	if (new_below)
 		new->vm_end = addr;
@@ -1775,6 +1784,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
 	if (new_vma) {
 		*new_vma = *vma;
+		vma_prio_tree_init(new_vma);
 		pol = mpol_copy(vma_policy(vma));
 		if (IS_ERR(pol)) {
 			kmem_cache_free(vm_area_cachep, new_vma);