diff -urpN -X /home/fletch/.diff.exclude 810-percpu_real_loadavg/mm/mmap.c 831-nolock/mm/mmap.c
--- 810-percpu_real_loadavg/mm/mmap.c	Wed Aug 13 20:29:24 2003
+++ 831-nolock/mm/mmap.c	Wed Aug 13 20:51:42 2003
@@ -291,9 +291,7 @@ static void vma_link(struct mm_struct *m
 
 	if (mapping)
 		down(&mapping->i_shared_sem);
-	spin_lock(&mm->page_table_lock);
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
-	spin_unlock(&mm->page_table_lock);
 	if (mapping)
 		up(&mapping->i_shared_sem);
 
@@ -324,14 +322,12 @@ static inline int is_mergeable_vma(struc
 
 static void move_vma_start(struct vm_area_struct *vma, unsigned long addr)
 {
-	spinlock_t *lock = &vma->vm_mm->page_table_lock;
 	struct inode *inode = NULL;
 
 	if (vma->vm_file) {
 		inode = vma->vm_file->f_dentry->d_inode;
 		down(&inode->i_mapping->i_shared_sem);
 	}
-	spin_lock(lock);
 	if (inode)
 		__remove_shared_vm_struct(vma, inode);
 	/* If no vm_file, perhaps we should always keep vm_pgoff at 0?? */
@@ -341,7 +337,6 @@ static void move_vma_start(struct vm_are
 		__vma_link_file(vma);
 		up(&inode->i_mapping->i_shared_sem);
 	}
-	spin_unlock(lock);
 }
 
 /*
@@ -415,7 +410,6 @@ static int vma_merge(struct mm_struct *m
 			is_mergeable_vma(prev, file, vm_flags) &&
 			can_vma_merge_after(prev, vm_flags, file, pgoff)) {
 		struct vm_area_struct *next;
-		spinlock_t *lock = &mm->page_table_lock;
 		struct inode *inode = file ? file->f_dentry->d_inode : NULL;
 		int need_up = 0;
 
@@ -424,7 +418,6 @@ static int vma_merge(struct mm_struct *m
 			down(&inode->i_mapping->i_shared_sem);
 			need_up = 1;
 		}
-		spin_lock(lock);
 		prev->vm_end = end;
 
 		/*
@@ -437,7 +430,6 @@ static int vma_merge(struct mm_struct *m
 			prev->vm_end = next->vm_end;
 			__vma_unlink(mm, next, prev);
 			__remove_shared_vm_struct(next, inode);
-			spin_unlock(lock);
 			if (need_up)
 				up(&inode->i_mapping->i_shared_sem);
 			if (file)
@@ -447,7 +439,6 @@ static int vma_merge(struct mm_struct *m
 			kmem_cache_free(vm_area_cachep, next);
 			return 1;
 		}
-		spin_unlock(lock);
 		if (need_up)
 			up(&inode->i_mapping->i_shared_sem);
 		return 1;
@@ -909,19 +900,16 @@ int expand_stack(struct vm_area_struct *
 	 */
 	address += 4 + PAGE_SIZE - 1;
 	address &= PAGE_MASK;
-	spin_lock(&vma->vm_mm->page_table_lock);
 	grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
 	/* Overcommit.. */
 	if (security_vm_enough_memory(grow)) {
-		spin_unlock(&vma->vm_mm->page_table_lock);
 		return -ENOMEM;
 	}
 
 	if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
 			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
 			current->rlim[RLIMIT_AS].rlim_cur) {
-		spin_unlock(&vma->vm_mm->page_table_lock);
 		vm_unacct_memory(grow);
 		return -ENOMEM;
 	}
@@ -929,7 +917,6 @@ int expand_stack(struct vm_area_struct *
 	vma->vm_mm->total_vm += grow;
 	if (vma->vm_flags & VM_LOCKED)
 		vma->vm_mm->locked_vm += grow;
-	spin_unlock(&vma->vm_mm->page_table_lock);
 	return 0;
 }
 
@@ -963,19 +950,16 @@ int expand_stack(struct vm_area_struct *
 	 * the spinlock only before relocating the vma range ourself.
	 */
 	address &= PAGE_MASK;
-	spin_lock(&vma->vm_mm->page_table_lock);
 	grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
 	/* Overcommit.. */
 	if (security_vm_enough_memory(grow)) {
-		spin_unlock(&vma->vm_mm->page_table_lock);
 		return -ENOMEM;
 	}
 
 	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
 			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
 			current->rlim[RLIMIT_AS].rlim_cur) {
-		spin_unlock(&vma->vm_mm->page_table_lock);
 		vm_unacct_memory(grow);
 		return -ENOMEM;
 	}
@@ -984,7 +968,6 @@ int expand_stack(struct vm_area_struct *
 	vma->vm_mm->total_vm += grow;
 	if (vma->vm_flags & VM_LOCKED)
 		vma->vm_mm->locked_vm += grow;
-	spin_unlock(&vma->vm_mm->page_table_lock);
 	return 0;
 }
 
@@ -1147,8 +1130,6 @@ static void unmap_region(struct mm_struc
 /*
  * Create a list of vma's touched by the unmap, removing them from the mm's
  * vma list as we go..
- *
- * Called with the page_table_lock held.
  */
 static void detach_vmas_to_be_unmapped(struct mm_struct *mm,
 	struct vm_area_struct *vma,
@@ -1271,8 +1252,8 @@ int do_munmap(struct mm_struct *mm, unsi
 	/*
 	 * Remove the vma's, and unmap the actual pages
 	 */
-	spin_lock(&mm->page_table_lock);
 	detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
+	spin_lock(&mm->page_table_lock);
 	unmap_region(mm, mpnt, prev, start, end);
 	spin_unlock(&mm->page_table_lock);
 
diff -urpN -X /home/fletch/.diff.exclude 810-percpu_real_loadavg/mm/swapfile.c 831-nolock/mm/swapfile.c
--- 810-percpu_real_loadavg/mm/swapfile.c	Wed Aug 13 20:29:24 2003
+++ 831-nolock/mm/swapfile.c	Wed Aug 13 20:51:42 2003
@@ -498,6 +498,7 @@ static int unuse_process(struct mm_struc
 	/*
 	 * Go through process' page directory.
 	 */
+	down_read(&mm->mmap_sem);
 	spin_lock(&mm->page_table_lock);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		pgd_t * pgd = pgd_offset(mm, vma->vm_start);
@@ -505,6 +506,7 @@ static int unuse_process(struct mm_struc
 			break;
 	}
 	spin_unlock(&mm->page_table_lock);
+	up_read(&mm->mmap_sem);
 	pte_chain_free(pte_chain);
 	return 0;
 }
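
The patch above drops mm->page_table_lock from the VMA-list update paths (vma_link(), move_vma_start(), vma_merge(), expand_stack(), detach_vmas_to_be_unmapped()), relying on the fact that all of these already run under down_write(&mm->mmap_sem); correspondingly, unuse_process() now takes mmap_sem for read before walking the VMA list. The sketch below illustrates that locking rule in userspace, for illustration only: it is not kernel code, a pthread_rwlock_t stands in for mm->mmap_sem, and the struct and function names (mm, vma, vma_link(), walk_vmas()) are hypothetical stand-ins mirroring the patch, not kernel APIs.

/*
 * Illustration only -- NOT kernel code.  A pthread_rwlock_t plays the
 * role of mm->mmap_sem: writers that relink the list hold it
 * exclusively, so no inner spinlock is needed around the link itself,
 * and readers (like unuse_process() in the patch) hold it shared.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct vma {				/* stand-in for vm_area_struct */
	unsigned long start, end;
	struct vma *next;
};

struct mm {				/* stand-in for mm_struct */
	pthread_rwlock_t mmap_sem;	/* plays mm->mmap_sem */
	struct vma *mmap;		/* head of the VMA list */
};

/* Writer side: caller must hold mmap_sem for write (cf. vma_link()). */
static void vma_link(struct mm *mm, struct vma *vma)
{
	vma->next = mm->mmap;		/* bare list update, no spinlock */
	mm->mmap = vma;
}

/* Reader side: walk the list under the read lock (cf. unuse_process()). */
static void walk_vmas(struct mm *mm)
{
	struct vma *vma;

	pthread_rwlock_rdlock(&mm->mmap_sem);	/* down_read(&mm->mmap_sem) */
	for (vma = mm->mmap; vma; vma = vma->next)
		printf("vma %lx-%lx\n", vma->start, vma->end);
	pthread_rwlock_unlock(&mm->mmap_sem);	/* up_read(&mm->mmap_sem) */
}

int main(void)
{
	struct mm mm = { .mmap = NULL };
	struct vma a = { 0x1000, 0x2000, NULL };

	pthread_rwlock_init(&mm.mmap_sem, NULL);

	pthread_rwlock_wrlock(&mm.mmap_sem);	/* down_write(&mm->mmap_sem) */
	vma_link(&mm, &a);
	pthread_rwlock_unlock(&mm.mmap_sem);	/* up_write(&mm->mmap_sem) */

	walk_vmas(&mm);
	pthread_rwlock_destroy(&mm.mmap_sem);
	return 0;
}

Note that the patch does not drop page_table_lock everywhere: do_munmap() still takes it, but only around unmap_region()'s page-table teardown, after detach_vmas_to_be_unmapped() has already unlinked the VMAs under mmap_sem alone.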