diff -purN -X /home/mbligh/.diff.exclude 230-percpu_real_loadavg/mm/mmap.c 240-nolock/mm/mmap.c
--- 230-percpu_real_loadavg/mm/mmap.c	2003-11-24 16:26:52.000000000 -0800
+++ 240-nolock/mm/mmap.c	2003-11-24 16:33:53.000000000 -0800
@@ -268,9 +268,7 @@ static void vma_link(struct mm_struct *m
 
 	if (mapping)
 		down(&mapping->i_shared_sem);
-	spin_lock(&mm->page_table_lock);
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
-	spin_unlock(&mm->page_table_lock);
 	if (mapping)
 		up(&mapping->i_shared_sem);
 
@@ -322,12 +320,10 @@ static inline int is_mergeable_vma(struc
 /* requires that the relevant i_shared_sem be held by the caller */
 static void move_vma_start(struct vm_area_struct *vma, unsigned long addr)
 {
-	spinlock_t *lock = &vma->vm_mm->page_table_lock;
 	struct inode *inode = NULL;
 
 	if (vma->vm_file)
 		inode = vma->vm_file->f_dentry->d_inode;
-	spin_lock(lock);
 	if (inode)
 		__remove_shared_vm_struct(vma, inode);
 	/* If no vm_file, perhaps we should always keep vm_pgoff at 0?? */
@@ -335,7 +331,6 @@ static void move_vma_start(struct vm_are
 	vma->vm_start = addr;
 	if (inode)
 		__vma_link_file(vma);
-	spin_unlock(lock);
 }
 
 /*
@@ -414,7 +409,6 @@ static int vma_merge(struct mm_struct *m
 			is_mergeable_vma(prev, file, vm_flags) &&
 			can_vma_merge_after(prev, vm_flags, file, pgoff)) {
 		struct vm_area_struct *next;
-		spinlock_t *lock = &mm->page_table_lock;
 		int need_up = 0;
 
 		if (unlikely(file && prev->vm_next &&
@@ -422,7 +416,6 @@ static int vma_merge(struct mm_struct *m
 			down(i_shared_sem);
 			need_up = 1;
 		}
-		spin_lock(lock);
 		prev->vm_end = end;
 
 		/*
@@ -435,7 +428,6 @@ static int vma_merge(struct mm_struct *m
 			prev->vm_end = next->vm_end;
 			__vma_unlink(mm, next, prev);
 			__remove_shared_vm_struct(next, inode);
-			spin_unlock(lock);
 			if (need_up)
 				up(i_shared_sem);
 			if (file)
@@ -445,7 +437,6 @@ static int vma_merge(struct mm_struct *m
 			kmem_cache_free(vm_area_cachep, next);
 			return 1;
 		}
-		spin_unlock(lock);
 		if (need_up)
 			up(i_shared_sem);
 		return 1;
@@ -917,19 +908,16 @@ int expand_stack(struct vm_area_struct *
 	 */
 	address += 4 + PAGE_SIZE - 1;
 	address &= PAGE_MASK;
-	spin_lock(&vma->vm_mm->page_table_lock);
 	grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
 	/* Overcommit.. */
 	if (security_vm_enough_memory(grow)) {
-		spin_unlock(&vma->vm_mm->page_table_lock);
 		return -ENOMEM;
 	}
 
 	if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
 			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
 			current->rlim[RLIMIT_AS].rlim_cur) {
-		spin_unlock(&vma->vm_mm->page_table_lock);
 		vm_unacct_memory(grow);
 		return -ENOMEM;
 	}
@@ -937,7 +925,6 @@ int expand_stack(struct vm_area_struct *
 	vma->vm_mm->total_vm += grow;
 	if (vma->vm_flags & VM_LOCKED)
 		vma->vm_mm->locked_vm += grow;
-	spin_unlock(&vma->vm_mm->page_table_lock);
 	return 0;
 }
 
@@ -971,19 +958,16 @@ int expand_stack(struct vm_area_struct *
 	 * the spinlock only before relocating the vma range ourself.
 	 */
 	address &= PAGE_MASK;
-	spin_lock(&vma->vm_mm->page_table_lock);
 	grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
 	/* Overcommit.. */
 	if (security_vm_enough_memory(grow)) {
-		spin_unlock(&vma->vm_mm->page_table_lock);
 		return -ENOMEM;
 	}
 
 	if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
 			((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
 			current->rlim[RLIMIT_AS].rlim_cur) {
-		spin_unlock(&vma->vm_mm->page_table_lock);
 		vm_unacct_memory(grow);
 		return -ENOMEM;
 	}
@@ -992,7 +976,6 @@ int expand_stack(struct vm_area_struct *
 	vma->vm_mm->total_vm += grow;
 	if (vma->vm_flags & VM_LOCKED)
 		vma->vm_mm->locked_vm += grow;
-	spin_unlock(&vma->vm_mm->page_table_lock);
 	return 0;
 }
 
@@ -1155,8 +1138,6 @@ static void unmap_region(struct mm_struc
 /*
  * Create a list of vma's touched by the unmap, removing them from the mm's
  * vma list as we go..
- *
- * Called with the page_table_lock held.
  */
 static void detach_vmas_to_be_unmapped(struct mm_struct *mm,
 	struct vm_area_struct *vma,
@@ -1295,8 +1276,8 @@ int do_munmap(struct mm_struct *mm, unsi
 	/*
 	 * Remove the vma's, and unmap the actual pages
 	 */
-	spin_lock(&mm->page_table_lock);
 	detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
+	spin_lock(&mm->page_table_lock);
 	unmap_region(mm, mpnt, prev, start, end);
 	spin_unlock(&mm->page_table_lock);
 
diff -purN -X /home/mbligh/.diff.exclude 230-percpu_real_loadavg/mm/swapfile.c 240-nolock/mm/swapfile.c
--- 230-percpu_real_loadavg/mm/swapfile.c	2003-11-24 16:26:52.000000000 -0800
+++ 240-nolock/mm/swapfile.c	2003-11-24 16:33:53.000000000 -0800
@@ -499,6 +499,7 @@ static int unuse_process(struct mm_struc
 	/*
 	 * Go through process' page directory.
 	 */
+	down_read(&mm->mmap_sem);
 	spin_lock(&mm->page_table_lock);
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		pgd_t * pgd = pgd_offset(mm, vma->vm_start);
@@ -506,6 +507,7 @@ static int unuse_process(struct mm_struc
 			break;
 	}
 	spin_unlock(&mm->page_table_lock);
+	up_read(&mm->mmap_sem);
 	pte_chain_free(pte_chain);
 	return 0;
 }
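The locking rule this patch leaves behind: the mm's vma list is guarded by
mmap_sem alone (modifiers hold it exclusively, readers at least for read),
and page_table_lock shrinks to covering page-table manipulation proper.
Below is a minimal user-space sketch of that discipline using pthread
primitives as stand-ins; toy_do_munmap, toy_unuse_process, and everything
else in it are hypothetical illustrations, not kernel code.

	/* Build with: cc -pthread sketch.c */
	#include <pthread.h>
	#include <stdio.h>

	/* Stand-ins for mm->mmap_sem and mm->page_table_lock. */
	static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
	static pthread_mutex_t page_table_lock = PTHREAD_MUTEX_INITIALIZER;

	static int vma_list_length = 3;	/* toy stand-in for the vma list */

	/* Like do_munmap() after the patch: list surgery under mmap_sem
	 * held for write; page_table_lock only around the table teardown. */
	static void toy_do_munmap(void)
	{
		pthread_rwlock_wrlock(&mmap_sem);
		vma_list_length--;	/* detach_vmas_to_be_unmapped() */
		pthread_mutex_lock(&page_table_lock);
		/* unmap_region(): walk and free page tables here */
		pthread_mutex_unlock(&page_table_lock);
		pthread_rwlock_unlock(&mmap_sem);
	}

	/* Like unuse_process() after the patch: a vma-list reader must now
	 * take mmap_sem for read, since page_table_lock no longer covers
	 * the list. */
	static void toy_unuse_process(void)
	{
		pthread_rwlock_rdlock(&mmap_sem);
		pthread_mutex_lock(&page_table_lock);
		printf("walking %d vmas\n", vma_list_length);
		pthread_mutex_unlock(&page_table_lock);
		pthread_rwlock_unlock(&mmap_sem);
	}

	int main(void)
	{
		toy_unuse_process();
		toy_do_munmap();
		toy_unuse_process();
		return 0;
	}

Note how the do_munmap hunk above reorders the same way: detaching vmas
moves outside page_table_lock because the write-held mmap_sem already
serializes it, while the spinlock is retaken just for unmap_region.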