diff options

author:    William Lee Irwin III <wli@holomorphy.com>   2004-08-26 20:36:15 -0700
committer: Linus Torvalds <torvalds@ppc970.osdl.org>    2004-08-26 20:36:15 -0700
commit:    6ac0a8d7d5033027e0ae999525d11750ee4337ca (patch)
tree:      2e5f5eb30e6f5946a877fc2455db3355846aea84 /mm
parent:    384aabda4b2051245ce44df0ecb538617644e32e (diff)
download:  history-6ac0a8d7d5033027e0ae999525d11750ee4337ca.tar.gz
[PATCH] O(1) proc_pid_statm()
Merely removing down_read(&mm->mmap_sem) from task_vsize() is too
half-assed to let stand. The following patch removes the vma iteration
as well as the down_read(&mm->mmap_sem) from both task_mem() and
task_statm() and callers for the CONFIG_MMU=y case in favor of
accounting the various stats reported at the times of vma creation,
destruction, and modification. Unlike the 2.4.x patches of the same
name, this has no per-pte-modification overhead whatsoever.
This patch quashes end user complaints of top(1) being slow as well as
kernel hacker complaints of per-pte accounting overhead simultaneously.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')

-rw-r--r--  mm/mmap.c     | 26
-rw-r--r--  mm/mprotect.c |  2
-rw-r--r--  mm/mremap.c   |  3

3 files changed, 31 insertions, 0 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index c0c6e494144539..0857b7ac2f034e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -729,6 +729,28 @@ none:
 	return NULL;
 }
 
+void __vm_stat_account(struct mm_struct *mm, unsigned long flags,
+						struct file *file, long pages)
+{
+	const unsigned long stack_flags
+		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+
+#ifdef CONFIG_HUGETLB
+	if (flags & VM_HUGETLB) {
+		if (!(flags & VM_DONTCOPY))
+			mm->shared_vm += pages;
+		return;
+	}
+#endif /* CONFIG_HUGETLB */
+
+	if (file)
+		mm->shared_vm += pages;
+	else if (flags & stack_flags)
+		mm->stack_vm += pages;
+	if (flags & VM_EXEC)
+		mm->exec_vm += pages;
+}
+
 /*
  * The caller must hold down_write(current->mm->mmap_sem).
  */
@@ -987,6 +1009,7 @@ out:
 			pgoff, flags & MAP_NONBLOCK);
 		down_write(&mm->mmap_sem);
 	}
+	__vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
 	return addr;
 
 unmap_and_free_vma:
@@ -1330,6 +1353,7 @@ int expand_stack(struct vm_area_struct * vma, unsigned long address)
 	vma->vm_mm->total_vm += grow;
 	if (vma->vm_flags & VM_LOCKED)
 		vma->vm_mm->locked_vm += grow;
+	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
 	anon_vma_unlock(vma);
 	return 0;
 }
@@ -1392,6 +1416,7 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
 	vma->vm_mm->total_vm += grow;
 	if (vma->vm_flags & VM_LOCKED)
 		vma->vm_mm->locked_vm += grow;
+	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file, grow);
 	anon_vma_unlock(vma);
 	return 0;
 }
@@ -1497,6 +1522,7 @@ static void unmap_vma(struct mm_struct *mm, struct vm_area_struct *area)
 	area->vm_mm->total_vm -= len >> PAGE_SHIFT;
 	if (area->vm_flags & VM_LOCKED)
 		area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
+	vm_stat_unaccount(area);
 	area->vm_mm->unmap_area(area);
 	remove_vm_struct(area);
 }
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 88041d46ad0b6b..67d02dc0ea0403 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -175,9 +175,11 @@ success:
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
+	vm_stat_unaccount(vma);
 	vma->vm_flags = newflags;
 	vma->vm_page_prot = newprot;
 	change_protection(vma, start, end, newprot);
+	vm_stat_account(vma);
 	return 0;
 
 fail:
diff --git a/mm/mremap.c b/mm/mremap.c
index 6be63314688f1e..0b0c0e27ecb5ab 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -224,6 +224,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	}
 
 	mm->total_vm += new_len >> PAGE_SHIFT;
+	__vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		mm->locked_vm += new_len >> PAGE_SHIFT;
 		if (new_len > old_len)
@@ -360,6 +361,8 @@ unsigned long do_mremap(unsigned long addr,
 				   addr + new_len, vma->vm_pgoff, NULL);
 		current->mm->total_vm += pages;
+		__vm_stat_account(vma->vm_mm, vma->vm_flags,
+							vma->vm_file, pages);
 		if (vma->vm_flags & VM_LOCKED) {
 			current->mm->locked_vm += pages;
 			make_pages_present(addr + old_len,