aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorJohn Levon <levon@movementarian.org>2004-08-26 20:34:28 -0700
committerLinus Torvalds <torvalds@ppc970.osdl.org>2004-08-26 20:34:28 -0700
commitddebb8b659b9b56c29cf6a8db3f100a9a4b00ef3 (patch)
tree17c3d056d02cba7d37edcdb4853923bbff19f8ca /mm
parent6e7ca99d18d0b57aecbd760a422ab42ea256f634 (diff)
downloadhistory-ddebb8b659b9b56c29cf6a8db3f100a9a4b00ef3.tar.gz
[PATCH] improve OProfile on many-way systems
Anton prompted me to get this patch merged. It changes the core buffer sync algorithm of OProfile to avoid global locks wherever possible. Anton tested an earlier version of this patch with some success. I've lightly tested this applied against 2.6.8.1-mm3 on my two-way machine. The changes also have the happy side-effect of losing less samples after munmap operations, and removing the blind spot of tasks exiting inside the kernel. Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/mmap.c8
1 file changed, 2 insertions, 6 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index e2b929e5b13210..c0c6e494144539 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1650,10 +1650,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
if (mpnt->vm_start >= end)
return 0;
- /* Something will probably happen, so notify. */
- if (mpnt->vm_file && (mpnt->vm_flags & VM_EXEC))
- profile_exec_unmap(mm);
-
/*
* If we need to split any vma, do it now to save pain later.
*
@@ -1696,6 +1692,8 @@ asmlinkage long sys_munmap(unsigned long addr, size_t len)
int ret;
struct mm_struct *mm = current->mm;
+ profile_munmap(addr);
+
down_write(&mm->mmap_sem);
ret = do_munmap(mm, addr, len);
up_write(&mm->mmap_sem);
@@ -1798,8 +1796,6 @@ void exit_mmap(struct mm_struct *mm)
struct vm_area_struct *vma;
unsigned long nr_accounted = 0;
- profile_exit_mmap(mm);
-
lru_add_drain();
spin_lock(&mm->page_table_lock);