From 3c3f504a6d82cfaf7a82c613c8d8bac6c21b083d Mon Sep 17 00:00:00 2001
From: John Levon
Date: Mon, 13 Sep 2004 19:32:22 -0700
Subject: [PATCH] fix OProfile locking

This makes OProfile use get_task_mm() as discussed. It also fixes up
Anton's previous patch. Zwane soaked this patch all night without
problems.
---
 drivers/oprofile/buffer_sync.c | 37 +++++++++++--------------------------
 drivers/oprofile/cpu_buffer.c  | 12 ++++++------
 drivers/oprofile/cpu_buffer.h  |  4 ++--
 kernel/fork.c                  |  2 ++
 4 files changed, 21 insertions(+), 34 deletions(-)

diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 2e1ab855d9434..884a532432436 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -133,7 +133,7 @@ static struct notifier_block module_load_nb = {
 
 static void end_sync(void)
 {
-	end_cpu_timers();
+	end_cpu_work();
 	/* make sure we don't leak task structs */
 	process_task_mortuary();
 	process_task_mortuary();
@@ -144,7 +144,7 @@ int sync_start(void)
 {
 	int err;
 
-	start_cpu_timers();
+	start_cpu_work();
 
 	err = task_handoff_register(&task_free_nb);
 	if (err)
@@ -339,40 +339,25 @@ static void add_sample(struct mm_struct * mm, struct op_sample * s, int in_kerne
 	}
 }
 
- 
+
 static void release_mm(struct mm_struct * mm)
 {
-	if (mm)
-		up_read(&mm->mmap_sem);
+	if (!mm)
+		return;
+	up_read(&mm->mmap_sem);
+	mmput(mm);
 }
 
-/* Take the task's mmap_sem to protect ourselves from
- * races when we do lookup_dcookie().
- */
 static struct mm_struct * take_tasks_mm(struct task_struct * task)
 {
-	struct mm_struct * mm;
-
-	/* Subtle. We don't need to keep a reference to this task's mm,
-	 * because, for the mm to be freed on another CPU, that would have
-	 * to go through the task exit notifier, which ends up sleeping
-	 * on the buffer_sem we hold, so we end up with mutual exclusion
-	 * anyway.
-	 */
-	task_lock(task);
-	mm = task->mm;
-	task_unlock(task);
-
-	if (mm) {
-		/* needed to walk the task's VMAs */
+	struct mm_struct * mm = get_task_mm(task);
+	if (mm)
 		down_read(&mm->mmap_sem);
-	}
-
 	return mm;
 }
 
- 
- 
+
+
 static inline int is_ctx_switch(unsigned long val)
 {
 	return val == ~0UL;
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 875badfe1cb80..420dc8e830a53 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -30,7 +30,7 @@ struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
 static void wq_sync_buffer(void *);
 
 #define DEFAULT_TIMER_EXPIRE (HZ / 10)
-int timers_enabled;
+int work_enabled;
 
 static void __free_cpu_buffers(int num)
 {
@@ -80,11 +80,11 @@ void free_cpu_buffers(void)
 }
 
 
-void start_cpu_timers(void)
+void start_cpu_work(void)
 {
 	int i;
 
-	timers_enabled = 1;
+	work_enabled = 1;
 
 	for_each_online_cpu(i) {
 		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
@@ -98,11 +98,11 @@ void start_cpu_timers(void)
 }
 
 
-void end_cpu_timers(void)
+void end_cpu_work(void)
 {
 	int i;
 
-	timers_enabled = 0;
+	work_enabled = 0;
 
 	for_each_online_cpu(i) {
 		struct oprofile_cpu_buffer * b = &cpu_buffer[i];
@@ -220,6 +220,6 @@ static void wq_sync_buffer(void * data)
 	sync_buffer(b->cpu);
 
 	/* don't re-add the work if we're shutting down */
-	if (timers_enabled)
+	if (work_enabled)
 		schedule_delayed_work(&b->work, DEFAULT_TIMER_EXPIRE);
 }
diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h
index eeb494c3d6210..23be2013a0c6d 100644
--- a/drivers/oprofile/cpu_buffer.h
+++ b/drivers/oprofile/cpu_buffer.h
@@ -20,8 +20,8 @@ struct task_struct;
 int alloc_cpu_buffers(void);
 void free_cpu_buffers(void);
 
-void start_cpu_timers(void);
-void end_cpu_timers(void);
+void start_cpu_work(void);
+void end_cpu_work(void);
 
 /* CPU buffer is composed of such entries (which are
  * also used for context switch notes)
diff --git a/kernel/fork.c b/kernel/fork.c
index ccd9ec7a786ef..55ad085f064a7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -483,6 +483,7 @@ void mmput(struct mm_struct *mm)
 		mmdrop(mm);
 	}
 }
+EXPORT_SYMBOL_GPL(mmput);
 
 /**
  * get_task_mm - acquire a reference to the task's mm
@@ -514,6 +515,7 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 	task_unlock(task);
 	return mm;
 }
+EXPORT_SYMBOL_GPL(get_task_mm);
 
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
-- 
cgit 1.2.3-korg
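
Note (not part of the patch): the sketch below illustrates the locking
pattern this change adopts, for readers unfamiliar with get_task_mm().
It is a minimal example against the 2.6-era APIs touched above;
walk_one_task() is a hypothetical caller invented for illustration,
standing in for the real consumers, take_tasks_mm() and release_mm()
in buffer_sync.c.

#include <linux/sched.h>
#include <linux/mm.h>

static void walk_one_task(struct task_struct * task)
{
	/* get_task_mm() takes task_lock(task) and, if the task still
	 * has an mm, bumps mm->mm_users before returning it, so the
	 * mm cannot be freed even if the task exits while we use it. */
	struct mm_struct * mm = get_task_mm(task);
	struct vm_area_struct * vma;

	if (!mm)
		return;

	/* The reference keeps the mm alive, but walking the VMA list
	 * still requires mmap_sem held for read. */
	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		/* ... per-VMA work, e.g. resolving vma->vm_file ... */
	}
	up_read(&mm->mmap_sem);

	/* Drop the reference taken by get_task_mm(). Exporting mmput()
	 * (the kernel/fork.c hunk) is what lets a module do this. */
	mmput(mm);
}

Holding a real mm_users reference replaces the old scheme, in which
the deleted comment argued that task_lock() plus mutual exclusion via
buffer_sem made a reference unnecessary.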