From: William Lee Irwin III

With prof_cpu_mask and profile_pc() in hand, the core is now able to perform
all the profile accounting work on behalf of arches.  Consolidate the profile
accounting and convert all arches to call the core function.

Signed-off-by: Andrew Morton
---

 25-akpm/arch/alpha/kernel/irq_impl.h | 27 -------------
 25-akpm/arch/alpha/kernel/smp.c | 4 +-
 25-akpm/arch/alpha/kernel/time.c | 4 +-
 25-akpm/arch/arm/kernel/time.c | 27 -------------
 25-akpm/arch/arm26/kernel/time.c | 24 ------------
 25-akpm/arch/h8300/kernel/time.c | 23 -----------
 25-akpm/arch/i386/kernel/apic.c | 3 -
 25-akpm/arch/i386/mach-voyager/voyager_smp.c | 3 -
 25-akpm/arch/ia64/kernel/time.c | 45 -----------------------
 25-akpm/arch/m68k/kernel/time.c | 22 -----------
 25-akpm/arch/m68knommu/kernel/time.c | 23 +----------
 25-akpm/arch/m68knommu/platform/5307/timers.c | 13 +-----
 25-akpm/arch/mips/kernel/time.c | 19 +-------
 25-akpm/arch/parisc/kernel/time.c | 37 ------------------
 25-akpm/arch/ppc/kernel/time.c | 37 ------------------
 25-akpm/arch/ppc64/kernel/time.c | 41 --------------------
 25-akpm/arch/s390/kernel/time.c | 41 --------------------
 25-akpm/arch/sh/kernel/time.c | 31 +--------------
 25-akpm/arch/sh64/kernel/time.c | 34 -----------------
 25-akpm/arch/sparc/kernel/sun4d_smp.c | 5 --
 25-akpm/arch/sparc/kernel/sun4m_smp.c | 5 --
 25-akpm/arch/sparc/kernel/time.c | 23 -----------
 25-akpm/arch/sparc64/kernel/smp.c | 4 --
 25-akpm/arch/sparc64/kernel/time.c | 16 --------
 25-akpm/arch/v850/kernel/time.c | 23 -----------
 25-akpm/arch/x86_64/kernel/apic.c | 3 -
 25-akpm/arch/x86_64/kernel/time.c | 2 -
 25-akpm/include/asm-i386/hw_irq.h | 42 ---------------------
 25-akpm/include/asm-i386/mach-default/do_timer.h | 2 -
 25-akpm/include/asm-i386/mach-visws/do_timer.h | 2 -
 25-akpm/include/asm-x86_64/hw_irq.h | 33 ----------------
 25-akpm/include/linux/profile.h | 6 +++
 25-akpm/kernel/profile.c | 20 ++++++++++
 25-akpm/kernel/sched.c | 5 --
 34 files changed, 61 insertions(+), 588 deletions(-)

diff -puN arch/alpha/kernel/irq_impl.h~profile_tick arch/alpha/kernel/irq_impl.h
--- 25/arch/alpha/kernel/irq_impl.h~profile_tick	2004-08-09 22:02:03.709065184 -0700
+++ 25-akpm/arch/alpha/kernel/irq_impl.h	2004-08-09 22:02:03.766056520 -0700
@@ -40,30 +40,3 @@ extern struct hw_interrupt_type i8259a_i
 extern void init_i8259a_irqs(void);
 extern void handle_irq(int irq, struct pt_regs * regs);
-
-static inline void
-alpha_do_profile(unsigned long pc)
-{
-	extern char _stext;
-
-	if (!prof_buffer)
-		return;
-
-	/*
-	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
-	 * (default is all CPUs.)
-	 */
-	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
-		return;
-
-	pc -= (unsigned long) &_stext;
-	pc >>= prof_shift;
-	/*
-	 * Don't ignore out-of-bounds PC values silently,
-	 * put them into the last histogram slot, so if
-	 * present, they will show up as a sharp peak.
-	 */
-	if (pc > prof_len - 1)
-		pc = prof_len - 1;
-	atomic_inc((atomic_t *)&prof_buffer[pc]);
-}
diff -puN arch/alpha/kernel/smp.c~profile_tick arch/alpha/kernel/smp.c
--- 25/arch/alpha/kernel/smp.c~profile_tick	2004-08-09 22:02:03.711064880 -0700
+++ 25-akpm/arch/alpha/kernel/smp.c	2004-08-09 22:02:03.766056520 -0700
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -599,8 +600,7 @@ smp_percpu_timer_interrupt(struct pt_reg
 	struct cpuinfo_alpha *data = &cpu_data[cpu];

 	/* Record kernel PC.  */
-	if (!user)
-		alpha_do_profile(regs->pc);
+	profile_tick(CPU_PROFILING, regs);

 	if (!--data->prof_counter) {
 		/* We need to make like a normal interrupt -- otherwise
diff -puN arch/alpha/kernel/time.c~profile_tick arch/alpha/kernel/time.c
--- 25/arch/alpha/kernel/time.c~profile_tick	2004-08-09 22:02:03.712064728 -0700
+++ 25-akpm/arch/alpha/kernel/time.c	2004-08-09 22:02:03.767056368 -0700
@@ -41,6 +41,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -118,8 +119,7 @@ irqreturn_t timer_interrupt(int irq, voi

 #ifndef CONFIG_SMP
 	/* Not SMP, do kernel PC profiling here. */
-	if (!user_mode(regs))
-		alpha_do_profile(regs->pc);
+	profile_tick(CPU_PROFILING, regs);
 #endif

 	write_seqlock(&xtime_lock);
diff -puN arch/arm26/kernel/time.c~profile_tick arch/arm26/kernel/time.c
--- 25/arch/arm26/kernel/time.c~profile_tick	2004-08-09 22:02:03.718063816 -0700
+++ 25-akpm/arch/arm26/kernel/time.c	2004-08-09 22:02:03.768056216 -0700
@@ -67,28 +67,6 @@ static unsigned long dummy_gettimeoffset
  */
 unsigned long (*gettimeoffset)(void) = dummy_gettimeoffset;

-/*
- * Handle kernel profile stuff...
- */
-static inline void do_profile(struct pt_regs *regs)
-{
-	if (!user_mode(regs) &&
-	    prof_buffer &&
-	    current->pid) {
-		unsigned long pc = instruction_pointer(regs);
-		extern int _stext;
-
-		pc -= (unsigned long)&_stext;
-
-		pc >>= prof_shift;
-
-		if (pc >= prof_len)
-			pc = prof_len - 1;
-
-		prof_buffer[pc] += 1;
-	}
-}
-
 static unsigned long next_rtc_update;

 /*
@@ -189,7 +167,7 @@ static irqreturn_t timer_interrupt(int i
 {
 	do_timer(regs);
 	do_set_rtc(); //FIME - EVERY timer IRQ?
-	do_profile(regs);
+	profile_tick(CPU_PROFILING, regs);

 	return IRQ_HANDLED; //FIXME - is this right?
 }
diff -puN arch/arm/kernel/time.c~profile_tick arch/arm/kernel/time.c
--- 25/arch/arm/kernel/time.c~profile_tick	2004-08-09 22:02:03.720063512 -0700
+++ 25-akpm/arch/arm/kernel/time.c	2004-08-09 22:02:03.768056216 -0700
@@ -79,31 +79,6 @@ unsigned long long __attribute__((weak))
 	return (unsigned long long)jiffies * (1000000000 / HZ);
 }

-/*
- * Handle kernel profile stuff...
- */
-static inline void do_profile(struct pt_regs *regs)
-{
-
-	profile_hook(regs);
-
-	if (!user_mode(regs) &&
-	    prof_buffer &&
-	    current->pid) {
-		unsigned long pc = instruction_pointer(regs);
-		extern int _stext;
-
-		pc -= (unsigned long)&_stext;
-
-		pc >>= prof_shift;
-
-		if (pc >= prof_len)
-			pc = prof_len - 1;
-
-		prof_buffer[pc] += 1;
-	}
-}
-
 static unsigned long next_rtc_update;

 /*
@@ -317,7 +292,7 @@ EXPORT_SYMBOL(do_settimeofday);

 void timer_tick(struct pt_regs *regs)
 {
-	do_profile(regs);
+	profile_tick(CPU_PROFILING, regs);
 	do_leds();
 	do_set_rtc();
 	do_timer(regs);
diff -puN arch/h8300/kernel/time.c~profile_tick arch/h8300/kernel/time.c
--- 25/arch/h8300/kernel/time.c~profile_tick	2004-08-09 22:02:03.721063360 -0700
+++ 25-akpm/arch/h8300/kernel/time.c	2004-08-09 22:02:03.769056064 -0700
@@ -36,24 +36,6 @@ u64 jiffies_64;

 EXPORT_SYMBOL(jiffies_64);

-static inline void do_profile (unsigned long pc)
-{
-	if (prof_buffer && current->pid) {
-		extern int _stext;
-		pc -= (unsigned long) &_stext;
-		pc >>= prof_shift;
-		if (pc < prof_len)
-			++prof_buffer[pc];
-		else
-		/*
-		 * Don't ignore out-of-bounds PC values silently,
-		 * put them into the last histogram slot, so if
-		 * present, they will show up as a sharp peak.
-		 */
-			++prof_buffer[prof_len-1];
-	}
-}
-
 /*
  * timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
@@ -64,10 +46,7 @@ static void timer_interrupt(int irq, voi
 	platform_timer_eoi();
 	do_timer(regs);
-
-	if (!user_mode(regs))
-		do_profile(regs->pc);
-
+	profile_tick(CPU_PROFILING, regs);
 }

 void time_init(void)
diff -puN arch/i386/kernel/apic.c~profile_tick arch/i386/kernel/apic.c
--- 25/arch/i386/kernel/apic.c~profile_tick	2004-08-09 22:02:03.723063056 -0700
+++ 25-akpm/arch/i386/kernel/apic.c	2004-08-09 22:02:03.769056064 -0700
@@ -1072,8 +1072,7 @@ inline void smp_local_timer_interrupt(st
 {
 	int cpu = smp_processor_id();

-	x86_do_profile(regs);
-
+	profile_tick(CPU_PROFILING, regs);
 	if (--per_cpu(prof_counter, cpu) <= 0) {
 		/*
 		 * The multiplier may have changed since the last time we got
diff -puN arch/i386/mach-voyager/voyager_smp.c~profile_tick arch/i386/mach-voyager/voyager_smp.c
--- 25/arch/i386/mach-voyager/voyager_smp.c~profile_tick	2004-08-09 22:02:03.724062904 -0700
+++ 25-akpm/arch/i386/mach-voyager/voyager_smp.c	2004-08-09 22:02:03.771055760 -0700
@@ -1287,8 +1287,7 @@ smp_local_timer_interrupt(struct pt_regs
 	int cpu = smp_processor_id();
 	long weight;

-	x86_do_profile(regs);
-
+	profile_tick(CPU_PROFILING, regs);
 	if (--per_cpu(prof_counter, cpu) <= 0) {
 		/*
 		 * The multiplier may have changed since the last time we got
diff -puN arch/ia64/kernel/time.c~profile_tick arch/ia64/kernel/time.c
--- 25/arch/ia64/kernel/time.c~profile_tick	2004-08-09 22:02:03.726062600 -0700
+++ 25-akpm/arch/ia64/kernel/time.c	2004-08-09 22:02:03.772055608 -0700
@@ -186,49 +186,6 @@ do_gettimeofday (struct timeval *tv)

 EXPORT_SYMBOL(do_gettimeofday);

-/*
- * The profiling function is SMP safe. (nothing can mess
- * around with "current", and the profiling counters are
- * updated with atomic operations). This is especially
- * useful with a profiling multiplier != 1
- */
-static inline void
-ia64_do_profile (struct pt_regs * regs)
-{
-	unsigned long ip;
-
-	profile_hook(regs);
-
-	if (user_mode(regs))
-		return;
-
-	if (!prof_buffer)
-		return;
-
-	/* Conserve space in histogram by encoding slot bits in address
-	 * bits 2 and 3 rather than bits 0 and 1.
-	 */
-	ip = profile_pc(regs);
-
-	/*
-	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
-	 * (default is all CPUs.)
-	 */
-	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
-		return;
-
-	ip -= (unsigned long) &_stext;
-	ip >>= prof_shift;
-	/*
-	 * Don't ignore out-of-bounds IP values silently,
-	 * put them into the last histogram slot, so if
-	 * present, they will show up as a sharp peak.
-	 */
-	if (ip > prof_len-1)
-		ip = prof_len-1;
-	atomic_inc((atomic_t *)&prof_buffer[ip]);
-}
-
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
 {
@@ -246,7 +203,7 @@ timer_interrupt (int irq, void *dev_id,
 		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
 		       ia64_get_itc(), new_itm);

-	ia64_do_profile(regs);
+	profile_tick(CPU_PROFILING, regs);

 	while (1) {
 #ifdef CONFIG_SMP
diff -puN arch/m68k/kernel/time.c~profile_tick arch/m68k/kernel/time.c
--- 25/arch/m68k/kernel/time.c~profile_tick	2004-08-09 22:02:03.727062448 -0700
+++ 25-akpm/arch/m68k/kernel/time.c	2004-08-09 22:02:03.772055608 -0700
@@ -38,24 +38,6 @@ static inline int set_rtc_mmss(unsigned
 	return -1;
 }

-static inline void do_profile (unsigned long pc)
-{
-	if (prof_buffer && current->pid) {
-		extern int _stext;
-		pc -= (unsigned long) &_stext;
-		pc >>= prof_shift;
-		if (pc < prof_len)
-			++prof_buffer[pc];
-		else
-		/*
-		 * Don't ignore out-of-bounds PC values silently,
-		 * put them into the last histogram slot, so if
-		 * present, they will show up as a sharp peak.
-		 */
-			++prof_buffer[prof_len-1];
-	}
-}
-
 /*
  * timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
@@ -63,9 +45,7 @@ static inline void do_profile (unsigned
 static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
 {
 	do_timer(regs);
-
-	if (!user_mode(regs))
-		do_profile(regs->pc);
+	profile_tick(CPU_PROFILING, regs);

 #ifdef CONFIG_HEARTBEAT
 	/* use power LED as a heartbeat instead -- much more useful
diff -puN arch/m68knommu/kernel/time.c~profile_tick arch/m68knommu/kernel/time.c
--- 25/arch/m68knommu/kernel/time.c~profile_tick	2004-08-09 22:02:03.729062144 -0700
+++ 25-akpm/arch/m68knommu/kernel/time.c	2004-08-09 22:02:03.773055456 -0700
@@ -41,24 +41,6 @@ static inline int set_rtc_mmss(unsigned
 	return -1;
 }

-static inline void do_profile (unsigned long pc)
-{
-	if (prof_buffer && current->pid) {
-		extern int _stext;
-		pc -= (unsigned long) &_stext;
-		pc >>= prof_shift;
-		if (pc < prof_len)
-			++prof_buffer[pc];
-		else
-		/*
-		 * Don't ignore out-of-bounds PC values silently,
-		 * put them into the last histogram slot, so if
-		 * present, they will show up as a sharp peak.
-		 */
-			++prof_buffer[prof_len-1];
-	}
-}
-
 /*
  * timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
@@ -75,9 +57,8 @@ static irqreturn_t timer_interrupt(int i
 	write_seqlock(&xtime_lock);

 	do_timer(regs);
-
-	if (!user_mode(regs))
-		do_profile(regs->pc);
+	if (current->pid)
+		profile_tick(CPU_PROFILING, regs);

 	/*
 	 * If we have an externally synchronized Linux clock, then update
diff -puN arch/m68knommu/platform/5307/timers.c~profile_tick arch/m68knommu/platform/5307/timers.c
--- 25/arch/m68knommu/platform/5307/timers.c~profile_tick	2004-08-09 22:02:03.730061992 -0700
+++ 25-akpm/arch/m68knommu/platform/5307/timers.c	2004-08-09 22:02:03.774055304 -0700
@@ -110,17 +110,8 @@ void coldfire_profile_tick(int irq, void
 {
 	/* Reset ColdFire timer2 */
 	mcf_proftp->ter = MCFTIMER_TER_CAP | MCFTIMER_TER_REF;
-
-	if (!user_mode(regs)) {
-		if (prof_buffer && current->pid) {
-			extern int _stext;
-			unsigned long ip = instruction_pointer(regs);
-			ip -= (unsigned long) &_stext;
-			ip >>= prof_shift;
-			if (ip < prof_len)
-				prof_buffer[ip]++;
-		}
-	}
+	if (current->pid)
+		profile_tick(CPU_PROFILING, regs);
 }

 /***************************************************************************/
diff -puN arch/mips/kernel/time.c~profile_tick arch/mips/kernel/time.c
--- 25/arch/mips/kernel/time.c~profile_tick	2004-08-09 22:02:03.732061688 -0700
+++ 25-akpm/arch/mips/kernel/time.c	2004-08-09 22:02:03.774055304 -0700
@@ -417,23 +417,8 @@ static long last_rtc_update;
  */
 void local_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-	if (!user_mode(regs)) {
-		if (prof_buffer && current->pid) {
-			unsigned long pc = regs->cp0_epc;
-
-			pc -= (unsigned long) _stext;
-			pc >>= prof_shift;
-			/*
-			 * Dont ignore out-of-bounds pc values silently,
-			 * put them into the last histogram slot, so if
-			 * present, they will show up as a sharp peak.
-			 */
-			if (pc > prof_len - 1)
-				pc = prof_len - 1;
-			atomic_inc((atomic_t *)&prof_buffer[pc]);
-		}
-	}
-
+	if (current->pid)
+		profile_tick(CPU_PROFILING, regs);
 #ifdef CONFIG_SMP
 	/* in UP mode, update_process_times() is invoked by do_timer() */
 	update_process_times(user_mode(regs));
diff -puN arch/parisc/kernel/time.c~profile_tick arch/parisc/kernel/time.c
--- 25/arch/parisc/kernel/time.c~profile_tick	2004-08-09 22:02:03.733061536 -0700
+++ 25-akpm/arch/parisc/kernel/time.c	2004-08-09 22:02:03.775055152 -0700
@@ -47,41 +47,6 @@ static long halftick;
 extern void smp_do_timer(struct pt_regs *regs);
 #endif

-static inline void
-parisc_do_profile(struct pt_regs *regs)
-{
-	unsigned long pc = regs->iaoq[0];
-	extern char _stext;
-
-	profile_hook(regs);
-
-	if (user_mode(regs))
-		return;
-
-	if (!prof_buffer)
-		return;
-
-#if 0
-	/* FIXME: when we have irq affinity to cpu, we need to
-	 * only look at the cpus specified in this mask
-	 */
-
-	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
-		return;
-#endif
-
-	pc -= (unsigned long) &_stext;
-	pc >>= prof_shift;
-	/*
-	 * Don't ignore out-of-bounds PC values silently,
-	 * put them into the last histogram slot, so if
-	 * present, they will show up as a sharp peak.
-	 */
-	if (pc > prof_len - 1)
-		pc = prof_len - 1;
-	atomic_inc((atomic_t *)&prof_buffer[pc]);
-}
-
 irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
 	long now;
@@ -89,7 +54,7 @@ irqreturn_t timer_interrupt(int irq, voi
 	int nticks;
 	int cpu = smp_processor_id();

-	parisc_do_profile(regs);
+	profile_tick(CPU_PROFILING, regs);

 	now = mfctl(16);
 	/* initialize next_tick to time at last clocktick */
diff -puN arch/ppc64/kernel/time.c~profile_tick arch/ppc64/kernel/time.c
--- 25/arch/ppc64/kernel/time.c~profile_tick	2004-08-09 22:02:03.734061384 -0700
+++ 25-akpm/arch/ppc64/kernel/time.c	2004-08-09 22:02:03.776055000 -0700
@@ -105,45 +105,6 @@ void ppc_adjtimex(void);

 static unsigned adjusting_time = 0;

-/*
- * The profiling function is SMP safe. (nothing can mess
- * around with "current", and the profiling counters are
- * updated with atomic operations). This is especially
- * useful with a profiling multiplier != 1
- */
-static inline void ppc64_do_profile(struct pt_regs *regs)
-{
-	unsigned long nip;
-
-	profile_hook(regs);
-
-	if (user_mode(regs))
-		return;
-
-	if (!prof_buffer)
-		return;
-
-	nip = instruction_pointer(regs);
-
-	/*
-	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
-	 * (default is all CPUs.)
-	 */
-	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
-		return;
-
-	nip -= (unsigned long)_stext;
-	nip >>= prof_shift;
-	/*
-	 * Don't ignore out-of-bounds EIP values silently,
-	 * put them into the last histogram slot, so if
-	 * present, they will show up as a sharp peak.
-	 */
-	if (nip > prof_len-1)
-		nip = prof_len-1;
-	atomic_inc((atomic_t *)&prof_buffer[nip]);
-}
-
 static __inline__ void timer_check_rtc(void)
 {
 	/*
@@ -272,7 +233,7 @@ int timer_interrupt(struct pt_regs * reg
 	irq_enter();

 #ifndef CONFIG_PPC_ISERIES
-	ppc64_do_profile(regs);
+	profile_tick(CPU_PROFILING, regs);
 #endif

 	lpaca->lppaca.xIntDword.xFields.xDecrInt = 0;
diff -puN arch/ppc/kernel/time.c~profile_tick arch/ppc/kernel/time.c
--- 25/arch/ppc/kernel/time.c~profile_tick	2004-08-09 22:02:03.736061080 -0700
+++ 25-akpm/arch/ppc/kernel/time.c	2004-08-09 22:02:03.776055000 -0700
@@ -108,41 +108,6 @@ static inline int tb_delta(unsigned *jif
 	return delta;
 }

-extern char _stext;
-
-static inline void ppc_do_profile (struct pt_regs *regs)
-{
-	unsigned long nip;
-
-	profile_hook(regs);
-
-	if (user_mode(regs))
-		return;
-
-	if (!prof_buffer)
-		return;
-
-	nip = instruction_pointer(regs);
-
-	/*
-	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
-	 * (default is all CPUs.)
-	 */
-	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
-		return;
-
-	nip -= (unsigned long) &_stext;
-	nip >>= prof_shift;
-	/*
-	 * Don't ignore out-of-bounds EIP values silently,
-	 * put them into the last histogram slot, so if
-	 * present, they will show up as a sharp peak.
-	 */
-	if (nip > prof_len-1)
-		nip = prof_len-1;
-	atomic_inc((atomic_t *)&prof_buffer[nip]);
-}
-
 /*
  * timer_interrupt - gets called when the decrementer overflows,
  * with interrupts disabled.
@@ -163,7 +128,7 @@ void timer_interrupt(struct pt_regs * re
 	while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) <= 0) {
 		jiffy_stamp += tb_ticks_per_jiffy;

-		ppc_do_profile(regs);
+		profile_tick(CPU_PROFILING, regs);
 		if (smp_processor_id())
 			continue;
diff -puN arch/s390/kernel/time.c~profile_tick arch/s390/kernel/time.c
--- 25/arch/s390/kernel/time.c~profile_tick	2004-08-09 22:02:03.737060928 -0700
+++ 25-akpm/arch/s390/kernel/time.c	2004-08-09 22:02:03.777054848 -0700
@@ -174,46 +174,7 @@ __calculate_ticks(__u64 elapsed)

 #ifdef CONFIG_PROFILING

-extern char _stext, _etext;
-
-/*
- * The profiling function is SMP safe. (nothing can mess
- * around with "current", and the profiling counters are
- * updated with atomic operations). This is especially
- * useful with a profiling multiplier != 1
- */
-static inline void s390_do_profile(struct pt_regs * regs)
-{
-	unsigned long eip;
-
-	profile_hook(regs);
-
-	if (user_mode(regs))
-		return;
-
-	if (!prof_buffer)
-		return;
-
-	eip = instruction_pointer(regs);
-
-	/*
-	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
-	 * (default is all CPUs.)
-	 */
-	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
-		return;
-
-	eip -= (unsigned long) &_stext;
-	eip >>= prof_shift;
-	/*
-	 * Don't ignore out-of-bounds EIP values silently,
-	 * put them into the last histogram slot, so if
-	 * present, they will show up as a sharp peak.
-	 */
-	if (eip > prof_len-1)
-		eip = prof_len-1;
-	atomic_inc((atomic_t *)&prof_buffer[eip]);
-}
+#define s390_do_profile(regs)	profile_tick(CPU_PROFILING, regs)
 #else
 #define s390_do_profile(regs)  do { ; } while(0)
 #endif /* CONFIG_PROFILING */
diff -puN arch/sh64/kernel/time.c~profile_tick arch/sh64/kernel/time.c
--- 25/arch/sh64/kernel/time.c~profile_tick	2004-08-09 22:02:03.739060624 -0700
+++ 25-akpm/arch/sh64/kernel/time.c	2004-08-09 22:02:03.778054696 -0700
@@ -298,37 +298,6 @@ static int set_rtc_time(unsigned long no
 /* last time the RTC clock got updated */
 static long last_rtc_update = 0;

-static inline void sh64_do_profile(struct pt_regs *regs)
-{
-	extern int _stext;
-	unsigned long pc;
-
-	profile_hook(regs);
-
-	if (user_mode(regs))
-		return;
-
-	/* Don't profile cpu_idle.. */
-	if (!prof_buffer || !current->pid)
-		return;
-
-	pc = instruction_pointer(regs);
-	pc -= (unsigned long) &_stext;
-	pc >>= prof_shift;
-
-	/*
-	 * Don't ignore out-of-bounds PC values silently, put them into the
-	 * last histogram slot, so if present, they will show up as a sharp
-	 * peak.
-	 */
-	if (pc > prof_len - 1)
-		pc = prof_len - 1;
-
-	/* We could just be sloppy and not lock against a re-entry on this
-	   increment, but the profiling code won't always be linked in anyway. */
-	atomic_inc((atomic_t *)&prof_buffer[pc]);
-}
-
 /*
  * timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
@@ -340,8 +309,7 @@ static inline void do_timer_interrupt(in
 	ctc_last_interrupt = (unsigned long) current_ctc;

 	do_timer(regs);
-
-	sh64_do_profile(regs);
+	profile_tick(CPU_PROFILING, regs);

 #ifdef CONFIG_HEARTBEAT
 	{
diff -puN arch/sh/kernel/time.c~profile_tick arch/sh/kernel/time.c
--- 25/arch/sh/kernel/time.c~profile_tick	2004-08-09 22:02:03.740060472 -0700
+++ 25-akpm/arch/sh/kernel/time.c	2004-08-09 22:02:03.779054544 -0700
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -250,32 +251,6 @@ EXPORT_SYMBOL(do_settimeofday);
 /* last time the RTC clock got updated */
 static long last_rtc_update;

-/* Profiling definitions */
-extern unsigned int * prof_buffer;
-extern unsigned long prof_len;
-extern unsigned long prof_shift;
-extern char _stext;
-
-static inline void sh_do_profile(unsigned long pc)
-{
-	/* Don't profile cpu_idle.. */
-	if (!prof_buffer || !current->pid)
-		return;
-
-	pc -= (unsigned long)&_stext;
-	pc >>= prof_shift;
-
-	/*
-	 * Don't ignore out-of-bounds PC values silently,
-	 * put them into the last histogram slot, so if
-	 * present, they will show up as a sharp peak.
-	 */
-	if (pc > prof_len - 1)
-		pc = prof_len - 1;
-
-	atomic_inc((atomic_t *)&prof_buffer[pc]);
-}
-
 /*
  * timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
@@ -283,9 +258,7 @@ static inline void sh_do_profile(unsigne
 static inline void do_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
 	do_timer(regs);
-
-	if (!user_mode(regs))
-		sh_do_profile(profile_pc(regs));
+	profile_tick(CPU_PROFILING, regs);

 #ifdef CONFIG_HEARTBEAT
 	if (sh_mv.mv_heartbeat != NULL)
diff -puN arch/sparc64/kernel/smp.c~profile_tick arch/sparc64/kernel/smp.c
--- 25/arch/sparc64/kernel/smp.c~profile_tick	2004-08-09 22:02:03.741060320 -0700
+++ 25-akpm/arch/sparc64/kernel/smp.c	2004-08-09 22:02:03.780054392 -0700
@@ -1012,8 +1012,6 @@ void smp_promstop_others(void)
 	smp_cross_call(&xcall_promstop, 0, 0, 0);
 }

-extern void sparc64_do_profile(struct pt_regs *regs);
-
 #define prof_multiplier(__cpu)		cpu_data(__cpu).multiplier
 #define prof_counter(__cpu)		cpu_data(__cpu).counter

@@ -1039,7 +1037,7 @@ void smp_percpu_timer_interrupt(struct p
 	}

 	do {
-		sparc64_do_profile(regs);
+		profile_tick(CPU_PROFILING, regs);
 		if (!--prof_counter(cpu)) {
 			irq_enter();
diff -puN arch/sparc64/kernel/time.c~profile_tick arch/sparc64/kernel/time.c
--- 25/arch/sparc64/kernel/time.c~profile_tick	2004-08-09 22:02:03.743060016 -0700
+++ 25-akpm/arch/sparc64/kernel/time.c	2004-08-09 22:02:03.781054240 -0700
@@ -465,20 +465,6 @@ unsigned long profile_pc(struct pt_regs
 	return pc;
 }

-void sparc64_do_profile(struct pt_regs *regs)
-{
-	profile_hook(regs);
-
-	if (user_mode(regs))
-		return;
-
-	if (!prof_buffer)
-		return;
-
-	pc = (profile_pc(regs) - (unsigned long)_stext) >> prof_shift;
-	atomic_inc((atomic_t *)&prof_buffer[min(pc, prof_len-1)]);
-}
-
 static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 {
 	unsigned long ticks, pstate;
@@ -487,7 +473,7 @@ static irqreturn_t timer_interrupt(int i

 	do {
 #ifndef CONFIG_SMP
-		sparc64_do_profile(regs);
+		profile_tick(CPU_PROFILING, regs);
 #endif
 		do_timer(regs);
diff -puN arch/sparc/kernel/sun4d_smp.c~profile_tick arch/sparc/kernel/sun4d_smp.c
--- 25/arch/sparc/kernel/sun4d_smp.c~profile_tick	2004-08-09 22:02:03.744059864 -0700
+++ 25-akpm/arch/sparc/kernel/sun4d_smp.c	2004-08-09 22:02:03.782054088 -0700
@@ -410,8 +410,6 @@ void smp4d_message_pass(int target, int
 	panic("Bogon SMP message pass.");
 }

-extern void sparc_do_profile(unsigned long pc, unsigned long o7);
-
 void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
 {
 	int cpu = hard_smp4d_processor_id();
@@ -429,8 +427,7 @@ void smp4d_percpu_timer_interrupt(struct
 		show_leds(cpu);
 	}

-	if(!user_mode(regs))
-		sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
+	profile_tick(CPU_PROFILING, regs);

 	if(!--prof_counter(cpu)) {
 		int user = user_mode(regs);
diff -puN arch/sparc/kernel/sun4m_smp.c~profile_tick arch/sparc/kernel/sun4m_smp.c
--- 25/arch/sparc/kernel/sun4m_smp.c~profile_tick	2004-08-09 22:02:03.746059560 -0700
+++ 25-akpm/arch/sparc/kernel/sun4m_smp.c	2004-08-09 22:02:03.782054088 -0700
@@ -392,16 +392,13 @@ void smp4m_cross_call_irq(void)
 		ccall_info.processors_out[i] = 1;
 }

-extern void sparc_do_profile(unsigned long pc, unsigned long o7);
-
 void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();

 	clear_profile_irq(cpu);

-	if(!user_mode(regs))
-		sparc_do_profile(regs->pc, regs->u_regs[UREG_RETPC]);
+	profile_tick(CPU_PROFILING, regs);

 	if(!--prof_counter(cpu)) {
 		int user = user_mode(regs);
diff -puN arch/sparc/kernel/time.c~profile_tick arch/sparc/kernel/time.c
--- 25/arch/sparc/kernel/time.c~profile_tick	2004-08-09 22:02:03.747059408 -0700
+++ 25-akpm/arch/sparc/kernel/time.c	2004-08-09 22:02:03.783053936 -0700
@@ -99,26 +99,6 @@ unsigned long profile_pc(struct pt_regs
 	return pc;
 }

-static spinlock_t ticker_lock = SPIN_LOCK_UNLOCKED;
-
-/* 32-bit Sparc specific profiling function. */
-void sparc_do_profile(unsigned long pc, unsigned long o7)
-{
-	if(prof_buffer && current->pid) {
-		extern int _stext;
-
-		pc -= (unsigned long) &_stext;
-		pc >>= prof_shift;
-
-		spin_lock(&ticker_lock);
-		if(pc < prof_len)
-			prof_buffer[pc]++;
-		else
-			prof_buffer[prof_len - 1]++;
-		spin_unlock(&ticker_lock);
-	}
-}
-
 __volatile__ unsigned int *master_l10_counter;
 __volatile__ unsigned int *master_l10_limit;
@@ -135,8 +115,7 @@ irqreturn_t timer_interrupt(int irq, voi
 	static long last_rtc_update;

 #ifndef CONFIG_SMP
-	if(!user_mode(regs))
-		sparc_do_profile(profile_pc(regs));
+	profile_tick(CPU_PROFILING, regs);
 #endif

 	/* Protect counter clear so that do_gettimeoffset works */
diff -puN arch/v850/kernel/time.c~profile_tick arch/v850/kernel/time.c
--- 25/arch/v850/kernel/time.c~profile_tick	2004-08-09 22:02:03.749059104 -0700
+++ 25-akpm/arch/v850/kernel/time.c	2004-08-09 22:02:03.783053936 -0700
@@ -40,24 +40,6 @@ unsigned long long sched_clock(void)
 	return (unsigned long long)jiffies * (1000000000 / HZ);
 }

-static inline void do_profile (unsigned long pc)
-{
-	if (prof_buffer && current->pid) {
-		extern int _stext;
-		pc -= (unsigned long) &_stext;
-		pc >>= prof_shift;
-		if (pc < prof_len)
-			++prof_buffer[pc];
-		else
-		/*
-		 * Don't ignore out-of-bounds PC values silently,
-		 * put them into the last histogram slot, so if
-		 * present, they will show up as a sharp peak.
-		 */
-			++prof_buffer[prof_len-1];
-	}
-}
-
 /*
  * timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
@@ -74,10 +56,7 @@ static irqreturn_t timer_interrupt (int
 	mach_tick ();

 	do_timer (regs);
-
-	if (! user_mode (regs))
-		do_profile (regs->pc);
-
+	profile_tick(CPU_PROFILING, regs);
 #if 0
 	/*
 	 * If we have an externally synchronized Linux clock, then update
diff -puN arch/x86_64/kernel/apic.c~profile_tick arch/x86_64/kernel/apic.c
--- 25/arch/x86_64/kernel/apic.c~profile_tick	2004-08-09 22:02:03.750058952 -0700
+++ 25-akpm/arch/x86_64/kernel/apic.c	2004-08-09 22:02:03.784053784 -0700
@@ -836,8 +836,7 @@ void smp_local_timer_interrupt(struct pt
 {
 	int cpu = smp_processor_id();

-	x86_do_profile(regs);
-
+	profile_tick(CPU_PROFILING, regs);
 	if (--per_cpu(prof_counter, cpu) <= 0) {
 		/*
 		 * The multiplier may have changed since the last time we got
diff -puN arch/x86_64/kernel/time.c~profile_tick arch/x86_64/kernel/time.c
--- 25/arch/x86_64/kernel/time.c~profile_tick	2004-08-09 22:02:03.752058648 -0700
+++ 25-akpm/arch/x86_64/kernel/time.c	2004-08-09 22:02:03.785053632 -0700
@@ -396,7 +396,7 @@ static irqreturn_t timer_interrupt(int i
 	 */

 #ifndef CONFIG_X86_LOCAL_APIC
-	x86_do_profile(regs);
+	profile_tick(CPU_PROFILING, regs);
 #else
 	if (!using_apic_timer)
 		smp_local_timer_interrupt(regs);
diff -puN include/asm-i386/hw_irq.h~profile_tick include/asm-i386/hw_irq.h
--- 25/include/asm-i386/hw_irq.h~profile_tick	2004-08-09 22:02:03.753058496 -0700
+++ 25-akpm/include/asm-i386/hw_irq.h	2004-08-09 22:02:03.785053632 -0700
@@ -68,48 +68,6 @@ extern atomic_t irq_mis_count;

 #define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))

-static inline void __do_profile(unsigned long eip)
-{
-	if (!prof_buffer)
-		return;
-
-	/*
-	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
-	 * (default is all CPUs.)
-	 */
-	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
-		return;
-
-	eip -= (unsigned long)_stext;
-	eip >>= prof_shift;
-	/*
-	 * Don't ignore out-of-bounds EIP values silently,
-	 * put them into the last histogram slot, so if
-	 * present, they will show up as a sharp peak.
-	 */
-	if (eip > prof_len-1)
-		eip = prof_len-1;
-	atomic_inc((atomic_t *)&prof_buffer[eip]);
-}
-
-#define kern_profile(eip) __do_profile(eip)
-
-/*
- * The profiling function is SMP safe. (nothing can mess
- * around with "current", and the profiling counters are
- * updated with atomic operations). This is especially
- * useful with a profiling multiplier != 1
- */
-static inline void x86_do_profile(struct pt_regs * regs)
-{
-	profile_hook(regs);
-
-	if (prof_on != 1 || user_mode(regs))
-		return;
-
-	__do_profile(regs->eip);
-}
-
 #if defined(CONFIG_X86_IO_APIC)
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
 {
diff -puN include/asm-i386/mach-default/do_timer.h~profile_tick include/asm-i386/mach-default/do_timer.h
--- 25/include/asm-i386/mach-default/do_timer.h~profile_tick	2004-08-09 22:02:03.754058344 -0700
+++ 25-akpm/include/asm-i386/mach-default/do_timer.h	2004-08-09 22:02:03.786053480 -0700
@@ -22,7 +22,7 @@ static inline void do_timer_interrupt_ho
 	 * system, in that case we have to call the local interrupt handler.
 	 */
 #ifndef CONFIG_X86_LOCAL_APIC
-	x86_do_profile(regs);
+	profile_tick(CPU_PROFILING, regs);
 #else
 	if (!using_apic_timer)
 		smp_local_timer_interrupt(regs);
diff -puN include/asm-i386/mach-visws/do_timer.h~profile_tick include/asm-i386/mach-visws/do_timer.h
--- 25/include/asm-i386/mach-visws/do_timer.h~profile_tick	2004-08-09 22:02:03.756058040 -0700
+++ 25-akpm/include/asm-i386/mach-visws/do_timer.h	2004-08-09 22:02:03.786053480 -0700
@@ -15,7 +15,7 @@ static inline void do_timer_interrupt_ho
 	 * system, in that case we have to call the local interrupt handler.
 	 */
 #ifndef CONFIG_X86_LOCAL_APIC
-	x86_do_profile(regs);
+	profile_tick(CPU_PROFILING, regs);
 #else
 	if (!using_apic_timer)
 		smp_local_timer_interrupt(regs);
diff -puN include/asm-x86_64/hw_irq.h~profile_tick include/asm-x86_64/hw_irq.h
--- 25/include/asm-x86_64/hw_irq.h~profile_tick	2004-08-09 22:02:03.757057888 -0700
+++ 25-akpm/include/asm-x86_64/hw_irq.h	2004-08-09 22:02:03.787053328 -0700
@@ -130,39 +130,6 @@ __asm__( \
 	"push $" #nr "-256 ; " \
 	"jmp common_interrupt");

-static inline void x86_do_profile (struct pt_regs *regs)
-{
-	unsigned long rip;
-	extern char _stext[];
-
-	profile_hook(regs);
-
-	if (user_mode(regs))
-		return;
-	if (!prof_buffer)
-		return;
-
-	rip = regs->rip;
-
-	/*
-	 * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
-	 * (default is all CPUs.)
-	 */
-	if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
-		return;
-
-	rip -= (unsigned long) &_stext;
-	rip >>= prof_shift;
-	/*
-	 * Don't ignore out-of-bounds EIP values silently,
-	 * put them into the last histogram slot, so if
-	 * present, they will show up as a sharp peak.
-	 */
-	if (rip > prof_len-1)
-		rip = prof_len-1;
-	atomic_inc((atomic_t *)&prof_buffer[rip]);
-}
-
 #if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP)
 static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i) {
 	if (IO_APIC_IRQ(i))
diff -puN include/linux/profile.h~profile_tick include/linux/profile.h
--- 25/include/linux/profile.h~profile_tick	2004-08-09 22:02:03.759057584 -0700
+++ 25-akpm/include/linux/profile.h	2004-08-09 22:02:03.787053328 -0700
@@ -9,7 +9,11 @@
 #include
 #include

+#define CPU_PROFILING	1
+#define SCHED_PROFILING	2
+
 struct proc_dir_entry;
+struct pt_regs;

 /* parse command line */
 int __init profile_setup(char * str);
@@ -17,6 +21,8 @@ int __init profile_setup(char * str);
 /* init basic kernel profiler */
 void __init profile_init(void);
 void create_prof_cpu_mask(struct proc_dir_entry *);
+void profile_tick(int, struct pt_regs *);
+void profile_hit(int, void *);

 extern unsigned int * prof_buffer;
 extern unsigned long prof_len;
diff -puN kernel/profile.c~profile_tick kernel/profile.c
--- 25/kernel/profile.c~profile_tick	2004-08-09 22:02:03.760057432 -0700
+++ 25-akpm/kernel/profile.c	2004-08-09 22:02:03.788053176 -0700
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include
 #include

 unsigned int * prof_buffer;
@@ -169,6 +170,25 @@ EXPORT_SYMBOL_GPL(profile_event_unregist
 #ifdef CONFIG_PROC_FS
 #include
 #include
+#include
+
+void profile_hit(int type, void *__pc)
+{
+	unsigned long pc;
+
+	if (prof_on != type)
+		return;
+	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
+	atomic_inc((atomic_t *)&prof_buffer[min(pc, prof_len - 1)]);
+}
+
+void profile_tick(int type, struct pt_regs *regs)
+{
+	if (type == CPU_PROFILING)
+		profile_hook(regs);
+	if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
+		profile_hit(type, (void *)profile_pc(regs));
+}

 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
diff -puN kernel/sched.c~profile_tick kernel/sched.c
--- 25/kernel/sched.c~profile_tick	2004-08-09 22:02:03.762057128 -0700
+++ 25-akpm/kernel/sched.c	2004-08-09 22:02:03.791052720 -0700
@@ -2602,10 +2602,7 @@ asmlinkage void __sched schedule(void)
 			dump_stack();
 		}
 	}
-#ifdef kern_profile
-	if (unlikely(prof_on == 2))
-		__do_profile((unsigned long)__builtin_return_address(0));
-#endif
+	profile_hit(SCHED_PROFILING, __builtin_return_address(0));

 need_resched:
 	preempt_disable();
_
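
For anyone converting a further architecture to the new interface, the pattern is the
same as in every hunk above: delete the arch-private histogram code and call the core
hook from the timer interrupt.  A minimal sketch follows; the "foo" architecture and
its handler name are made up for illustration and are not part of this patch.

/*
 * Illustrative sketch only -- "foo" is a hypothetical architecture.
 * The consolidated core hook now applies prof_cpu_mask, skips user-mode
 * samples and folds out-of-range PCs into the last histogram slot, so
 * none of that bookkeeping remains in arch code.
 */
#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED */
#include <linux/sched.h>	/* do_timer() */
#include <linux/profile.h>	/* profile_tick(), CPU_PROFILING */
#include <linux/ptrace.h>	/* struct pt_regs */

static irqreturn_t foo_timer_interrupt(int irq, void *dev_id,
				       struct pt_regs *regs)
{
	do_timer(regs);
	profile_tick(CPU_PROFILING, regs);
	return IRQ_HANDLED;
}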