From 092b8f3488a3e50a4ab5f2f3f7c8bbf56b3144e1 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 20 Feb 2006 10:38:56 +1100 Subject: powerpc: Keep xtime and gettimeofday in sync This fixes a regression which was introduced by moving ppc32 to use the same sort of lockless gettimeofday as ppc64 has been using for some time. This involves getting the timebase and performing some simple arithmetic to convert it to seconds and microseconds. However, the factor and offset used there weren't being updated when NTP varied the tick length using adjtimex. 64-bit didn't notice the problem because it had a hook in the 32-bit adjtimex compat routine that attempted to work out what the generic timekeeping code would do and alter the factor and offset to match. However, that code was very complex and it wasn't clear that it still matched what the generic code would do. Now we use the generic current_tick_length() routine that was recently added to check that the current tick will be as long as we expect; if not we recompute the factor and offset. This keeps gettimeofday and xtime in sync. In addition we check that gettimeofday hasn't got ahead of xtime on each timer interrupt; if it has, we resync. Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/sys_ppc32.c | 4 - arch/powerpc/kernel/time.c | 282 ++++++++++++++-------------------------- 2 files changed, 99 insertions(+), 187 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 475249dc2350db..cd75ab2908fa85 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -176,7 +176,6 @@ struct timex32 { }; extern int do_adjtimex(struct timex *); -extern void ppc_adjtimex(void); asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) { @@ -209,9 +208,6 @@ asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) ret = do_adjtimex(&txc); - /* adjust the conversion of TB to time of day to track adjtimex */ - ppc_adjtimex(); - if(put_user(txc.modes, &utp->modes) || __put_user(txc.offset, &utp->offset) || __put_user(txc.freq, &utp->freq) || diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 1886045a2fd8f8..2a7ddc5793797e 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -50,6 +50,7 @@ #include #include #include +#include #include #include @@ -99,7 +100,15 @@ EXPORT_SYMBOL(tb_ticks_per_usec); unsigned long tb_ticks_per_sec; u64 tb_to_xs; unsigned tb_to_us; -unsigned long processor_freq; + +#define TICKLEN_SCALE (SHIFT_SCALE - 10) +u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ +u64 ticklen_to_xs; /* 0.64 fraction */ + +/* If last_tick_len corresponds to about 1/HZ seconds, then + last_tick_len << TICKLEN_SHIFT will be about 2^63. 
*/ +#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ) + DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL_GPL(rtc_lock); @@ -113,10 +122,6 @@ extern unsigned long wall_jiffies; extern struct timezone sys_tz; static long timezone_offset; -void ppc_adjtimex(void); - -static unsigned adjusting_time = 0; - unsigned long ppc_proc_freq; unsigned long ppc_tb_freq; @@ -178,8 +183,7 @@ static __inline__ void timer_check_rtc(void) */ if (ppc_md.set_rtc_time && ntp_synced() && xtime.tv_sec - last_rtc_update >= 659 && - abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && - jiffies - wall_jiffies == 1) { + abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) { struct rtc_time tm; to_tm(xtime.tv_sec + 1 + timezone_offset, &tm); tm.tm_year -= 1900; @@ -226,15 +230,14 @@ void do_gettimeofday(struct timeval *tv) if (__USE_RTC()) { /* do this the old way */ unsigned long flags, seq; - unsigned int sec, nsec, usec, lost; + unsigned int sec, nsec, usec; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); sec = xtime.tv_sec; nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); - lost = jiffies - wall_jiffies; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - usec = nsec / 1000 + lost * (1000000 / HZ); + usec = nsec / 1000; while (usec >= 1000000) { usec -= 1000000; ++sec; @@ -248,23 +251,6 @@ void do_gettimeofday(struct timeval *tv) EXPORT_SYMBOL(do_gettimeofday); -/* Synchronize xtime with do_gettimeofday */ - -static inline void timer_sync_xtime(unsigned long cur_tb) -{ -#ifdef CONFIG_PPC64 - /* why do we do this? */ - struct timeval my_tv; - - __do_gettimeofday(&my_tv, cur_tb); - - if (xtime.tv_sec <= my_tv.tv_sec) { - xtime.tv_sec = my_tv.tv_sec; - xtime.tv_nsec = my_tv.tv_usec * 1000; - } -#endif -} - /* * There are two copies of tb_to_xs and stamp_xsec so that no * lock is needed to access and use these values in @@ -323,15 +309,30 @@ static __inline__ void timer_recalc_offset(u64 cur_tb) { unsigned long offset; u64 new_stamp_xsec; + u64 tlen, t2x; if (__USE_RTC()) return; + tlen = current_tick_length(); offset = cur_tb - do_gtod.varp->tb_orig_stamp; - if ((offset & 0x80000000u) == 0) - return; - new_stamp_xsec = do_gtod.varp->stamp_xsec - + mulhdu(offset, do_gtod.varp->tb_to_xs); - update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs); + if (tlen == last_tick_len && offset < 0x80000000u) { + /* check that we're still in sync; if not, resync */ + struct timeval tv; + __do_gettimeofday(&tv, cur_tb); + if (tv.tv_sec <= xtime.tv_sec && + (tv.tv_sec < xtime.tv_sec || + tv.tv_usec * 1000 <= xtime.tv_nsec)) + return; + } + if (tlen != last_tick_len) { + t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs); + last_tick_len = tlen; + } else + t2x = do_gtod.varp->tb_to_xs; + new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; + do_div(new_stamp_xsec, 1000000000); + new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; + update_gtod(cur_tb, new_stamp_xsec, t2x); } #ifdef CONFIG_SMP @@ -462,13 +463,10 @@ void timer_interrupt(struct pt_regs * regs) write_seqlock(&xtime_lock); tb_last_jiffy += tb_ticks_per_jiffy; tb_last_stamp = per_cpu(last_jiffy, cpu); - timer_recalc_offset(tb_last_jiffy); do_timer(regs); - timer_sync_xtime(tb_last_jiffy); + timer_recalc_offset(tb_last_jiffy); timer_check_rtc(); write_sequnlock(&xtime_lock); - if (adjusting_time && (time_adjust == 0)) - ppc_adjtimex(); } next_dec = tb_ticks_per_jiffy - ticks; @@ -492,16 +490,18 @@ void timer_interrupt(struct pt_regs * regs) void wakeup_decrementer(void) { - int i; + unsigned long ticks; - 
set_dec(tb_ticks_per_jiffy); /* - * We don't expect this to be called on a machine with a 601, - * so using get_tbl is fine. + * The timebase gets saved on sleep and restored on wakeup, + * so all we need to do is to reset the decrementer. */ - tb_last_stamp = tb_last_jiffy = get_tb(); - for_each_cpu(i) - per_cpu(last_jiffy, i) = tb_last_stamp; + ticks = tb_ticks_since(__get_cpu_var(last_jiffy)); + if (ticks < tb_ticks_per_jiffy) + ticks = tb_ticks_per_jiffy - ticks; + else + ticks = 1; + set_dec(ticks); } #ifdef CONFIG_SMP @@ -541,8 +541,8 @@ int do_settimeofday(struct timespec *tv) time_t wtm_sec, new_sec = tv->tv_sec; long wtm_nsec, new_nsec = tv->tv_nsec; unsigned long flags; - long int tb_delta; - u64 new_xsec, tb_delta_xs; + u64 new_xsec; + unsigned long tb_delta; if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; @@ -563,9 +563,19 @@ int do_settimeofday(struct timespec *tv) first_settimeofday = 0; } #endif + + /* + * Subtract off the number of nanoseconds since the + * beginning of the last tick. + * Note that since we don't increment jiffies_64 anywhere other + * than in do_timer (since we don't have a lost tick problem), + * wall_jiffies will always be the same as jiffies, + * and therefore the (jiffies - wall_jiffies) computation + * has been removed. + */ tb_delta = tb_ticks_since(tb_last_stamp); - tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; - tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); + tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ + new_nsec -= SCALE_XSEC(tb_delta, 1000000000); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); @@ -580,12 +590,12 @@ int do_settimeofday(struct timespec *tv) ntp_clear(); - new_xsec = 0; - if (new_nsec != 0) { - new_xsec = (u64)new_nsec * XSEC_PER_SEC; + new_xsec = xtime.tv_nsec; + if (new_xsec != 0) { + new_xsec *= XSEC_PER_SEC; do_div(new_xsec, NSEC_PER_SEC); } - new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs; + new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC; update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; @@ -671,7 +681,7 @@ void __init time_init(void) unsigned long flags; unsigned long tm = 0; struct div_result res; - u64 scale; + u64 scale, x; unsigned shift; if (ppc_md.time_init != NULL) @@ -693,11 +703,36 @@ void __init time_init(void) } tb_ticks_per_jiffy = ppc_tb_freq / HZ; - tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; + tb_ticks_per_sec = ppc_tb_freq; tb_ticks_per_usec = ppc_tb_freq / 1000000; tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); - div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res); - tb_to_xs = res.result_low; + + /* + * Calculate the length of each tick in ns. It will not be + * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ. + * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq, + * rounded up. + */ + x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1; + do_div(x, ppc_tb_freq); + tick_nsec = x; + last_tick_len = x << TICKLEN_SCALE; + + /* + * Compute ticklen_to_xs, which is a factor which gets multiplied + * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value. + * It is computed as: + * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9) + * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT + * so as to give the result as a 0.64 fixed-point fraction. 
+ */ + div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0, + tb_ticks_per_jiffy, &res); + div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res); + ticklen_to_xs = res.result_low; + + /* Compute tb_to_xs from tick_nsec */ + tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs); /* * Compute scale factor for sched_clock. @@ -724,6 +759,14 @@ void __init time_init(void) tm = get_boot_time(); write_seqlock_irqsave(&xtime_lock, flags); + + /* If platform provided a timezone (pmac), we correct the time */ + if (timezone_offset) { + sys_tz.tz_minuteswest = -timezone_offset / 60; + sys_tz.tz_dsttime = 0; + tm -= timezone_offset; + } + xtime.tv_sec = tm; xtime.tv_nsec = 0; do_gtod.varp = &do_gtod.vars[0]; @@ -738,18 +781,11 @@ void __init time_init(void) vdso_data->tb_orig_stamp = tb_last_jiffy; vdso_data->tb_update_count = 0; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; - vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; + vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; vdso_data->tb_to_xs = tb_to_xs; time_freq = 0; - /* If platform provided a timezone (pmac), we correct the time */ - if (timezone_offset) { - sys_tz.tz_minuteswest = -timezone_offset / 60; - sys_tz.tz_dsttime = 0; - xtime.tv_sec -= timezone_offset; - } - last_rtc_update = xtime.tv_sec; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); @@ -759,126 +795,6 @@ void __init time_init(void) set_dec(tb_ticks_per_jiffy); } -/* - * After adjtimex is called, adjust the conversion of tb ticks - * to microseconds to keep do_gettimeofday synchronized - * with ntpd. - * - * Use the time_adjust, time_freq and time_offset computed by adjtimex to - * adjust the frequency. - */ - -/* #define DEBUG_PPC_ADJTIMEX 1 */ - -void ppc_adjtimex(void) -{ -#ifdef CONFIG_PPC64 - unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, - new_tb_to_xs, new_xsec, new_stamp_xsec; - unsigned long tb_ticks_per_sec_delta; - long delta_freq, ltemp; - struct div_result divres; - unsigned long flags; - long singleshot_ppm = 0; - - /* - * Compute parts per million frequency adjustment to - * accomplish the time adjustment implied by time_offset to be - * applied over the elapsed time indicated by time_constant. - * Use SHIFT_USEC to get it into the same units as - * time_freq. - */ - if ( time_offset < 0 ) { - ltemp = -time_offset; - ltemp <<= SHIFT_USEC - SHIFT_UPDATE; - ltemp >>= SHIFT_KG + time_constant; - ltemp = -ltemp; - } else { - ltemp = time_offset; - ltemp <<= SHIFT_USEC - SHIFT_UPDATE; - ltemp >>= SHIFT_KG + time_constant; - } - - /* If there is a single shot time adjustment in progress */ - if ( time_adjust ) { -#ifdef DEBUG_PPC_ADJTIMEX - printk("ppc_adjtimex: "); - if ( adjusting_time == 0 ) - printk("starting "); - printk("single shot time_adjust = %ld\n", time_adjust); -#endif - - adjusting_time = 1; - - /* - * Compute parts per million frequency adjustment - * to match time_adjust - */ - singleshot_ppm = tickadj * HZ; - /* - * The adjustment should be tickadj*HZ to match the code in - * linux/kernel/timer.c, but experiments show that this is too - * large. 
3/4 of tickadj*HZ seems about right - */ - singleshot_ppm -= singleshot_ppm / 4; - /* Use SHIFT_USEC to get it into the same units as time_freq */ - singleshot_ppm <<= SHIFT_USEC; - if ( time_adjust < 0 ) - singleshot_ppm = -singleshot_ppm; - } - else { -#ifdef DEBUG_PPC_ADJTIMEX - if ( adjusting_time ) - printk("ppc_adjtimex: ending single shot time_adjust\n"); -#endif - adjusting_time = 0; - } - - /* Add up all of the frequency adjustments */ - delta_freq = time_freq + ltemp + singleshot_ppm; - - /* - * Compute a new value for tb_ticks_per_sec based on - * the frequency adjustment - */ - den = 1000000 * (1 << (SHIFT_USEC - 8)); - if ( delta_freq < 0 ) { - tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den; - new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta; - } - else { - tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den; - new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta; - } - -#ifdef DEBUG_PPC_ADJTIMEX - printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm); - printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec); -#endif - - /* - * Compute a new value of tb_to_xs (used to convert tb to - * microseconds) and a new value of stamp_xsec which is the - * time (in 1/2^20 second units) corresponding to - * tb_orig_stamp. This new value of stamp_xsec compensates - * for the change in frequency (implied by the new tb_to_xs) - * which guarantees that the current time remains the same. - */ - write_seqlock_irqsave( &xtime_lock, flags ); - tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp; - div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres); - new_tb_to_xs = divres.result_low; - new_xsec = mulhdu(tb_ticks, new_tb_to_xs); - - old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs); - new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec; - - update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs); - - write_sequnlock_irqrestore( &xtime_lock, flags ); -#endif /* CONFIG_PPC64 */ -} - #define FEBRUARY 2 #define STARTOFTIME 1970 -- cgit 1.2.3-korg From 0728a2f99ef6efd1984f9e0ed59834c1cc602e6f Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Sat, 11 Feb 2006 18:21:47 +0100 Subject: [PATCH] powerpc: remove duplicate exports A few symbols are exported twice, remove them from ppc_ksyms.c Remove users of sys_ctrler in arch/ppc/ WARNING: vmlinux: duplicate symbol '__delay' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol '__up' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol '__down' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol '__down_interruptible' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol 'sys_ctrler' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol 'strncat' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol 'strncmp' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol 'strchr' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol 'strrchr' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol 'strnlen' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol 'strpbrk' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol 'memscan' previous definition was in vmlinux WARNING: vmlinux: duplicate symbol 'strstr' previous definition was in vmlinux 
Signed-off-by: Olaf Hering Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/ppc_ksyms.c | 16 ---------------- arch/ppc/kernel/ppc_ksyms.c | 8 -------- arch/ppc/xmon/start.c | 15 +-------------- include/asm-ppc/machdep.h | 13 ------------- 4 files changed, 1 insertion(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index d9a459c144d81d..8a731ea877b79b 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -79,15 +79,8 @@ EXPORT_SYMBOL(sys_sigreturn); EXPORT_SYMBOL(strcpy); EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strpbrk); -EXPORT_SYMBOL(strstr); EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strcasecmp); EXPORT_SYMBOL(csum_partial); @@ -185,9 +178,6 @@ EXPORT_SYMBOL(adb_try_handler_change); EXPORT_SYMBOL(cuda_request); EXPORT_SYMBOL(cuda_poll); #endif /* CONFIG_ADB_CUDA */ -#ifdef CONFIG_PPC_PMAC -EXPORT_SYMBOL(sys_ctrler); -#endif #ifdef CONFIG_VT EXPORT_SYMBOL(kd_mksound); #endif @@ -205,7 +195,6 @@ EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memscan); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memchr); @@ -214,7 +203,6 @@ EXPORT_SYMBOL(screen_info); #endif #ifdef CONFIG_PPC32 -EXPORT_SYMBOL(__delay); EXPORT_SYMBOL(timer_interrupt); EXPORT_SYMBOL(irq_desc); EXPORT_SYMBOL(tb_ticks_per_jiffy); @@ -222,10 +210,6 @@ EXPORT_SYMBOL(console_drivers); EXPORT_SYMBOL(cacheable_memcpy); #endif -EXPORT_SYMBOL(__up); -EXPORT_SYMBOL(__down); -EXPORT_SYMBOL(__down_interruptible); - #ifdef CONFIG_8xx EXPORT_SYMBOL(cpm_install_handler); EXPORT_SYMBOL(cpm_free_handler); diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c index 15bd9b448a488b..82adb460134831 100644 --- a/arch/ppc/kernel/ppc_ksyms.c +++ b/arch/ppc/kernel/ppc_ksyms.c @@ -93,15 +93,8 @@ EXPORT_SYMBOL(test_and_change_bit); EXPORT_SYMBOL(strcpy); EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strpbrk); -EXPORT_SYMBOL(strstr); EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strcasecmp); EXPORT_SYMBOL(__div64_32); @@ -253,7 +246,6 @@ EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(cacheable_memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memscan); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memchr); diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c index 4344cbe9b5c523..484f5bb1aa3e09 100644 --- a/arch/ppc/xmon/start.c +++ b/arch/ppc/xmon/start.c @@ -146,19 +146,6 @@ xmon_map_scc(void) static int scc_initialized = 0; void xmon_init_scc(void); -extern void cuda_poll(void); - -static inline void do_poll_adb(void) -{ -#ifdef CONFIG_ADB_PMU - if (sys_ctrler == SYS_CTRLER_PMU) - pmu_poll_adb(); -#endif /* CONFIG_ADB_PMU */ -#ifdef CONFIG_ADB_CUDA - if (sys_ctrler == SYS_CTRLER_CUDA) - cuda_poll(); -#endif /* CONFIG_ADB_CUDA */ -} int xmon_write(void *handle, void *ptr, int nb) @@ -189,7 +176,7 @@ xmon_write(void *handle, void *ptr, int nb) ct = 0; for (i = 0; i < nb; ++i) { while ((*sccc & TXRDY) == 0) - do_poll_adb(); + ; c = p[i]; if (c == '\n' && !ct) { c = '\r'; diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h index 39200def8d116f..a3e8a45e45a9e4 100644 --- a/include/asm-ppc/machdep.h +++ b/include/asm-ppc/machdep.h @@ -154,19 
+154,6 @@ extern char cmd_line[COMMAND_LINE_SIZE]; extern void setup_pci_ptrs(void); -/* - * Power macintoshes have either a CUDA or a PMU controlling - * system reset, power, NVRAM, RTC. - */ -typedef enum sys_ctrler_kind { - SYS_CTRLER_UNKNOWN = 0, - SYS_CTRLER_CUDA = 1, - SYS_CTRLER_PMU = 2, - SYS_CTRLER_SMU = 3, -} sys_ctrler_t; - -extern sys_ctrler_t sys_ctrler; - #ifdef CONFIG_SMP struct smp_ops_t { void (*message_pass)(int target, int msg); -- cgit 1.2.3-korg From 2b9a32edba3af9ad4ccb23574bea0cc34455dc43 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 15 Feb 2006 21:40:44 -0600 Subject: [PATCH] powerpc: Fix OOPS in lparcfg on G5 Fallback gracefully when reading /proc/ppc64/lparcfg when the /rtas device node can't be found. Signed-off-by: Olof Johansson Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/lparcfg.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 1ae96a8ed7e21f..e789fef4eb8a1d 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -341,7 +341,7 @@ static int lparcfg_data(struct seq_file *m, void *v) const char *system_id = ""; unsigned int *lp_index_ptr, lp_index = 0; struct device_node *rtas_node; - int *lrdrp; + int *lrdrp = NULL; rootdn = find_path_device("/"); if (rootdn) { @@ -362,7 +362,9 @@ static int lparcfg_data(struct seq_file *m, void *v) seq_printf(m, "partition_id=%d\n", (int)lp_index); rtas_node = find_path_device("/rtas"); - lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL); + if (rtas_node) + lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", + NULL); if (lrdrp == NULL) { partition_potential_processors = vdso_data->processorCount; -- cgit 1.2.3-korg From f018b36f3e1f21318066de8d01740d30e38b03d5 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 16 Feb 2006 14:13:50 +1100 Subject: [PATCH] powerpc: Don't start secondary CPUs in a UP && KEXEC kernel Because smp_release_cpus() is built for SMP || KEXEC, it's not safe to unconditionally call it from setup_system(). On a UP && KEXEC kernel we'll start up the secondary CPUs which will then go beserk and we die. Simple fix is to conditionally call smp_release_cpus() in setup_system(). With that in place we don't need the dummy definition of smp_release_cpus() because all call sites are #ifdef'ed either SMP or KEXEC. 
Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/setup_64.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index a717dff695ef28..f96c49b03ba05f 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -311,8 +311,6 @@ void smp_release_cpus(void) DBG(" <- smp_release_cpus()\n"); } -#else -#define smp_release_cpus() #endif /* CONFIG_SMP || CONFIG_KEXEC */ /* @@ -473,10 +471,12 @@ void __init setup_system(void) check_smt_enabled(); smp_setup_cpu_maps(); +#ifdef CONFIG_SMP /* Release secondary cpus out of their spinloops at 0x60 now that * we can map physical -> logical CPU ids */ smp_release_cpus(); +#endif printk("Starting Linux PPC64 %s\n", system_utsname.version); -- cgit 1.2.3-korg From 8fca92705ef462f39e7db5a0f7100bcaae91bfd3 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 16 Feb 2006 14:13:51 +1100 Subject: [PATCH] powerpc: Make UP -> SMP kexec work again For UP to SMP kexec to work we need to jump into pSeries_secondary_smp_init event on a UP + KEXEC kernel. The secondary cpus will not find their hw_cpu_id in the paca and so they'll jump into kexec_wait, ready for a kexec. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_64.S | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 41565962939473..2b03a09fe5e9d5 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -157,8 +157,7 @@ _GLOBAL(__secondary_hold) SET_REG_IMMEDIATE(r4, .hmt_init) mtctr r4 bctr -#else -#ifdef CONFIG_SMP +#elif defined(CONFIG_SMP) || defined(CONFIG_KEXEC) LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init) mtctr r4 mr r3,r24 @@ -166,7 +165,6 @@ _GLOBAL(__secondary_hold) #else BUG_OPCODE #endif -#endif /* This value is used to mark exception frames on the stack. */ .section ".toc","aw" -- cgit 1.2.3-korg From 496b7a5159b8366b003bbc17f8c4e27f69b6779e Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Thu, 16 Feb 2006 14:13:53 +1100 Subject: [PATCH] powerpc: Fix bug in spinup of renumbered secondary threads If the logical and physical cpu ids of a secondary thread don't match, we will fail to spin the thread up on pSeries machines due to a bug in pseries/smp.c We call the RTAS "start-cpu" method with the physical cpu id, the address of pSeries_secondary_smp_init and the value to pass that function in r3. Currently we pass "lcpu", the logical cpu id, but pSeries_secondary_smp_init expects the physical cpu id in r3. We should be passing pcpu instead. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras --- arch/powerpc/platforms/pseries/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 8e6b1ed1396e47..8d710af5075660 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -292,7 +292,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) if (start_cpu == RTAS_UNKNOWN_SERVICE) return 1; - status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); + status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, pcpu); if (status != 0) { printk(KERN_ERR "start-cpu failed: %i\n", status); return 0; -- cgit 1.2.3-korg
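A note on the last fix above: the following is a minimal, self-contained C sketch (not kernel code) of the calling convention the patch corrects. RTAS "start-cpu" names the CPU to be started by its physical (hardware) id, and the entry code at pSeries_secondary_smp_init looks the CPU up by the value it receives in r3, so that value must be the same physical id. The id table, get_hard_id() and mock_rtas_start_cpu() below are illustrative stand-ins, not real firmware or kernel interfaces.

#include <stdio.h>

/* Illustrative logical -> physical (hardware) CPU id map.  On real
 * hardware the physical ids come from the device tree and need not
 * match the logical numbering, e.g. for renumbered secondary threads. */
static const int hard_id[] = { 0, 2, 1, 3 };

static int get_hard_id(int lcpu)
{
	return hard_id[lcpu];
}

/* Stand-in for the RTAS "start-cpu" call: start the CPU with physical
 * id 'pcpu' at 'entry', handing it 'r3' as its first argument. */
static int mock_rtas_start_cpu(int pcpu, const char *entry, int r3)
{
	/* The started CPU identifies itself by the physical id it was
	 * given, so passing some other CPU's id in r3 loses the thread. */
	if (r3 != pcpu) {
		printf("start-cpu %d at %s: r3=%d mismatch, thread lost\n",
		       pcpu, entry, r3);
		return -1;
	}
	printf("start-cpu %d at %s: r3=%d ok\n", pcpu, entry, r3);
	return 0;
}

int main(void)
{
	int lcpu = 1;			/* logical cpu id */
	int pcpu = get_hard_id(lcpu);	/* physical cpu id, here 2 */

	/* Buggy form: logical id passed as r3. */
	mock_rtas_start_cpu(pcpu, "pSeries_secondary_smp_init", lcpu);

	/* Fixed form, matching the patch: physical id passed as r3. */
	mock_rtas_start_cpu(pcpu, "pSeries_secondary_smp_init", pcpu);

	return 0;
}

When the logical and physical ids of every thread happen to coincide, passing lcpu works by accident, which is why only machines with renumbered secondary threads hit the failure.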