From 79c62cf1789f935280138b412bb750be84aec747 Mon Sep 17 00:00:00 2001
From: Ross Biro
Date: Wed, 11 Jan 2006 22:43:51 +0100
Subject: [PATCH] x86_64: Make udelay more accurate

The attempt to avoid overflow in __delay caused varying precision on
different CPUs depending on differences in the CPU speed.

We should be able to do this multiplication without overflowing,
provided the CPU is running at less than about 128 GHz:
xloops < 20000 * 0x10c6, and loops_per_jiffy * HZ <= cpu_clock_speed.
So the full product stays below 2^64 whenever the CPU clock speed is
below 2^64 / (20000 * 0x10c6) = 2^64 / 0x51E6CC0, and since
0x51E6CC0 < 2^27 that bound is larger than 2^64 / 2^27 = 2^37 = 128G.
Any CPU slower than that will not overflow the calculation.

Signed-off-by: Andi Kleen
Signed-off-by: Linus Torvalds
---
 arch/x86_64/lib/delay.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 841bd738a1898..03c460cbdd1c0 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -39,7 +39,7 @@ void __delay(unsigned long loops)
 
 inline void __const_udelay(unsigned long xloops)
 {
-	__delay(((xloops * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
+	__delay((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32);
 }
 
 void __udelay(unsigned long usecs)
-- 
cgit 1.2.3-korg
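
For illustration, here is a minimal standalone userspace C sketch (not kernel
code) that mimics the arithmetic in __const_udelay and shows why the reordered
expression is more accurate.  The concrete values are assumptions for the
demonstration only: HZ is taken as 1000 and loops_per_jiffy corresponds to a
hypothetical 1.345678 GHz CPU; 0x10c6 is the per-microsecond scale factor
(~2^32 / 10^6) referenced in the commit message.

#include <stdio.h>
#include <stdint.h>

#define HZ 1000ULL			/* assumed timer tick rate */

int main(void)
{
	/* Hypothetical 1.345678 GHz CPU: loops_per_jiffy = cpu_hz / HZ */
	uint64_t loops_per_jiffy = 1345678000ULL / HZ;

	/* udelay(1): xloops = usecs * 0x10c6, where 0x10c6 ~= 2^32 / 10^6 */
	uint64_t xloops = 1 * 0x10c6;

	/* Old expression: the >> 32 truncates before the multiply by HZ,
	 * so the result is always a multiple of HZ and can fall short by
	 * nearly HZ loops. */
	uint64_t old_loops = ((xloops * loops_per_jiffy) >> 32) * HZ;

	/* New expression: shift last, so the truncation error is < 1 loop. */
	uint64_t new_loops = (xloops * HZ * loops_per_jiffy) >> 32;

	/* Prints old = 1000 loops, new = 1345 loops; the ideal value for
	 * 1 us at 1.345678 GHz is about 1345.7 loops. */
	printf("old: %llu  new: %llu\n",
	       (unsigned long long)old_loops,
	       (unsigned long long)new_loops);
	return 0;
}

The same arithmetic also makes the overflow bound concrete: with xloops capped
at 20000 * 0x10c6, the 64-bit product xloops * HZ * loops_per_jiffy cannot
overflow as long as HZ * loops_per_jiffy (i.e. the CPU clock) stays below
2^37 = 128G loops per second, matching the bound in the commit message.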