author     Dimitri Sivanich <sivanich@sgi.com>    2006-09-26 10:52:34 +0200
committer  Andi Kleen <andi@basil.nowhere.org>    2006-09-26 10:52:34 +0200
commit     cbf9b4bb76c9ce53b7fdde0dffcd000951b5f0d4
tree       0942698b5aef01d2b89fe04111f7ba40b28865f0 /arch
parent     d28c4393a7bf558538e9def269c1caeab6ec056f
[PATCH] X86_64 monotonic_clock goes backwards
I've noticed some erratic behavior while testing the X86_64 version
of monotonic_clock().
While spinning in a loop reading monotonic clock values (pinned to a
single cpu) I noticed that the difference between subsequent values
occasionally went negative (time going backwards).
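
The author's test read the kernel's internal monotonic_clock() directly; a rough
userspace analogue of the same check (pin to one CPU, read the monotonic clock in
a tight loop, flag any negative difference) might look like the sketch below. The
CPU number and iteration count are arbitrary choices for the example.

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <time.h>

    /* Pin to CPU 0 and watch for the monotonic clock moving backwards. */
    int main(void)
    {
        cpu_set_t set;
        CPU_ZERO(&set);
        CPU_SET(0, &set);
        if (sched_setaffinity(0, sizeof(set), &set) != 0) {
            perror("sched_setaffinity");
            return 1;
        }

        struct timespec prev, cur;
        clock_gettime(CLOCK_MONOTONIC, &prev);
        for (long i = 0; i < 100000000L; i++) {
            clock_gettime(CLOCK_MONOTONIC, &cur);
            long long d = (cur.tv_sec - prev.tv_sec) * 1000000000LL
                          + (cur.tv_nsec - prev.tv_nsec);
            if (d < 0)
                printf("backwards by %lld ns at iteration %ld\n", -d, i);
            prev = cur;
        }
        return 0;
    }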
I found that in the following code:
                this_offset = get_cycles_sync();
                /* FIXME: 1000 or 1000000? */
        -->     offset = (this_offset - last_offset)*1000 / cpu_khz;
        }
        return base + offset;
the offset sometimes turns out to be 0 even though this_offset > last_offset:
the integer division truncates to zero whenever (this_offset - last_offset) * 1000
is smaller than cpu_khz, i.e. whenever less than a microsecond's worth of cycles
has elapsed since last_offset was recorded.
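
As a minimal standalone illustration of that truncation (the cpu_khz value and the
cycle counts below are made up for the example, not taken from the report), the
same arithmetic yields 0 for any sub-microsecond TSC delta:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long cpu_khz = 2400000ULL;    /* hypothetical 2.4 GHz CPU */
        unsigned long long last_offset = 1000000ULL;
        unsigned long long this_offset = last_offset + 1500ULL;  /* ~0.6 us later */

        /* Same arithmetic as the old monotonic_clock(): truncates to 0
         * whenever (this_offset - last_offset) * 1000 < cpu_khz. */
        unsigned long long offset = (this_offset - last_offset) * 1000 / cpu_khz;

        printf("delta = %llu cycles -> offset = %llu\n",
               this_offset - last_offset, offset);  /* prints offset = 0 */
        return 0;
    }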
Added fix from: Toyo Abe <toyoa@mvista.com>
The x86_64-mm-monotonic-clock.patch in 2.6.18-rc4-mm2 made a change to
the updating of monotonic_base. It now uses cycles_2_ns().
I suggest that set_cyc2ns_scale() be done prior to the setup_irq(), because
cycles_2_ns() can be called from the timer ISR as soon as irq0 is enabled.
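
For context, cycles_2_ns() converts a TSC delta to nanoseconds using a scale
factor that set_cyc2ns_scale() precomputes from cpu_khz. The sketch below is
modeled on the kernel's cyc2ns code of that era rather than quoted from time.c,
and the 2.4 GHz cpu_khz is a made-up value; it shows why the ordering matters:
with the scale still unset (zero), cycles_2_ns() maps every delta to 0, which is
what a timer interrupt arriving between setup_irq() and set_cyc2ns_scale() would
have hit.

    #include <stdio.h>

    #define NS_SCALE 10    /* fixed-point shift, as used by the kernel's cyc2ns code */

    static unsigned long long cyc2ns_scale;    /* zero until set_cyc2ns_scale() runs */

    static void set_cyc2ns_scale(unsigned long cpu_khz)
    {
        /* nanoseconds per cycle, scaled by 2^NS_SCALE */
        cyc2ns_scale = (1000000ULL << NS_SCALE) / cpu_khz;
    }

    static unsigned long long cycles_2_ns(unsigned long long cyc)
    {
        return (cyc * cyc2ns_scale) >> NS_SCALE;
    }

    int main(void)
    {
        unsigned long cpu_khz = 2400000;    /* hypothetical 2.4 GHz CPU */
        unsigned long long delta = 1500;    /* roughly 0.6 us of cycles */

        /* A timer ISR running before the scale is initialized sees 0 here,
         * so monotonic_base would not advance for that tick. */
        printf("before set_cyc2ns_scale: %llu ns\n", cycles_2_ns(delta));

        set_cyc2ns_scale(cpu_khz);    /* what the patch moves ahead of setup_irq() */
        printf("after  set_cyc2ns_scale: %llu ns\n", cycles_2_ns(delta));  /* ~624 ns */
        return 0;
    }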
Signed-off-by: Toyo Abe <toyoa@mvista.com>
Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86_64/kernel/time.c | 11
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/arch/x86_64/kernel/time.c b/arch/x86_64/kernel/time.c
index d66c7f750e7..97115e608ed 100644
--- a/arch/x86_64/kernel/time.c
+++ b/arch/x86_64/kernel/time.c
@@ -276,6 +276,7 @@ static void set_rtc_mmss(unsigned long nowtime)
  * Note: This function is required to return accurate
  * time even in the absence of multiple timer ticks.
  */
+static inline unsigned long long cycles_2_ns(unsigned long long cyc);
 unsigned long long monotonic_clock(void)
 {
 	unsigned long seq;
@@ -300,8 +301,7 @@ unsigned long long monotonic_clock(void)
 			base = monotonic_base;
 		} while (read_seqretry(&xtime_lock, seq));
 		this_offset = get_cycles_sync();
-		/* FIXME: 1000 or 1000000? */
-		offset = (this_offset - last_offset)*1000 / cpu_khz;
+		offset = cycles_2_ns(this_offset - last_offset);
 	}
 	return base + offset;
 }
@@ -405,8 +405,7 @@ void main_timer_handler(struct pt_regs *regs)
 			offset %= USEC_PER_TICK;
 		}
 
-		/* FIXME: 1000 or 1000000? */
-		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
+		monotonic_base += cycles_2_ns(tsc - vxtime.last_tsc);
 
 		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
 
@@ -929,10 +928,8 @@ void __init time_init(void)
 	vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
 	vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
 	vxtime.last_tsc = get_cycles_sync();
-	setup_irq(0, &irq0);
-
 	set_cyc2ns_scale(cpu_khz);
-
+	setup_irq(0, &irq0);
 	hotcpu_notifier(time_cpu_notifier, 0);
 	time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());