alpha: convert to use arch_gettimeoffset()

Converts alpha to use GENERIC_TIME via the arch_gettimeoffset()
infrastructure, reducing the amount of arch-specific code we need to
maintain.
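
(For context: with ARCH_USES_GETTIMEOFFSET selected, the generic timekeeping
code adds the arch-supplied offset on top of xtime when reading the wall
clock. The snippet below is only a paraphrased sketch of that generic-side
behaviour, not the literal code in kernel/time/timekeeping.c.)

extern u32 arch_gettimeoffset(void);	/* arch returns nanoseconds since the last tick */

static void getnstimeofday_sketch(struct timespec *ts)
{
	*ts = xtime;					/* wall time as of the last tick */
	timespec_add_ns(ts, arch_gettimeoffset());	/* arch interpolates within the tick */
}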

I suspect the alpha arch could be improved further to provide an
rpcc()-based clocksource, but not having the hardware, I don't feel
comfortable attempting the more complicated conversion (though I'd be glad to
help if anyone else is interested).
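
(A rough, untested sketch of what such an rpcc()-based clocksource might look
like is below. It is not part of this patch; the names read_rpcc and
clocksource_rpcc, the rating value, and the registration details are
assumptions, mult/shift would still have to be derived from the measured
cycle frequency, and the SMP cycle-counter synchronization problem noted in
time.c remains unsolved.)

#include <linux/clocksource.h>

/* Hypothetical sketch only, not part of this patch. */
static cycle_t read_rpcc(struct clocksource *cs)
{
	return (cycle_t)rpcc();		/* lower 32 bits of the per-CPU cycle counter */
}

static struct clocksource clocksource_rpcc = {
	.name	= "rpcc",
	.rating	= 300,			/* assumed rating */
	.read	= read_rpcc,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* registration, once mult/shift are filled in from the cycle frequency:
 *	clocksource_register(&clocksource_rpcc);
 */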

[akpm@linux-foundation.org: fix build]
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
john stultz 2009-09-21 17:04:00 -07:00 committed by Linus Torvalds
parent 6e0c9e7777
commit 4f543fa41e
2 changed files with 15 additions and 72 deletions

arch/alpha/Kconfig

@@ -45,6 +45,14 @@ config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
 
+config GENERIC_TIME
+	bool
+	default y
+
+config ARCH_USES_GETTIMEOFFSET
+	bool
+	default y
+
 config ZONE_DMA
 	bool
 	default y

arch/alpha/kernel/time.c

@@ -408,28 +408,17 @@ time_init(void)
  * part. So we can't do the "find absolute time in terms of cycles" thing
  * that the other ports do.
  */
-void
-do_gettimeofday(struct timeval *tv)
+u32 arch_gettimeoffset(void)
 {
-	unsigned long flags;
-	unsigned long sec, usec, seq;
-	unsigned long delta_cycles, delta_usec, partial_tick;
-	do {
-		seq = read_seqbegin_irqsave(&xtime_lock, flags);
-		delta_cycles = rpcc() - state.last_time;
-		sec = xtime.tv_sec;
-		usec = (xtime.tv_nsec / 1000);
-		partial_tick = state.partial_tick;
-	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 #ifdef CONFIG_SMP
 	/* Until and unless we figure out how to get cpu cycle counters
 	   in sync and keep them there, we can't use the rpcc tricks. */
-	delta_usec = 0;
+	return 0;
 #else
+	unsigned long delta_cycles, delta_usec, partial_tick;
+	delta_cycles = rpcc() - state.last_time;
+	partial_tick = state.partial_tick;
 	/*
 	 * usec = cycles * ticks_per_cycle * 2**48 * 1e6 / (2**48 * ticks)
 	 *	= cycles * (s_t_p_c) * 1e6 / (2**48 * ticks)
@@ -446,64 +435,10 @@ do_gettimeofday(struct timeval *tv)
 	delta_usec = (delta_cycles * state.scaled_ticks_per_cycle
 		      + partial_tick) * 15625;
 	delta_usec = ((delta_usec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
+	return delta_usec * 1000;
 #endif
-	usec += delta_usec;
-	if (usec >= 1000000) {
-		sec += 1;
-		usec -= 1000000;
-	}
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
 }
-EXPORT_SYMBOL(do_gettimeofday);
-int
-do_settimeofday(struct timespec *tv)
-{
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
-	unsigned long delta_nsec;
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-	write_seqlock_irq(&xtime_lock);
-	/* The offset that is added into time in do_gettimeofday above
-	   must be subtracted out here to keep a coherent view of the
-	   time. Without this, a full-tick error is possible. */
-#ifdef CONFIG_SMP
-	delta_nsec = 0;
-#else
-	delta_nsec = rpcc() - state.last_time;
-	delta_nsec = (delta_nsec * state.scaled_ticks_per_cycle
-		      + state.partial_tick) * 15625;
-	delta_nsec = ((delta_nsec / ((1UL << (FIX_SHIFT-6-1)) * HZ)) + 1) / 2;
-	delta_nsec *= 1000;
-#endif
-	nsec -= delta_nsec;
-	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-	ntp_clear();
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-	return 0;
-}
-EXPORT_SYMBOL(do_settimeofday);
 /*
  * In order to set the CMOS clock precisely, set_rtc_mmss has to be
  * called 500 ms after the second nowtime has started, because when