Merge branch 'powerpc.cherry-picks' into timers/clocksource
Conflicts:
	arch/powerpc/kernel/time.c

Reason: The powerpc next tree contains two commits which conflict with
the timekeeping changes:

  8fd63a9e powerpc: Rework VDSO gettimeofday to prevent time going backwards
  c1aa687d powerpc: Clean up obsolete code relating to decrementer and timebase

John Stultz identified them and provided the conflict resolution.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 47916be4e2
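The crux of the VDSO rework shows up in the update_vsyscall() hunks below: the kernel now publishes the fractional second of stamp_xtime as a 0.32 fixed-point value (stamp_sec_fraction), so userspace converts it with the same rounding the kernel used. A standalone sketch of that conversion and its inverse, for illustration only (plain C, not kernel code; the constant 18446744073 is 2^64 / 10^9 rounded down, exactly the value the patch uses):

	#include <stdint.h>

	/* tv_nsec (0..999999999) -> 0.32 fixed-point fraction of a second */
	static uint32_t ns_to_frac32(uint32_t ns)
	{
		return (uint32_t)(((uint64_t)ns * 18446744073ULL) >> 32);
	}

	/* what a consumer like the VDSO computes from it: frac * unit >> 32 */
	static uint32_t frac32_to_ns(uint32_t frac)
	{
		return (uint32_t)(((uint64_t)frac * 1000000000ULL) >> 32);
	}

Both directions truncate, so the recovered value can trail the stored one by a nanosecond but never overshoot it.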
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -366,8 +366,5 @@ static inline void log_error(char *buf, unsigned int err_type, int fatal)
 #define machine_late_initcall(mach,fn)		__define_machine_initcall(mach,"7",fn,7)
 #define machine_late_initcall_sync(mach,fn)	__define_machine_initcall(mach,"7s",fn,7s)
 
-void generic_suspend_disable_irqs(void);
-void generic_suspend_enable_irqs(void);
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_MACHDEP_H */
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -28,16 +28,12 @@
 extern unsigned long tb_ticks_per_jiffy;
 extern unsigned long tb_ticks_per_usec;
 extern unsigned long tb_ticks_per_sec;
-extern u64 tb_to_xs;
-extern unsigned tb_to_us;
 
 struct rtc_time;
 extern void to_tm(int tim, struct rtc_time * tm);
 extern void GregorianDay(struct rtc_time *tm);
-extern time_t last_rtc_update;
 
 extern void generic_calibrate_decr(void);
-extern void wakeup_decrementer(void);
 extern void snapshot_timebase(void);
 
 extern void set_dec_cpu6(unsigned int val);
@@ -204,9 +200,6 @@ static inline unsigned long tb_ticks_since(unsigned long tstamp)
 extern u64 mulhdu(u64, u64);
 #endif
 
-extern void smp_space_timers(unsigned int);
-
-extern unsigned mulhwu_scale_factor(unsigned, unsigned);
 extern void div128_by_32(u64 dividend_high, u64 dividend_low,
 			 unsigned divisor, struct div_result *dr);
 
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -85,6 +85,7 @@ struct vdso_data {
 	__s32 wtom_clock_sec;			/* Wall to monotonic clock */
 	__s32 wtom_clock_nsec;
 	struct timespec stamp_xtime;		/* xtime as at tb_orig_stamp */
+	__u32 stamp_sec_fraction;		/* fractional seconds of stamp_xtime */
 	__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 };
@@ -105,6 +106,7 @@ struct vdso_data {
 	__s32 wtom_clock_sec;			/* Wall to monotonic clock */
 	__s32 wtom_clock_nsec;
 	struct timespec stamp_xtime;		/* xtime as at tb_orig_stamp */
+	__u32 stamp_sec_fraction;		/* fractional seconds of stamp_xtime */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
 	__u32 dcache_block_size;		/* L1 d-cache block size */
 	__u32 icache_block_size;		/* L1 i-cache block size */
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -342,6 +342,7 @@ int main(void)
 	DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
 	DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
 	DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
+	DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction));
 	DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
 	DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
 	DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
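A note on the DEFINE() lines above for readers who haven't met asm-offsets.c: nothing here runs at boot. The macro emits each offset as a marker in the compiler's assembly output, and the build turns every marker into a #define that the gettimeofday.S hunks below consume (which is how STAMP_SEC_FRAC becomes a literal offset in assembler). A condensed sketch of the mechanism, simplified from the kernel's kbuild helper:

	#include <stddef.h>

	struct vdso_data_example {	/* hypothetical stand-in struct */
		unsigned int stamp_sec_fraction;
	};

	/* Emits "->SYM <value>" into the generated .s file; a later build
	 * step rewrites each such line into "#define SYM <value>". */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " : : "i" (val))

	void emit_offsets(void)
	{
		DEFINE(STAMP_SEC_FRAC,
		       offsetof(struct vdso_data_example, stamp_sec_fraction));
	}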
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -288,8 +288,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		max_cpus = NR_CPUS;
 	else
 		max_cpus = 1;
 
-	smp_space_timers(max_cpus);
-
 	for_each_possible_cpu(cpu)
 		if (cpu != boot_cpuid)
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -149,16 +149,6 @@ unsigned long tb_ticks_per_usec = 100; /* sane default */
 EXPORT_SYMBOL(tb_ticks_per_usec);
 unsigned long tb_ticks_per_sec;
 EXPORT_SYMBOL(tb_ticks_per_sec);	/* for cputime_t conversions */
-u64 tb_to_xs;
-unsigned tb_to_us;
-
-#define TICKLEN_SCALE	NTP_SCALE_SHIFT
-static u64 last_tick_len;	/* units are ns / 2^TICKLEN_SCALE */
-static u64 ticklen_to_xs;	/* 0.64 fraction */
-
-/* If last_tick_len corresponds to about 1/HZ seconds, then
-   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
-#define TICKLEN_SHIFT	(63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
 
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL_GPL(rtc_lock);
@@ -174,7 +164,6 @@ unsigned long ppc_proc_freq;
 EXPORT_SYMBOL(ppc_proc_freq);
 unsigned long ppc_tb_freq;
 
-static u64 tb_last_jiffy __cacheline_aligned_in_smp;
 static DEFINE_PER_CPU(u64, last_jiffy);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
@@ -446,7 +435,6 @@ EXPORT_SYMBOL(profile_pc);
 
 static int __init iSeries_tb_recal(void)
 {
-	struct div_result divres;
 	unsigned long titan, tb;
 
 	/* Make sure we only run on iSeries */
@@ -477,10 +465,7 @@ static int __init iSeries_tb_recal(void)
 			tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
 			tb_ticks_per_sec   = new_tb_ticks_per_sec;
 			calc_cputime_factors();
-			div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
-			tb_to_xs = divres.result_low;
 			vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-			vdso_data->tb_to_xs = tb_to_xs;
 			setup_cputime_one_jiffy();
 		}
 		else {
@@ -643,27 +628,9 @@ void timer_interrupt(struct pt_regs * regs)
 	trace_timer_interrupt_exit(regs);
 }
 
-void wakeup_decrementer(void)
-{
-	unsigned long ticks;
-
-	/*
-	 * The timebase gets saved on sleep and restored on wakeup,
-	 * so all we need to do is to reset the decrementer.
-	 */
-	ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
-	if (ticks < tb_ticks_per_jiffy)
-		ticks = tb_ticks_per_jiffy - ticks;
-	else
-		ticks = 1;
-	set_dec(ticks);
-}
-
 #ifdef CONFIG_SUSPEND
-void generic_suspend_disable_irqs(void)
+static void generic_suspend_disable_irqs(void)
 {
-	preempt_disable();
-
 	/* Disable the decrementer, so that it doesn't interfere
 	 * with suspending.
 	 */
@@ -673,12 +640,9 @@ void generic_suspend_disable_irqs(void)
 	set_dec(0x7fffffff);
 }
 
-void generic_suspend_enable_irqs(void)
+static void generic_suspend_enable_irqs(void)
 {
-	wakeup_decrementer();
-
 	local_irq_enable();
-	preempt_enable();
 }
 
 /* Overrides the weak version in kernel/power/main.c */
@@ -698,23 +662,6 @@ void arch_suspend_enable_irqs(void)
 }
 #endif
 
-#ifdef CONFIG_SMP
-void __init smp_space_timers(unsigned int max_cpus)
-{
-	int i;
-	u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);
-
-	/* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
-	previous_tb -= tb_ticks_per_jiffy;
-
-	for_each_possible_cpu(i) {
-		if (i == boot_cpuid)
-			continue;
-		per_cpu(last_jiffy, i) = previous_tb;
-	}
-}
-#endif
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  *
@@ -853,6 +800,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 			struct clocksource *clock, u32 mult)
 {
 	u64 new_tb_to_xs, new_stamp_xsec;
+	u32 frac_sec;
 
 	if (clock != &clocksource_timebase)
 		return;
@@ -868,6 +816,10 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 	do_div(new_stamp_xsec, 1000000000);
 	new_stamp_xsec += (u64) wall_time->tv_sec * XSEC_PER_SEC;
 
+	BUG_ON(wall_time->tv_nsec >= NSEC_PER_SEC);
+	/* this is tv_nsec / 1e9 as a 0.32 fraction */
+	frac_sec = ((u64) wall_time->tv_nsec * 18446744073ULL) >> 32;
+
 	/*
 	 * tb_update_count is used to allow the userspace gettimeofday code
 	 * to assure itself that it sees a consistent view of the tb_to_xs and
@@ -885,6 +837,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 	vdso_data->wtom_clock_sec = wtm->tv_sec;
 	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
 	vdso_data->stamp_xtime = *wall_time;
+	vdso_data->stamp_sec_fraction = frac_sec;
 	smp_wmb();
 	++(vdso_data->tb_update_count);
 }
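The order of the stores above pairs with the lockless readers in the VDSO: tb_update_count is bumped to an odd value before the update begins (earlier in update_vsyscall(), outside this hunk), the data is written, smp_wmb() orders the stores, and the final increment makes the count even again; a reader spins while the count is odd and retries if it changed across the read. Schematically, in C (an illustration of the protocol only; the names are invented and the kernel uses explicit barriers, not C11 atomics):

	#include <stdatomic.h>
	#include <stdint.h>

	struct snapshot {
		_Atomic uint32_t update_count;	/* odd while an update runs */
		uint64_t stamp_xsec;
		uint32_t stamp_sec_fraction;
	};

	/* writer side (kernel) */
	static void publish(struct snapshot *s, uint64_t xsec, uint32_t frac)
	{
		atomic_fetch_add(&s->update_count, 1);	/* now odd */
		s->stamp_xsec = xsec;
		s->stamp_sec_fraction = frac;
		atomic_fetch_add(&s->update_count, 1);	/* even again: consistent */
	}

	/* reader side (VDSO): retry while odd or while the count moved */
	static void read_consistent(struct snapshot *s, uint64_t *xsec,
				    uint32_t *frac)
	{
		uint32_t c;
		do {
			c = atomic_load(&s->update_count);
			*xsec = s->stamp_xsec;
			*frac = s->stamp_sec_fraction;
		} while ((c & 1) || c != atomic_load(&s->update_count));
	}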
@@ -1002,15 +955,13 @@ void secondary_cpu_time_init(void)
 /* This function is only called on the boot processor */
 void __init time_init(void)
 {
-	unsigned long flags;
 	struct div_result res;
-	u64 scale, x;
+	u64 scale;
 	unsigned shift;
 
 	if (__USE_RTC()) {
 		/* 601 processor: dec counts down by 128 every 128ns */
 		ppc_tb_freq = 1000000000;
-		tb_last_jiffy = get_rtcl();
 	} else {
 		/* Normal PowerPC with timebase register */
 		ppc_md.calibrate_decr();
@@ -1018,49 +969,14 @@ void __init time_init(void)
 		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
 		printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
 		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-		tb_last_jiffy = get_tb();
 	}
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
 	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
-	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 	calc_cputime_factors();
 	setup_cputime_one_jiffy();
 
-	/*
-	 * Calculate the length of each tick in ns.  It will not be
-	 * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
-	 * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
-	 * rounded up.
-	 */
-	x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
-	do_div(x, ppc_tb_freq);
-	tick_nsec = x;
-	last_tick_len = x << TICKLEN_SCALE;
-
-	/*
-	 * Compute ticklen_to_xs, which is a factor which gets multiplied
-	 * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
-	 * It is computed as:
-	 * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
-	 * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
-	 * which turns out to be N = 51 - SHIFT_HZ.
-	 * This gives the result as a 0.64 fixed-point fraction.
-	 * That value is reduced by an offset amounting to 1 xsec per
-	 * 2^31 timebase ticks to avoid problems with time going backwards
-	 * by 1 xsec when we do timer_recalc_offset due to losing the
-	 * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
-	 * since there are 2^20 xsec in a second.
-	 */
-	div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
-		     tb_ticks_per_jiffy << SHIFT_HZ, &res);
-	div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
-	ticklen_to_xs = res.result_low;
-
-	/* Compute tb_to_xs from tick_nsec */
-	tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);
-
 	/*
 	 * Compute scale factor for sched_clock.
 	 * The calibrate_decr() function has set tb_ticks_per_sec,
@@ -1082,21 +998,14 @@ void __init time_init(void)
 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
 	boot_tb = get_tb_or_rtc();
 
-	write_seqlock_irqsave(&xtime_lock, flags);
-
 	/* If platform provided a timezone (pmac), we correct the time */
 	if (timezone_offset) {
 		sys_tz.tz_minuteswest = -timezone_offset / 60;
 		sys_tz.tz_dsttime = 0;
 	}
 
-	vdso_data->tb_orig_stamp = tb_last_jiffy;
 	vdso_data->tb_update_count = 0;
 	vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
-	vdso_data->stamp_xsec = (u64) get_seconds() * XSEC_PER_SEC;
-	vdso_data->tb_to_xs = tb_to_xs;
 
-	write_sequnlock_irqrestore(&xtime_lock, flags);
-
 	/* Start the decrementer on CPUs that have manual control
 	 * such as BookE
@@ -1190,39 +1099,6 @@ void to_tm(int tim, struct rtc_time * tm)
 	GregorianDay(tm);
 }
 
-/* Auxiliary function to compute scaling factors */
-/* Actually the choice of a timebase running at 1/4 the of the bus
- * frequency giving resolution of a few tens of nanoseconds is quite nice.
- * It makes this computation very precise (27-28 bits typically) which
- * is optimistic considering the stability of most processor clock
- * oscillators and the precision with which the timebase frequency
- * is measured but does not harm.
- */
-unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
-{
-	unsigned mlt=0, tmp, err;
-	/* No concern for performance, it's done once: use a stupid
-	 * but safe and compact method to find the multiplier.
-	 */
-
-	for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
-		if (mulhwu(inscale, mlt|tmp) < outscale)
-			mlt |= tmp;
-	}
-
-	/* We might still be off by 1 for the best approximation.
-	 * A side effect of this is that if outscale is too large
-	 * the returned value will be zero.
-	 * Many corner cases have been checked and seem to work,
-	 * some might have been forgotten in the test however.
-	 */
-
-	err = inscale * (mlt+1);
-	if (err <= inscale/2)
-		mlt++;
-	return mlt;
-}
-
 /*
  * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128 bit
  * result.
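The deleted helper searched bit-by-bit for the multiplier m whose mulhwu(inscale, m), i.e. (inscale * m) >> 32, comes closest to outscale. With 64-bit arithmetic to hand, the same factor is a single division; an equivalent one-liner (up to the final round-to-nearest step, shown for reference only):

	#include <stdint.h>

	/* m such that ((uint64_t)inscale * m) >> 32 ~= outscale */
	static uint32_t scale_factor(uint32_t inscale, uint32_t outscale)
	{
		return (uint32_t)(((uint64_t)outscale << 32) / inscale);
	}

It goes away together with tb_to_us, its user in time_init().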
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -19,8 +19,10 @@
 /* Offset for the low 32-bit part of a field of long type */
 #ifdef CONFIG_PPC64
 #define LOPART	4
+#define TSPEC_TV_SEC	TSPC64_TV_SEC+LOPART
 #else
 #define LOPART	0
+#define TSPEC_TV_SEC	TSPC32_TV_SEC
 #endif
 
 	.text
@@ -41,23 +43,11 @@ V_FUNCTION_BEGIN(__kernel_gettimeofday)
 	mr	r9, r3			/* datapage ptr in r9 */
 	cmplwi	r10,0			/* check if tv is NULL */
 	beq	3f
-	bl	__do_get_xsec@local	/* get xsec from tb & kernel */
-	bne-	2f			/* out of line -> do syscall */
-
-	/* seconds are xsec >> 20 */
-	rlwinm	r5,r4,12,20,31
-	rlwimi	r5,r3,12,0,19
-	stw	r5,TVAL32_TV_SEC(r10)
-
-	/* get remaining xsec and convert to usec. we scale
-	 * up remaining xsec by 12 bits and get the top 32 bits
-	 * of the multiplication
-	 */
-	rlwinm	r5,r4,12,0,19
-	lis	r6,1000000@h
-	ori	r6,r6,1000000@l
-	mulhwu	r5,r5,r6
-	stw	r5,TVAL32_TV_USEC(r10)
+	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
+	addi	r7,r7,1000000@l		/* so we get microseconds in r4 */
+	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel */
+	stw	r3,TVAL32_TV_SEC(r10)
+	stw	r4,TVAL32_TV_USEC(r10)
 
 3:	cmplwi	r11,0			/* check if tz is NULL */
 	beq	1f
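Note the division of labour in the rewritten fast path: the caller preloads r7 with the wanted resolution (USEC_PER_SEC here, NSEC_PER_SEC in __kernel_clock_gettime), and __do_get_tspec returns seconds plus a fractional second already scaled into that unit by a single mulhwu. That conversion in C terms (a sketch, not kernel code):

	#include <stdint.h>

	/* frac = fractional second * 2^32; unit = USEC_PER_SEC or
	 * NSEC_PER_SEC.  Returns the high word of frac * unit, i.e. the
	 * fraction expressed in that unit. */
	static uint32_t frac_to_unit(uint32_t frac, uint32_t unit)
	{
		return (uint32_t)(((uint64_t)frac * unit) >> 32);
	}

Since frac < 2^32 and unit <= 10^9, the product fits in 64 bits and the result is always below unit, so no carry into the seconds is possible; that is why the old "check for nanosecond overflow" tail can be dropped from __do_get_tspec further down.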
@@ -70,14 +60,6 @@ V_FUNCTION_BEGIN(__kernel_gettimeofday)
 	crclr	cr0*4+so
 	li	r3,0
 	blr
-
-2:
-	mtlr	r12
-	mr	r3,r10
-	mr	r4,r11
-	li	r0,__NR_gettimeofday
-	sc
-	blr
 	.cfi_endproc
 V_FUNCTION_END(__kernel_gettimeofday)
 
@@ -100,7 +82,8 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	mr	r11,r4			/* r11 saves tp */
 	bl	__get_datapage@local	/* get data page */
 	mr	r9,r3			/* datapage ptr in r9 */
-
+	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
+	ori	r7,r7,NSEC_PER_SEC@l
 50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
 	bne	cr1,80f			/* not monotonic -> all done */
 
@@ -198,83 +181,12 @@ V_FUNCTION_END(__kernel_clock_getres)
 
 
 /*
- * This is the core of gettimeofday() & friends, it returns the xsec
- * value in r3 & r4 and expects the datapage ptr (non clobbered)
- * in r9. clobbers r0,r4,r5,r6,r7,r8.
- * When returning, r8 contains the counter value that can be reused
- * by the monotonic clock implementation
- */
-__do_get_xsec:
-	.cfi_startproc
-	/* Check for update count & load values. We use the low
-	 * order 32 bits of the update count
-	 */
-1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
-	andi.	r0,r8,1			/* pending update ? loop */
-	bne-	1b
-	xor	r0,r8,r8		/* create dependency */
-	add	r9,r9,r0
-
-	/* Load orig stamp (offset to TB) */
-	lwz	r5,CFG_TB_ORIG_STAMP(r9)
-	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)
-
-	/* Get a stable TB value */
-2:	mftbu	r3
-	mftbl	r4
-	mftbu	r0
-	cmpl	cr0,r3,r0
-	bne-	2b
-
-	/* Substract tb orig stamp. If the high part is non-zero, we jump to
-	 * the slow path which call the syscall.
-	 * If it's ok, then we have our 32 bits tb_ticks value in r7
-	 */
-	subfc	r7,r6,r4
-	subfe.	r0,r5,r3
-	bne-	3f
-
-	/* Load scale factor & do multiplication */
-	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
-	lwz	r6,(CFG_TB_TO_XS+4)(r9)
-	mulhwu	r4,r7,r5
-	mulhwu	r6,r7,r6
-	mullw	r0,r7,r5
-	addc	r6,r6,r0
-
-	/* At this point, we have the scaled xsec value in r4 + XER:CA
-	 * we load & add the stamp since epoch
-	 */
-	lwz	r5,CFG_STAMP_XSEC(r9)
-	lwz	r6,(CFG_STAMP_XSEC+4)(r9)
-	adde	r4,r4,r6
-	addze	r3,r5
-
-	/* We now have our result in r3,r4. We create a fake dependency
-	 * on that result and re-check the counter
-	 */
-	or	r6,r4,r3
-	xor	r0,r6,r6
-	add	r9,r9,r0
-	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
-	cmpl	cr0,r8,r0		/* check if updated */
-	bne-	1b
-
-	/* Warning ! The caller expects CR:EQ to be set to indicate a
-	 * successful calculation (so it won't fallback to the syscall
-	 * method). We have overriden that CR bit in the counter check,
-	 * but fortunately, the loop exit condition _is_ CR:EQ set, so
-	 * we can exit safely here. If you change this code, be careful
-	 * of that side effect.
-	 */
-3:	blr
-	.cfi_endproc
-
-/*
- * This is the core of clock_gettime(), it returns the current
- * time in seconds and nanoseconds in r3 and r4.
+ * This is the core of clock_gettime() and gettimeofday(),
+ * it returns the current time in r3 (seconds) and r4.
+ * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
+ * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds.
  * It expects the datapage ptr in r9 and doesn't clobber it.
- * It clobbers r0, r5, r6, r10 and returns NSEC_PER_SEC in r7.
+ * It clobbers r0, r5 and r6.
  * On return, r8 contains the counter value that can be reused.
  * This clobbers cr0 but not any other cr field.
  */
@@ -297,70 +209,58 @@ __do_get_tspec:
 2:	mftbu	r3
 	mftbl	r4
 	mftbu	r0
-	cmpl	cr0,r3,r0
+	cmplw	cr0,r3,r0
 	bne-	2b
 
 	/* Subtract tb orig stamp and shift left 12 bits.
 	 */
-	subfc	r7,r6,r4
+	subfc	r4,r6,r4
 	subfe	r0,r5,r3
 	slwi	r0,r0,12
-	rlwimi.	r0,r7,12,20,31
-	slwi	r7,r7,12
+	rlwimi.	r0,r4,12,20,31
+	slwi	r4,r4,12
 
-	/* Load scale factor & do multiplication */
+	/*
+	 * Load scale factor & do multiplication.
+	 * We only use the high 32 bits of the tb_to_xs value.
+	 * Even with a 1GHz timebase clock, the high 32 bits of
+	 * tb_to_xs will be at least 4 million, so the error from
+	 * ignoring the low 32 bits will be no more than 0.25ppm.
+	 * The error will just make the clock run very very slightly
+	 * slow until the next time the kernel updates the VDSO data,
+	 * at which point the clock will catch up to the kernel's value,
+	 * so there is no long-term error accumulation.
+	 */
 	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
-	lwz	r6,(CFG_TB_TO_XS+4)(r9)
-	mulhwu	r3,r7,r6
-	mullw	r10,r7,r5
-	mulhwu	r4,r7,r5
-	addc	r10,r3,r10
+	mulhwu	r4,r4,r5
 	li	r3,0
 
 	beq+	4f			/* skip high part computation if 0 */
 	mulhwu	r3,r0,r5
-	mullw	r7,r0,r5
-	mulhwu	r5,r0,r6
-	mullw	r6,r0,r6
-	adde	r4,r4,r7
-	addze	r3,r3
+	mullw	r5,r0,r5
 	addc	r4,r4,r5
 	addze	r3,r3
-	addc	r10,r10,r6
-
-4:	addze	r4,r4			/* add in carry */
-	lis	r7,NSEC_PER_SEC@h
-	ori	r7,r7,NSEC_PER_SEC@l
-	mulhwu	r4,r4,r7		/* convert to nanoseconds */
-
-	/* At this point, we have seconds & nanoseconds since the xtime
-	 * stamp in r3+CA and r4.  Load & add the xtime stamp.
+4:
+	/* At this point, we have seconds since the xtime stamp
+	 * as a 32.32 fixed-point number in r3 and r4.
+	 * Load & add the xtime stamp.
 	 */
-#ifdef CONFIG_PPC64
-	lwz	r5,STAMP_XTIME+TSPC64_TV_SEC+LOPART(r9)
-	lwz	r6,STAMP_XTIME+TSPC64_TV_NSEC+LOPART(r9)
-#else
-	lwz	r5,STAMP_XTIME+TSPC32_TV_SEC(r9)
-	lwz	r6,STAMP_XTIME+TSPC32_TV_NSEC(r9)
-#endif
-	add	r4,r4,r6
+	lwz	r5,STAMP_XTIME+TSPEC_TV_SEC(r9)
+	lwz	r6,STAMP_SEC_FRAC(r9)
+	addc	r4,r4,r6
 	adde	r3,r3,r5
 
-	/* We now have our result in r3,r4. We create a fake dependency
-	 * on that result and re-check the counter
+	/* We create a fake dependency on the result in r3/r4
+	 * and re-check the counter
 	 */
 	or	r6,r4,r3
 	xor	r0,r6,r6
 	add	r9,r9,r0
 	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
-	cmpl	cr0,r8,r0		/* check if updated */
+	cmplw	cr0,r8,r0		/* check if updated */
 	bne-	1b
 
-	/* check for nanosecond overflow and adjust if necessary */
-	cmpw	r4,r7
-	bltlr				/* all done if no overflow */
-	subf	r4,r7,r4		/* adjust if overflow */
-	addi	r3,r3,1
+	mulhwu	r4,r4,r7		/* convert to micro or nanoseconds */
 
 	blr
 	.cfi_endproc
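The 0.25 ppm figure in the new comment block is easy to sanity-check (back-of-envelope only, taking the comment's 1 GHz example and reading tb_to_xs as a 0.64 fixed-point ticks-to-2^-20-second factor, i.e. roughly 2^84 / timebase_freq): at 1 GHz, tb_to_xs is about 1.9e16, whose high 32 bits are about 4.5 million (hence "at least 4 million"), and dropping the low 32 bits loses at most 2^32 in absolute value, a relative error of at most 2^32 / 1.9e16, about 2.3e-7, comfortably under 0.25 ppm.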
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -33,18 +33,11 @@ V_FUNCTION_BEGIN(__kernel_gettimeofday)
 	bl	V_LOCAL_FUNC(__get_datapage)	/* get data page */
 	cmpldi	r11,0			/* check if tv is NULL */
 	beq	2f
-	bl	V_LOCAL_FUNC(__do_get_xsec)	/* get xsec from tb & kernel */
-	lis	r7,15			/* r7 = 1000000 = USEC_PER_SEC */
-	ori	r7,r7,16960
-	rldicl	r5,r4,44,20		/* r5 = sec = xsec / XSEC_PER_SEC */
-	rldicr	r6,r5,20,43		/* r6 = sec * XSEC_PER_SEC */
-	std	r5,TVAL64_TV_SEC(r11)	/* store sec in tv */
-	subf	r0,r6,r4		/* r0 = xsec = (xsec - r6) */
-	mulld	r0,r0,r7		/* usec = (xsec * USEC_PER_SEC) /
-					 * XSEC_PER_SEC
-					 */
-	rldicl	r0,r0,44,20
-	std	r0,TVAL64_TV_USEC(r11)	/* store usec in tv */
+	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
+	addi	r7,r7,1000000@l
+	bl	V_LOCAL_FUNC(__do_get_tspec) /* get sec/us from tb & kernel */
+	std	r4,TVAL64_TV_SEC(r11)	/* store sec in tv */
+	std	r5,TVAL64_TV_USEC(r11)	/* store usec in tv */
 2:	cmpldi	r10,0			/* check if tz is NULL */
 	beq	1f
 	lwz	r4,CFG_TZ_MINUTEWEST(r3)/* fill tz */
@@ -77,6 +70,8 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
 	.cfi_register lr,r12
 	mr	r11,r4			/* r11 saves tp */
 	bl	V_LOCAL_FUNC(__get_datapage)	/* get data page */
+	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
+	ori	r7,r7,NSEC_PER_SEC@l
 50:	bl	V_LOCAL_FUNC(__do_get_tspec)	/* get time from tb & kernel */
 	bne	cr1,80f			/* if not monotonic, all done */
 
@@ -171,49 +166,12 @@ V_FUNCTION_END(__kernel_clock_getres)
 
 
 /*
- * This is the core of gettimeofday(), it returns the xsec
- * value in r4 and expects the datapage ptr (non clobbered)
- * in r3. clobbers r0,r4,r5,r6,r7,r8
- * When returning, r8 contains the counter value that can be reused
- */
-V_FUNCTION_BEGIN(__do_get_xsec)
-	.cfi_startproc
-	/* check for update count & load values */
-1:	ld	r8,CFG_TB_UPDATE_COUNT(r3)
-	andi.	r0,r8,1			/* pending update ? loop */
-	bne-	1b
-	xor	r0,r8,r8		/* create dependency */
-	add	r3,r3,r0
-
-	/* Get TB & offset it. We use the MFTB macro which will generate
-	 * workaround code for Cell.
-	 */
-	MFTB(r7)
-	ld	r9,CFG_TB_ORIG_STAMP(r3)
-	subf	r7,r9,r7
-
-	/* Scale result */
-	ld	r5,CFG_TB_TO_XS(r3)
-	mulhdu	r7,r7,r5
-
-	/* Add stamp since epoch */
-	ld	r6,CFG_STAMP_XSEC(r3)
-	add	r4,r6,r7
-
-	xor	r0,r4,r4
-	add	r3,r3,r0
-	ld	r0,CFG_TB_UPDATE_COUNT(r3)
-	cmpld	cr0,r0,r8		/* check if updated */
-	bne-	1b
-	blr
-	.cfi_endproc
-V_FUNCTION_END(__do_get_xsec)
-
-/*
- * This is the core of clock_gettime(), it returns the current
- * time in seconds and nanoseconds in r4 and r5.
+ * This is the core of clock_gettime() and gettimeofday(),
+ * it returns the current time in r4 (seconds) and r5.
+ * On entry, r7 gives the resolution of r5, either USEC_PER_SEC
+ * or NSEC_PER_SEC, giving r5 in microseconds or nanoseconds.
  * It expects the datapage ptr in r3 and doesn't clobber it.
- * It clobbers r0 and r6 and returns NSEC_PER_SEC in r7.
+ * It clobbers r0, r6 and r9.
  * On return, r8 contains the counter value that can be reused.
  * This clobbers cr0 but not any other cr field.
  */
@@ -229,18 +187,18 @@ V_FUNCTION_BEGIN(__do_get_tspec)
 	/* Get TB & offset it. We use the MFTB macro which will generate
 	 * workaround code for Cell.
 	 */
-	MFTB(r7)
+	MFTB(r6)
 	ld	r9,CFG_TB_ORIG_STAMP(r3)
-	subf	r7,r9,r7
+	subf	r6,r9,r6
 
 	/* Scale result */
 	ld	r5,CFG_TB_TO_XS(r3)
-	sldi	r7,r7,12		/* compute time since stamp_xtime */
-	mulhdu	r6,r7,r5		/* in units of 2^-32 seconds */
+	sldi	r6,r6,12		/* compute time since stamp_xtime */
+	mulhdu	r6,r6,r5		/* in units of 2^-32 seconds */
 
 	/* Add stamp since epoch */
 	ld	r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
-	ld	r5,STAMP_XTIME+TSPC64_TV_NSEC(r3)
+	lwz	r5,STAMP_SEC_FRAC(r3)
 	or	r0,r4,r5
 	or	r0,r0,r6
 	xor	r0,r0,r0
|
||||||
bne- 1b /* reload if so */
|
bne- 1b /* reload if so */
|
||||||
|
|
||||||
/* convert to seconds & nanoseconds and add to stamp */
|
/* convert to seconds & nanoseconds and add to stamp */
|
||||||
lis r7,NSEC_PER_SEC@h
|
add r6,r6,r5 /* add on fractional seconds of xtime */
|
||||||
ori r7,r7,NSEC_PER_SEC@l
|
mulhwu r5,r6,r7 /* compute micro or nanoseconds and */
|
||||||
mulhwu r0,r6,r7 /* compute nanoseconds and */
|
|
||||||
srdi r6,r6,32 /* seconds since stamp_xtime */
|
srdi r6,r6,32 /* seconds since stamp_xtime */
|
||||||
clrldi r0,r0,32
|
clrldi r5,r5,32
|
||||||
add r5,r5,r0 /* add nanoseconds together */
|
|
||||||
cmpd r5,r7 /* overflow? */
|
|
||||||
add r4,r4,r6
|
add r4,r4,r6
|
||||||
bltlr /* all done if no overflow */
|
|
||||||
subf r5,r7,r5 /* if overflow, adjust */
|
|
||||||
addi r4,r4,1
|
|
||||||
blr
|
blr
|
||||||
.cfi_endproc
|
.cfi_endproc
|
||||||
V_FUNCTION_END(__do_get_tspec)
|
V_FUNCTION_END(__do_get_tspec)
|
||||||
|
|
|
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -216,9 +216,6 @@ static int lite5200_pm_enter(suspend_state_t state)
 
 	lite5200_restore_regs();
 
-	/* restart jiffies */
-	wakeup_decrementer();
-
 	iounmap(mbar);
 	return 0;
 }
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -171,9 +171,6 @@ int mpc52xx_pm_enter(suspend_state_t state)
 	/* restore SRAM */
 	memcpy(sram, saved_sram, sram_size);
 
-	/* restart jiffies */
-	wakeup_decrementer();
-
 	/* reenable interrupts in PIC */
 	out_be32(&intr->main_mask, intr_main_mask);
 
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -310,8 +310,12 @@ static int pmu_set_cpu_speed(int low_speed)
 	/* Restore low level PMU operations */
 	pmu_unlock();
 
-	/* Restore decrementer */
-	wakeup_decrementer();
+	/*
+	 * Restore decrementer; we'll take a decrementer interrupt
+	 * as soon as interrupts are re-enabled and the generic
+	 * clockevents code will reprogram it with the right value.
+	 */
+	set_dec(1);
 
 	/* Restore interrupts */
 	mpic_cpu_set_priority(pic_prio);