Pull ia64-clocksource into release branch

Tony Luck 2007-07-20 11:26:47 -07:00
commit c36c282b88
24 changed files with 334 additions and 565 deletions

View File

@ -1154,6 +1154,8 @@ and is between 256 and 4096 characters. It is defined in the file
nointroute [IA-64]
nojitter [IA64] Disables jitter checking for ITC timers.
nolapic [IA-32,APIC] Do not enable or use the local APIC.
nolapic_timer [IA-32,APIC] Do not use the local APIC timer.

View File

@ -1,41 +0,0 @@
Time Interpolators
------------------
Time interpolators provide a basis for time calculation between timer ticks and
allow an accurate determination of time down to the accuracy of the time
source in nanoseconds.
The architecture specific code typically provides gettimeofday and
settimeofday under Linux. The time interpolator provides both if an arch
defines CONFIG_TIME_INTERPOLATION. The arch still must set up timer tick
operations and call the necessary functions to advance the clock.
With the time interpolator a standardized interface exists for time
interpolation between ticks. The provided logic is highly scalable
and has been tested in SMP situations of up to 512 CPUs.
If CONFIG_TIME_INTERPOLATION is defined then the architecture specific code
(or the device drivers - like HPET) may register time interpolators.
These are typically defined in the following way:
static struct time_interpolator my_interpolator = {
.frequency = MY_FREQUENCY,
.source = TIME_SOURCE_MMIO32,
.shift = 8, /* scaling for higher accuracy */
.drift = -1, /* Unknown drift */
.jitter = 0 /* time source is stable */
};
void time_init(void)
{
....
/* Initialization of the timer */
my_interpolator.addr = &my_timer;
register_time_interpolator(&my_interpolator);
....
}
For more details see include/linux/timex.h and kernel/timer.c.
Christoph Lameter <christoph@lameter.com>, October 31, 2004

View File

@ -62,7 +62,11 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config TIME_INTERPOLATION
config GENERIC_TIME
bool
default y
config GENERIC_TIME_VSYSCALL
bool
default y

View File

@ -85,7 +85,7 @@ CONFIG_MMU=y
CONFIG_SWIOTLB=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y

View File

@ -86,7 +86,7 @@ CONFIG_MMU=y
CONFIG_SWIOTLB=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y

View File

@ -86,7 +86,7 @@ CONFIG_MMU=y
CONFIG_SWIOTLB=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y

View File

@ -93,7 +93,7 @@ CONFIG_SWIOTLB=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y

View File

@ -97,7 +97,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y

View File

@ -96,7 +96,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y

View File

@ -97,7 +97,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_TIME_INTERPOLATION=y
CONFIG_GENERIC_TIME=y
CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_GENERIC_IOMAP=y

View File

@ -7,6 +7,7 @@
#define ASM_OFFSETS_C 1
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <asm-ia64/processor.h>
#include <asm-ia64/ptrace.h>
@ -15,6 +16,7 @@
#include <asm-ia64/mca.h>
#include "../kernel/sigframe.h"
#include "../kernel/fsyscall_gtod_data.h"
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@ -256,17 +258,24 @@ void foo(void)
BLANK();
/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr));
DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source));
DEFINE(IA64_TIME_INTERPOLATOR_SHIFT_OFFSET, offsetof (struct time_interpolator, shift));
DEFINE(IA64_TIME_INTERPOLATOR_NSEC_OFFSET, offsetof (struct time_interpolator, nsec_per_cyc));
DEFINE(IA64_TIME_INTERPOLATOR_OFFSET_OFFSET, offsetof (struct time_interpolator, offset));
DEFINE(IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET, offsetof (struct time_interpolator, last_cycle));
DEFINE(IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET, offsetof (struct time_interpolator, last_counter));
DEFINE(IA64_TIME_INTERPOLATOR_JITTER_OFFSET, offsetof (struct time_interpolator, jitter));
DEFINE(IA64_TIME_INTERPOLATOR_MASK_OFFSET, offsetof (struct time_interpolator, mask));
DEFINE(IA64_TIME_SOURCE_CPU, TIME_SOURCE_CPU);
DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
DEFINE(IA64_GTOD_LOCK_OFFSET,
offsetof (struct fsyscall_gtod_data_t, lock));
DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
offsetof (struct fsyscall_gtod_data_t, wall_time));
DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
offsetof (struct fsyscall_gtod_data_t, monotonic_time));
DEFINE(IA64_CLKSRC_MASK_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_mask));
DEFINE(IA64_CLKSRC_MULT_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_mult));
DEFINE(IA64_CLKSRC_SHIFT_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_shift));
DEFINE(IA64_CLKSRC_MMIO_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_fsys_mmio));
DEFINE(IA64_CLKSRC_CYCLE_LAST_OFFSET,
offsetof (struct fsyscall_gtod_data_t, clk_cycle_last));
DEFINE(IA64_ITC_JITTER_OFFSET,
offsetof (struct itc_jitter_data_t, itc_jitter));
DEFINE(IA64_ITC_LASTCYCLE_OFFSET,
offsetof (struct itc_jitter_data_t, itc_lastcycle));
}

View File

@ -3,6 +3,7 @@
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <asm/io.h>
/* IBM Summit (EXA) Cyclone counter code*/
@ -18,13 +19,21 @@ void __init cyclone_setup(void)
use_cyclone = 1;
}
static void __iomem *cyclone_mc;
struct time_interpolator cyclone_interpolator = {
.source = TIME_SOURCE_MMIO64,
.shift = 16,
.frequency = CYCLONE_TIMER_FREQ,
.drift = -100,
.mask = (1LL << 40) - 1
static cycle_t read_cyclone(void)
{
return (cycle_t)readq((void __iomem *)cyclone_mc);
}
static struct clocksource clocksource_cyclone = {
.name = "cyclone",
.rating = 300,
.read = read_cyclone,
.mask = (1LL << 40) - 1,
.mult = 0, /* to be calculated */
.shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
int __init init_cyclone_clock(void)
@ -44,13 +53,15 @@ int __init init_cyclone_clock(void)
offset = (CYCLONE_CBAR_ADDR);
reg = (u64*)ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n");
printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
base = readq(reg);
if(!base){
printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n");
printk(KERN_ERR "Summit chipset: Could not find valid CBAR"
" value.\n");
use_cyclone = 0;
return -ENODEV;
}
@ -60,7 +71,8 @@ int __init init_cyclone_clock(void)
offset = (base + CYCLONE_PMCC_OFFSET);
reg = (u64*)ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n");
printk(KERN_ERR "Summit chipset: Could not find valid PMCC"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
@ -71,7 +83,8 @@ int __init init_cyclone_clock(void)
offset = (base + CYCLONE_MPCS_OFFSET);
reg = (u64*)ioremap_nocache(offset, sizeof(u64));
if(!reg){
printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n");
printk(KERN_ERR "Summit chipset: Could not find valid MPCS"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
@ -82,7 +95,8 @@ int __init init_cyclone_clock(void)
offset = (base + CYCLONE_MPMC_OFFSET);
cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32));
if(!cyclone_timer){
printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n");
printk(KERN_ERR "Summit chipset: Could not find valid MPMC"
" register.\n");
use_cyclone = 0;
return -ENODEV;
}
@ -93,7 +107,8 @@ int __init init_cyclone_clock(void)
int stall = 100;
while(stall--) barrier();
if(readl(cyclone_timer) == old){
printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n");
printk(KERN_ERR "Summit chipset: Counter not counting!"
" DISABLED\n");
iounmap(cyclone_timer);
cyclone_timer = 0;
use_cyclone = 0;
@ -101,8 +116,11 @@ int __init init_cyclone_clock(void)
}
}
/* initialize last tick */
cyclone_interpolator.addr = cyclone_timer;
register_time_interpolator(&cyclone_interpolator);
cyclone_mc = cyclone_timer;
clocksource_cyclone.fsys_mmio = cyclone_timer;
clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ,
clocksource_cyclone.shift);
clocksource_register(&clocksource_cyclone);
return 0;
}
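
Every clocksource added in this commit leaves mult as 0 ("to be calculated")
and fills it in at registration time via clocksource_hz2mult(). Below is a
minimal standalone sketch of the arithmetic that helper performs; hz2mult()
here is a re-derivation for illustration, not the kernel function itself.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/*
 * The clocksource core converts cycles to nanoseconds as
 *   delta_ns = (cycles * mult) >> shift
 * so mult must be (NSEC_PER_SEC << shift) / hz, rounded.
 */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = NSEC_PER_SEC << shift;

	tmp += hz / 2;		/* round to nearest instead of truncating */
	return (uint32_t)(tmp / hz);
}

int main(void)
{
	/* Example: a 100 MHz counter with the cyclone's shift of 16. */
	uint32_t mult = hz2mult(100000000, 16);

	/* 1000 cycles at 100 MHz should convert to ~10000 ns. */
	printf("mult=%u -> %llu ns\n", mult,
	       (unsigned long long)((1000ULL * mult) >> 16));
	return 0;
}

A larger shift gives mult more precision, but since cycles * mult must fit
in 64 bits it also shrinks the interval that can be converted in one step,
which is why the sources in this commit stick to shifts of 10 or 16.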

View File

@ -147,12 +147,11 @@ ENTRY(fsys_set_tid_address)
FSYS_RETURN
END(fsys_set_tid_address)
/*
* Ensure that the time interpolator structure is compatible with the asm code
*/
#if IA64_TIME_INTERPOLATOR_SOURCE_OFFSET !=0 || IA64_TIME_INTERPOLATOR_SHIFT_OFFSET != 2 \
|| IA64_TIME_INTERPOLATOR_JITTER_OFFSET != 3 || IA64_TIME_INTERPOLATOR_NSEC_OFFSET != 4
#error fsys_gettimeofday incompatible with changes to struct time_interpolator
#if IA64_GTOD_LOCK_OFFSET !=0
#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
#endif
#if IA64_ITC_JITTER_OFFSET !=0
#error fsys_gettimeofday incompatible with changes to struct itc_jitter_data_t
#endif
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 1
@ -179,126 +178,124 @@ ENTRY(fsys_gettimeofday)
// r11 = preserved: saved ar.pfs
// r12 = preserved: memory stack
// r13 = preserved: thread pointer
// r14 = address of mask / mask
// r14 = address of mask / mask value
// r15 = preserved: system call number
// r16 = preserved: current task pointer
// r17 = wall to monotonic use
// r18 = time_interpolator->offset
// r19 = address of wall_to_monotonic
// r20 = pointer to struct time_interpolator / pointer to time_interpolator->address
// r21 = shift factor
// r22 = address of time interpolator->last_counter
// r23 = address of time_interpolator->last_cycle
// r24 = address of time_interpolator->offset
// r25 = last_cycle value
// r26 = last_counter value
// r27 = pointer to xtime
// r17 = (not used)
// r18 = (not used)
// r19 = address of itc_lastcycle
// r20 = struct fsyscall_gtod_data (= address of gtod_lock.sequence)
// r21 = address of mmio_ptr
// r22 = address of wall_time or monotonic_time
// r23 = address of shift / value
// r24 = address mult factor / cycle_last value
// r25 = itc_lastcycle value
// r26 = address clocksource cycle_last
// r27 = (not used)
// r28 = sequence number at the beginning of critical section
// r29 = address of seqlock
// r29 = address of itc_jitter
// r30 = time processing flags / memory address
// r31 = pointer to result
// Predicates
// p6,p7 short term use
// p8 = timesource ar.itc
// p9 = timesource mmio64
// p10 = timesource mmio32
// p10 = timesource mmio32 - not used
// p11 = timesource not to be handled by asm code
// p12 = memory time source ( = p9 | p10)
// p13 = do cmpxchg with time_interpolator_last_cycle
// p12 = memory time source ( = p9 | p10) - not used
// p13 = do cmpxchg with itc_lastcycle
// p14 = Divide by 1000
// p15 = Add monotonic
//
// Note that instructions are optimized for McKinley. McKinley can process two
// bundles simultaneously and therefore we continuously try to feed the CPU
// two bundles and then a stop.
tnat.nz p6,p0 = r31 // branch deferred since it does not fit into bundle structure
// Note that instructions are optimized for McKinley. McKinley can
// process two bundles simultaneously and therefore we continuously
// try to feed the CPU two bundles and then a stop.
//
// Note also that this code has changed a lot. Optimization is TBD.
// Comments beginning with "?" may be outdated.
tnat.nz p6,p0 = r31 // ? branch deferred to fit later bundle
mov pr = r30,0xc000 // Set predicates according to function
add r2 = TI_FLAGS+IA64_TASK_SIZE,r16
movl r20 = time_interpolator
movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address
;;
ld8 r20 = [r20] // get pointer to time_interpolator structure
movl r29 = xtime_lock
movl r29 = itc_jitter_data // itc_jitter
add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20 // wall_time
ld4 r2 = [r2] // process work pending flags
movl r27 = xtime
;; // only one bundle here
ld8 r21 = [r20] // first quad with control information
and r2 = TIF_ALLWORK_MASK,r2
(p6) br.cond.spnt.few .fail_einval // deferred branch
;;
add r10 = IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET,r20
extr r3 = r21,32,32 // time_interpolator->nsec_per_cyc
extr r8 = r21,0,16 // time_interpolator->source
(p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time
add r21 = IA64_CLKSRC_MMIO_OFFSET,r20
add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29
and r2 = TIF_ALLWORK_MASK,r2
(p6) br.cond.spnt.few .fail_einval // ? deferred branch
;;
add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last
cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled
(p6) br.cond.spnt.many fsys_fallback_syscall
;;
cmp.eq p8,p12 = 0,r8 // Check for cpu timer
cmp.eq p9,p0 = 1,r8 // MMIO64 ?
extr r2 = r21,24,8 // time_interpolator->jitter
cmp.eq p10,p0 = 2,r8 // MMIO32 ?
cmp.ltu p11,p0 = 2,r8 // function or other clock
(p11) br.cond.spnt.many fsys_fallback_syscall
;;
setf.sig f7 = r3 // Setup for scaling of counter
(p15) movl r19 = wall_to_monotonic
(p12) ld8 r30 = [r10]
cmp.ne p13,p0 = r2,r0 // need jitter compensation?
extr r21 = r21,16,8 // shift factor
;;
// Begin critical section
.time_redo:
.pred.rel.mutex p8,p9,p10
ld4.acq r28 = [r29] // xtime_lock.sequence. Must come first for locking purposes
ld4.acq r28 = [r20] // gtod_lock.sequence, must be read first
;;
and r28 = ~1,r28 // Make sequence even to force retry if odd
and r28 = ~1,r28 // And make sequence even to force retry if odd
;;
ld8 r30 = [r21] // clocksource->mmio_ptr
add r24 = IA64_CLKSRC_MULT_OFFSET,r20
ld4 r2 = [r29] // itc_jitter value
add r23 = IA64_CLKSRC_SHIFT_OFFSET,r20
add r14 = IA64_CLKSRC_MASK_OFFSET,r20
;;
ld4 r3 = [r24] // clocksource mult value
ld8 r14 = [r14] // clocksource mask value
cmp.eq p8,p9 = 0,r30 // use cpu timer if no mmio_ptr
;;
setf.sig f7 = r3 // Setup for mult scaling of counter
(p8) cmp.ne p13,p0 = r2,r0 // need itc_jitter compensation, set p13
ld4 r23 = [r23] // clocksource shift value
ld8 r24 = [r26] // get clksrc_cycle_last value
(p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control
;;
.pred.rel.mutex p8,p9
(p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!!
add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20
(p9) ld8 r2 = [r30] // readq(ti->address). Could also have latency issues..
(p10) ld4 r2 = [r30] // readw(ti->address)
(p13) add r23 = IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET,r20
;; // could be removed by moving the last add upward
ld8 r26 = [r22] // time_interpolator->last_counter
(p13) ld8 r25 = [r23] // time interpolator->last_cycle
add r24 = IA64_TIME_INTERPOLATOR_OFFSET_OFFSET,r20
(p15) ld8 r17 = [r19],IA64_TIMESPEC_TV_NSEC_OFFSET
ld8 r9 = [r27],IA64_TIMESPEC_TV_NSEC_OFFSET
add r14 = IA64_TIME_INTERPOLATOR_MASK_OFFSET, r20
(p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues..
(p13) ld8 r25 = [r19] // get itc_lastcycle value
;; // ? could be removed by moving the last add upward
ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec
;;
ld8 r18 = [r24] // time_interpolator->offset
ld8 r8 = [r27],-IA64_TIMESPEC_TV_NSEC_OFFSET // xtime.tv_nsec
(p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm)
ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET // tv_nsec
(p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm)
;;
ld8 r14 = [r14] // time_interpolator->mask
(p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
sub r10 = r2,r26 // current_counter - last_counter
(p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared
sub r10 = r2,r24 // current_cycle - last_cycle
;;
(p6) sub r10 = r25,r26 // time we got was less than last_cycle
(p6) sub r10 = r25,r24 // time we got was less than last_cycle
(p7) mov ar.ccv = r25 // more than last_cycle. Prep for cmpxchg
;;
(p7) cmpxchg8.rel r3 = [r19],r2,ar.ccv
;;
(p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful
;;
(p7) sub r10 = r3,r24 // then use new last_cycle instead
;;
and r10 = r10,r14 // Apply mask
;;
setf.sig f8 = r10
nop.i 123
;;
(p7) cmpxchg8.rel r3 = [r23],r2,ar.ccv
EX(.fail_efault, probe.w.fault r31, 3) // This takes 5 cycles and we have spare time
// fault check takes 5 cycles and we have spare time
EX(.fail_efault, probe.w.fault r31, 3)
xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter)
(p15) add r9 = r9,r17 // Add wall to monotonic.secs to result secs
;;
(p15) ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET
(p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful redo
// simulate tbit.nz.or p7,p0 = r28,0
// ? simulate tbit.nz.or p7,p0 = r28,0
getf.sig r2 = f8
mf
add r8 = r8,r18 // Add time interpolator offset
;;
ld4 r10 = [r29] // xtime_lock.sequence
(p15) add r8 = r8, r17 // Add monotonic.nsecs to nsecs
shr.u r2 = r2,r21
;; // overloaded 3 bundles!
// End critical section.
ld4 r10 = [r20] // gtod_lock.sequence
shr.u r2 = r2,r23 // shift by factor
;; // ? overloaded 3 bundles!
add r8 = r8,r2 // Add xtime.nsecs
cmp4.ne.or p7,p0 = r28,r10
(p7) br.cond.dpnt.few .time_redo // sequence number changed ?
cmp4.ne p7,p0 = r28,r10
(p7) br.cond.dpnt.few .time_redo // sequence number changed, redo
// End critical section.
// Now r8=tv->tv_nsec and r9=tv->tv_sec
mov r10 = r0
movl r2 = 1000000000
@ -308,19 +305,19 @@ EX(.fail_efault, probe.w.fault r31, 3) // This takes 5 cycles and we have spare
.time_normalize:
mov r21 = r8
cmp.ge p6,p0 = r8,r2
(p14) shr.u r20 = r8, 3 // We can repeat this if necessary just wasting some time
(p14) shr.u r20 = r8, 3 // We can repeat this if necessary just wasting time
;;
(p14) setf.sig f8 = r20
(p6) sub r8 = r8,r2
(p6) add r9 = 1,r9 // two nops before the branch.
(p14) setf.sig f7 = r3 // Chances for repeats are 1 in 10000 for gettod
(p6) add r9 = 1,r9 // two nops before the branch.
(p14) setf.sig f7 = r3 // Chances for repeats are 1 in 10000 for gettod
(p6) br.cond.dpnt.few .time_normalize
;;
// Divided by 8 though shift. Now divide by 125
// The compiler was able to do that with a multiply
// and a shift and we do the same
EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles
(p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it...
EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles
(p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it
;;
mov r8 = r0
(p14) getf.sig r2 = f8

View File

@ -0,0 +1,23 @@
/*
* (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
* Contributed by Peter Keilty <peter.keilty@hp.com>
*
* fsyscall gettimeofday data
*/
struct fsyscall_gtod_data_t {
seqlock_t lock;
struct timespec wall_time;
struct timespec monotonic_time;
cycle_t clk_mask;
u32 clk_mult;
u32 clk_shift;
void *clk_fsys_mmio;
cycle_t clk_cycle_last;
} __attribute__ ((aligned (L1_CACHE_BYTES)));
struct itc_jitter_data_t {
int itc_jitter;
cycle_t itc_lastcycle;
} __attribute__ ((aligned (L1_CACHE_BYTES)));

View File

@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>
#include <asm/machvec.h>
#include <asm/delay.h>
@ -28,6 +29,16 @@
#include <asm/sections.h>
#include <asm/system.h>
#include "fsyscall_gtod_data.h"
static cycle_t itc_get_cycles(void);
struct fsyscall_gtod_data_t fsyscall_gtod_data = {
.lock = SEQLOCK_UNLOCKED,
};
struct itc_jitter_data_t itc_jitter_data;
volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
#ifdef CONFIG_IA64_DEBUG_IRQ
@ -37,11 +48,16 @@ EXPORT_SYMBOL(last_cli_ip);
#endif
static struct time_interpolator itc_interpolator = {
.shift = 16,
.mask = 0xffffffffffffffffLL,
.source = TIME_SOURCE_CPU
static struct clocksource clocksource_itc = {
.name = "itc",
.rating = 350,
.read = itc_get_cycles,
.mask = 0xffffffffffffffff,
.mult = 0, /* to be calculated */
.shift = 16,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
@ -210,8 +226,6 @@ ia64_init_itm (void)
+ itc_freq/2)/itc_freq;
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
itc_interpolator.frequency = local_cpu_data->itc_freq;
itc_interpolator.drift = itc_drift;
#ifdef CONFIG_SMP
/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
* Jitter compensation requires a cmpxchg which may limit
@ -223,15 +237,50 @@ ia64_init_itm (void)
* even going backward) if the ITC offsets between the individual CPUs
* are too large.
*/
if (!nojitter) itc_interpolator.jitter = 1;
if (!nojitter)
itc_jitter_data.itc_jitter = 1;
#endif
register_time_interpolator(&itc_interpolator);
}
/* Setup the CPU local timer tick */
ia64_cpu_local_tick();
if (!itc_clocksource) {
/* Sort out mult/shift values: */
clocksource_itc.mult =
clocksource_hz2mult(local_cpu_data->itc_freq,
clocksource_itc.shift);
clocksource_register(&clocksource_itc);
itc_clocksource = &clocksource_itc;
}
}
static cycle_t itc_get_cycles(void)
{
u64 lcycle, now, ret;
if (!itc_jitter_data.itc_jitter)
return get_cycles();
lcycle = itc_jitter_data.itc_lastcycle;
now = get_cycles();
if (lcycle && time_after(lcycle, now))
return lcycle;
/*
* Keep track of the last timer value returned.
* In an SMP environment, you could lose out in contention of
* cmpxchg. If so, your cmpxchg returns new value which the
* winner of contention updated to. Use the new value instead.
*/
ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
if (unlikely(ret != lcycle))
return ret;
return now;
}
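
The cmpxchg loop in itc_get_cycles() is a general lock-free pattern for
making a jittery counter look monotonic to all readers. A rough userspace
rendering of the same idea, using GCC/Clang atomic builtins; raw_counter()
is a made-up stand-in for get_cycles(), not a real API:

#include <stdint.h>

static uint64_t lastcycle;		/* last value handed out */

extern uint64_t raw_counter(void);	/* stand-in for get_cycles() */

/* Never return a value older than one already returned, mirroring
 * the itc_get_cycles() logic above. */
uint64_t monotonic_read(void)
{
	uint64_t last = __atomic_load_n(&lastcycle, __ATOMIC_RELAXED);
	uint64_t now = raw_counter();

	/* Counter appears to have gone backwards: reuse the last value. */
	if (last && (int64_t)(last - now) > 0)
		return last;

	/* Try to publish now. On failure, 'last' is updated to the
	 * winner's value, so return that instead. */
	if (__atomic_compare_exchange_n(&lastcycle, &last, now,
					0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		return now;
	return last;
}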
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
.flags = IRQF_DISABLED | IRQF_IRQPOLL,
@ -307,3 +356,34 @@ ia64_setup_printk_clock(void)
if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT))
ia64_printk_clock = ia64_itc_printk_clock;
}
void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
unsigned long flags;
write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
fsyscall_gtod_data.clk_mult = c->mult;
fsyscall_gtod_data.clk_shift = c->shift;
fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
/* copy kernel time structures */
fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
+ wall->tv_sec;
fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
+ wall->tv_nsec;
/* normalize */
while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
fsyscall_gtod_data.monotonic_time.tv_sec++;
}
write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
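
update_vsyscall() publishes this snapshot under fsyscall_gtod_data.lock, and
the rewritten fsys_gettimeofday assembly earlier in the commit is the
matching seqlock reader. In rough C the read side it implements looks like
the sketch below; read_cycles() is a hypothetical stand-in for reading
ar.itc or clk_fsys_mmio, and this is an illustration, not kernel code:

/* Illustrative reader for fsyscall_gtod_data. */
static void fsys_gtod_read(struct timespec *ts)
{
	unsigned int seq;
	u64 cycles, ns;

	do {
		seq = read_seqbegin(&fsyscall_gtod_data.lock);
		cycles = (read_cycles() - fsyscall_gtod_data.clk_cycle_last)
				& fsyscall_gtod_data.clk_mask;
		ns = (cycles * fsyscall_gtod_data.clk_mult)
				>> fsyscall_gtod_data.clk_shift;
		*ts = fsyscall_gtod_data.wall_time;
	} while (read_seqretry(&fsyscall_gtod_data.lock, seq));

	ts->tv_nsec += ns;
	while (ts->tv_nsec >= NSEC_PER_SEC) {	/* normalize */
		ts->tv_nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
	}
}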

View File

@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <asm/hw_irq.h>
#include <asm/system.h>
@ -22,11 +23,21 @@
extern unsigned long sn_rtc_cycles_per_second;
static struct time_interpolator sn2_interpolator = {
.drift = -1,
.shift = 10,
.mask = (1LL << 55) - 1,
.source = TIME_SOURCE_MMIO64
static void __iomem *sn2_mc;
static cycle_t read_sn2(void)
{
return (cycle_t)readq(sn2_mc);
}
static struct clocksource clocksource_sn2 = {
.name = "sn2_rtc",
.rating = 300,
.read = read_sn2,
.mask = (1LL << 55) - 1,
.mult = 0,
.shift = 10,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
@ -47,9 +58,11 @@ ia64_sn_udelay (unsigned long usecs)
void __init sn_timer_init(void)
{
sn2_interpolator.frequency = sn_rtc_cycles_per_second;
sn2_interpolator.addr = RTC_COUNTER_ADDR;
register_time_interpolator(&sn2_interpolator);
sn2_mc = RTC_COUNTER_ADDR;
clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
clocksource_sn2.shift);
clocksource_register(&clocksource_sn2);
ia64_udelay = &ia64_sn_udelay;
}

View File

@ -475,7 +475,7 @@ static void acpi_processor_idle(void)
/* Get end time (ticks) */
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
#ifdef CONFIG_GENERIC_TIME
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
/* TSC halts in C2, so notify users */
mark_tsc_unstable("possible TSC halt in C2");
#endif
@ -517,7 +517,7 @@ static void acpi_processor_idle(void)
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
}
#ifdef CONFIG_GENERIC_TIME
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
/* TSC halts in C3, so notify users */
mark_tsc_unstable("TSC halts in C3");
#endif

View File

@ -29,6 +29,7 @@
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/clocksource.h>
#include <asm/current.h>
#include <asm/uaccess.h>
@ -51,8 +52,34 @@
#define HPET_RANGE_SIZE 1024 /* from HPET spec */
#if BITS_PER_LONG == 64
#define write_counter(V, MC) writeq(V, MC)
#define read_counter(MC) readq(MC)
#else
#define write_counter(V, MC) writel(V, MC)
#define read_counter(MC) readl(MC)
#endif
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
static void __iomem *hpet_mctr;
static cycle_t read_hpet(void)
{
return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}
static struct clocksource clocksource_hpet = {
.name = "hpet",
.rating = 250,
.read = read_hpet,
.mask = 0xffffffffffffffff,
.mult = 0, /* to be calculated */
.shift = 10,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
/* A lock for concurrent intermodule access to hpet and isr hpet activity. */
@ -79,7 +106,7 @@ struct hpets {
struct hpets *hp_next;
struct hpet __iomem *hp_hpet;
unsigned long hp_hpet_phys;
struct time_interpolator *hp_interpolator;
struct clocksource *hp_clocksource;
unsigned long long hp_tick_freq;
unsigned long hp_delta;
unsigned int hp_ntimer;
@ -94,13 +121,6 @@ static struct hpets *hpets;
#define HPET_PERIODIC 0x0004
#define HPET_SHARED_IRQ 0x0008
#if BITS_PER_LONG == 64
#define write_counter(V, MC) writeq(V, MC)
#define read_counter(MC) readq(MC)
#else
#define write_counter(V, MC) writel(V, MC)
#define read_counter(MC) readl(MC)
#endif
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
@ -737,27 +757,6 @@ static ctl_table dev_root[] = {
static struct ctl_table_header *sysctl_header;
static void hpet_register_interpolator(struct hpets *hpetp)
{
#ifdef CONFIG_TIME_INTERPOLATION
struct time_interpolator *ti;
ti = kzalloc(sizeof(*ti), GFP_KERNEL);
if (!ti)
return;
ti->source = TIME_SOURCE_MMIO64;
ti->shift = 10;
ti->addr = &hpetp->hp_hpet->hpet_mc;
ti->frequency = hpetp->hp_tick_freq;
ti->drift = HPET_DRIFT;
ti->mask = -1;
hpetp->hp_interpolator = ti;
register_time_interpolator(ti);
#endif
}
/*
* Adjustment for when arming the timer with
* initial conditions. That is, main counter
@ -909,7 +908,16 @@ int hpet_alloc(struct hpet_data *hdp)
}
hpetp->hp_delta = hpet_calibrate(hpetp);
hpet_register_interpolator(hpetp);
if (!hpet_clocksource) {
hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr);
clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq,
clocksource_hpet.shift);
clocksource_register(&clocksource_hpet);
hpetp->hp_clocksource = &clocksource_hpet;
hpet_clocksource = &clocksource_hpet;
}
return 0;
}
@ -995,7 +1003,7 @@ static int hpet_acpi_add(struct acpi_device *device)
static int hpet_acpi_remove(struct acpi_device *device, int type)
{
/* XXX need to unregister interpolator, dealloc mem, etc */
/* XXX need to unregister clocksource, dealloc mem, etc */
return -EINVAL;
}

View File

@ -67,6 +67,12 @@ struct clocksource {
unsigned long flags;
cycle_t (*vread)(void);
void (*resume)(void);
#ifdef CONFIG_IA64
void *fsys_mmio; /* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0)
#endif
/* timekeeping specific data, ignore */
cycle_t cycle_interval;

View File

@ -224,66 +224,6 @@ static inline int ntp_synced(void)
__x < 0 ? -(-__x >> __s) : __x >> __s; \
})
#ifdef CONFIG_TIME_INTERPOLATION
#define TIME_SOURCE_CPU 0
#define TIME_SOURCE_MMIO64 1
#define TIME_SOURCE_MMIO32 2
#define TIME_SOURCE_FUNCTION 3
/* For proper operation, time_interpolator clocks must run slightly slower
* than the standard clock since the interpolator may only correct by having
* time jump forward during a tick. A slower clock is usually a side effect
* of the integer divide of the nanoseconds in a second by the frequency.
* The accuracy of the division can be increased by specifying a shift.
* However, this may cause the clock not to be slow enough.
* The interpolator will self-tune the clock by slowing down if no
* resets occur or speeding up if the time jumps per analysis cycle
* become too high.
*
* Setting jitter compensates for a fluctuating timesource by comparing
* to the last value read from the timesource to ensure that an earlier value
* is not returned by a later call. The price to pay
* for the compensation is that the timer routines are not as scalable anymore.
*/
struct time_interpolator {
u16 source; /* time source flags */
u8 shift; /* increases accuracy of multiply by shifting. */
/* Note that bits may be lost if shift is set too high */
u8 jitter; /* if set compensate for fluctuations */
u32 nsec_per_cyc; /* set by register_time_interpolator() */
void *addr; /* address of counter or function */
cycles_t mask; /* mask the valid bits of the counter */
unsigned long offset; /* nsec offset at last update of interpolator */
u64 last_counter; /* counter value in units of the counter at last update */
cycles_t last_cycle; /* Last timer value if jitter is set */
u64 frequency; /* frequency in counts/second */
long drift; /* drift in parts-per-million (or -1) */
unsigned long skips; /* skips forward */
unsigned long ns_skipped; /* nanoseconds skipped */
struct time_interpolator *next;
};
extern void register_time_interpolator(struct time_interpolator *);
extern void unregister_time_interpolator(struct time_interpolator *);
extern void time_interpolator_reset(void);
extern unsigned long time_interpolator_get_offset(void);
extern void time_interpolator_update(long delta_nsec);
#else /* !CONFIG_TIME_INTERPOLATION */
static inline void time_interpolator_reset(void)
{
}
static inline void time_interpolator_update(long delta_nsec)
{
}
#endif /* !CONFIG_TIME_INTERPOLATION */
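
Note that nsec_per_cyc is computed in register_time_interpolator() (removed
later in this commit) as ((u64)NSEC_PER_SEC << shift) / frequency; the
truncating division is what guarantees the slightly slow clock the comment
above calls for. A small standalone illustration, with a made-up frequency:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Made-up counter frequency; shift of 16 as in the ITC interpolator. */
	uint64_t freq = 399999999, shift = 16;

	/* Truncating division, as in register_time_interpolator(). */
	uint64_t nsec_per_cyc = (NSEC_PER_SEC << shift) / freq;

	/* Converting one second's worth of cycles back to nanoseconds
	 * prints 999999997: the interpolated clock loses ~3 ns per true
	 * second, which time_interpolator_update()'s tuning absorbs. */
	printf("nsec_per_cyc=%llu -> %llu ns per true second\n",
	       (unsigned long long)nsec_per_cyc,
	       (unsigned long long)((freq * nsec_per_cyc) >> shift));
	return 0;
}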
#define TICK_LENGTH_SHIFT 32
#ifdef CONFIG_NO_HZ

View File

@ -136,7 +136,6 @@ static inline void warp_clock(void)
write_seqlock_irq(&xtime_lock);
wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
xtime.tv_sec += sys_tz.tz_minuteswest * 60;
time_interpolator_reset();
write_sequnlock_irq(&xtime_lock);
clock_was_set();
}
@ -309,92 +308,6 @@ struct timespec timespec_trunc(struct timespec t, unsigned gran)
}
EXPORT_SYMBOL(timespec_trunc);
#ifdef CONFIG_TIME_INTERPOLATION
void getnstimeofday (struct timespec *tv)
{
unsigned long seq,sec,nsec;
do {
seq = read_seqbegin(&xtime_lock);
sec = xtime.tv_sec;
nsec = xtime.tv_nsec+time_interpolator_get_offset();
} while (unlikely(read_seqretry(&xtime_lock, seq)));
while (unlikely(nsec >= NSEC_PER_SEC)) {
nsec -= NSEC_PER_SEC;
++sec;
}
tv->tv_sec = sec;
tv->tv_nsec = nsec;
}
EXPORT_SYMBOL_GPL(getnstimeofday);
int do_settimeofday (struct timespec *tv)
{
time_t wtm_sec, sec = tv->tv_sec;
long wtm_nsec, nsec = tv->tv_nsec;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
write_seqlock_irq(&xtime_lock);
{
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
set_normalized_timespec(&xtime, sec, nsec);
set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
time_esterror = NTP_PHASE_LIMIT;
time_interpolator_reset();
}
write_sequnlock_irq(&xtime_lock);
clock_was_set();
return 0;
}
EXPORT_SYMBOL(do_settimeofday);
void do_gettimeofday (struct timeval *tv)
{
unsigned long seq, nsec, usec, sec, offset;
do {
seq = read_seqbegin(&xtime_lock);
offset = time_interpolator_get_offset();
sec = xtime.tv_sec;
nsec = xtime.tv_nsec;
} while (unlikely(read_seqretry(&xtime_lock, seq)));
usec = (nsec + offset) / 1000;
while (unlikely(usec >= USEC_PER_SEC)) {
usec -= USEC_PER_SEC;
++sec;
}
tv->tv_sec = sec;
tv->tv_usec = usec;
/*
* Make sure xtime.tv_sec [returned by sys_time()] always
* follows the gettimeofday() result precisely. This
* condition is extremely unlikely, it can hit at most
* once per second:
*/
if (unlikely(xtime.tv_sec != tv->tv_sec)) {
unsigned long flags;
write_seqlock_irqsave(&xtime_lock, flags);
update_wall_time();
write_sequnlock_irqrestore(&xtime_lock, flags);
}
}
EXPORT_SYMBOL(do_gettimeofday);
#else /* CONFIG_TIME_INTERPOLATION */
#ifndef CONFIG_GENERIC_TIME
/*
* Simulate gettimeofday using do_gettimeofday which only allows a timeval
@ -410,7 +323,6 @@ void getnstimeofday(struct timespec *tv)
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
#endif /* CONFIG_TIME_INTERPOLATION */
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59

View File

@ -116,11 +116,6 @@ void second_overflow(void)
if (xtime.tv_sec % 86400 == 0) {
xtime.tv_sec--;
wall_to_monotonic.tv_sec++;
/*
* The timer interpolator will make time change
* gradually instead of an immediate jump by one second
*/
time_interpolator_update(-NSEC_PER_SEC);
time_state = TIME_OOP;
printk(KERN_NOTICE "Clock: inserting leap second "
"23:59:60 UTC\n");
@ -130,11 +125,6 @@ void second_overflow(void)
if ((xtime.tv_sec + 1) % 86400 == 0) {
xtime.tv_sec++;
wall_to_monotonic.tv_sec--;
/*
* Use of time interpolator for a gradual change of
* time
*/
time_interpolator_update(NSEC_PER_SEC);
time_state = TIME_WAIT;
printk(KERN_NOTICE "Clock: deleting leap second "
"23:59:59 UTC\n");

View File

@ -466,10 +466,6 @@ void update_wall_time(void)
second_overflow();
}
/* interpolator bits */
time_interpolator_update(clock->xtime_interval
>> clock->shift);
/* accumulate error between NTP and clock interval */
clock->error += current_tick_length();
clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);

View File

@ -1349,194 +1349,6 @@ void __init init_timers(void)
open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION
struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);
static inline cycles_t time_interpolator_get_cycles(unsigned int src)
{
unsigned long (*x)(void);
switch (src)
{
case TIME_SOURCE_FUNCTION:
x = time_interpolator->addr;
return x();
case TIME_SOURCE_MMIO64 :
return readq_relaxed((void __iomem *)time_interpolator->addr);
case TIME_SOURCE_MMIO32 :
return readl_relaxed((void __iomem *)time_interpolator->addr);
default: return get_cycles();
}
}
static inline u64 time_interpolator_get_counter(int writelock)
{
unsigned int src = time_interpolator->source;
if (time_interpolator->jitter)
{
cycles_t lcycle;
cycles_t now;
do {
lcycle = time_interpolator->last_cycle;
now = time_interpolator_get_cycles(src);
if (lcycle && time_after(lcycle, now))
return lcycle;
/* When holding the xtime write lock, there's no need
* to add the overhead of the cmpxchg. Readers are
* forced to retry until the write lock is released.
*/
if (writelock) {
time_interpolator->last_cycle = now;
return now;
}
/* Keep track of the last timer value returned. The use of cmpxchg here
* will cause contention in an SMP environment.
*/
} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
return now;
}
else
return time_interpolator_get_cycles(src);
}
void time_interpolator_reset(void)
{
time_interpolator->offset = 0;
time_interpolator->last_counter = time_interpolator_get_counter(1);
}
#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
unsigned long time_interpolator_get_offset(void)
{
/* If we do not have a time interpolator set up then just return zero */
if (!time_interpolator)
return 0;
return time_interpolator->offset +
GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}
#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST
void time_interpolator_update(long delta_nsec)
{
u64 counter;
unsigned long offset;
/* If there is no time interpolator set up then do nothing */
if (!time_interpolator)
return;
/*
* The interpolator compensates for late ticks by accumulating the late
* time in time_interpolator->offset. A tick earlier than expected will
* lead to a reset of the offset and a corresponding jump of the clock
* forward. Again this only works if the interpolator clock is running
* slightly slower than the regular clock and the tuning logic insures
* that.
*/
counter = time_interpolator_get_counter(1);
offset = time_interpolator->offset +
GET_TI_NSECS(counter, time_interpolator);
if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
time_interpolator->offset = offset - delta_nsec;
else {
time_interpolator->skips++;
time_interpolator->ns_skipped += delta_nsec - offset;
time_interpolator->offset = 0;
}
time_interpolator->last_counter = counter;
/* Tuning logic for time interpolator invoked every minute or so.
* Decrease interpolator clock speed if no skips occurred and an offset is carried.
* Increase interpolator clock speed if we skip too much time.
*/
if (jiffies % INTERPOLATOR_ADJUST == 0)
{
if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
time_interpolator->nsec_per_cyc--;
if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
time_interpolator->nsec_per_cyc++;
time_interpolator->skips = 0;
time_interpolator->ns_skipped = 0;
}
}
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
if (!time_interpolator)
return 1;
return new->frequency > 2*time_interpolator->frequency ||
(unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}
void
register_time_interpolator(struct time_interpolator *ti)
{
unsigned long flags;
/* Sanity check */
BUG_ON(ti->frequency == 0 || ti->mask == 0);
ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
spin_lock(&time_interpolator_lock);
write_seqlock_irqsave(&xtime_lock, flags);
if (is_better_time_interpolator(ti)) {
time_interpolator = ti;
time_interpolator_reset();
}
write_sequnlock_irqrestore(&xtime_lock, flags);
ti->next = time_interpolator_list;
time_interpolator_list = ti;
spin_unlock(&time_interpolator_lock);
}
void
unregister_time_interpolator(struct time_interpolator *ti)
{
struct time_interpolator *curr, **prev;
unsigned long flags;
spin_lock(&time_interpolator_lock);
prev = &time_interpolator_list;
for (curr = *prev; curr; curr = curr->next) {
if (curr == ti) {
*prev = curr->next;
break;
}
prev = &curr->next;
}
write_seqlock_irqsave(&xtime_lock, flags);
if (ti == time_interpolator) {
/* we lost the best time-interpolator: */
time_interpolator = NULL;
/* find the next-best interpolator */
for (curr = time_interpolator_list; curr; curr = curr->next)
if (is_better_time_interpolator(curr))
time_interpolator = curr;
time_interpolator_reset();
}
write_sequnlock_irqrestore(&xtime_lock, flags);
spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
* msleep - sleep safely even with waitqueue interruptions
* @msecs: Time in milliseconds to sleep for