clocksource: Simplify the logic around clocksource wrapping safety margins
The clocksource logic has a number of places where we try to include a safety margin. Most of these are 12.5% safety margins, but they are inconsistently applied and sometimes are applied on top of each other. Additionally, in the previous patch, we corrected an issue where we unintentionally in effect created a 50% safety margin, which these 12.5% margins were then added to. So to simplify the logic here, this patch removes the various 12.5% margins, and consolidates adding the margin in one place: clocks_calc_max_nsecs(). Additionally, Linus prefers a 50% safety margin, as it allows bad clock values to be more easily caught. This should really have no net effect, due to the corrected issue earlier which caused greater than 50% margins to be used w/o issue. Signed-off-by: John Stultz <john.stultz@linaro.org> Acked-by: Stephen Boyd <sboyd@codeaurora.org> (for the sched_clock.c bit) Cc: Dave Jones <davej@codemonkey.org.uk> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Prarit Bhargava <prarit@redhat.com> Cc: Richard Cochran <richardcochran@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1426133800-29329-3-git-send-email-john.stultz@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
6086e346fd
commit
362fde0410
|
@ -469,6 +469,9 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
|
||||||
* @shift: cycle to nanosecond divisor (power of two)
|
* @shift: cycle to nanosecond divisor (power of two)
|
||||||
* @maxadj: maximum adjustment value to mult (~11%)
|
* @maxadj: maximum adjustment value to mult (~11%)
|
||||||
* @mask: bitmask for two's complement subtraction of non 64 bit counters
|
* @mask: bitmask for two's complement subtraction of non 64 bit counters
|
||||||
|
*
|
||||||
|
* NOTE: This function includes a safety margin of 50%, so that bad clock values
|
||||||
|
* can be detected.
|
||||||
*/
|
*/
|
||||||
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
|
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
|
||||||
{
|
{
|
||||||
|
@ -490,11 +493,14 @@ u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
|
||||||
max_cycles = min(max_cycles, mask);
|
max_cycles = min(max_cycles, mask);
|
||||||
max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
|
max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
|
||||||
|
|
||||||
|
/* Return 50% of the actual maximum, so we can detect bad values */
|
||||||
|
max_nsecs >>= 1;
|
||||||
|
|
||||||
return max_nsecs;
|
return max_nsecs;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* clocksource_max_deferment - Returns max time the clocksource can be deferred
|
* clocksource_max_deferment - Returns max time the clocksource should be deferred
|
||||||
* @cs: Pointer to clocksource
|
* @cs: Pointer to clocksource
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
@ -504,13 +510,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
|
||||||
|
|
||||||
max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
|
max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
|
||||||
cs->mask);
|
cs->mask);
|
||||||
/*
|
return max_nsecs;
|
||||||
* To ensure that the clocksource does not wrap whilst we are idle,
|
|
||||||
* limit the time the clocksource can be deferred by 12.5%. Please
|
|
||||||
* note a margin of 12.5% is used because this can be computed with
|
|
||||||
* a shift, versus say 10% which would require division.
|
|
||||||
*/
|
|
||||||
return max_nsecs - (max_nsecs >> 3);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
|
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
|
||||||
|
@ -659,10 +659,9 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
|
||||||
* conversion precision. 10 minutes is still a reasonable
|
* conversion precision. 10 minutes is still a reasonable
|
||||||
* amount. That results in a shift value of 24 for a
|
* amount. That results in a shift value of 24 for a
|
||||||
* clocksource with mask >= 40bit and f >= 4GHz. That maps to
|
* clocksource with mask >= 40bit and f >= 4GHz. That maps to
|
||||||
* ~ 0.06ppm granularity for NTP. We apply the same 12.5%
|
* ~ 0.06ppm granularity for NTP.
|
||||||
* margin as we do in clocksource_max_deferment()
|
|
||||||
*/
|
*/
|
||||||
sec = (cs->mask - (cs->mask >> 3));
|
sec = cs->mask;
|
||||||
do_div(sec, freq);
|
do_div(sec, freq);
|
||||||
do_div(sec, scale);
|
do_div(sec, scale);
|
||||||
if (!sec)
|
if (!sec)
|
||||||
|
@ -674,9 +673,8 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
|
||||||
NSEC_PER_SEC / scale, sec * scale);
|
NSEC_PER_SEC / scale, sec * scale);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* for clocksources that have large mults, to avoid overflow.
|
* Ensure clocksources that have large 'mult' values don't overflow
|
||||||
* Since mult may be adjusted by ntp, add an safety extra margin
|
* when adjusted.
|
||||||
*
|
|
||||||
*/
|
*/
|
||||||
cs->maxadj = clocksource_max_adjustment(cs);
|
cs->maxadj = clocksource_max_adjustment(cs);
|
||||||
while ((cs->mult + cs->maxadj < cs->mult)
|
while ((cs->mult + cs->maxadj < cs->mult)
|
||||||
|
|
|
@ -125,9 +125,9 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
|
||||||
|
|
||||||
new_mask = CLOCKSOURCE_MASK(bits);
|
new_mask = CLOCKSOURCE_MASK(bits);
|
||||||
|
|
||||||
/* calculate how many ns until we wrap */
|
/* calculate how many nanosecs until we risk wrapping */
|
||||||
wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
|
wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
|
||||||
new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
|
new_wrap_kt = ns_to_ktime(wrap);
|
||||||
|
|
||||||
/* update epoch for new counter and update epoch_ns from old counter*/
|
/* update epoch for new counter and update epoch_ns from old counter*/
|
||||||
new_epoch = read();
|
new_epoch = read();
|
||||||
|
|
Loading…
Reference in New Issue