time: Clean up CLOCK_MONOTONIC_RAW time handling
Now that we fixed the sub-ns handling for CLOCK_MONOTONIC_RAW, remove the duplicative tk->raw_time.tv_nsec, which can be stored in tk->tkr_raw.xtime_nsec (similarly to how it's handled for monotonic time).

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Miroslav Lichvar <mlichvar@redhat.com>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Stephen Boyd <stephen.boyd@linaro.org>
Cc: Kevin Brodsky <kevin.brodsky@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Daniel Mentz <danielmentz@google.com>
Tested-by: Daniel Mentz <danielmentz@google.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
parent 8e6cec1c7c
commit fc6eead7c1
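For readers unfamiliar with the scheme the message refers to, the following is a minimal userspace sketch, not kernel code: struct and function names (raw_clock, raw_accumulate, raw_to_timespec) and the mult/shift constants are made up for illustration. It shows the shifted fixed-point idea the diff applies to CLOCK_MONOTONIC_RAW: xtime_nsec holds nanoseconds scaled up by the clocksource shift, so the sub-nanosecond remainder of delta * mult is never dropped, whole seconds are carried into a separate seconds counter, and a readout shifts back down.

	/*
	 * Illustrative userspace sketch of shifted-nanosecond accumulation.
	 * Hypothetical names and constants; not the kernel implementation.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	struct raw_clock {
		uint64_t raw_sec;	/* whole seconds, like tk->raw_sec */
		uint64_t xtime_nsec;	/* nanoseconds << shift, like tk->tkr_raw.xtime_nsec */
		uint32_t mult;		/* cycles -> shifted-ns multiplier */
		uint32_t shift;		/* fixed-point shift of the clocksource */
	};

	/* Accumulate a cycle delta, carrying whole seconds out of xtime_nsec. */
	static void raw_accumulate(struct raw_clock *rc, uint64_t delta_cycles)
	{
		uint64_t snsec_per_sec = NSEC_PER_SEC << rc->shift;

		rc->xtime_nsec += delta_cycles * rc->mult;
		while (rc->xtime_nsec >= snsec_per_sec) {
			rc->xtime_nsec -= snsec_per_sec;
			rc->raw_sec++;
		}
	}

	/* Read out a sec/nsec pair; the sub-ns remainder stays in xtime_nsec. */
	static void raw_to_timespec(const struct raw_clock *rc,
				    uint64_t *sec, uint64_t *nsec)
	{
		*sec = rc->raw_sec;
		*nsec = rc->xtime_nsec >> rc->shift;
	}

	int main(void)
	{
		/* Example 19.2 MHz counter: cycles * mult >> shift is roughly ns */
		struct raw_clock rc = { .mult = 873813333, .shift = 24 };
		uint64_t sec, nsec;

		raw_accumulate(&rc, 19200000);	/* about one second worth of cycles */
		raw_to_timespec(&rc, &sec, &nsec);
		printf("%llu.%09llu\n", (unsigned long long)sec, (unsigned long long)nsec);
		return 0;
	}

This mirrors what the diff below does: the accumulation loop in logarithmic_accumulation() and tk_normalize_xtime() carries seconds into tk->raw_sec, and getrawmonotonic64() rebuilds a timespec from raw_sec plus the shifted-down remainder.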
@@ -220,10 +220,8 @@ void update_vsyscall(struct timekeeper *tk)
 	if (!use_syscall) {
 		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
 		vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
-		vdso_data->raw_time_sec = tk->raw_time.tv_sec;
-		vdso_data->raw_time_nsec = (tk->raw_time.tv_nsec <<
-					    tk->tkr_raw.shift) +
-					   tk->tkr_raw.xtime_nsec;
+		vdso_data->raw_time_sec = tk->raw_sec;
+		vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
 		vdso_data->xtime_clock_sec = tk->xtime_sec;
 		vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
 		vdso_data->cs_mono_mult = tk->tkr_mono.mult;
@@ -51,7 +51,7 @@ struct tk_read_base {
  * @clock_was_set_seq:	The sequence number of clock was set events
  * @cs_was_changed_seq:	The sequence number of clocksource change events
  * @next_leap_ktime:	CLOCK_MONOTONIC time value of a pending leap-second
- * @raw_time:		Monotonic raw base time in timespec64 format
+ * @raw_sec:		CLOCK_MONOTONIC_RAW time in seconds
  * @cycle_interval:	Number of clock cycles in one NTP interval
  * @xtime_interval:	Number of clock shifted nano seconds in one NTP
  *			interval.
@@ -93,7 +93,7 @@ struct timekeeper {
 	unsigned int		clock_was_set_seq;
 	u8			cs_was_changed_seq;
 	ktime_t			next_leap_ktime;
-	struct timespec64	raw_time;
+	u64			raw_sec;
 
 	/* The following members are for timekeeping internal use */
 	u64			cycle_interval;
@@ -72,6 +72,10 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
 		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
 		tk->xtime_sec++;
 	}
+	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
+		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
+		tk->raw_sec++;
+	}
 }
 
 static inline struct timespec64 tk_xtime(struct timekeeper *tk)
@@ -285,12 +289,14 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	/* if changing clocks, convert xtime_nsec shift units */
 	if (old_clock) {
 		int shift_change = clock->shift - old_clock->shift;
-		if (shift_change < 0)
+		if (shift_change < 0) {
 			tk->tkr_mono.xtime_nsec >>= -shift_change;
-		else
+			tk->tkr_raw.xtime_nsec >>= -shift_change;
+		} else {
 			tk->tkr_mono.xtime_nsec <<= shift_change;
+			tk->tkr_raw.xtime_nsec <<= shift_change;
+		}
 	}
-	tk->tkr_raw.xtime_nsec = 0;
 
 	tk->tkr_mono.shift = clock->shift;
 	tk->tkr_raw.shift = clock->shift;
@@ -619,9 +625,6 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
 	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
 	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 
-	/* Update the monotonic raw base */
-	tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
-
 	/*
 	 * The sum of the nanoseconds portions of xtime and
 	 * wall_to_monotonic can be greater/equal one second. Take
@@ -631,6 +634,11 @@ static inline void tk_update_ktime_data(struct timekeeper *tk)
 	if (nsec >= NSEC_PER_SEC)
 		seconds++;
 	tk->ktime_sec = seconds;
+
+	/* Update the monotonic raw base */
+	seconds = tk->raw_sec;
+	nsec = (u32)(tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift);
+	tk->tkr_raw.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
 }
 
 /* must hold timekeeper_lock */
@@ -672,7 +680,6 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
 static void timekeeping_forward_now(struct timekeeper *tk)
 {
 	u64 cycle_now, delta;
-	u64 nsec;
 
 	cycle_now = tk_clock_read(&tk->tkr_mono);
 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
@@ -684,10 +691,13 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 	/* If arch requires, add in get_arch_timeoffset() */
 	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
 
-	tk_normalize_xtime(tk);
 
-	nsec = clocksource_cyc2ns(delta, tk->tkr_raw.mult, tk->tkr_raw.shift);
-	timespec64_add_ns(&tk->raw_time, nsec);
+	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+
+	/* If arch requires, add in get_arch_timeoffset() */
+	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
+
+	tk_normalize_xtime(tk);
 }
 
 /**
@@ -1373,19 +1383,18 @@ int timekeeping_notify(struct clocksource *clock)
 void getrawmonotonic64(struct timespec64 *ts)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
-	struct timespec64 ts64;
 	unsigned long seq;
 	u64 nsecs;
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
+		ts->tv_sec = tk->raw_sec;
 		nsecs = timekeeping_get_ns(&tk->tkr_raw);
-		ts64 = tk->raw_time;
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
-	timespec64_add_ns(&ts64, nsecs);
-	*ts = ts64;
+	ts->tv_nsec = 0;
+	timespec64_add_ns(ts, nsecs);
 }
 EXPORT_SYMBOL(getrawmonotonic64);
 
@@ -1509,8 +1518,7 @@ void __init timekeeping_init(void)
 	tk_setup_internals(tk, clock);
 
 	tk_set_xtime(tk, &now);
-	tk->raw_time.tv_sec = 0;
-	tk->raw_time.tv_nsec = 0;
+	tk->raw_sec = 0;
 	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
 		boot = tk_xtime(tk);
 
@@ -2011,15 +2019,12 @@ static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
 	*clock_set |= accumulate_nsecs_to_secs(tk);
 
 	/* Accumulate raw time */
-	tk->tkr_raw.xtime_nsec += (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
 	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
 	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
 	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
 		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
-		tk->raw_time.tv_sec++;
+		tk->raw_sec++;
 	}
-	tk->raw_time.tv_nsec = tk->tkr_raw.xtime_nsec >> tk->tkr_raw.shift;
-	tk->tkr_raw.xtime_nsec -= (u64)tk->raw_time.tv_nsec << tk->tkr_raw.shift;
 
 	/* Accumulate error between NTP and clock interval */
 	tk->ntp_error += tk->ntp_tick << shift;