timekeeper: Move tk_xtime to core code

No users outside of the core.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Thomas Gleixner, 2014-07-16 21:04:05 +00:00 (committed by John Stultz)
parent d6d29896c6
commit c905fae43f
2 changed files with 43 additions and 45 deletions
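
For context: tk_xtime() decodes the timekeeper's internal fixed-point time. tk->xtime_sec holds whole seconds, while tk->xtime_nsec holds nanoseconds left-shifted by tk->shift, so the helper shifts right to recover plain nanoseconds. Below is a minimal userspace sketch of that decode, assuming hypothetical stand-in types (the real fields live in struct timekeeper):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel types that tk_xtime() uses. */
struct timespec64_demo { int64_t tv_sec; long tv_nsec; };
struct timekeeper_demo {
	uint64_t xtime_sec;	/* whole seconds */
	uint64_t xtime_nsec;	/* nanoseconds << shift (fixed point) */
	uint32_t shift;		/* clocksource shift factor */
};

/* Same decode as the moved helper: shift right, dropping sub-ns bits. */
static struct timespec64_demo tk_xtime_demo(const struct timekeeper_demo *tk)
{
	struct timespec64_demo ts;

	ts.tv_sec = (int64_t)tk->xtime_sec;
	ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
	return ts;
}

int main(void)
{
	/* 42 s + 123456789 ns, stored in shifted form with shift = 8 */
	struct timekeeper_demo tk = { 42, 123456789ULL << 8, 8 };
	struct timespec64_demo ts = tk_xtime_demo(&tk);

	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);	/* 42.123456789 */
	return 0;
}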

include/linux/timekeeper_internal.h

@@ -71,16 +71,6 @@ struct timekeeper {
 };
 
-static inline struct timespec64 tk_xtime(struct timekeeper *tk)
-{
-	struct timespec64 ts;
-
-	ts.tv_sec = tk->xtime_sec;
-	ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
-	return ts;
-}
-
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
 extern void update_vsyscall(struct timekeeper *tk);
@@ -92,14 +82,6 @@ extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
 			struct clocksource *c, u32 mult);
 extern void update_vsyscall_tz(void);
 
-static inline void update_vsyscall(struct timekeeper *tk)
-{
-	struct timespec xt;
-
-	xt = tk_xtime(tk);
-	update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
-}
-
 #else
 
 static inline void update_vsyscall(struct timekeeper *tk)

kernel/time/timekeeping.c

@@ -51,6 +51,15 @@ static inline void tk_normalize_xtime(struct timekeeper *tk)
 	}
 }
 
+static inline struct timespec64 tk_xtime(struct timekeeper *tk)
+{
+	struct timespec64 ts;
+
+	ts.tv_sec = tk->xtime_sec;
+	ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
+	return ts;
+}
+
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
 	tk->xtime_sec = ts->tv_sec;
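
tk_set_xtime() in the context above is the encode-side counterpart: it stores tv_nsec left-shifted by tk->shift. A hedged round-trip sketch with the same stand-in types, showing that sub-nanosecond bits picked up during accumulation are truncated by the decode; that truncation is exactly what old_vsyscall_fixup() in the next hunk compensates for:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in; the real fields live in struct timekeeper. */
struct tk_demo { uint64_t xtime_sec; uint64_t xtime_nsec; uint32_t shift; };

/* Encode side, mirroring tk_set_xtime(): nanoseconds enter shifted. */
static void tk_set_xtime_demo(struct tk_demo *tk, int64_t sec, long nsec)
{
	tk->xtime_sec = (uint64_t)sec;
	tk->xtime_nsec = (uint64_t)nsec << tk->shift;
}

/* Decode side, mirroring tk_xtime(): the right shift truncates. */
static long tk_xtime_nsec_demo(const struct tk_demo *tk)
{
	return (long)(tk->xtime_nsec >> tk->shift);
}

int main(void)
{
	struct tk_demo tk = { 0, 0, 8 };

	tk_set_xtime_demo(&tk, 0, 999999999);
	tk.xtime_nsec += 129;	/* pretend accumulation left sub-ns bits */

	/* The 129/256 of a nanosecond is silently dropped on decode. */
	printf("%ld\n", tk_xtime_nsec_demo(&tk));	/* still 999999999 */
	return 0;
}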
@@ -199,6 +208,40 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 	return nsec + arch_gettimeoffset();
 }
 
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
+
+static inline void update_vsyscall(struct timekeeper *tk)
+{
+	struct timespec xt;
+
+	xt = tk_xtime(tk);
+	update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
+}
+
+static inline void old_vsyscall_fixup(struct timekeeper *tk)
+{
+	s64 remainder;
+
+	/*
+	 * Store only full nanoseconds into xtime_nsec after rounding
+	 * it up and add the remainder to the error difference.
+	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
+	 * by truncating the remainder in vsyscalls. However, it causes
+	 * additional work to be done in timekeeping_adjust(). Once
+	 * the vsyscall implementations are converted to use xtime_nsec
+	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
+	 * users are removed, this can be killed.
+	 */
+	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
+	tk->xtime_nsec -= remainder;
+	tk->xtime_nsec += 1ULL << tk->shift;
+	tk->ntp_error += remainder << tk->ntp_error_shift;
+	tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
+}
+#else
+#define old_vsyscall_fixup(tk)
+#endif
+
 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
 
 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
@@ -1330,33 +1373,6 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
 	return offset;
 }
 
-#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
-static inline void old_vsyscall_fixup(struct timekeeper *tk)
-{
-	s64 remainder;
-
-	/*
-	 * Store only full nanoseconds into xtime_nsec after rounding
-	 * it up and add the remainder to the error difference.
-	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
-	 * by truncating the remainder in vsyscalls. However, it causes
-	 * additional work to be done in timekeeping_adjust(). Once
-	 * the vsyscall implementations are converted to use xtime_nsec
-	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
-	 * users are removed, this can be killed.
-	 */
-	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
-	tk->xtime_nsec -= remainder;
-	tk->xtime_nsec += 1ULL << tk->shift;
-	tk->ntp_error += remainder << tk->ntp_error_shift;
-	tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
-}
-#else
-#define old_vsyscall_fixup(tk)
-#endif
-
 /**
  * update_wall_time - Uses the current clocksource to increment the wall time
  *
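
To make the XXX comment concrete: old_vsyscall_fixup() rounds xtime_nsec up to the next whole nanosecond and books the over-count into ntp_error, where the NTP adjustment loop steers it back out over time. A worked standalone example of the same arithmetic, with the timekeeper fields pulled out into plain variables (values chosen for illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 8;		/* clocksource shift, for illustration */
	uint32_t ntp_error_shift = 4;	/* extra precision for the NTP error */
	uint64_t xtime_nsec = (123456789ULL << shift) + 57; /* 57 sub-ns bits */
	int64_t ntp_error = 0;

	/* Split off the fractional (sub-nanosecond) shifted bits. */
	int64_t remainder = (int64_t)(xtime_nsec & ((1ULL << shift) - 1));

	/* Round xtime_nsec up to the next full nanosecond... */
	xtime_nsec -= (uint64_t)remainder;
	xtime_nsec += 1ULL << shift;

	/*
	 * ...and charge the difference to ntp_error, so the adjustment
	 * code can steer the over-count back out.
	 */
	ntp_error += remainder << ntp_error_shift;
	ntp_error -= (int64_t)(1ULL << shift) << ntp_error_shift;

	printf("ns now: %llu\n", (unsigned long long)(xtime_nsec >> shift)); /* 123456790 */
	printf("ntp_error: %lld\n", (long long)ntp_error); /* 912 - 4096 = -3184 */
	return 0;
}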