time: Kill xtime_lock, replacing it with jiffies_lock
Now that timekeeping is protected by its own locks, rename the xtime_lock to jiffies_lock to better describe what it protects.

CC: Thomas Gleixner <tglx@linutronix.de>
CC: Eric Dumazet <eric.dumazet@gmail.com>
CC: Richard Cochran <richardcochran@gmail.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
commit d6ad418763
parent 47c8c91b2d
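The pattern this rename touches everywhere is the seqlock read loop: a reader samples the sequence count of jiffies_lock, reads jiffies_64, and retries if a writer got in between, exactly as get_jiffies_64() does in the hunks below. The following is a minimal, self-contained sketch of that retry protocol in plain C11; seq_t, seq_read_begin() and seq_read_retry() are hypothetical stand-ins for the kernel's seqlock_t, read_seqbegin() and read_seqretry(), not kernel code.

/*
 * Sketch of the seqlock read protocol protecting a 64-bit counter such as
 * jiffies_64. Hypothetical helpers; illustrates the protocol only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	atomic_uint sequence;	/* even: no writer active, odd: write in progress */
	uint64_t value;		/* the protected 64-bit counter */
} seq_t;

static unsigned seq_read_begin(seq_t *s)
{
	unsigned seq;

	/* Wait until no writer is active (sequence is even). */
	while ((seq = atomic_load_explicit(&s->sequence,
					   memory_order_acquire)) & 1)
		;
	return seq;
}

static int seq_read_retry(seq_t *s, unsigned start)
{
	/* Order the data reads before re-checking the sequence count. */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&s->sequence, memory_order_relaxed) != start;
}

/* Mirrors get_jiffies_64(): loop until a consistent value was read. */
static uint64_t read_counter(seq_t *s)
{
	unsigned seq;
	uint64_t ret;

	do {
		seq = seq_read_begin(s);
		ret = s->value;
	} while (seq_read_retry(s, seq));
	return ret;
}

int main(void)
{
	seq_t counter = { .value = 12345 };

	printf("counter = %llu\n", (unsigned long long)read_counter(&counter));
	return 0;
}

The write side bumps the sequence to an odd value before touching the data and back to even afterwards, so a torn read is always detected and retried; that is why the diff can simply swap xtime_lock for jiffies_lock without changing any of the read loops.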
@@ -35,7 +35,7 @@ static cycle_t i8253_read(struct clocksource *cs)
 	raw_spin_lock_irqsave(&i8253_lock, flags);
 	/*
-	 * Although our caller may have the read side of xtime_lock,
+	 * Although our caller may have the read side of jiffies_lock,
 	 * this is now a seqlock, and we are cheating in this routine
 	 * by having side effects on state that we cannot undo if
 	 * there is a collision on the seqlock and our caller has to
@@ -70,11 +70,12 @@ extern int register_refined_jiffies(long clock_tick_rate);
 
 /*
  * The 64-bit value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
+ * without sampling the sequence number in jiffies_lock.
  * get_jiffies_64() will do this for you as appropriate.
  */
 extern u64 __jiffy_data jiffies_64;
 extern unsigned long volatile __jiffy_data jiffies;
+extern seqlock_t jiffies_lock;
 
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void);
@@ -67,6 +67,8 @@ static struct clocksource clocksource_jiffies = {
 	.shift		= JIFFIES_SHIFT,
 };
 
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void)
 {
@@ -74,9 +76,9 @@ u64 get_jiffies_64(void)
 	u64 ret;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&jiffies_lock);
 		ret = jiffies_64;
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&jiffies_lock, seq));
 	return ret;
 }
 EXPORT_SYMBOL(get_jiffies_64);
@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
 static void tick_periodic(int cpu)
 {
 	if (tick_do_timer_cpu == cpu) {
-		write_seqlock(&xtime_lock);
+		write_seqlock(&jiffies_lock);
 
 		/* Keep track of the next tick event */
 		tick_next_period = ktime_add(tick_next_period, tick_period);
 
 		do_timer(1);
-		write_sequnlock(&xtime_lock);
+		write_sequnlock(&jiffies_lock);
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +130,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 		ktime_t next;
 
 		do {
-			seq = read_seqbegin(&xtime_lock);
+			seq = read_seqbegin(&jiffies_lock);
 			next = tick_next_period;
-		} while (read_seqretry(&xtime_lock, seq));
+		} while (read_seqretry(&jiffies_lock, seq));
 
 		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
@@ -141,4 +141,3 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 #endif
 
 extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
@@ -31,7 +31,7 @@
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
- * The time, when the last jiffy update happened. Protected by xtime_lock.
+ * The time, when the last jiffy update happened. Protected by jiffies_lock.
  */
 static ktime_t last_jiffies_update;
 
@@ -49,14 +49,14 @@ static void tick_do_update_jiffies64(ktime_t now)
 	ktime_t delta;
 
 	/*
-	 * Do a quick check without holding xtime_lock:
+	 * Do a quick check without holding jiffies_lock:
 	 */
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 < tick_period.tv64)
 		return;
 
-	/* Reevalute with xtime_lock held */
-	write_seqlock(&xtime_lock);
+	/* Reevalute with jiffies_lock held */
+	write_seqlock(&jiffies_lock);
 
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 }
 
 /*
@@ -89,12 +89,12 @@ static ktime_t tick_init_jiffy_update(void)
 {
 	ktime_t period;
 
-	write_seqlock(&xtime_lock);
+	write_seqlock(&jiffies_lock);
 	/* Did we start the jiffies update yet ? */
 	if (last_jiffies_update.tv64 == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 	return period;
 }
 
@@ -282,11 +282,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
 		time_delta = timekeeping_max_deferment();
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
 	    arch_needs_cpu(cpu)) {
@@ -658,7 +658,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	 * concurrency: This happens only when the cpu in charge went
 	 * into a long sleep. If two cpus happen to assign themself to
 	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
+	 * jiffies_lock.
 	 */
 	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
@@ -810,7 +810,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	 * concurrency: This happens only when the cpu in charge went
 	 * into a long sleep. If two cpus happen to assign themself to
 	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
+	 * jiffies_lock.
 	 */
 	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		tick_do_timer_cpu = cpu;
@@ -25,12 +25,6 @@
 
 static struct timekeeper timekeeper;
 
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
@@ -1299,9 +1293,7 @@ struct timespec get_monotonic_coarse(void)
 }
 
 /*
- * The 64-bit jiffies value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * jiffies is defined in the linker script...
+ * Must hold jiffies_lock
  */
 void do_timer(unsigned long ticks)
 {
@@ -1389,7 +1381,7 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
  */
 void xtime_update(unsigned long ticks)
 {
-	write_seqlock(&xtime_lock);
+	write_seqlock(&jiffies_lock);
 	do_timer(ticks);
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 }