hrtimer: clean up cpu->base locking tricks
In order to more easily allow for the scheduler to use timers, clean up
the locking a bit.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 2d44ae4d71
parent 48d5e25821
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1063,7 +1063,9 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 		basenow = ktime_add(now, base->offset);
 
 		while ((node = base->first)) {
+			enum hrtimer_restart (*fn)(struct hrtimer *);
 			struct hrtimer *timer;
+			int restart;
 
 			timer = rb_entry(node, struct hrtimer, node);
 
@@ -1091,13 +1093,29 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 					 HRTIMER_STATE_CALLBACK, 0);
 			timer_stats_account_hrtimer(timer);
 
+			fn = timer->function;
+			if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+				/*
+				 * Used for scheduler timers, avoid lock
+				 * inversion with rq->lock and tasklist_lock.
+				 *
+				 * These timers are required to deal with
+				 * enqueue expiry themselves and are not
+				 * allowed to migrate.
+				 */
+				spin_unlock(&cpu_base->lock);
+				restart = fn(timer);
+				spin_lock(&cpu_base->lock);
+			} else
+				restart = fn(timer);
+
 			/*
 			 * Note: We clear the CALLBACK bit after
 			 * enqueue_hrtimer to avoid reprogramming of
 			 * the event hardware. This happens at the end
 			 * of this function anyway.
 			 */
-			if (timer->function(timer) != HRTIMER_NORESTART) {
+			if (restart != HRTIMER_NORESTART) {
 				BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
 				enqueue_hrtimer(timer, base, 0);
 			}
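The core of the change is the dispatch logic above: for HRTIMER_CB_IRQSAFE_NO_SOFTIRQ
timers, hrtimer_interrupt() now drops cpu_base->lock around the callback so the
handler can take rq->lock or tasklist_lock without inverting the lock order. A
minimal sketch of that pattern, distilled from the hunk; run_expired_callback()
is a hypothetical helper for illustration, not a function in this patch:

/*
 * Sketch of the callback dispatch introduced above, assuming the
 * cb_mode field shown in the hunk. Called with cpu_base->lock held.
 */
static int run_expired_callback(struct hrtimer_cpu_base *cpu_base,
				struct hrtimer *timer)
{
	enum hrtimer_restart (*fn)(struct hrtimer *) = timer->function;
	int restart;

	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
		/*
		 * Scheduler-style timer: run without cpu_base->lock so
		 * the handler may take rq->lock or tasklist_lock. Such
		 * handlers must re-enqueue themselves and never migrate.
		 */
		spin_unlock(&cpu_base->lock);
		restart = fn(timer);
		spin_lock(&cpu_base->lock);
	} else {
		/* Ordinary callback, invoked with cpu_base->lock held. */
		restart = fn(timer);
	}

	return restart;
}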
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -514,7 +514,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 {
 	struct tick_sched *ts =
 		container_of(timer, struct tick_sched, sched_timer);
-	struct hrtimer_cpu_base *base = timer->base->cpu_base;
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 	int cpu = smp_processor_id();
@@ -552,15 +551,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 			touch_softlockup_watchdog();
 			ts->idle_jiffies++;
 		}
-		/*
-		 * update_process_times() might take tasklist_lock, hence
-		 * drop the base lock. sched-tick hrtimers are per-CPU and
-		 * never accessible by userspace APIs, so this is safe to do.
-		 */
-		spin_unlock(&base->lock);
 		update_process_times(user_mode(regs));
 		profile_tick(CPU_PROFILING);
-		spin_lock(&base->lock);
 	}
 
 	/* Do not restart, when we are in the idle loop */
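With hrtimer_interrupt() now dropping cpu_base->lock itself for this class of
timer, the open-coded unlock/lock around update_process_times() in
tick_sched_timer() becomes redundant and is removed. For that to be safe, the
tick timer has to opt into the new path via cb_mode. A hedged setup sketch,
assuming only the cb_mode field and flag from the hrtimer.c hunk;
setup_nosoftirq_timer() and my_sched_timer_fn are placeholder names, not part
of this patch:

/* Placeholder handler: on this path it must re-enqueue itself if needed. */
static enum hrtimer_restart my_sched_timer_fn(struct hrtimer *timer);

/*
 * Sketch of how a per-CPU, scheduler-style timer would select the
 * lock-dropping callback path introduced in hrtimer_interrupt().
 */
static void setup_nosoftirq_timer(struct hrtimer *timer)
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->function = my_sched_timer_fn;
	timer->cb_mode  = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
}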