hrtimer: removing all ur callback modes, fix hotplug
Impact: fix hrtimer locking (reported by lockdep) in the CPU hotplug case

This addition fixes the hotplug locking issue on my machine.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 37810659ea
parent ca109491f6
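The pattern the patch moves to can be shown in isolation. Previously the migration path could call __run_hrtimer() and allow event-device reprogramming while still holding the nested base locks, which is what lockdep flagged. Below is a minimal, self-contained userspace sketch of the new discipline (hypothetical pthread stand-ins for the per-cpu hrtimer bases; none of these names are the kernel's): while both locks are held, timers are only moved; any expired work runs in a second step after the locks are dropped.

#include <pthread.h>
#include <stdio.h>

struct base {
        pthread_mutex_t lock;
        int pending;            /* stand-in for the rbtree of queued timers */
};

/*
 * Move everything from the dead cpu's base to the current one.  Nothing
 * is executed and no event device is touched while both locks are held;
 * that is the rule the patch establishes.
 */
static int migrate(struct base *old, struct base *new)
{
        int moved;

        pthread_mutex_lock(&new->lock);
        pthread_mutex_lock(&old->lock);  /* nested, like SINGLE_DEPTH_NESTING */

        moved = old->pending;            /* "enqueue_hrtimer(..., 0)": queue only */
        new->pending += moved;
        old->pending = 0;

        pthread_mutex_unlock(&old->lock);
        pthread_mutex_unlock(&new->lock);

        return moved;                    /* caller tickles the destination later */
}

/* The deferred "tickle": run the migrated work with no nested lock held. */
static void tickle(struct base *b)
{
        pthread_mutex_lock(&b->lock);
        printf("running %d migrated timers\n", b->pending);
        b->pending = 0;
        pthread_mutex_unlock(&b->lock);
}

int main(void)
{
        struct base dead = { PTHREAD_MUTEX_INITIALIZER, 3 };
        struct base dest = { PTHREAD_MUTEX_INITIALIZER, 0 };

        if (migrate(&dead, &dest))
                tickle(&dest);           /* the CPU_POST_DEAD step */
        return 0;
}

The kernel version defers the second step further still, to an IPI sent from the CPU_POST_DEAD notifier, because the migrated timers now live in the destination CPU's base and must be run there.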
@@ -1496,7 +1496,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 #ifdef CONFIG_HOTPLUG_CPU
 
 static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-                                struct hrtimer_clock_base *new_base, int dcpu)
+                                struct hrtimer_clock_base *new_base)
 {
        struct hrtimer *timer;
        struct rb_node *node;
@@ -1514,40 +1514,34 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                timer->base = new_base;
                /*
-                * Enqueue the timer. Allow reprogramming of the event device
+                * Enqueue the timers on the new cpu, but do not reprogram
+                * the timer as that would enable a deadlock between
+                * hrtimer_enqueue_reprogramm() running the timer and us still
+                * holding a nested base lock.
+                *
+                * Instead we tickle the hrtimer interrupt after the migration
+                * is done, which will run all expired timers and re-programm
+                * the timer device.
                 */
-               enqueue_hrtimer(timer, new_base, 1);
+               enqueue_hrtimer(timer, new_base, 0);
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-               /*
-                * Happens with high res enabled when the timer was
-                * already expired and the callback mode is
-                * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
-                * enqueue code does not move them to the soft irq
-                * pending list for performance/latency reasons, but
-                * in the migration state, we need to do that
-                * otherwise we end up with a stale timer.
-                */
-               if (timer->state == HRTIMER_STATE_MIGRATE) {
-                       /* XXX: running on offline cpu */
-                       __run_hrtimer(timer);
-               }
-#endif
                /* Clear the migration state bit */
                timer->state &= ~HRTIMER_STATE_MIGRATE;
        }
 }
 
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
 {
        struct hrtimer_cpu_base *old_base, *new_base;
-       int i;
+       int dcpu, i;
 
-       BUG_ON(cpu_online(cpu));
-       old_base = &per_cpu(hrtimer_bases, cpu);
+       BUG_ON(cpu_online(scpu));
+       old_base = &per_cpu(hrtimer_bases, scpu);
        new_base = &get_cpu_var(hrtimer_bases);
 
-       tick_cancel_sched_timer(cpu);
+       dcpu = smp_processor_id();
+
+       tick_cancel_sched_timer(scpu);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
@@ -1557,32 +1551,47 @@ static void migrate_hrtimers(int cpu)
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                migrate_hrtimer_list(&old_base->clock_base[i],
-                                    &new_base->clock_base[i], cpu);
+                                    &new_base->clock_base[i]);
        }
 
        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
        put_cpu_var(hrtimer_bases);
+
+       return dcpu;
 }
 
+static void tickle_timers(void *arg)
+{
+       hrtimer_peek_ahead_timers();
+}
+
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
 {
-       unsigned int cpu = (long)hcpu;
+       int dcpu = -1, scpu = (long)hcpu;
 
        switch (action) {
 
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
-               init_hrtimers_cpu(cpu);
+               init_hrtimers_cpu(scpu);
                break;
 
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
-               clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
-               migrate_hrtimers(cpu);
+               clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+               dcpu = migrate_hrtimers(scpu);
+               break;
+
+       case CPU_POST_DEAD:
+               if (dcpu == -1)
+                       break;
+
+               smp_call_function_single(dcpu, tickle_timers, NULL, 0);
                break;
 #endif
 
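Pieced together, the hunks above give the following hotplug sequence (the final 0 passed to smp_call_function_single() asks for a fire-and-forget IPI rather than a synchronous one):

/* CPU_DEAD / CPU_DEAD_FROZEN -- source cpu scpu is already offline */
dcpu = migrate_hrtimers(scpu);          /* timers moved, nothing run, nothing reprogrammed */

/* CPU_POST_DEAD */
smp_call_function_single(dcpu, tickle_timers, NULL, 0);

/* on dcpu, in the IPI */
tickle_timers();                        /* -> hrtimer_peek_ahead_timers(): runs expired
                                           timers and re-programs the event device */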