sched: Remove redundant update_runtime notifier
migration_call() already does everything that update_runtime() does, so remove the latter. Furthermore, the current code risks hitting the BUG_ON at line 689 of rt.c during CPU hotplug while realtime threads are running, because the runtime is enabled twice even though rt_runtime may already have changed in between.

Signed-off-by: Neil Zhang <zhangwm@marvell.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1365685499-26515-1-git-send-email-zhangwm@marvell.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 41261b6a83
commit c5405a495e
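For context, a minimal user-space model of the redundancy this patch removes. This is not kernel code: the model_* functions and the toy struct below are hypothetical stand-ins for migration_call(), the removed update_runtime() notifier, and the per-CPU runqueue, and the kernel's real bandwidth accounting is not reproduced. It only illustrates that, with both notifiers registered, a single hotplug event re-enables RT runtime twice, and the second enable can clobber an rt_runtime value that changed in between.

/*
 * Toy model of the duplicated hotplug handling; all names are
 * hypothetical stand-ins, not the kernel's implementation.
 */
#include <stdio.h>

#define CPU_ONLINE 1UL

struct rq_model {                 /* stand-in for a per-CPU runqueue */
        long rt_runtime_us;       /* currently enabled RT runtime */
        int  enable_count;        /* times the runtime was (re)enabled */
};

static const long default_rt_runtime_us = 950000;

/* Stand-in for __enable_runtime(): reset runtime to the bandwidth default. */
static void model_enable_runtime(struct rq_model *rq)
{
        rq->rt_runtime_us = default_rt_runtime_us;
        rq->enable_count++;
}

/* Stand-in for migration_call(): already re-enables runtime on CPU_ONLINE. */
static void model_migration_call(struct rq_model *rq, unsigned long action)
{
        if (action == CPU_ONLINE)
                model_enable_runtime(rq);
}

/* Stand-in for the removed update_runtime() notifier: repeats the same work. */
static void model_update_runtime(struct rq_model *rq, unsigned long action)
{
        if (action == CPU_ONLINE)
                model_enable_runtime(rq);
}

int main(void)
{
        struct rq_model rq = { .rt_runtime_us = 0, .enable_count = 0 };

        /* One CPU_ONLINE event walks the notifier chain... */
        model_migration_call(&rq, CPU_ONLINE);

        /* ...rt_runtime may be changed before the second notifier runs... */
        rq.rt_runtime_us = 400000;

        /* ...and the redundant notifier enables (and resets) it again. */
        model_update_runtime(&rq, CPU_ONLINE);

        printf("runtime enabled %d times, rt_runtime=%ldus\n",
               rq.enable_count, rq.rt_runtime_us);
        return 0;
}

With update_runtime() gone, only the migration_call() path remains, so the RT runtime is toggled exactly once per hotplug transition.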
@@ -6285,9 +6285,6 @@ void __init sched_init_smp(void)
         hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
         hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
 
-        /* RT runtime code needs to handle some hotplug events */
-        hotcpu_notifier(update_runtime, 0);
-
         init_hrtick();
 
         /* Move init over to a non-isolated CPU */
@@ -699,15 +699,6 @@ balanced:
         }
 }
 
-static void disable_runtime(struct rq *rq)
-{
-        unsigned long flags;
-
-        raw_spin_lock_irqsave(&rq->lock, flags);
-        __disable_runtime(rq);
-        raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
 static void __enable_runtime(struct rq *rq)
 {
         rt_rq_iter_t iter;
@@ -732,37 +723,6 @@ static void __enable_runtime(struct rq *rq)
         }
 }
 
-static void enable_runtime(struct rq *rq)
-{
-        unsigned long flags;
-
-        raw_spin_lock_irqsave(&rq->lock, flags);
-        __enable_runtime(rq);
-        raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
-int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-        int cpu = (int)(long)hcpu;
-
-        switch (action) {
-        case CPU_DOWN_PREPARE:
-        case CPU_DOWN_PREPARE_FROZEN:
-                disable_runtime(cpu_rq(cpu));
-                return NOTIFY_OK;
-
-        case CPU_DOWN_FAILED:
-        case CPU_DOWN_FAILED_FROZEN:
-        case CPU_ONLINE:
-        case CPU_ONLINE_FROZEN:
-                enable_runtime(cpu_rq(cpu));
-                return NOTIFY_OK;
-
-        default:
-                return NOTIFY_DONE;
-        }
-}
-
 static int balance_runtime(struct rt_rq *rt_rq)
 {
         int more = 0;
@@ -1041,7 +1041,6 @@ static inline void idle_balance(int cpu, struct rq *rq)
 extern void sysrq_sched_debug_show(void);
 extern void sched_init_granularity(void);
 extern void update_max_interval(void);
-extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
 