sched/hotplug: Move migration CPU_DYING to sched_cpu_dying()
Remove the hotplug notifier and make it an explicit state.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160310120025.502222097@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit f2785ddb53
parent 7d97669933
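For readers coming from the notifier API, here is a minimal user-space sketch of the mechanism this patch retires. It is a simplified model, not kernel code: the *_model names are hypothetical and the chain handling is reduced to its essentials. The point it illustrates is that ordering between subsystems hinges on magic priority numbers such as the CPU_PRI_MIGRATION value deleted below.

/*
 * Toy user-space model of a CPU notifier chain: callbacks
 * self-register and are invoked with an action code, ordered only
 * by a numeric priority.
 */
#include <stdio.h>

#define CPU_DYING 1

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long action, void *hcpu);
	int priority;
	struct notifier_block *next;
};

static struct notifier_block *chain;

/* Insert by descending priority, as the kernel's notifier chains do. */
static void register_cpu_notifier_model(struct notifier_block *nb)
{
	struct notifier_block **p = &chain;

	while (*p && (*p)->priority >= nb->priority)
		p = &(*p)->next;
	nb->next = *p;
	*p = nb;
}

static int migration_call_model(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	if (action == CPU_DYING)
		printf("migrating tasks off CPU %ld\n", (long)hcpu);
	return 0;
}

static struct notifier_block migration_nb = {
	.notifier_call	= migration_call_model,
	.priority	= 10,	/* the old CPU_PRI_MIGRATION */
};

int main(void)
{
	struct notifier_block *nb;

	register_cpu_notifier_model(&migration_nb);
	/* CPU 1 goes down: walk the chain with the action code. */
	for (nb = chain; nb; nb = nb->next)
		nb->notifier_call(nb, CPU_DYING, (void *)1L);
	return 0;
}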
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -59,9 +59,7 @@ struct notifier_block;
  * CPU notifier priorities.
  */
 enum {
-	/* migration should happen before other stuff but after perf */
 	CPU_PRI_PERF		= 20,
-	CPU_PRI_MIGRATION	= 10,
 
 	/* bring up workqueues before normal notifiers and down after */
 	CPU_PRI_WORKQUEUE_UP	= 5,
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -376,6 +376,12 @@ extern int sched_cpu_starting(unsigned int cpu);
 extern int sched_cpu_activate(unsigned int cpu);
 extern int sched_cpu_deactivate(unsigned int cpu);
 
+#ifdef CONFIG_HOTPLUG_CPU
+extern int sched_cpu_dying(unsigned int cpu);
+#else
+# define sched_cpu_dying	NULL
+#endif
+
 extern void sched_show_task(struct task_struct *p);
 
 #ifdef CONFIG_LOCKUP_DETECTOR
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1223,7 +1223,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
 	[CPUHP_AP_SCHED_STARTING] = {
 		.name			= "sched:starting",
 		.startup		= sched_cpu_starting,
-		.teardown		= NULL,
+		.teardown		= sched_cpu_dying,
 	},
 	/*
 	 * Low level startup/teardown notifiers. Run with interrupts
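This one-line change is the heart of the patch: sched_cpu_dying becomes the teardown callback of the CPUHP_AP_SCHED_STARTING state, the symmetric counterpart of sched_cpu_starting. Ordering no longer needs a priority number because the hotplug core walks the state table forward on bringup and backward on teardown. A rough user-space model of that walk follows (simplified, hypothetical names; not the actual cpuhp core):

/*
 * Toy model of an explicit hotplug state table: startup callbacks
 * run in table order when a CPU comes up, teardown callbacks run in
 * reverse order when it goes down.
 */
#include <stdio.h>

struct step {
	const char *name;
	int (*startup)(unsigned int cpu);
	int (*teardown)(unsigned int cpu);
};

static int sched_starting_model(unsigned int cpu)
{
	printf("sched:starting on CPU %u\n", cpu);
	return 0;
}

static int sched_dying_model(unsigned int cpu)
{
	printf("sched:dying on CPU %u\n", cpu);
	return 0;
}

/* One slot, mirroring the CPUHP_AP_SCHED_STARTING entry above. */
static struct step ap_states_model[] = {
	{
		.name		= "sched:starting",
		.startup	= sched_starting_model,
		.teardown	= sched_dying_model,
	},
};

#define NR_STATES (sizeof(ap_states_model) / sizeof(ap_states_model[0]))

static void cpu_up_model(unsigned int cpu)
{
	size_t i;

	for (i = 0; i < NR_STATES; i++)
		if (ap_states_model[i].startup)
			ap_states_model[i].startup(cpu);
}

static void cpu_down_model(unsigned int cpu)
{
	size_t i;

	for (i = NR_STATES; i-- > 0; )
		if (ap_states_model[i].teardown)
			ap_states_model[i].teardown(cpu);
}

int main(void)
{
	cpu_up_model(1);
	cpu_down_model(1);
	return 0;
}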
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5411,51 +5411,6 @@ static void set_rq_offline(struct rq *rq)
 	}
 }
 
-/*
- * migration_call - callback that gets triggered when a CPU is added.
- * Here we can start up the necessary migration thread for the new CPU.
- */
-static int
-migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-	unsigned long flags;
-	struct rq *rq = cpu_rq(cpu);
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DYING:
-		sched_ttwu_pending();
-		/* Update our root-domain */
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (rq->rd) {
-			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
-			set_rq_offline(rq);
-		}
-		migrate_tasks(rq);
-		BUG_ON(rq->nr_running != 1); /* the migration thread */
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
-		calc_load_migrate(rq);
-		break;
-#endif
-	}
-
-	update_max_interval();
-
-	return NOTIFY_OK;
-}
-
-/*
- * Register at high priority so that task migration (migrate_all_tasks)
- * happens before everything else. This has to be lower priority than
- * the notifier in the perf_event subsystem, though.
- */
-static struct notifier_block migration_notifier = {
-	.notifier_call = migration_call,
-	.priority = CPU_PRI_MIGRATION,
-};
-
 static void set_cpu_rq_start_time(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7158,6 +7113,28 @@ int sched_cpu_starting(unsigned int cpu)
 	return 0;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+int sched_cpu_dying(unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Handle pending wakeups and then migrate everything off */
+	sched_ttwu_pending();
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (rq->rd) {
+		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
+		set_rq_offline(rq);
+	}
+	migrate_tasks(rq);
+	BUG_ON(rq->nr_running != 1);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	calc_load_migrate(rq);
+	update_max_interval();
+	return 0;
+}
+#endif
+
 void __init sched_init_smp(void)
 {
 	cpumask_var_t non_isolated_cpus;
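The BUG_ON(rq->nr_running != 1) line keeps the invariant the old notifier documented with its "/* the migration thread */" comment: after migrate_tasks() has pushed every movable task off the dying CPU, the only task left on its runqueue is the per-CPU migration (stopper) thread running the teardown itself. A toy model of that invariant (hypothetical *_model names, not kernel code):

/*
 * Toy model: draining a runqueue during CPU teardown leaves exactly
 * one task behind, the pinned migration thread.
 */
#include <assert.h>
#include <stdio.h>

struct rq_model {
	int nr_running;	/* tasks on this runqueue, incl. migration thread */
};

static void migrate_tasks_model(struct rq_model *rq)
{
	/* Push everything except the pinned migration thread away. */
	while (rq->nr_running > 1)
		rq->nr_running--;
}

int main(void)
{
	struct rq_model rq = { .nr_running = 5 };

	migrate_tasks_model(&rq);
	assert(rq.nr_running == 1);	/* the migration thread */
	printf("only the migration thread remains\n");
	return 0;
}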
@@ -7194,12 +7171,7 @@ void __init sched_init_smp(void)
 
 static int __init migration_init(void)
 {
-	void *cpu = (void *)(long)smp_processor_id();
-
 	sched_rq_cpu_starting(smp_processor_id());
-	migration_call(&migration_notifier, CPU_ONLINE, cpu);
-	register_cpu_notifier(&migration_notifier);
-
 	return 0;
 }
 early_initcall(migration_init);