smp/cfd: Convert core to hotplug state machine

Install the callbacks via the state machine. They are installed at runtime, so
smpcfd_prepare_cpu() needs to be invoked explicitly for the boot CPU, which is
already up when the callbacks are registered.

Signed-off-by: Richard Weinberger <richard@nod.at>
[ Added the dropped CPU dying case back in. ]
Signed-off-by: Richard Cochran <rcochran@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153337.818376366@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Authored by Richard Weinberger on 2016-07-13 17:17:01 +00:00, committed by Ingo Molnar
parent 6b2c28471d
commit 31487f8328
4 changed files with 49 additions and 48 deletions
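
For context on the pattern (a hedged sketch, not part of the patch): each hotplug-state callback takes the CPU number and returns 0 or a negative errno, which replaces the old notifier's action multiplexing and the notifier_from_errno()/NOTIFY_OK plumbing. The foo_* names below are illustrative only:

/* Hypothetical example of the converted callback shape; not from this patch. */
#include <linux/errno.h>	/* error codes a real implementation would return */

/* PREPARE-step startup: runs on a control CPU before @cpu is brought up. */
static int foo_prepare_cpu(unsigned int cpu)
{
        /* Allocate per-CPU resources for @cpu; return -ENOMEM etc. on failure. */
        return 0;
}

/* PREPARE-step teardown: runs on a control CPU once @cpu is down; this also
 * covers the old CPU_UP_CANCELED rollback case. */
static int foo_dead_cpu(unsigned int cpu)
{
        /* Free whatever foo_prepare_cpu() allocated. */
        return 0;
}

/* DYING-step teardown: runs on the outgoing CPU itself late in the offline
 * path, replacing the old CPU_DYING notifier action. */
static int foo_dying_cpu(unsigned int cpu)
{
        /* Drain per-CPU work that must not outlive @cpu. */
        return 0;
}

Core code such as smp/cfd wires its callbacks into the static cpuhp_step tables in kernel/cpu.c, and the new CPUHP_SMPCFD_PREPARE and CPUHP_AP_SMPCFD_DYING enum entries give them fixed positions in the bring-up and tear-down order, as the hunks below show.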

diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h

@@ -18,6 +18,7 @@ enum cpuhp_state {
         CPUHP_HRTIMERS_PREPARE,
         CPUHP_PROFILE_PREPARE,
         CPUHP_X2APIC_PREPARE,
+        CPUHP_SMPCFD_PREPARE,
         CPUHP_TIMERS_DEAD,
         CPUHP_NOTIFY_PREPARE,
         CPUHP_BRINGUP_CPU,
@@ -57,6 +58,7 @@ enum cpuhp_state {
         CPUHP_AP_ARM_CORESIGHT4_STARTING,
         CPUHP_AP_ARM64_ISNDEP_STARTING,
         CPUHP_AP_LEDTRIG_STARTING,
+        CPUHP_AP_SMPCFD_DYING,
         CPUHP_AP_X86_TBOOT_DYING,
         CPUHP_AP_NOTIFY_STARTING,
         CPUHP_AP_ONLINE,

diff --git a/include/linux/smp.h b/include/linux/smp.h
--- a/include/linux/smp.h
+++ b/include/linux/smp.h

@@ -196,4 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void);
 
 void smp_setup_processor_id(void);
 
+/* SMP core functions */
+int smpcfd_prepare_cpu(unsigned int cpu);
+int smpcfd_dead_cpu(unsigned int cpu);
+int smpcfd_dying_cpu(unsigned int cpu);
+
 #endif /* __LINUX_SMP_H */

diff --git a/kernel/cpu.c b/kernel/cpu.c
--- a/kernel/cpu.c
+++ b/kernel/cpu.c

@@ -1195,6 +1195,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                 .startup                = hrtimers_prepare_cpu,
                 .teardown               = hrtimers_dead_cpu,
         },
+        [CPUHP_SMPCFD_PREPARE] = {
+                .name                   = "SMPCFD prepare",
+                .startup                = smpcfd_prepare_cpu,
+                .teardown               = smpcfd_dead_cpu,
+        },
         [CPUHP_TIMERS_DEAD] = {
                 .name                   = "timers dead",
                 .startup                = NULL,
@@ -1218,6 +1223,10 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                 .teardown               = NULL,
                 .cant_stop              = true,
         },
+        [CPUHP_AP_SMPCFD_DYING] = {
+                .startup                = NULL,
+                .teardown               = smpcfd_dying_cpu,
+        },
         /*
          * Handled on controll processor until the plugged processor manages
          * this itself.
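
For comparison with the static table entries just above (used here because core code wants a fixed position in the hotplug order), a driver would normally register equivalent callbacks at runtime with cpuhp_setup_state(). The sketch below is hypothetical; the foo_* names and the "foo:online" string are made up for illustration:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static int foo_online_cpu(unsigned int cpu)
{
        /* Set up per-CPU driver state; runs on @cpu once it is online. */
        return 0;
}

static int foo_offline_cpu(unsigned int cpu)
{
        /* Undo foo_online_cpu() before @cpu goes away. */
        return 0;
}

static int foo_hp_state;

static int __init foo_init(void)
{
        int ret;

        /* CPUHP_AP_ONLINE_DYN allocates a free slot in the online section. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
                                foo_online_cpu, foo_offline_cpu);
        if (ret < 0)
                return ret;
        foo_hp_state = ret;     /* remember the allocated state for removal */
        return 0;
}

static void __exit foo_exit(void)
{
        cpuhp_remove_state(foo_hp_state);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

With a dynamic state, cpuhp_setup_state() returns the allocated state number on success, which is what foo_exit() hands back to cpuhp_remove_state(); the smpcfd callbacks instead sit at fixed enum positions so they run at a defined point relative to the other prepare and dying steps.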

diff --git a/kernel/smp.c b/kernel/smp.c
--- a/kernel/smp.c
+++ b/kernel/smp.c

@@ -33,38 +33,33 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static void flush_smp_call_function_queue(bool warn_cpu_offline);
 
-static int
-hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+int smpcfd_prepare_cpu(unsigned int cpu)
 {
-        long cpu = (long)hcpu;
         struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 
-        switch (action) {
-        case CPU_UP_PREPARE:
-        case CPU_UP_PREPARE_FROZEN:
         if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                      cpu_to_node(cpu)))
-                        return notifier_from_errno(-ENOMEM);
+                return -ENOMEM;
         cfd->csd = alloc_percpu(struct call_single_data);
         if (!cfd->csd) {
                 free_cpumask_var(cfd->cpumask);
-                        return notifier_from_errno(-ENOMEM);
+                return -ENOMEM;
         }
-                break;
 
-#ifdef CONFIG_HOTPLUG_CPU
-        case CPU_UP_CANCELED:
-        case CPU_UP_CANCELED_FROZEN:
-                /* Fall-through to the CPU_DEAD[_FROZEN] case. */
+        return 0;
+}
 
-        case CPU_DEAD:
-        case CPU_DEAD_FROZEN:
+int smpcfd_dead_cpu(unsigned int cpu)
+{
+        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
+
         free_cpumask_var(cfd->cpumask);
         free_percpu(cfd->csd);
-                break;
+        return 0;
+}
 
-        case CPU_DYING:
-        case CPU_DYING_FROZEN:
+int smpcfd_dying_cpu(unsigned int cpu)
+{
         /*
          * The IPIs for the smp-call-function callbacks queued by other
          * CPUs might arrive late, either due to hardware latencies or
@@ -75,27 +70,17 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
          * still pending.
          */
         flush_smp_call_function_queue(false);
-                break;
-#endif
-        };
-
-        return NOTIFY_OK;
+        return 0;
 }
 
-static struct notifier_block hotplug_cfd_notifier = {
-        .notifier_call          = hotplug_cfd,
-};
-
 void __init call_function_init(void)
 {
-        void *cpu = (void *)(long)smp_processor_id();
         int i;
 
         for_each_possible_cpu(i)
                 init_llist_head(&per_cpu(call_single_queue, i));
 
-        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
-        register_cpu_notifier(&hotplug_cfd_notifier);
+        smpcfd_prepare_cpu(smp_processor_id());
 }
 
 /*