perf/x86/intel/rapl: Convert to hotplug state machine
Install the callbacks via the state machine and let the core invoke
the callbacks on the already online CPUs.

Signed-off-by: Richard Cochran <rcochran@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153334.008808086@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 8b5b773d62
parent 9744f7b7b3
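For orientation before the diff: the conversion follows the standard multi-state
cpuhp_setup_state() pattern. The sketch below is illustrative only and is not code
from this commit; the callback, init/exit, and module names (my_*) are hypothetical,
while the two state constants and their string names are exactly the ones this patch
adds to the cpuhp_state enum.

#include <linux/module.h>
#include <linux/cpuhotplug.h>

static int my_prepare(unsigned int cpu)
{
        /* Runs before the CPU comes up; allocate per-package state here. */
        return 0;
}

static int my_online(unsigned int cpu)
{
        /* Nominate this CPU as the package's event collector if none exists. */
        return 0;
}

static int my_offline(unsigned int cpu)
{
        /* Migrate the collector role to another online CPU in the package. */
        return 0;
}

static int __init my_init(void)
{
        int ret;

        /* PREP state: invoked in process context before a CPU is brought up. */
        ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
                                my_prepare, NULL);
        if (ret)
                return ret;

        /*
         * AP online state: the core immediately calls the startup callback on
         * every CPU that is already online, and later on each hotplugged CPU;
         * the teardown callback runs when a CPU goes down.
         */
        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
                                "AP_PERF_X86_RAPL_ONLINE",
                                my_online, my_offline);
        if (ret)
                cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
        return ret;
}
module_init(my_init);

static void __exit my_exit(void)
{
        /* The _nocalls variants unregister without invoking the teardown callbacks. */
        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
        cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");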
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -555,14 +555,14 @@ const struct attribute_group *rapl_attr_groups[] = {
         NULL,
 };
 
-static void rapl_cpu_exit(int cpu)
+static int rapl_cpu_offline(unsigned int cpu)
 {
         struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
         int target;
 
         /* Check if exiting cpu is used for collecting rapl events */
         if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
-                return;
+                return 0;
 
         pmu->cpu = -1;
         /* Find a new cpu to collect rapl events */
@@ -574,9 +574,10 @@ static void rapl_cpu_exit(int cpu)
                 pmu->cpu = target;
                 perf_pmu_migrate_context(pmu->pmu, cpu, target);
         }
+        return 0;
 }
 
-static void rapl_cpu_init(int cpu)
+static int rapl_cpu_online(unsigned int cpu)
 {
         struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
         int target;
@@ -587,13 +588,14 @@ static void rapl_cpu_init(int cpu)
          */
         target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
         if (target < nr_cpu_ids)
-                return;
+                return 0;
 
         cpumask_set_cpu(cpu, &rapl_cpu_mask);
         pmu->cpu = cpu;
+        return 0;
 }
 
-static int rapl_cpu_prepare(int cpu)
+static int rapl_cpu_prepare(unsigned int cpu)
 {
         struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 
@@ -614,33 +616,6 @@ static int rapl_cpu_prepare(int cpu)
         return 0;
 }
 
-static int rapl_cpu_notifier(struct notifier_block *self,
-                             unsigned long action, void *hcpu)
-{
-        unsigned int cpu = (long)hcpu;
-
-        switch (action & ~CPU_TASKS_FROZEN) {
-        case CPU_UP_PREPARE:
-                rapl_cpu_prepare(cpu);
-                break;
-
-        case CPU_DOWN_FAILED:
-        case CPU_ONLINE:
-                rapl_cpu_init(cpu);
-                break;
-
-        case CPU_DOWN_PREPARE:
-                rapl_cpu_exit(cpu);
-                break;
-        }
-        return NOTIFY_OK;
-}
-
-static struct notifier_block rapl_cpu_nb = {
-        .notifier_call  = rapl_cpu_notifier,
-        .priority       = CPU_PRI_PERF + 1,
-};
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
         u64 msr_rapl_power_unit_bits;
@@ -691,24 +666,6 @@ static void __init rapl_advertise(void)
         }
 }
 
-static int __init rapl_prepare_cpus(void)
-{
-        unsigned int cpu, pkg;
-        int ret;
-
-        for_each_online_cpu(cpu) {
-                pkg = topology_logical_package_id(cpu);
-                if (rapl_pmus->pmus[pkg])
-                        continue;
-
-                ret = rapl_cpu_prepare(cpu);
-                if (ret)
-                        return ret;
-                rapl_cpu_init(cpu);
-        }
-        return 0;
-}
-
 static void cleanup_rapl_pmus(void)
 {
         int i;
@@ -835,35 +792,44 @@ static int __init rapl_pmu_init(void)
         if (ret)
                 return ret;
 
-        cpu_notifier_register_begin();
+        /*
+         * Install callbacks. Core will call them for each online cpu.
+         */
 
-        ret = rapl_prepare_cpus();
+        ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
+                                rapl_cpu_prepare, NULL);
         if (ret)
                 goto out;
 
+        ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
+                                "AP_PERF_X86_RAPL_ONLINE",
+                                rapl_cpu_online, rapl_cpu_offline);
+        if (ret)
+                goto out1;
+
         ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
         if (ret)
-                goto out;
+                goto out2;
 
-        __register_cpu_notifier(&rapl_cpu_nb);
-        cpu_notifier_register_done();
         rapl_advertise();
         return 0;
 
+out2:
+        cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+out1:
+        cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
 out:
         pr_warn("Initialization failed (%d), disabled\n", ret);
         cleanup_rapl_pmus();
-        cpu_notifier_register_done();
         return ret;
 }
 module_init(rapl_pmu_init);
 
 static void __exit intel_rapl_exit(void)
 {
-        cpu_notifier_register_begin();
-        __unregister_cpu_notifier(&rapl_cpu_nb);
+        cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+        cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
         perf_pmu_unregister(&rapl_pmus->pmu);
         cleanup_rapl_pmus();
-        cpu_notifier_register_done();
 }
 module_exit(intel_rapl_exit);

diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -8,6 +8,7 @@ enum cpuhp_state {
         CPUHP_PERF_X86_PREPARE,
         CPUHP_PERF_X86_UNCORE_PREP,
         CPUHP_PERF_X86_AMD_UNCORE_PREP,
+        CPUHP_PERF_X86_RAPL_PREP,
         CPUHP_NOTIFY_PREPARE,
         CPUHP_BRINGUP_CPU,
         CPUHP_AP_IDLE_DEAD,
@@ -34,6 +35,7 @@ enum cpuhp_state {
         CPUHP_AP_PERF_X86_ONLINE,
         CPUHP_AP_PERF_X86_UNCORE_ONLINE,
         CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
+        CPUHP_AP_PERF_X86_RAPL_ONLINE,
         CPUHP_AP_NOTIFY_ONLINE,
         CPUHP_AP_ONLINE_DYN,
         CPUHP_AP_ONLINE_DYN_END         = CPUHP_AP_ONLINE_DYN + 30,