Merge branch 'pm-cpufreq'

* pm-cpufreq:
  cpufreq: intel_pstate: Use most recent guaranteed performance values
  cpufreq: intel_pstate: Implement the ->adjust_perf() callback
  cpufreq: Add special-purpose fast-switching callback for drivers
  cpufreq: schedutil: Add util to struct sg_cpu
  cppc_cpufreq: replace per-cpu data array with a list
  cppc_cpufreq: expose information on frequency domains
  cppc_cpufreq: clarify support for coordination types
  cppc_cpufreq: use policy->cpu as driver of frequency setting
  ACPI: processor: fix NONE coordination for domain mapping failure
  ACPI: processor: Drop duplicate setting of shared_cpu_map

commit c3a74f8e25
Documentation/ABI/testing/sysfs-devices-system-cpu

@@ -264,7 +264,8 @@ Description: Discover CPUs in the same CPU frequency coordination domain
 		attribute is useful for user space DVFS controllers to get better
 		power/performance results for platforms using acpi-cpufreq.
 
-		This file is only present if the acpi-cpufreq driver is in use.
+		This file is only present if the acpi-cpufreq or the cppc-cpufreq
+		drivers are in use.
 
 
 What:		/sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1}
drivers/acpi/cppc_acpi.c

@@ -414,109 +414,88 @@ end:
 	return result;
 }
 
+bool acpi_cpc_valid(void)
+{
+	struct cpc_desc *cpc_ptr;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+		if (!cpc_ptr)
+			return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(acpi_cpc_valid);
+
 /**
- * acpi_get_psd_map - Map the CPUs in a common freq domain.
- * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
+ * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
+ * @cpu: Find all CPUs that share a domain with cpu.
+ * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
  *
  * Return: 0 for success or negative value for err.
  */
-int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
+int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
 {
-	int count_target;
-	int retval = 0;
-	unsigned int i, j;
-	cpumask_var_t covered_cpus;
-	struct cppc_cpudata *pr, *match_pr;
-	struct acpi_psd_package *pdomain;
-	struct acpi_psd_package *match_pdomain;
 	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
-
-	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
-		return -ENOMEM;
+	struct acpi_psd_package *match_pdomain;
+	struct acpi_psd_package *pdomain;
+	int count_target, i;
 
 	/*
 	 * Now that we have _PSD data from all CPUs, let's setup P-state
 	 * domain info.
 	 */
+	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+	if (!cpc_ptr)
+		return -EFAULT;
+
+	pdomain = &(cpc_ptr->domain_info);
+	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+	if (pdomain->num_processors <= 1)
+		return 0;
+
+	/* Validate the Domain info */
+	count_target = pdomain->num_processors;
+	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
+		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
+		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
+	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
+		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+
 	for_each_possible_cpu(i) {
-		if (cpumask_test_cpu(i, covered_cpus))
+		if (i == cpu)
 			continue;
 
-		pr = all_cpu_data[i];
-		cpc_ptr = per_cpu(cpc_desc_ptr, i);
-		if (!cpc_ptr) {
-			retval = -EFAULT;
-			goto err_ret;
-		}
+		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
+		if (!match_cpc_ptr)
+			goto err_fault;
 
-		pdomain = &(cpc_ptr->domain_info);
-		cpumask_set_cpu(i, pr->shared_cpu_map);
-		cpumask_set_cpu(i, covered_cpus);
-		if (pdomain->num_processors <= 1)
+		match_pdomain = &(match_cpc_ptr->domain_info);
+		if (match_pdomain->domain != pdomain->domain)
 			continue;
 
-		/* Validate the Domain info */
-		count_target = pdomain->num_processors;
-		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
-			pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
-			pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
-		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
-			pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+		/* Here i and cpu are in the same domain */
+		if (match_pdomain->num_processors != count_target)
+			goto err_fault;
 
-		for_each_possible_cpu(j) {
-			if (i == j)
-				continue;
+		if (pdomain->coord_type != match_pdomain->coord_type)
+			goto err_fault;
 
-			match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
-			if (!match_cpc_ptr) {
-				retval = -EFAULT;
-				goto err_ret;
-			}
-
-			match_pdomain = &(match_cpc_ptr->domain_info);
-			if (match_pdomain->domain != pdomain->domain)
-				continue;
-
-			/* Here i and j are in the same domain */
-			if (match_pdomain->num_processors != count_target) {
-				retval = -EFAULT;
-				goto err_ret;
-			}
-
-			if (pdomain->coord_type != match_pdomain->coord_type) {
-				retval = -EFAULT;
-				goto err_ret;
-			}
-
-			cpumask_set_cpu(j, covered_cpus);
-			cpumask_set_cpu(j, pr->shared_cpu_map);
-		}
-
-		for_each_cpu(j, pr->shared_cpu_map) {
-			if (i == j)
-				continue;
-
-			match_pr = all_cpu_data[j];
-			match_pr->shared_type = pr->shared_type;
-			cpumask_copy(match_pr->shared_cpu_map,
-				     pr->shared_cpu_map);
-		}
+		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
 	}
-	goto out;
 
-err_ret:
-	for_each_possible_cpu(i) {
-		pr = all_cpu_data[i];
+	return 0;
 
-		/* Assume no coordination on any error parsing domain info */
-		cpumask_clear(pr->shared_cpu_map);
-		cpumask_set_cpu(i, pr->shared_cpu_map);
-		pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-	}
-out:
-	free_cpumask_var(covered_cpus);
-	return retval;
+err_fault:
+	/* Assume no coordination on any error parsing domain info */
+	cpumask_clear(cpu_data->shared_cpu_map);
+	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
+
+	return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(acpi_get_psd_map);
drivers/acpi/processor_perflib.c

@@ -616,7 +616,6 @@ int acpi_processor_preregister_performance(
 			continue;
 
 		pr->performance = per_cpu_ptr(performance, i);
-		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
 		pdomain = &(pr->performance->domain_info);
 		if (acpi_processor_get_psd(pr->handle, pdomain)) {
 			retval = -EINVAL;
@@ -710,7 +709,7 @@ err_ret:
 		if (retval) {
 			cpumask_clear(pr->performance->shared_cpu_map);
 			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
-			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
 		}
 		pr->performance = NULL; /* Will be set for real in register */
 	}
drivers/cpufreq/cppc_cpufreq.c

@@ -30,13 +30,13 @@
 #define DMI_PROCESSOR_MAX_SPEED		0x14
 
 /*
- * These structs contain information parsed from per CPU
- * ACPI _CPC structures.
- * e.g. For each CPU the highest, lowest supported
- * performance capabilities, desired performance level
- * requested etc.
+ * This list contains information parsed from per CPU ACPI _CPC and _PSD
+ * structures: e.g. the highest and lowest supported performance, capabilities,
+ * desired performance, level requested etc. Depending on the share_type, not
+ * all CPUs will have an entry in the list.
 */
-static struct cppc_cpudata **all_cpu_data;
+static LIST_HEAD(cpu_data_list);
 
 static bool boost_supported;
 
 struct cppc_workaround_oem_info {
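The array-to-list move hinges on the kernel's intrusive list: the list_head lives inside each cppc_cpudata, so entries can be allocated one CPU at a time instead of pre-sizing an array for every possible CPU. A minimal userspace sketch of the same pattern (the helpers below are simplified stand-ins for <linux/list.h>, and struct cpu_data is illustrative):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's <linux/list.h> helpers. */
struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Illustrative analogue of cppc_cpudata: the list node is embedded. */
struct cpu_data {
	int cpu;
	struct list_head node;
};

static struct list_head cpu_data_list = LIST_HEAD_INIT(cpu_data_list);

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct cpu_data *d = calloc(1, sizeof(*d));

		d->cpu = i;
		list_add(&d->node, &cpu_data_list);	/* one entry per CPU */
	}

	/* Walk the list the way list_for_each_entry() would. */
	for (struct list_head *p = cpu_data_list.next; p != &cpu_data_list; p = p->next)
		printf("cpu%d\n", container_of(p, struct cpu_data, node)->cpu);

	return 0;
}

Embedding the node keeps allocation and membership in one object, which is what later lets cppc_cpufreq_stop_cpu() free a single CPU's data with list_del() plus kfree().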
@@ -148,8 +148,10 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
 static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 				   unsigned int target_freq,
 				   unsigned int relation)
+
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+	struct cppc_cpudata *cpu_data = policy->driver_data;
+	unsigned int cpu = policy->cpu;
 	struct cpufreq_freqs freqs;
 	u32 desired_perf;
 	int ret = 0;
@@ -164,12 +166,12 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 	freqs.new = target_freq;
 
 	cpufreq_freq_transition_begin(policy, &freqs);
-	ret = cppc_set_perf(cpu_data->cpu, &cpu_data->perf_ctrls);
+	ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
 	cpufreq_freq_transition_end(policy, &freqs, ret != 0);
 
 	if (ret)
 		pr_debug("Failed to set target on CPU:%d. ret:%d\n",
-			 cpu_data->cpu, ret);
+			 cpu, ret);
 
 	return ret;
 }
@@ -182,7 +184,7 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
 
 static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+	struct cppc_cpudata *cpu_data = policy->driver_data;
 	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
 	unsigned int cpu = policy->cpu;
 	int ret;
@@ -193,6 +195,12 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
 	if (ret)
 		pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
 			 caps->lowest_perf, cpu, ret);
+
+	/* Remove CPU node from list and free driver data for policy */
+	free_cpumask_var(cpu_data->shared_cpu_map);
+	list_del(&cpu_data->node);
+	kfree(policy->driver_data);
+	policy->driver_data = NULL;
 }
 
 /*
@@ -238,25 +246,61 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
 }
 #endif
 
-static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
-	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
-	unsigned int cpu = policy->cpu;
-	int ret = 0;
+	struct cppc_cpudata *cpu_data;
+	int ret;
 
-	cpu_data->cpu = cpu;
-	ret = cppc_get_perf_caps(cpu, caps);
+	cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
+	if (!cpu_data)
+		goto out;
+
+	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
+		goto free_cpu;
+
+	ret = acpi_get_psd_map(cpu, cpu_data);
 	if (ret) {
-		pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
-			 cpu, ret);
-		return ret;
+		pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
+		goto free_mask;
+	}
+
+	ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
+	if (ret) {
+		pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
+		goto free_mask;
 	}
 
 	/* Convert the lowest and nominal freq from MHz to KHz */
-	caps->lowest_freq *= 1000;
-	caps->nominal_freq *= 1000;
+	cpu_data->perf_caps.lowest_freq *= 1000;
+	cpu_data->perf_caps.nominal_freq *= 1000;
+
+	list_add(&cpu_data->node, &cpu_data_list);
+
+	return cpu_data;
+
+free_mask:
+	free_cpumask_var(cpu_data->shared_cpu_map);
+free_cpu:
+	kfree(cpu_data);
+out:
+	return NULL;
+}
+
+static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int cpu = policy->cpu;
+	struct cppc_cpudata *cpu_data;
+	struct cppc_perf_caps *caps;
+	int ret;
+
+	cpu_data = cppc_cpufreq_get_cpu_data(cpu);
+	if (!cpu_data) {
+		pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
+		return -ENODEV;
+	}
+	caps = &cpu_data->perf_caps;
+	policy->driver_data = cpu_data;
 
 	/*
 	 * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
@@ -280,26 +324,25 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
 	policy->shared_type = cpu_data->shared_type;
 
-	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		int i;
-
+	switch (policy->shared_type) {
+	case CPUFREQ_SHARED_TYPE_HW:
+	case CPUFREQ_SHARED_TYPE_NONE:
+		/* Nothing to be done - we'll have a policy for each CPU */
+		break;
+	case CPUFREQ_SHARED_TYPE_ANY:
+		/*
+		 * All CPUs in the domain will share a policy and all cpufreq
+		 * operations will use a single cppc_cpudata structure stored
+		 * in policy->driver_data.
+		 */
 		cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
-
-		for_each_cpu(i, policy->cpus) {
-			if (unlikely(i == cpu))
-				continue;
-
-			memcpy(&all_cpu_data[i]->perf_caps, caps,
-			       sizeof(cpu_data->perf_caps));
-		}
-	} else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
-		/* Support only SW_ANY for now. */
-		pr_debug("Unsupported CPU co-ord type\n");
+		break;
+	default:
+		pr_debug("Unsupported CPU co-ord type: %d\n",
+			 policy->shared_type);
 		return -EFAULT;
 	}
 
-	cpu_data->cur_policy = policy;
-
 	/*
 	 * If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
 	 * is supported.
@@ -354,9 +397,12 @@ static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
 static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 {
 	struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
-	struct cppc_cpudata *cpu_data = all_cpu_data[cpu];
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cppc_cpudata *cpu_data = policy->driver_data;
 	int ret;
 
+	cpufreq_cpu_put(policy);
+
 	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
 	if (ret)
 		return ret;
@@ -372,7 +418,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
 
 static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+	struct cppc_cpudata *cpu_data = policy->driver_data;
 	struct cppc_perf_caps *caps = &cpu_data->perf_caps;
 	int ret;
 
@@ -396,6 +442,19 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
 	return 0;
 }
 
+static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
+{
+	struct cppc_cpudata *cpu_data = policy->driver_data;
+
+	return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
+}
+cpufreq_freq_attr_ro(freqdomain_cpus);
+
+static struct freq_attr *cppc_cpufreq_attr[] = {
+	&freqdomain_cpus,
+	NULL,
+};
+
 static struct cpufreq_driver cppc_cpufreq_driver = {
 	.flags = CPUFREQ_CONST_LOOPS,
 	.verify = cppc_verify_policy,
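With cppc_cpufreq_attr hooked into the driver below, each policy directory gains a freqdomain_cpus file. A small userspace sketch of reading it (the cpu0 path is illustrative, and per the documentation change above the file only exists while acpi-cpufreq or cppc-cpufreq is in use):

#include <stdio.h>

int main(void)
{
	/* Illustrative path; any CPU in the policy exposes the attribute. */
	const char *path = "/sys/devices/system/cpu/cpu0/cpufreq/freqdomain_cpus";
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");	/* e.g. neither driver is in use */
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("CPUs in cpu0's frequency domain: %s", buf);
	fclose(f);
	return 0;
}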
@@ -404,6 +463,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
 	.init = cppc_cpufreq_cpu_init,
 	.stop_cpu = cppc_cpufreq_stop_cpu,
 	.set_boost = cppc_cpufreq_set_boost,
+	.attr = cppc_cpufreq_attr,
 	.name = "cppc_cpufreq",
 };
 
@@ -415,10 +475,13 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
  */
 static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
 {
-	struct cppc_cpudata *cpu_data = all_cpu_data[cpu];
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cppc_cpudata *cpu_data = policy->driver_data;
 	u64 desired_perf;
 	int ret;
 
+	cpufreq_cpu_put(policy);
+
 	ret = cppc_get_desired_perf(cpu, &desired_perf);
 	if (ret < 0)
 		return -EIO;
@@ -451,68 +514,33 @@ static void cppc_check_hisi_workaround(void)
 
 static int __init cppc_cpufreq_init(void)
 {
-	struct cppc_cpudata *cpu_data;
-	int i, ret = 0;
-
-	if (acpi_disabled)
+	if ((acpi_disabled) || !acpi_cpc_valid())
 		return -ENODEV;
 
-	all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
-			       GFP_KERNEL);
-	if (!all_cpu_data)
-		return -ENOMEM;
-
-	for_each_possible_cpu(i) {
-		all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
-		if (!all_cpu_data[i])
-			goto out;
-
-		cpu_data = all_cpu_data[i];
-		if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
-			goto out;
-	}
-
-	ret = acpi_get_psd_map(all_cpu_data);
-	if (ret) {
-		pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
-		goto out;
-	}
+	INIT_LIST_HEAD(&cpu_data_list);
 
 	cppc_check_hisi_workaround();
 
-	ret = cpufreq_register_driver(&cppc_cpufreq_driver);
-	if (ret)
-		goto out;
+	return cpufreq_register_driver(&cppc_cpufreq_driver);
+}
 
-	return ret;
+static inline void free_cpu_data(void)
+{
+	struct cppc_cpudata *iter, *tmp;
 
-out:
-	for_each_possible_cpu(i) {
-		cpu_data = all_cpu_data[i];
-		if (!cpu_data)
-			break;
-		free_cpumask_var(cpu_data->shared_cpu_map);
-		kfree(cpu_data);
+	list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
+		free_cpumask_var(iter->shared_cpu_map);
+		list_del(&iter->node);
+		kfree(iter);
 	}
 
-	kfree(all_cpu_data);
-	return -ENODEV;
 }
 
 static void __exit cppc_cpufreq_exit(void)
 {
-	struct cppc_cpudata *cpu_data;
-	int i;
-
 	cpufreq_unregister_driver(&cppc_cpufreq_driver);
 
-	for_each_possible_cpu(i) {
-		cpu_data = all_cpu_data[i];
-		free_cpumask_var(cpu_data->shared_cpu_map);
-		kfree(cpu_data);
-	}
-
-	kfree(all_cpu_data);
+	free_cpu_data();
 }
 
 module_exit(cppc_cpufreq_exit);
drivers/cpufreq/cpufreq.c

@@ -2097,6 +2097,46 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
 
+/**
+ * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
+ * @cpu: Target CPU.
+ * @min_perf: Minimum (required) performance level (units of @capacity).
+ * @target_perf: Target (desired) performance level (units of @capacity).
+ * @capacity: Capacity of the target CPU.
+ *
+ * Carry out a fast performance level switch of @cpu without sleeping.
+ *
+ * The driver's ->adjust_perf() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select a suitable performance level equal to or above
+ * @min_perf and preferably equal to or below @target_perf.
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same CPU and that it will never be called in
+ * parallel with either ->target() or ->target_index() or ->fast_switch() for
+ * the same CPU.
+ */
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+				unsigned long min_perf,
+				unsigned long target_perf,
+				unsigned long capacity)
+{
+	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
+}
+
+/**
+ * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
+ *
+ * Return 'true' if the ->adjust_perf callback is present for the
+ * current driver or 'false' otherwise.
+ */
+bool cpufreq_driver_has_adjust_perf(void)
+{
+	return !!cpufreq_driver->adjust_perf;
+}
+
 /* Must set freqs->new to intermediate frequency */
 static int __target_intermediate(struct cpufreq_policy *policy,
 				 struct cpufreq_freqs *freqs, int index)
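From a governor's point of view the new entry points form a pair: probe for ->adjust_perf() once, then call cpufreq_driver_adjust_perf() on the fast path under the documented no-reentrancy rules. A sketch of that calling convention (demo_governor_update() is illustrative and not part of this patch; schedutil further below is the real user):

#include <linux/cpufreq.h>

/*
 * Sketch only: how a governor is expected to use the new API pair.
 * All perf values are in units of @capacity, and the caller must honour
 * the no-reentrancy rules from the kernel-doc above.
 */
static void demo_governor_update(unsigned int cpu, unsigned long min_perf,
				 unsigned long target_perf,
				 unsigned long capacity)
{
	/* Drivers without ->adjust_perf() keep the frequency-based path. */
	if (!cpufreq_driver_has_adjust_perf())
		return;		/* fall back to cpufreq_driver_fast_switch() */

	cpufreq_driver_adjust_perf(cpu, min_perf, target_perf, capacity);
}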
drivers/cpufreq/intel_pstate.c

@@ -2207,9 +2207,9 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
 					    unsigned int policy_min,
 					    unsigned int policy_max)
 {
-	int max_freq = intel_pstate_get_max_freq(cpu);
 	int32_t max_policy_perf, min_policy_perf;
 	int max_state, turbo_max;
+	int max_freq;
 
 	/*
 	 * HWP needs some special consideration, because on BDX the
@@ -2223,6 +2223,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
 			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
 		turbo_max = cpu->pstate.turbo_pstate;
 	}
+	max_freq = max_state * cpu->pstate.scaling;
 
 	max_policy_perf = max_state * policy_max / max_freq;
 	if (policy_max == policy_min) {
@@ -2325,9 +2326,18 @@ static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
 					   struct cpufreq_policy_data *policy)
 {
+	int max_freq;
+
 	update_turbo_state();
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     intel_pstate_get_max_freq(cpu));
+	if (hwp_active) {
+		int max_state, turbo_max;
+
+		intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+		max_freq = max_state * cpu->pstate.scaling;
+	} else {
+		max_freq = intel_pstate_get_max_freq(cpu);
+	}
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
 
 	intel_pstate_adjust_policy_max(cpu, policy);
 }
@@ -2526,20 +2536,19 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
 		fp_toint(cpu->iowait_boost * 100));
 }
 
-static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
-				     bool strict, bool fast_switch)
+static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
+				     u32 desired, bool fast_switch)
 {
 	u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
 
 	value &= ~HWP_MIN_PERF(~0L);
-	value |= HWP_MIN_PERF(target_pstate);
+	value |= HWP_MIN_PERF(min);
 
-	/*
-	 * The entire MSR needs to be updated in order to update the HWP min
-	 * field in it, so opportunistically update the max too if needed.
-	 */
 	value &= ~HWP_MAX_PERF(~0L);
-	value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
+	value |= HWP_MAX_PERF(max);
+
+	value &= ~HWP_DESIRED_PERF(~0L);
+	value |= HWP_DESIRED_PERF(desired);
 
 	if (value == prev)
 		return;
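All three fields live in one MSR (IA32_HWP_REQUEST), which is why the whole cached value is rewritten each time. A quick userspace check of the field packing (the shifts mirror the HWP_*_PERF macros in msr-index.h; treat the layout here as an assumption of this sketch):

#include <stdio.h>
#include <stdint.h>

/* Assumed field layout of IA32_HWP_REQUEST, as in the HWP_*_PERF macros:
 * min perf in bits 0-7, max perf in bits 8-15, desired perf in bits 16-23.
 */
#define HWP_MIN_PERF(x)		((x) & 0xff)
#define HWP_MAX_PERF(x)		(((x) & 0xff) << 8)
#define HWP_DESIRED_PERF(x)	(((x) & 0xff) << 16)

int main(void)
{
	uint64_t value = 0;

	/* Hypothetical P-state ratios: min 8, max 32, desired 20. */
	value |= HWP_MIN_PERF(8);
	value |= HWP_MAX_PERF(32);
	value |= HWP_DESIRED_PERF(20);

	/* Prints 0x142008: 0x14 desired, 0x20 max, 0x08 min. */
	printf("IA32_HWP_REQUEST = %#llx\n", (unsigned long long)value);
	return 0;
}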
@@ -2569,11 +2578,15 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
 	int old_pstate = cpu->pstate.current_pstate;
 
 	target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
-	if (hwp_active)
-		intel_cpufreq_adjust_hwp(cpu, target_pstate,
-					 policy->strict_target, fast_switch);
-	else if (target_pstate != old_pstate)
+	if (hwp_active) {
+		int max_pstate = policy->strict_target ?
+					target_pstate : cpu->max_perf_ratio;
+
+		intel_cpufreq_adjust_hwp(cpu, target_pstate, max_pstate, 0,
+					 fast_switch);
+	} else if (target_pstate != old_pstate) {
 		intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+	}
 
 	cpu->pstate.current_pstate = target_pstate;
@@ -2634,6 +2647,47 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	return target_pstate * cpu->pstate.scaling;
 }
 
+static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+				      unsigned long min_perf,
+				      unsigned long target_perf,
+				      unsigned long capacity)
+{
+	struct cpudata *cpu = all_cpu_data[cpunum];
+	int old_pstate = cpu->pstate.current_pstate;
+	int cap_pstate, min_pstate, max_pstate, target_pstate;
+
+	update_turbo_state();
+	cap_pstate = global.turbo_disabled ? cpu->pstate.max_pstate :
+					     cpu->pstate.turbo_pstate;
+
+	/* Optimization: Avoid unnecessary divisions. */
+
+	target_pstate = cap_pstate;
+	if (target_perf < capacity)
+		target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
+
+	min_pstate = cap_pstate;
+	if (min_perf < capacity)
+		min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
+
+	if (min_pstate < cpu->pstate.min_pstate)
+		min_pstate = cpu->pstate.min_pstate;
+
+	if (min_pstate < cpu->min_perf_ratio)
+		min_pstate = cpu->min_perf_ratio;
+
+	max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+	if (max_pstate < min_pstate)
+		max_pstate = min_pstate;
+
+	target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
+
+	intel_cpufreq_adjust_hwp(cpu, min_pstate, max_pstate, target_pstate, true);
+
+	cpu->pstate.current_pstate = target_pstate;
+	intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+}
+
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	int max_state, turbo_max, min_freq, max_freq, ret;
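The scaling above maps capacity-scale values onto P-state ratios, rounding up so the result never undershoots the request. A quick standalone check with hypothetical numbers (capacity scale 1024, turbo ratio 40):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical numbers: capacity scale 1024, turbo P-state 40. */
	unsigned long capacity = 1024, cap_pstate = 40;
	unsigned long target_perf = 512;	/* half of capacity */

	/* Same mapping as intel_cpufreq_adjust_perf(); rounding up keeps
	 * the chosen P-state from falling below the requested level. */
	printf("%lu\n", DIV_ROUND_UP(cap_pstate * target_perf, capacity)); /* 20 */
	return 0;
}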
@@ -3032,6 +3086,8 @@ static int __init intel_pstate_init(void)
 			intel_pstate.attr = hwp_cpufreq_attrs;
 			intel_cpufreq.attr = hwp_cpufreq_attrs;
 			intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
+			intel_cpufreq.fast_switch = NULL;
+			intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
 			if (!default_driver)
 				default_driver = &intel_pstate;
 
include/acpi/cppc_acpi.h

@@ -124,11 +124,10 @@ struct cppc_perf_fb_ctrs {
 
 /* Per CPU container for runtime CPPC management. */
 struct cppc_cpudata {
-	int cpu;
+	struct list_head node;
 	struct cppc_perf_caps perf_caps;
 	struct cppc_perf_ctrls perf_ctrls;
 	struct cppc_perf_fb_ctrs perf_fb_ctrs;
-	struct cpufreq_policy *cur_policy;
 	unsigned int shared_type;
 	cpumask_var_t shared_cpu_map;
 };
@@ -137,7 +136,8 @@ extern int cppc_get_desired_perf(int cpunum, u64 *desired_perf);
 extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs);
 extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
 extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
-extern int acpi_get_psd_map(struct cppc_cpudata **);
+extern bool acpi_cpc_valid(void);
+extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
 extern unsigned int cppc_get_transition_latency(int cpu);
 extern bool cpc_ffh_supported(void);
 extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
include/linux/cpufreq.h

@@ -320,6 +320,15 @@ struct cpufreq_driver {
 					unsigned int index);
 	unsigned int	(*fast_switch)(struct cpufreq_policy *policy,
 				       unsigned int target_freq);
+	/*
+	 * ->fast_switch() replacement for drivers that use an internal
+	 * representation of performance levels and can pass hints other than
+	 * the target performance level to the hardware.
+	 */
+	void		(*adjust_perf)(unsigned int cpu,
+				       unsigned long min_perf,
+				       unsigned long target_perf,
+				       unsigned long capacity);
 
 	/*
 	 * Caches and returns the lowest driver-supported frequency greater than
@@ -588,6 +597,11 @@ struct cpufreq_governor {
 /* Pass a target to the cpufreq driver */
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq);
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+				unsigned long min_perf,
+				unsigned long target_perf,
+				unsigned long capacity);
+bool cpufreq_driver_has_adjust_perf(void);
 int cpufreq_driver_target(struct cpufreq_policy *policy,
 			  unsigned int target_freq,
 			  unsigned int relation);
include/linux/sched/cpufreq.h

@@ -28,6 +28,11 @@ static inline unsigned long map_util_freq(unsigned long util,
 {
 	return (freq + (freq >> 2)) * util / cap;
 }
+
+static inline unsigned long map_util_perf(unsigned long util)
+{
+	return util + (util >> 2);
+}
 #endif /* CONFIG_CPU_FREQ */
 
 #endif /* _LINUX_SCHED_CPUFREQ_H */
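map_util_perf() applies the same 1.25x headroom that map_util_freq() applies on the frequency path, just without the frequency conversion. A standalone numeric check:

#include <stdio.h>

/* Same body as the kernel helper: util * 1.25 without a division. */
static unsigned long map_util_perf(unsigned long util)
{
	return util + (util >> 2);
}

int main(void)
{
	/* A CPU at utilization 400 (capacity scale 1024) requests perf
	 * level 500, leaving ~25% headroom until the next update. */
	printf("%lu\n", map_util_perf(400));	/* prints 500 */
	return 0;
}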
kernel/sched/cpufreq_schedutil.c

@@ -53,6 +53,7 @@ struct sugov_cpu {
 	unsigned int		iowait_boost;
 	u64			last_update;
 
+	unsigned long		util;
 	unsigned long		bw_dl;
 	unsigned long		max;
 
@@ -276,16 +277,15 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 	return min(max, util);
 }
 
-static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+static void sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
-	unsigned long util = cpu_util_cfs(rq);
 	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
 
+	sg_cpu->max = max;
 	sg_cpu->bw_dl = cpu_bw_dl(rq);
-
-	return schedutil_cpu_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL, NULL);
+	sg_cpu->util = schedutil_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
+					  FREQUENCY_UTIL, NULL);
 }
 
 /**
@@ -362,8 +362,6 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
  * sugov_iowait_apply() - Apply the IO boost to a CPU.
  * @sg_cpu: the sugov data for the cpu to boost
  * @time: the update time from the caller
- * @util: the utilization to (eventually) boost
- * @max: the maximum value the utilization can be boosted to
  *
  * A CPU running a task which woken up after an IO operation can have its
  * utilization boosted to speed up the completion of those IO operations.
@@ -377,18 +375,17 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
 * This mechanism is designed to boost high frequently IO waiting tasks, while
 * being more conservative on tasks which does sporadic IO operations.
 */
-static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
-					unsigned long util, unsigned long max)
+static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
 {
 	unsigned long boost;
 
 	/* No boost currently required */
 	if (!sg_cpu->iowait_boost)
-		return util;
+		return;
 
 	/* Reset boost if the CPU appears to have been idle enough */
 	if (sugov_iowait_reset(sg_cpu, time, false))
-		return util;
+		return;
 
 	if (!sg_cpu->iowait_boost_pending) {
 		/*
@@ -397,18 +394,19 @@ static unsigned long sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
 		sg_cpu->iowait_boost >>= 1;
 		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
 			sg_cpu->iowait_boost = 0;
-			return util;
+			return;
 		}
 	}
 
 	sg_cpu->iowait_boost_pending = false;
 
 	/*
-	 * @util is already in capacity scale; convert iowait_boost
+	 * sg_cpu->util is already in capacity scale; convert iowait_boost
 	 * into the same scale so we can compare.
 	 */
-	boost = (sg_cpu->iowait_boost * max) >> SCHED_CAPACITY_SHIFT;
-	return max(boost, util);
+	boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
+	if (sg_cpu->util < boost)
+		sg_cpu->util = boost;
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
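iowait_boost is tracked in SCHED_CAPACITY units (0..1024) regardless of the CPU's actual capacity, so it is rescaled before the comparison. A quick numeric check (hypothetical mid-size CPU with capacity 800):

#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10	/* capacity scale is 1024 */

int main(void)
{
	/* Hypothetical values: boost at half scale, CPU capacity 800. */
	unsigned long iowait_boost = 512, max = 800, util = 300;

	/* Same conversion as sugov_iowait_apply(): 512 * 800 >> 10 = 400. */
	unsigned long boost = (iowait_boost * max) >> SCHED_CAPACITY_SHIFT;

	if (util < boost)
		util = boost;	/* sg_cpu->util is raised, never lowered */

	printf("boosted util = %lu\n", util);	/* prints 400 */
	return 0;
}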
@@ -434,14 +432,10 @@ static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_p
 		sg_policy->limits_changed = true;
 }
 
-static void sugov_update_single(struct update_util_data *hook, u64 time,
-				unsigned int flags)
+static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
+					      u64 time, unsigned int flags)
 {
-	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-	unsigned long util, max;
-	unsigned int next_f;
-	unsigned int cached_freq = sg_policy->cached_raw_freq;
 
 	sugov_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
@@ -449,12 +443,26 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	ignore_dl_rate_limit(sg_cpu, sg_policy);
 
 	if (!sugov_should_update_freq(sg_policy, time))
+		return false;
+
+	sugov_get_util(sg_cpu);
+	sugov_iowait_apply(sg_cpu, time);
+
+	return true;
+}
+
+static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
+				     unsigned int flags)
+{
+	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+	unsigned int cached_freq = sg_policy->cached_raw_freq;
+	unsigned int next_f;
+
+	if (!sugov_update_single_common(sg_cpu, time, flags))
 		return;
 
-	util = sugov_get_util(sg_cpu);
-	max = sg_cpu->max;
-	util = sugov_iowait_apply(sg_cpu, time, util, max);
-	next_f = get_next_freq(sg_policy, util, max);
+	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
 	/*
 	 * Do not reduce the frequency if the CPU has not been idle
 	 * recently, as the reduction is likely to be premature then.
@@ -480,6 +488,38 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 	}
 }
 
+static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
+				     unsigned int flags)
+{
+	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+	unsigned long prev_util = sg_cpu->util;
+
+	/*
+	 * Fall back to the "frequency" path if frequency invariance is not
+	 * supported, because the direct mapping between the utilization and
+	 * the performance levels depends on the frequency invariance.
+	 */
+	if (!arch_scale_freq_invariant()) {
+		sugov_update_single_freq(hook, time, flags);
+		return;
+	}
+
+	if (!sugov_update_single_common(sg_cpu, time, flags))
+		return;
+
+	/*
+	 * Do not reduce the target performance level if the CPU has not been
+	 * idle recently, as the reduction is likely to be premature then.
+	 */
+	if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
+		sg_cpu->util = prev_util;
+
+	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
+				   map_util_perf(sg_cpu->util), sg_cpu->max);
+
+	sg_cpu->sg_policy->last_freq_update_time = time;
+}
+
 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 {
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
@@ -491,9 +531,10 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
 		unsigned long j_util, j_max;
 
-		j_util = sugov_get_util(j_sg_cpu);
+		sugov_get_util(j_sg_cpu);
+		sugov_iowait_apply(j_sg_cpu, time);
+		j_util = j_sg_cpu->util;
 		j_max = j_sg_cpu->max;
-		j_util = sugov_iowait_apply(j_sg_cpu, time, j_util, j_max);
 
 		if (j_util * max > j_max * util) {
 			util = j_util;
@@ -817,6 +858,7 @@ static void sugov_exit(struct cpufreq_policy *policy)
 static int sugov_start(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;
+	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
 	unsigned int cpu;
 
 	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
@@ -836,13 +878,17 @@ static int sugov_start(struct cpufreq_policy *policy)
 		sg_cpu->sg_policy = sg_policy;
 	}
 
+	if (policy_is_shared(policy))
+		uu = sugov_update_shared;
+	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
+		uu = sugov_update_single_perf;
+	else
+		uu = sugov_update_single_freq;
+
 	for_each_cpu(cpu, policy->cpus) {
 		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
-		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-					     policy_is_shared(policy) ?
-							sugov_update_shared :
-							sugov_update_single);
+		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
 	}
 	return 0;
 }