Merge branches 'pm-cpufreq' and 'pm-devfreq'
* pm-cpufreq:
  cpufreq: CPPC: Correct desired_perf calculation
  cpufreq: conservative: Fix next frequency selection
  cpufreq: skip invalid entries when searching the frequency
  cpufreq: intel_pstate: Fix struct pstate_adjust_policy kerneldoc
  cpufreq: intel_pstate: Proportional algorithm for Atom
  cpufreq: intel_pstate: Clarify comment in get_target_pstate_use_performance()
  cpufreq: intel_pstate: Fix unsafe HWP MSR access

* pm-devfreq:
  PM / devfreq: Skip status update on uninitialized previous_freq
  PM / devfreq: Add proper locking around list_del()
  PM / devfreq: exynos-nocp: Remove redundant code
  PM / devfreq: exynos-nocp: Select REGMAP_MMIO
commit 383731d98e
@@ -80,11 +80,17 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
 {
     struct cpudata *cpu;
     struct cpufreq_freqs freqs;
+    u32 desired_perf;
     int ret = 0;
 
     cpu = all_cpu_data[policy->cpu];
 
-    cpu->perf_ctrls.desired_perf = (u64)target_freq * policy->max / cppc_dmi_max_khz;
+    desired_perf = (u64)target_freq * cpu->perf_caps.highest_perf / cppc_dmi_max_khz;
+    /* Return if it is exactly the same perf */
+    if (desired_perf == cpu->perf_ctrls.desired_perf)
+        return ret;
+
+    cpu->perf_ctrls.desired_perf = desired_perf;
     freqs.old = policy->cur;
     freqs.new = target_freq;
 
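The corrected line maps the requested frequency onto the CPPC abstract performance scale by multiplying it by the CPU's highest_perf capability, whereas the old line multiplied by policy->max, which is itself a frequency in kHz rather than a performance level, so the result landed on the wrong scale. A minimal standalone sketch of the new arithmetic; the variable names mirror the patch, but the numbers are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Example values only, not taken from any real platform. */
        uint64_t cppc_dmi_max_khz = 3000000;   /* max frequency reported by DMI, in kHz */
        uint64_t highest_perf = 300;           /* CPPC abstract performance at that frequency */
        uint64_t target_freq = 1500000;        /* frequency requested by the governor, in kHz */

        /* Corrected calculation: scale by the highest abstract performance level. */
        uint64_t desired_perf = target_freq * highest_perf / cppc_dmi_max_khz;

        printf("desired_perf = %llu\n", (unsigned long long)desired_perf);  /* prints 150 */
        return 0;
    }

Asking for half of the maximum frequency thus yields half of highest_perf, which is the unit the CPPC desired-performance control works in.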
@@ -17,6 +17,7 @@
 struct cs_policy_dbs_info {
     struct policy_dbs_info policy_dbs;
     unsigned int down_skip;
+    unsigned int requested_freq;
 };
 
 static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
@@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 {
     struct policy_dbs_info *policy_dbs = policy->governor_data;
     struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
+    unsigned int requested_freq = dbs_info->requested_freq;
     struct dbs_data *dbs_data = policy_dbs->dbs_data;
     struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
     unsigned int load = dbs_update(policy);
@@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
     if (cs_tuners->freq_step == 0)
         goto out;
 
+    /*
+     * If requested_freq is out of range, it is likely that the limits
+     * changed in the meantime, so fall back to current frequency in that
+     * case.
+     */
+    if (requested_freq > policy->max || requested_freq < policy->min)
+        requested_freq = policy->cur;
+
     /* Check for frequency increase */
     if (load > dbs_data->up_threshold) {
-        unsigned int requested_freq = policy->cur;
-
         dbs_info->down_skip = 0;
 
         /* if we are already at full speed then break out early */
@@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
             goto out;
 
         requested_freq += get_freq_target(cs_tuners, policy);
+        if (requested_freq > policy->max)
+            requested_freq = policy->max;
 
         __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+        dbs_info->requested_freq = requested_freq;
         goto out;
     }
 
@@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 
     /* Check for frequency decrease */
     if (load < cs_tuners->down_threshold) {
-        unsigned int freq_target, requested_freq = policy->cur;
+        unsigned int freq_target;
         /*
          * if we cannot reduce the frequency anymore, break out early
          */
@@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
             requested_freq = policy->min;
 
         __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+        dbs_info->requested_freq = requested_freq;
     }
 
 out:
@@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_policy *policy)
     struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
 
     dbs_info->down_skip = 0;
+    dbs_info->requested_freq = policy->cur;
 }
 
 static struct dbs_governor cs_governor = {
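Taken together, these hunks make the conservative governor remember the frequency it last asked for in dbs_info->requested_freq instead of re-deriving it from policy->cur on every sample, falling back to policy->cur only when the stored value has drifted outside the current policy limits. That matters because __cpufreq_driver_target() may round the request to whatever table frequency the driver supports, so restarting from policy->cur each time could keep requesting the same value and never actually step. A toy userspace model of the stepping logic, with an assumed fixed step size in place of get_freq_target() and invented threshold numbers:

    #include <stdio.h>

    struct policy { unsigned int min, max, cur; };

    /* One governor sample: returns the frequency to request; the caller stores it
     * and passes it back in next time (the dbs_info->requested_freq role). */
    static unsigned int cs_sample(struct policy *p, unsigned int requested,
                                  unsigned int load, unsigned int up_threshold,
                                  unsigned int down_threshold, unsigned int freq_step)
    {
        /* If the limits changed in the meantime, fall back to the current frequency. */
        if (requested > p->max || requested < p->min)
            requested = p->cur;

        if (load > up_threshold) {                 /* frequency increase */
            requested += freq_step;
            if (requested > p->max)
                requested = p->max;
        } else if (load < down_threshold) {        /* frequency decrease */
            requested = (requested >= p->min + freq_step) ?
                            requested - freq_step : p->min;
        }
        return requested;
    }

    int main(void)
    {
        struct policy p = { .min = 800000, .max = 2400000, .cur = 800000 };
        unsigned int requested = p.cur;

        for (int i = 0; i < 5; i++) {
            requested = cs_sample(&p, requested, 95, 80, 20, 400000);
            printf("sample %d: requested = %u kHz\n", i, requested);
        }
        return 0;
    }

Under sustained high load the request steps up by one freq_step per sample until it reaches the policy maximum and then stays clamped there, which mirrors the stepping behaviour the hunks above are about.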
@@ -225,7 +225,7 @@ struct cpudata {
 static struct cpudata **all_cpu_data;
 
 /**
- * struct pid_adjust_policy - Stores static PID configuration data
+ * struct pstate_adjust_policy - Stores static PID configuration data
  * @sample_rate_ms: PID calculation sample rate in ms
  * @sample_rate_ns: Sample rate calculation in ns
  * @deadband: PID deadband
@@ -562,12 +562,12 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
     int min, hw_min, max, hw_max, cpu, range, adj_range;
     u64 value, cap;
 
-    rdmsrl(MSR_HWP_CAPABILITIES, cap);
+    for_each_cpu(cpu, cpumask) {
+        rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
         hw_min = HWP_LOWEST_PERF(cap);
         hw_max = HWP_HIGHEST_PERF(cap);
         range = hw_max - hw_min;
 
-    for_each_cpu(cpu, cpumask) {
         rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
         adj_range = limits->min_perf_pct * range / 100;
         min = hw_min + adj_range;
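One line of background on the HWP hunk: rdmsrl() reads an MSR on whichever CPU the code happens to be running on, while rdmsrl_on_cpu() reads it on the CPU given as its first argument, so moving the MSR_HWP_CAPABILITIES read inside the loop and switching helpers makes each CPU's limits come from that CPU's own capabilities register rather than from the CPU that executed intel_pstate_hwp_set().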
@@ -1232,6 +1232,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 {
     struct sample *sample = &cpu->sample;
     int32_t busy_frac, boost;
+    int target, avg_pstate;
 
     busy_frac = div_fp(sample->mperf, sample->tsc);
 
@@ -1242,7 +1243,26 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
         busy_frac = boost;
 
     sample->busy_scaled = busy_frac * 100;
-    return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
+
+    target = limits->no_turbo || limits->turbo_disabled ?
+            cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
+    target += target >> 2;
+    target = mul_fp(target, busy_frac);
+    if (target < cpu->pstate.min_pstate)
+        target = cpu->pstate.min_pstate;
+
+    /*
+     * If the average P-state during the previous cycle was higher than the
+     * current target, add 50% of the difference to the target to reduce
+     * possible performance oscillations and offset possible performance
+     * loss related to moving the workload from one CPU to another within
+     * a package/module.
+     */
+    avg_pstate = get_avg_pstate(cpu);
+    if (avg_pstate > target)
+        target += (avg_pstate - target) >> 1;
+
+    return target;
 }
 
 static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
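The removed return statement handed control to the PID controller; the added block is a purely proportional policy: take 125% of the highest available P-state, scale it by the measured busy fraction, floor it at the minimum P-state, then move it halfway toward the previous cycle's average P-state if that average was higher. A self-contained sketch of the arithmetic with invented numbers; the two fixed-point helpers imitate an 8-fractional-bit format and are written here from scratch, not copied from the driver:

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_BITS 8    /* 8 fractional bits for this sketch's fixed-point values */

    static int32_t mul_fp(int32_t x, int32_t y) { return ((int64_t)x * y) >> FRAC_BITS; }
    static int32_t div_fp(int64_t x, int64_t y) { return (x << FRAC_BITS) / y; }

    int main(void)
    {
        int turbo_pstate = 24, min_pstate = 6;  /* example P-state range */
        int avg_pstate = 20;                    /* example average P-state over the last cycle */
        int64_t mperf = 600, tsc = 1000;        /* example counters: core busy ~60% of the time */

        int32_t busy_frac = div_fp(mperf, tsc); /* 153/256, roughly 0.6 */

        int target = turbo_pstate;
        target += target >> 2;                  /* +25%: 24 -> 30 */
        target = mul_fp(target, busy_frac);     /* 30 * ~0.6 -> 17 */
        if (target < min_pstate)
            target = min_pstate;

        /* Pull the target halfway toward last cycle's average if that was higher. */
        if (avg_pstate > target)
            target += (avg_pstate - target) >> 1;   /* 17 + (20 - 17) / 2 -> 18 */

        printf("busy_frac = %d/256, target P-state = %d\n", busy_frac, target);
        return 0;
    }

The 25% headroom means roughly 80% utilization is already enough for the formula to request the top P-state, and the averaging step damps oscillations when work migrates between CPUs in a package, as the comment added by the hunk explains.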
@@ -1251,10 +1271,11 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
     u64 duration_ns;
 
     /*
-     * perf_scaled is the average performance during the last sampling
-     * period scaled by the ratio of the maximum P-state to the P-state
-     * requested last time (in percent). That measures the system's
-     * response to the previous P-state selection.
+     * perf_scaled is the ratio of the average P-state during the last
+     * sampling period to the P-state requested last time (in percent).
+     *
+     * That measures the system's response to the previous P-state
+     * selection.
      */
     max_pstate = cpu->pstate.max_pstate_physical;
     current_pstate = cpu->pstate.current_pstate;
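To put an illustrative number on the reworded comment: if the previously requested P-state was 24 and the average P-state actually sustained over the sampling period was 18, then perf_scaled is 100 * 18 / 24 = 75, meaning the CPU delivered 75% of what was asked for, and that percentage is the input the PID controller reacts to on the next selection.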
@@ -137,6 +137,10 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
 
     cur_time = jiffies;
 
+    /* Immediately exit if previous_freq is not initialized yet. */
+    if (!devfreq->previous_freq)
+        goto out;
+
     prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
     if (prev_lev < 0) {
         ret = prev_lev;
@@ -594,17 +598,19 @@ struct devfreq *devfreq_add_device(struct device *dev,
     if (devfreq->governor)
         err = devfreq->governor->event_handler(devfreq,
                         DEVFREQ_GOV_START, NULL);
-    mutex_unlock(&devfreq_list_lock);
     if (err) {
         dev_err(dev, "%s: Unable to start governor for the device\n",
             __func__);
         goto err_init;
     }
+    mutex_unlock(&devfreq_list_lock);
 
     return devfreq;
 
 err_init:
     list_del(&devfreq->node);
+    mutex_unlock(&devfreq_list_lock);
+
     device_unregister(&devfreq->dev);
 err_out:
     return ERR_PTR(err);
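The second devfreq hunk is about lock coverage on the error path: previously devfreq_list_lock was dropped immediately after the governor-start attempt, so the list_del() under err_init: unlinked the node from devfreq_list without the lock held. A generic userspace sketch of the corrected pattern (POSIX threads, nothing devfreq-specific; names are invented for the example):

    #include <pthread.h>
    #include <stdio.h>

    /* Toy stand-ins for the device list and its lock. */
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    struct node { struct node *next; };
    static struct node *list_head;

    static int start_governor(struct node *n) { (void)n; return -1; /* pretend it fails */ }

    static int add_device(struct node *n)
    {
        int err;

        pthread_mutex_lock(&list_lock);
        n->next = list_head;            /* publish the node on the shared list */
        list_head = n;

        err = start_governor(n);
        if (err) {
            list_head = n->next;        /* error path: unlink under the same lock */
            pthread_mutex_unlock(&list_lock);
            return err;
        }
        pthread_mutex_unlock(&list_lock);

        return 0;
    }

    int main(void)
    {
        struct node n = { 0 };
        printf("add_device() -> %d\n", add_device(&n));
        return 0;
    }

The rule the patch enforces is simply that every modification of the shared list, including the one that undoes a failed registration, happens while the list lock is held.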
@@ -17,6 +17,7 @@ config DEVFREQ_EVENT_EXYNOS_NOCP
     tristate "EXYNOS NoC (Network On Chip) Probe DEVFREQ event Driver"
     depends on ARCH_EXYNOS || COMPILE_TEST
     select PM_OPP
+    select REGMAP_MMIO
     help
       This add the devfreq-event driver for Exynos SoC. It provides NoC
       (Network on Chip) Probe counters to measure the bandwidth of AXI bus.
@@ -176,9 +176,6 @@ static int exynos_nocp_get_event(struct devfreq_event_dev *edev,
     return 0;
 
 out:
-    edata->load_count = 0;
-    edata->total_count = 0;
-
     dev_err(nocp->dev, "Failed to read the counter of NoC probe device\n");
 
     return ret;
@@ -639,19 +639,19 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy,
                                               unsigned int target_freq)
 {
     struct cpufreq_frequency_table *table = policy->freq_table;
+    struct cpufreq_frequency_table *pos, *best = table - 1;
     unsigned int freq;
-    int i, best = -1;
 
-    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-        freq = table[i].frequency;
+    cpufreq_for_each_valid_entry(pos, table) {
+        freq = pos->frequency;
 
         if (freq >= target_freq)
-            return i;
+            return pos - table;
 
-        best = i;
+        best = pos;
     }
 
-    return best;
+    return best - table;
 }
 
 /* Find lowest freq at or above target in a table in descending order */
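This hunk and the ones that follow all apply the same two-part idiom: walk the table with cpufreq_for_each_valid_entry() so entries marked invalid are skipped, and track the best candidate as a pointer whose "nothing found yet" sentinel is table - 1, converting back to an index by pointer subtraction on return. A standalone sketch of that idiom; the sentinel macro values and the table contents are invented for the example and are not the kernel's:

    #include <stdio.h>

    #define TABLE_END      0u       /* placeholder end-of-table marker for this sketch */
    #define ENTRY_INVALID  (~0u)    /* placeholder "skip this slot" marker for this sketch */

    struct freq_entry { unsigned int frequency; };

    /* Index of the lowest frequency at or above target in an ascending table. */
    static int find_index_al(const struct freq_entry *table, unsigned int target)
    {
        const struct freq_entry *pos, *best = table - 1;   /* sentinel: nothing found yet */

        for (pos = table; pos->frequency != TABLE_END; pos++) {
            if (pos->frequency == ENTRY_INVALID)           /* skip invalid entries */
                continue;

            if (pos->frequency >= target)
                return pos - table;

            best = pos;
        }

        return best - table;   /* -1 if no valid entry was ever recorded */
    }

    int main(void)
    {
        const struct freq_entry table[] = {
            { 400000 }, { ENTRY_INVALID }, { 800000 }, { 1200000 }, { TABLE_END },
        };

        printf("index for 600000 kHz: %d\n", find_index_al(table, 600000));   /* prints 2 */
        return 0;
    }

Because best starts at table - 1, the expression best - table plays the role of the old int best = -1 bookkeeping, which is why the patch rewrites the best == -1 checks as best == table - 1. (Forming table - 1 mirrors the kernel idiom here; strictly speaking it points one element before the array, but it is never dereferenced.)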
@@ -659,28 +659,28 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
                                               unsigned int target_freq)
 {
     struct cpufreq_frequency_table *table = policy->freq_table;
+    struct cpufreq_frequency_table *pos, *best = table - 1;
     unsigned int freq;
-    int i, best = -1;
 
-    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-        freq = table[i].frequency;
+    cpufreq_for_each_valid_entry(pos, table) {
+        freq = pos->frequency;
 
         if (freq == target_freq)
-            return i;
+            return pos - table;
 
         if (freq > target_freq) {
-            best = i;
+            best = pos;
             continue;
         }
 
         /* No freq found above target_freq */
-        if (best == -1)
-            return i;
+        if (best == table - 1)
+            return pos - table;
 
-        return best;
+        return best - pos;
     }
 
-    return best;
+    return best - pos;
 }
 
 /* Works only on sorted freq-tables */
@@ -700,28 +700,28 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
                                               unsigned int target_freq)
 {
     struct cpufreq_frequency_table *table = policy->freq_table;
+    struct cpufreq_frequency_table *pos, *best = table - 1;
     unsigned int freq;
-    int i, best = -1;
 
-    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-        freq = table[i].frequency;
+    cpufreq_for_each_valid_entry(pos, table) {
+        freq = pos->frequency;
 
         if (freq == target_freq)
-            return i;
+            return pos - table;
 
         if (freq < target_freq) {
-            best = i;
+            best = pos;
             continue;
         }
 
         /* No freq found below target_freq */
-        if (best == -1)
-            return i;
+        if (best == table - 1)
+            return pos - table;
 
-        return best;
+        return best - table;
     }
 
-    return best;
+    return best - table;
 }
 
 /* Find highest freq at or below target in a table in descending order */
@@ -729,19 +729,19 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
                                               unsigned int target_freq)
 {
     struct cpufreq_frequency_table *table = policy->freq_table;
+    struct cpufreq_frequency_table *pos, *best = table - 1;
     unsigned int freq;
-    int i, best = -1;
 
-    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-        freq = table[i].frequency;
+    cpufreq_for_each_valid_entry(pos, table) {
+        freq = pos->frequency;
 
         if (freq <= target_freq)
-            return i;
+            return pos - table;
 
-        best = i;
+        best = pos;
     }
 
-    return best;
+    return best - table;
 }
 
 /* Works only on sorted freq-tables */
@@ -761,32 +761,32 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
                                               unsigned int target_freq)
 {
     struct cpufreq_frequency_table *table = policy->freq_table;
+    struct cpufreq_frequency_table *pos, *best = table - 1;
     unsigned int freq;
-    int i, best = -1;
 
-    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-        freq = table[i].frequency;
+    cpufreq_for_each_valid_entry(pos, table) {
+        freq = pos->frequency;
 
         if (freq == target_freq)
-            return i;
+            return pos - table;
 
         if (freq < target_freq) {
-            best = i;
+            best = pos;
             continue;
         }
 
         /* No freq found below target_freq */
-        if (best == -1)
-            return i;
+        if (best == table - 1)
+            return pos - table;
 
         /* Choose the closest freq */
-        if (target_freq - table[best].frequency > freq - target_freq)
-            return i;
+        if (target_freq - best->frequency > freq - target_freq)
+            return pos - table;
 
-        return best;
+        return best - table;
     }
 
-    return best;
+    return best - table;
 }
 
 /* Find closest freq to target in a table in descending order */
@@ -794,32 +794,32 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
                                               unsigned int target_freq)
 {
     struct cpufreq_frequency_table *table = policy->freq_table;
+    struct cpufreq_frequency_table *pos, *best = table - 1;
     unsigned int freq;
-    int i, best = -1;
 
-    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
-        freq = table[i].frequency;
+    cpufreq_for_each_valid_entry(pos, table) {
+        freq = pos->frequency;
 
         if (freq == target_freq)
-            return i;
+            return pos - table;
 
         if (freq > target_freq) {
-            best = i;
+            best = pos;
             continue;
         }
 
         /* No freq found above target_freq */
-        if (best == -1)
-            return i;
+        if (best == table - 1)
+            return pos - table;
 
         /* Choose the closest freq */
-        if (table[best].frequency - target_freq > target_freq - freq)
-            return i;
+        if (best->frequency - target_freq > target_freq - freq)
+            return pos - table;
 
-        return best;
+        return best - table;
     }
 
-    return best;
+    return best - table;
 }
 
 /* Works only on sorted freq-tables */