Merge branch 'pm-cpufreq'

* pm-cpufreq:
  cpufreq: s5pv210-cpufreq: fix wrong do_div() usage
  MAINTAINERS: update for intel P-state driver
  cpufreq: governor: Quit work-handlers early if governor is stopped
  intel_pstate: decrease number of "HWP enabled" messages
  cpufreq: arm_big_little: fix frequency check when bL switcher is active
Rafael J. Wysocki 2015-11-07 01:30:49 +01:00
commit 1f47b0ddf3
5 changed files with 46 additions and 26 deletions

MAINTAINERS

@@ -5440,7 +5440,8 @@ S: Supported
F: drivers/idle/intel_idle.c
INTEL PSTATE DRIVER
M: Kristen Carlson Accardi <kristen@linux.intel.com>
M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
M: Len Brown <lenb@kernel.org>
L: linux-pm@vger.kernel.org
S: Supported
F: drivers/cpufreq/intel_pstate.c

drivers/cpufreq/arm_big_little.c

@@ -149,6 +149,19 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
__func__, cpu, old_cluster, new_cluster, new_rate);
ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
if (!ret) {
/*
* FIXME: clk_set_rate hasn't returned an error here however it
* may be that clk_change_rate failed due to hardware or
* firmware issues and wasn't able to report that due to the
* current design of the clk core layer. To work around this
* problem we will read back the clock rate and check it is
* correct. This needs to be removed once clk core is fixed.
*/
if (clk_get_rate(clk[new_cluster]) != new_rate * 1000)
ret = -EIO;
}
if (WARN_ON(ret)) {
pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
new_cluster);
@@ -189,15 +202,6 @@ bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
mutex_unlock(&cluster_lock[old_cluster]);
}
/*
* FIXME: clk_set_rate has to handle the case where clk_change_rate
* can fail due to hardware or firmware issues. Until the clk core
* layer is fixed, we can check here. In most of the cases we will
* be reading only the cached value anyway. This needs to be removed
* once clk core is fixed.
*/
if (bL_cpufreq_get_rate(cpu) != new_rate)
return -EIO;
return 0;
}
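
For reference, the workaround the FIXME describes -- treat a successful
clk_set_rate() as suspect and read the rate back -- can be phrased as a
small helper around the generic clk API. This is an illustrative sketch,
not code from this commit, and the helper name is made up:

    #include <linux/clk.h>
    #include <linux/errno.h>

    /* Hypothetical helper: set a clock rate, then verify it took effect. */
    static int clk_set_rate_checked(struct clk *clk, unsigned long rate)
    {
            int ret = clk_set_rate(clk, rate);

            if (ret)
                    return ret;

            /*
             * clk_set_rate() can return 0 even if the hardware or firmware
             * rejected the change, so compare against the rate read back.
             */
            return clk_get_rate(clk) == rate ? 0 : -EIO;
    }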

drivers/cpufreq/cpufreq_governor.c

@@ -171,10 +171,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
{
int i;
mutex_lock(&cpufreq_governor_lock);
if (!policy->governor_enabled)
goto out_unlock;
if (!all_cpus) {
/*
* Use raw_smp_processor_id() to avoid preemptible warnings.
@@ -188,9 +184,6 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
}
out_unlock:
mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);
@@ -229,13 +222,24 @@ static void dbs_timer(struct work_struct *work)
struct cpu_dbs_info *cdbs = container_of(work, struct cpu_dbs_info,
dwork.work);
struct cpu_common_dbs_info *shared = cdbs->shared;
struct cpufreq_policy *policy = shared->policy;
struct dbs_data *dbs_data = policy->governor_data;
struct cpufreq_policy *policy;
struct dbs_data *dbs_data;
unsigned int sampling_rate, delay;
bool modify_all = true;
mutex_lock(&shared->timer_mutex);
policy = shared->policy;
/*
* Governor might already be disabled and there is no point continuing
* with the work-handler.
*/
if (!policy)
goto unlock;
dbs_data = policy->governor_data;
if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
@@ -252,6 +256,7 @@ static void dbs_timer(struct work_struct *work)
delay = dbs_data->cdata->gov_dbs_timer(cdbs, dbs_data, modify_all);
gov_queue_work(dbs_data, policy, delay, modify_all);
unlock:
mutex_unlock(&shared->timer_mutex);
}
@@ -478,9 +483,17 @@ static int cpufreq_governor_stop(struct cpufreq_policy *policy,
if (!shared || !shared->policy)
return -EBUSY;
/*
* Work-handler must see this updated, as it should not proceed any
* further after governor is disabled. And so timer_mutex is taken while
* updating this value.
*/
mutex_lock(&shared->timer_mutex);
shared->policy = NULL;
mutex_unlock(&shared->timer_mutex);
gov_cancel_work(dbs_data, policy);
shared->policy = NULL;
mutex_destroy(&shared->timer_mutex);
return 0;
}
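
The synchronization the two comments above describe is a small stop
handshake: the stop path publishes "stopped" (shared->policy = NULL)
under timer_mutex, and the work handler re-reads the pointer under the
same mutex before touching anything, so a handler racing with governor
teardown quits early instead of dereferencing stale state. Stripped to
its essentials (names here are illustrative, not from the kernel):

    #include <linux/mutex.h>

    struct shared_state {
            struct mutex lock;
            void *obj;              /* set to NULL once stopped */
    };

    /* Work-handler side: re-check under the lock and quit early. */
    static void work_handler(struct shared_state *s)
    {
            mutex_lock(&s->lock);
            if (!s->obj)
                    goto unlock;    /* already stopped, nothing to do */
            /* ... safe to use s->obj here ... */
    unlock:
            mutex_unlock(&s->lock);
    }

    /* Stop side: publish the NULL under the same lock the handler takes. */
    static void stop(struct shared_state *s)
    {
            mutex_lock(&s->lock);
            s->obj = NULL;
            mutex_unlock(&s->lock);
            /* pending work can now be cancelled/flushed safely */
    }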

drivers/cpufreq/intel_pstate.c

@@ -684,8 +684,6 @@ static void __init intel_pstate_sysfs_expose_params(void)
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
pr_info("intel_pstate: HWP enabled\n");
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
}
@@ -1557,8 +1555,10 @@ static int __init intel_pstate_init(void)
if (!all_cpu_data)
return -ENOMEM;
if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
pr_info("intel_pstate: HWP enabled\n");
hwp_active++;
}
if (!hwp_active && hwp_only)
goto out;
@@ -1593,8 +1593,10 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "disable"))
no_load = 1;
if (!strcmp(str, "no_hwp"))
if (!strcmp(str, "no_hwp")) {
pr_info("intel_pstate: HWP disabled\n");
no_hwp = 1;
}
if (!strcmp(str, "force"))
force_load = 1;
if (!strcmp(str, "hwp_only"))

drivers/cpufreq/s5pv210-cpufreq.c

@@ -212,11 +212,11 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
/* Find current DRAM frequency */
tmp = s5pv210_dram_conf[ch].freq;
do_div(tmp, freq);
tmp /= freq;
tmp1 = s5pv210_dram_conf[ch].refresh;
do_div(tmp1, tmp);
tmp1 /= tmp;
__raw_writel(tmp1, reg);
}
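
Background on the fix: do_div(n, base) is only defined for a 64-bit
dividend -- it divides n in place and returns the remainder. The dividends
here are native unsigned long values, so the old calls were wrong (and
misbehave on 32-bit builds), while ordinary C division is exactly right.
A minimal sketch of the two forms, with made-up values:

    #include <linux/types.h>
    #include <asm/div64.h>          /* do_div() */

    static void div_examples(void)
    {
            u64 big = 10000000000ULL;       /* genuinely needs 64 bits */
            unsigned long small = 400000;
            u32 rem;

            /* Correct do_div() use: 64-bit dividend, divided in place. */
            rem = do_div(big, 1000);        /* big = big / 1000 */
            (void)rem;                      /* remainder, unused here */

            /*
             * For a native-width value, plain division is the right tool;
             * passing an unsigned long to do_div() is the bug fixed above.
             */
            small /= 1000;
    }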