2012-09-06 15:09:11 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 Freescale Semiconductor, Inc.
|
|
|
|
*
|
2014-08-28 13:52:24 +08:00
|
|
|
* Copyright (C) 2014 Linaro.
|
|
|
|
* Viresh Kumar <viresh.kumar@linaro.org>
|
|
|
|
*
|
2014-09-09 22:28:03 +08:00
|
|
|
* The OPP code in function set_target() is reused from
|
2012-09-06 15:09:11 +08:00
|
|
|
* drivers/cpufreq/omap-cpufreq.c
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
|
|
|
#include <linux/clk.h>
|
2013-09-11 01:59:46 +08:00
|
|
|
#include <linux/cpu.h>
|
2013-07-15 21:09:14 +08:00
|
|
|
#include <linux/cpu_cooling.h>
|
2012-09-06 15:09:11 +08:00
|
|
|
#include <linux/cpufreq.h>
|
2014-10-19 17:30:28 +08:00
|
|
|
#include <linux/cpufreq-dt.h>
|
2013-07-15 21:09:14 +08:00
|
|
|
#include <linux/cpumask.h>
|
2012-09-06 15:09:11 +08:00
|
|
|
#include <linux/err.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/of.h>
|
2013-09-20 05:03:52 +08:00
|
|
|
#include <linux/pm_opp.h>
|
2013-01-30 22:27:49 +08:00
|
|
|
#include <linux/platform_device.h>
|
2012-09-06 15:09:11 +08:00
|
|
|
#include <linux/regulator/consumer.h>
|
|
|
|
#include <linux/slab.h>
|
2013-07-15 21:09:14 +08:00
|
|
|
#include <linux/thermal.h>
|
2012-09-06 15:09:11 +08:00
|
|
|
|
2014-08-28 13:52:28 +08:00
|
|
|
/*
 * Per-policy driver state, allocated in cpufreq_init() and stored in
 * policy->driver_data; freed again in cpufreq_exit().
 */
struct private_data {
	struct device *cpu_dev;			/* device for policy->cpu */
	struct regulator *cpu_reg;		/* CPU supply; ERR_PTR() when no regulator is used */
	struct thermal_cooling_device *cdev;	/* cooling device registered from cpufreq_ready() */
	unsigned int voltage_tolerance; /* in percentage */
};
|
2012-09-06 15:09:11 +08:00
|
|
|
|
2015-08-07 19:59:16 +08:00
|
|
|
/*
 * sysfs attributes exported for every policy. Slot [1] is a placeholder
 * that cpufreq_init() fills with the boost-frequencies attribute when the
 * platform supports boost OPPs.
 */
static struct freq_attr *cpufreq_dt_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,   /* Extra space for boost-attr if required */
	NULL,
};
|
|
|
|
|
2014-09-09 22:28:03 +08:00
|
|
|
/*
 * set_target() - cpufreq ->target_index hook: switch this policy's CPUs to
 * the frequency at @index of the frequency table.
 *
 * The matching OPP supplies the target voltage. Changes are sequenced so
 * the CPU never runs out of spec: when scaling up, the voltage is raised
 * before the clock; when scaling down, it is lowered after the clock.
 * If no regulator is available (cpu_reg is an ERR_PTR), only the clock is
 * changed.
 *
 * Returns 0 on success or a negative errno.
 */
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = policy->freq_table;
	struct clk *cpu_clk = policy->clk;
	struct private_data *priv = policy->driver_data;
	struct device *cpu_dev = priv->cpu_dev;
	struct regulator *cpu_reg = priv->cpu_reg;
	unsigned long volt = 0, tol = 0;
	int volt_old = 0;
	unsigned int old_freq, new_freq;
	long freq_Hz, freq_exact;
	int ret;

	/*
	 * Ask the clock framework what rate we would actually get; fall back
	 * to the raw table frequency if the clock cannot answer (<= 0).
	 */
	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
	if (freq_Hz <= 0)
		freq_Hz = freq_table[index].frequency * 1000;

	freq_exact = freq_Hz;
	new_freq = freq_Hz / 1000;
	old_freq = clk_get_rate(cpu_clk) / 1000;

	if (!IS_ERR(cpu_reg)) {
		unsigned long opp_freq;

		/* OPP lookups must happen under RCU read lock. */
		rcu_read_lock();
		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
		if (IS_ERR(opp)) {
			rcu_read_unlock();
			dev_err(cpu_dev, "failed to find OPP for %ld\n",
				freq_Hz);
			return PTR_ERR(opp);
		}
		volt = dev_pm_opp_get_voltage(opp);
		opp_freq = dev_pm_opp_get_freq(opp);
		rcu_read_unlock();
		tol = volt * priv->voltage_tolerance / 100;
		volt_old = regulator_get_voltage(cpu_reg);
		dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
			opp_freq / 1000, volt);
	}

	dev_dbg(cpu_dev, "%u MHz, %d mV --> %u MHz, %ld mV\n",
		old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
		new_freq / 1000, volt ? volt / 1000 : -1);

	/* scaling up?  scale voltage before frequency */
	if (!IS_ERR(cpu_reg) && new_freq > old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage up: %d\n",
				ret);
			return ret;
		}
	}

	ret = clk_set_rate(cpu_clk, freq_exact);
	if (ret) {
		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
		/* Roll the voltage back if we already raised it above. */
		if (!IS_ERR(cpu_reg) && volt_old > 0)
			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
		return ret;
	}

	/* scaling down?  scale voltage after frequency */
	if (!IS_ERR(cpu_reg) && new_freq < old_freq) {
		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
		if (ret) {
			dev_err(cpu_dev, "failed to scale voltage down: %d\n",
				ret);
			/* Voltage change failed: restore the old clock rate. */
			clk_set_rate(cpu_clk, old_freq * 1000);
		}
	}

	return ret;
}
|
|
|
|
|
2014-08-28 13:52:30 +08:00
|
|
|
static int allocate_resources(int cpu, struct device **cdev,
|
2014-08-28 13:52:28 +08:00
|
|
|
struct regulator **creg, struct clk **cclk)
|
2012-09-06 15:09:11 +08:00
|
|
|
{
|
2014-08-28 13:52:28 +08:00
|
|
|
struct device *cpu_dev;
|
|
|
|
struct regulator *cpu_reg;
|
|
|
|
struct clk *cpu_clk;
|
|
|
|
int ret = 0;
|
2014-08-28 13:52:29 +08:00
|
|
|
char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
|
2012-09-06 15:09:11 +08:00
|
|
|
|
2014-08-28 13:52:30 +08:00
|
|
|
cpu_dev = get_cpu_device(cpu);
|
2013-09-11 01:59:46 +08:00
|
|
|
if (!cpu_dev) {
|
2014-08-28 13:52:30 +08:00
|
|
|
pr_err("failed to get cpu%d device\n", cpu);
|
2013-09-11 01:59:46 +08:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
2013-01-29 00:13:15 +08:00
|
|
|
|
2014-08-28 13:52:29 +08:00
|
|
|
/* Try "cpu0" for older DTs */
|
2014-08-28 13:52:30 +08:00
|
|
|
if (!cpu)
|
|
|
|
reg = reg_cpu0;
|
|
|
|
else
|
|
|
|
reg = reg_cpu;
|
2014-08-28 13:52:29 +08:00
|
|
|
|
|
|
|
try_again:
|
|
|
|
cpu_reg = regulator_get_optional(cpu_dev, reg);
|
cpufreq: cpufreq-cpu0: defer probe when regulator is not ready
With commit 1e4b545, regulator_get will now return -EPROBE_DEFER
when the cpu0-supply node is present, but the regulator is not yet
registered.
It is possible for this to occur when the regulator registration
by itself might be defered due to some dependent interface not yet
instantiated. For example: an regulator which uses I2C and GPIO might
need both systems available before proceeding, in this case, the
regulator might defer it's registration.
However, the cpufreq-cpu0 driver assumes that any un-successful
return result is equivalent of failure.
When the regulator_get returns failure other than -EPROBE_DEFER, it
makes sense to assume that supply node is not present and proceed
with the assumption that only clock control is necessary in the
platform.
With this change, we can now handle the following conditions:
a) cpu0-supply binding is not present, regulator_get will return
appropriate error result, resulting in cpufreq-cpu0 driver
controlling just the clock.
b) cpu0-supply binding is present, regulator_get returns
-EPROBE_DEFER, we retry resulting in cpufreq-cpu0 driver
registering later once the regulator is available.
c) cpu0-supply binding is present, regulator_get returns
-EPROBE_DEFER, however, regulator never registers, we retry until
cpufreq-cpu0 driver fails to register pointing at device tree
information bug. However, in this case, the fact that
cpufreq-cpu0 operates with clock only when the DT binding clearly
indicates need of a supply is a bug of it's own.
d) cpu0-supply gets an regulator at probe - cpufreq-cpu0 driver
controls both the clock and regulator
Signed-off-by: Nishanth Menon <nm@ti.com>
Acked-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2013-05-01 21:38:12 +08:00
|
|
|
if (IS_ERR(cpu_reg)) {
|
|
|
|
/*
|
2014-08-28 13:52:30 +08:00
|
|
|
* If cpu's regulator supply node is present, but regulator is
|
cpufreq: cpufreq-cpu0: defer probe when regulator is not ready
With commit 1e4b545, regulator_get will now return -EPROBE_DEFER
when the cpu0-supply node is present, but the regulator is not yet
registered.
It is possible for this to occur when the regulator registration
by itself might be defered due to some dependent interface not yet
instantiated. For example: an regulator which uses I2C and GPIO might
need both systems available before proceeding, in this case, the
regulator might defer it's registration.
However, the cpufreq-cpu0 driver assumes that any un-successful
return result is equivalent of failure.
When the regulator_get returns failure other than -EPROBE_DEFER, it
makes sense to assume that supply node is not present and proceed
with the assumption that only clock control is necessary in the
platform.
With this change, we can now handle the following conditions:
a) cpu0-supply binding is not present, regulator_get will return
appropriate error result, resulting in cpufreq-cpu0 driver
controlling just the clock.
b) cpu0-supply binding is present, regulator_get returns
-EPROBE_DEFER, we retry resulting in cpufreq-cpu0 driver
registering later once the regulator is available.
c) cpu0-supply binding is present, regulator_get returns
-EPROBE_DEFER, however, regulator never registers, we retry until
cpufreq-cpu0 driver fails to register pointing at device tree
information bug. However, in this case, the fact that
cpufreq-cpu0 operates with clock only when the DT binding clearly
indicates need of a supply is a bug of it's own.
d) cpu0-supply gets an regulator at probe - cpufreq-cpu0 driver
controls both the clock and regulator
Signed-off-by: Nishanth Menon <nm@ti.com>
Acked-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2013-05-01 21:38:12 +08:00
|
|
|
* not yet registered, we should try defering probe.
|
|
|
|
*/
|
|
|
|
if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
|
2014-08-28 13:52:30 +08:00
|
|
|
dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
|
|
|
|
cpu);
|
2014-08-28 13:52:28 +08:00
|
|
|
return -EPROBE_DEFER;
|
cpufreq: cpufreq-cpu0: defer probe when regulator is not ready
With commit 1e4b545, regulator_get will now return -EPROBE_DEFER
when the cpu0-supply node is present, but the regulator is not yet
registered.
It is possible for this to occur when the regulator registration
by itself might be defered due to some dependent interface not yet
instantiated. For example: an regulator which uses I2C and GPIO might
need both systems available before proceeding, in this case, the
regulator might defer it's registration.
However, the cpufreq-cpu0 driver assumes that any un-successful
return result is equivalent of failure.
When the regulator_get returns failure other than -EPROBE_DEFER, it
makes sense to assume that supply node is not present and proceed
with the assumption that only clock control is necessary in the
platform.
With this change, we can now handle the following conditions:
a) cpu0-supply binding is not present, regulator_get will return
appropriate error result, resulting in cpufreq-cpu0 driver
controlling just the clock.
b) cpu0-supply binding is present, regulator_get returns
-EPROBE_DEFER, we retry resulting in cpufreq-cpu0 driver
registering later once the regulator is available.
c) cpu0-supply binding is present, regulator_get returns
-EPROBE_DEFER, however, regulator never registers, we retry until
cpufreq-cpu0 driver fails to register pointing at device tree
information bug. However, in this case, the fact that
cpufreq-cpu0 operates with clock only when the DT binding clearly
indicates need of a supply is a bug of it's own.
d) cpu0-supply gets an regulator at probe - cpufreq-cpu0 driver
controls both the clock and regulator
Signed-off-by: Nishanth Menon <nm@ti.com>
Acked-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2013-05-01 21:38:12 +08:00
|
|
|
}
|
2014-08-28 13:52:29 +08:00
|
|
|
|
|
|
|
/* Try with "cpu-supply" */
|
|
|
|
if (reg == reg_cpu0) {
|
|
|
|
reg = reg_cpu;
|
|
|
|
goto try_again;
|
|
|
|
}
|
|
|
|
|
2014-10-19 17:30:29 +08:00
|
|
|
dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
|
|
|
|
cpu, PTR_ERR(cpu_reg));
|
cpufreq: cpufreq-cpu0: defer probe when regulator is not ready
With commit 1e4b545, regulator_get will now return -EPROBE_DEFER
when the cpu0-supply node is present, but the regulator is not yet
registered.
It is possible for this to occur when the regulator registration
by itself might be defered due to some dependent interface not yet
instantiated. For example: an regulator which uses I2C and GPIO might
need both systems available before proceeding, in this case, the
regulator might defer it's registration.
However, the cpufreq-cpu0 driver assumes that any un-successful
return result is equivalent of failure.
When the regulator_get returns failure other than -EPROBE_DEFER, it
makes sense to assume that supply node is not present and proceed
with the assumption that only clock control is necessary in the
platform.
With this change, we can now handle the following conditions:
a) cpu0-supply binding is not present, regulator_get will return
appropriate error result, resulting in cpufreq-cpu0 driver
controlling just the clock.
b) cpu0-supply binding is present, regulator_get returns
-EPROBE_DEFER, we retry resulting in cpufreq-cpu0 driver
registering later once the regulator is available.
c) cpu0-supply binding is present, regulator_get returns
-EPROBE_DEFER, however, regulator never registers, we retry until
cpufreq-cpu0 driver fails to register pointing at device tree
information bug. However, in this case, the fact that
cpufreq-cpu0 operates with clock only when the DT binding clearly
indicates need of a supply is a bug of it's own.
d) cpu0-supply gets an regulator at probe - cpufreq-cpu0 driver
controls both the clock and regulator
Signed-off-by: Nishanth Menon <nm@ti.com>
Acked-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2013-05-01 21:38:12 +08:00
|
|
|
}
|
|
|
|
|
2014-05-16 18:20:42 +08:00
|
|
|
cpu_clk = clk_get(cpu_dev, NULL);
|
2012-09-06 15:09:11 +08:00
|
|
|
if (IS_ERR(cpu_clk)) {
|
2014-08-28 13:52:28 +08:00
|
|
|
/* put regulator */
|
|
|
|
if (!IS_ERR(cpu_reg))
|
|
|
|
regulator_put(cpu_reg);
|
|
|
|
|
2012-09-06 15:09:11 +08:00
|
|
|
ret = PTR_ERR(cpu_clk);
|
2014-08-28 13:52:26 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If cpu's clk node is present, but clock is not yet
|
|
|
|
* registered, we should try defering probe.
|
|
|
|
*/
|
|
|
|
if (ret == -EPROBE_DEFER)
|
2014-08-28 13:52:30 +08:00
|
|
|
dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
|
2014-08-28 13:52:26 +08:00
|
|
|
else
|
2014-10-31 20:39:33 +08:00
|
|
|
dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
|
|
|
|
ret);
|
2014-08-28 13:52:28 +08:00
|
|
|
} else {
|
|
|
|
*cdev = cpu_dev;
|
|
|
|
*creg = cpu_reg;
|
|
|
|
*cclk = cpu_clk;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-09-09 22:28:03 +08:00
|
|
|
static int cpufreq_init(struct cpufreq_policy *policy)
|
2014-08-28 13:52:28 +08:00
|
|
|
{
|
|
|
|
struct cpufreq_frequency_table *freq_table;
|
|
|
|
struct device_node *np;
|
|
|
|
struct private_data *priv;
|
|
|
|
struct device *cpu_dev;
|
|
|
|
struct regulator *cpu_reg;
|
|
|
|
struct clk *cpu_clk;
|
2015-09-09 00:41:03 +08:00
|
|
|
struct dev_pm_opp *suspend_opp;
|
2014-10-24 21:05:55 +08:00
|
|
|
unsigned long min_uV = ~0, max_uV = 0;
|
2014-08-28 13:52:28 +08:00
|
|
|
unsigned int transition_latency;
|
2015-07-29 18:53:10 +08:00
|
|
|
bool need_update = false;
|
2014-08-28 13:52:28 +08:00
|
|
|
int ret;
|
|
|
|
|
2014-08-28 13:52:30 +08:00
|
|
|
ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
|
2014-08-28 13:52:28 +08:00
|
|
|
if (ret) {
|
2014-10-23 17:52:54 +08:00
|
|
|
pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
|
2014-08-28 13:52:28 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2014-08-28 13:52:26 +08:00
|
|
|
|
2014-08-28 13:52:28 +08:00
|
|
|
np = of_node_get(cpu_dev->of_node);
|
|
|
|
if (!np) {
|
|
|
|
dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu);
|
|
|
|
ret = -ENOENT;
|
|
|
|
goto out_put_reg_clk;
|
2012-09-06 15:09:11 +08:00
|
|
|
}
|
|
|
|
|
2015-07-29 18:53:10 +08:00
|
|
|
/* Get OPP-sharing information from "operating-points-v2" bindings */
|
2015-09-04 16:17:24 +08:00
|
|
|
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
|
2015-07-29 18:53:10 +08:00
|
|
|
if (ret) {
|
|
|
|
/*
|
|
|
|
* operating-points-v2 not supported, fallback to old method of
|
|
|
|
* finding shared-OPPs for backward compatibility.
|
|
|
|
*/
|
|
|
|
if (ret == -ENOENT)
|
|
|
|
need_update = true;
|
|
|
|
else
|
|
|
|
goto out_node_put;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize OPP tables for all policy->cpus. They will be shared by
|
|
|
|
* all CPUs which have marked their CPUs shared with OPP bindings.
|
|
|
|
*
|
|
|
|
* For platforms not using operating-points-v2 bindings, we do this
|
|
|
|
* before updating policy->cpus. Otherwise, we will end up creating
|
|
|
|
* duplicate OPPs for policy->cpus.
|
|
|
|
*
|
|
|
|
* OPPs might be populated at runtime, don't check for error here
|
|
|
|
*/
|
2015-09-04 16:17:24 +08:00
|
|
|
dev_pm_opp_of_cpumask_add_table(policy->cpus);
|
2015-07-29 18:53:10 +08:00
|
|
|
|
2015-09-02 17:06:48 +08:00
|
|
|
/*
|
|
|
|
* But we need OPP table to function so if it is not there let's
|
|
|
|
* give platform code chance to provide it for us.
|
|
|
|
*/
|
|
|
|
ret = dev_pm_opp_get_opp_count(cpu_dev);
|
|
|
|
if (ret <= 0) {
|
|
|
|
pr_debug("OPP table is not ready, deferring probe\n");
|
|
|
|
ret = -EPROBE_DEFER;
|
|
|
|
goto out_free_opp;
|
|
|
|
}
|
|
|
|
|
2015-07-29 18:53:10 +08:00
|
|
|
if (need_update) {
|
|
|
|
struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
|
|
|
|
|
|
|
|
if (!pd || !pd->independent_clocks)
|
|
|
|
cpumask_setall(policy->cpus);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* OPP tables are initialized only for policy->cpu, do it for
|
|
|
|
* others as well.
|
|
|
|
*/
|
2015-09-04 16:17:24 +08:00
|
|
|
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
|
2015-09-02 17:06:49 +08:00
|
|
|
if (ret)
|
|
|
|
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
|
|
|
|
__func__, ret);
|
2015-07-29 18:53:10 +08:00
|
|
|
|
|
|
|
of_property_read_u32(np, "clock-latency", &transition_latency);
|
|
|
|
} else {
|
|
|
|
transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
|
|
|
|
}
|
2012-09-06 15:09:11 +08:00
|
|
|
|
2014-08-28 13:52:28 +08:00
|
|
|
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
|
|
|
|
if (!priv) {
|
|
|
|
ret = -ENOMEM;
|
2014-11-25 18:34:21 +08:00
|
|
|
goto out_free_opp;
|
2012-09-06 15:09:11 +08:00
|
|
|
}
|
|
|
|
|
2014-08-28 13:52:28 +08:00
|
|
|
of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
|
2012-09-06 15:09:11 +08:00
|
|
|
|
2015-07-29 18:53:10 +08:00
|
|
|
if (!transition_latency)
|
2012-09-06 15:09:11 +08:00
|
|
|
transition_latency = CPUFREQ_ETERNAL;
|
|
|
|
|
2013-09-26 17:19:37 +08:00
|
|
|
if (!IS_ERR(cpu_reg)) {
|
2014-10-24 21:05:55 +08:00
|
|
|
unsigned long opp_freq = 0;
|
2012-09-06 15:09:11 +08:00
|
|
|
|
|
|
|
/*
|
2014-10-24 21:05:55 +08:00
|
|
|
* Disable any OPPs where the connected regulator isn't able to
|
|
|
|
* provide the specified voltage and record minimum and maximum
|
|
|
|
* voltage levels.
|
2012-09-06 15:09:11 +08:00
|
|
|
*/
|
2014-10-24 21:05:55 +08:00
|
|
|
while (1) {
|
|
|
|
struct dev_pm_opp *opp;
|
|
|
|
unsigned long opp_uV, tol_uV;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
|
|
|
|
if (IS_ERR(opp)) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
opp_uV = dev_pm_opp_get_voltage(opp);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
tol_uV = opp_uV * priv->voltage_tolerance / 100;
|
2015-09-02 17:06:50 +08:00
|
|
|
if (regulator_is_supported_voltage(cpu_reg,
|
|
|
|
opp_uV - tol_uV,
|
2014-10-24 21:05:55 +08:00
|
|
|
opp_uV + tol_uV)) {
|
|
|
|
if (opp_uV < min_uV)
|
|
|
|
min_uV = opp_uV;
|
|
|
|
if (opp_uV > max_uV)
|
|
|
|
max_uV = opp_uV;
|
|
|
|
} else {
|
|
|
|
dev_pm_opp_disable(cpu_dev, opp_freq);
|
|
|
|
}
|
|
|
|
|
|
|
|
opp_freq++;
|
|
|
|
}
|
|
|
|
|
2012-09-06 15:09:11 +08:00
|
|
|
ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
|
|
|
|
if (ret > 0)
|
|
|
|
transition_latency += ret * 1000;
|
|
|
|
}
|
|
|
|
|
2014-10-24 21:05:55 +08:00
|
|
|
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
|
|
|
|
if (ret) {
|
|
|
|
pr_err("failed to init cpufreq table: %d\n", ret);
|
|
|
|
goto out_free_priv;
|
|
|
|
}
|
|
|
|
|
2014-08-28 13:52:28 +08:00
|
|
|
priv->cpu_dev = cpu_dev;
|
|
|
|
priv->cpu_reg = cpu_reg;
|
|
|
|
policy->driver_data = priv;
|
|
|
|
|
|
|
|
policy->clk = cpu_clk;
|
2015-09-09 00:41:03 +08:00
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
|
|
|
|
if (suspend_opp)
|
|
|
|
policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2014-10-19 17:30:28 +08:00
|
|
|
ret = cpufreq_table_validate_and_show(policy, freq_table);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
|
|
|
|
ret);
|
2014-11-27 08:37:52 +08:00
|
|
|
goto out_free_cpufreq_table;
|
2015-07-29 18:53:11 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Support turbo/boost mode */
|
|
|
|
if (policy_has_boost_freq(policy)) {
|
|
|
|
/* This gets disabled by core on driver unregister */
|
|
|
|
ret = cpufreq_enable_boost_support();
|
|
|
|
if (ret)
|
|
|
|
goto out_free_cpufreq_table;
|
2015-08-07 19:59:16 +08:00
|
|
|
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
|
2014-10-19 17:30:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
policy->cpuinfo.transition_latency = transition_latency;
|
|
|
|
|
2014-09-26 21:33:46 +08:00
|
|
|
of_node_put(np);
|
|
|
|
|
2012-09-06 15:09:11 +08:00
|
|
|
return 0;
|
|
|
|
|
2014-11-27 08:37:52 +08:00
|
|
|
out_free_cpufreq_table:
|
2013-09-20 05:03:50 +08:00
|
|
|
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
|
2014-10-24 21:05:55 +08:00
|
|
|
out_free_priv:
|
|
|
|
kfree(priv);
|
2014-11-25 18:34:21 +08:00
|
|
|
out_free_opp:
|
2015-09-04 16:17:24 +08:00
|
|
|
dev_pm_opp_of_cpumask_remove_table(policy->cpus);
|
2015-07-29 18:53:10 +08:00
|
|
|
out_node_put:
|
2014-08-28 13:52:28 +08:00
|
|
|
of_node_put(np);
|
|
|
|
out_put_reg_clk:
|
2014-08-28 13:52:25 +08:00
|
|
|
clk_put(cpu_clk);
|
2014-05-16 18:20:42 +08:00
|
|
|
if (!IS_ERR(cpu_reg))
|
|
|
|
regulator_put(cpu_reg);
|
2014-08-28 13:52:28 +08:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-09-09 22:28:03 +08:00
|
|
|
static int cpufreq_exit(struct cpufreq_policy *policy)
|
2014-08-28 13:52:28 +08:00
|
|
|
{
|
|
|
|
struct private_data *priv = policy->driver_data;
|
|
|
|
|
2015-02-04 02:21:21 +08:00
|
|
|
cpufreq_cooling_unregister(priv->cdev);
|
2014-08-28 13:52:28 +08:00
|
|
|
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
|
2015-09-04 16:17:24 +08:00
|
|
|
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
|
2014-08-28 13:52:28 +08:00
|
|
|
clk_put(policy->clk);
|
|
|
|
if (!IS_ERR(priv->cpu_reg))
|
|
|
|
regulator_put(priv->cpu_reg);
|
|
|
|
kfree(priv);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-11-27 08:37:52 +08:00
|
|
|
static void cpufreq_ready(struct cpufreq_policy *policy)
|
|
|
|
{
|
|
|
|
struct private_data *priv = policy->driver_data;
|
|
|
|
struct device_node *np = of_node_get(priv->cpu_dev->of_node);
|
|
|
|
|
|
|
|
if (WARN_ON(!np))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For now, just loading the cooling device;
|
|
|
|
* thermal DT code takes care of matching them.
|
|
|
|
*/
|
|
|
|
if (of_find_property(np, "#cooling-cells", NULL)) {
|
2015-11-17 20:06:22 +08:00
|
|
|
u32 power_coefficient = 0;
|
|
|
|
|
|
|
|
of_property_read_u32(np, "dynamic-power-coefficient",
|
|
|
|
&power_coefficient);
|
|
|
|
|
|
|
|
priv->cdev = of_cpufreq_power_cooling_register(np,
|
|
|
|
policy->related_cpus, power_coefficient, NULL);
|
2014-11-27 08:37:52 +08:00
|
|
|
if (IS_ERR(priv->cdev)) {
|
|
|
|
dev_err(priv->cpu_dev,
|
|
|
|
"running cpufreq without cooling device: %ld\n",
|
|
|
|
PTR_ERR(priv->cdev));
|
|
|
|
|
|
|
|
priv->cdev = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
of_node_put(np);
|
|
|
|
}
|
|
|
|
|
2014-09-09 22:28:03 +08:00
|
|
|
/*
 * cpufreq driver operations. Uses the generic table-based helpers for
 * verify/get/suspend; init/exit/ready/target_index are implemented above.
 */
static struct cpufreq_driver dt_cpufreq_driver = {
	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.verify = cpufreq_generic_frequency_table_verify,
	.target_index = set_target,
	.get = cpufreq_generic_get,
	.init = cpufreq_init,
	.exit = cpufreq_exit,
	.ready = cpufreq_ready,
	.name = "cpufreq-dt",
	.attr = cpufreq_dt_attr,
	.suspend = cpufreq_generic_suspend,
};
|
|
|
|
|
2014-09-09 22:28:03 +08:00
|
|
|
static int dt_cpufreq_probe(struct platform_device *pdev)
|
2014-08-28 13:52:28 +08:00
|
|
|
{
|
|
|
|
struct device *cpu_dev;
|
|
|
|
struct regulator *cpu_reg;
|
|
|
|
struct clk *cpu_clk;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* All per-cluster (CPUs sharing clock/voltages) initialization is done
|
|
|
|
* from ->init(). In probe(), we just need to make sure that clk and
|
|
|
|
* regulators are available. Else defer probe and retry.
|
|
|
|
*
|
|
|
|
* FIXME: Is checking this only for CPU0 sufficient ?
|
|
|
|
*/
|
2014-08-28 13:52:30 +08:00
|
|
|
ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
|
2014-08-28 13:52:28 +08:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
clk_put(cpu_clk);
|
|
|
|
if (!IS_ERR(cpu_reg))
|
|
|
|
regulator_put(cpu_reg);
|
|
|
|
|
2014-10-19 17:30:28 +08:00
|
|
|
dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
|
|
|
|
|
2014-09-09 22:28:03 +08:00
|
|
|
ret = cpufreq_register_driver(&dt_cpufreq_driver);
|
2014-08-28 13:52:28 +08:00
|
|
|
if (ret)
|
|
|
|
dev_err(cpu_dev, "failed register driver: %d\n", ret);
|
|
|
|
|
2012-09-06 15:09:11 +08:00
|
|
|
return ret;
|
|
|
|
}
|
2013-01-30 22:27:49 +08:00
|
|
|
|
2014-09-09 22:28:03 +08:00
|
|
|
/*
 * dt_cpufreq_remove() - platform driver remove: unregister the cpufreq
 * driver; per-policy teardown runs via the ->exit() callback.
 */
static int dt_cpufreq_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&dt_cpufreq_driver);
	return 0;
}
|
|
|
|
|
2014-09-09 22:28:03 +08:00
|
|
|
/*
 * Platform driver glue: the "cpufreq-dt" platform device is created by
 * platform code (see the MODULE_ALIAS below for module autoloading).
 */
static struct platform_driver dt_cpufreq_platdrv = {
	.driver = {
		.name = "cpufreq-dt",
	},
	.probe = dt_cpufreq_probe,
	.remove = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");
|