Merge branch 'pm-cpufreq'
* pm-cpufreq: (37 commits)
  cpufreq: Add Tegra186 cpufreq driver
  cpufreq: imx6q: Fix error handling code
  cpufreq: imx6q: Set max suspend_freq to avoid changes during suspend
  cpufreq: imx6q: Fix handling EPROBE_DEFER from regulator
  cpufreq: schedutil: Use policy-dependent transition delays
  cpufreq: schedutil: Reduce frequencies slower
  cpufreq: intel_pstate: Add support for Gemini Lake
  cpufreq: intel_pstate: Eliminate intel_pstate_get_min_max()
  cpufreq: intel_pstate: Do not walk policy->cpus
  cpufreq: intel_pstate: Introduce pid_in_use()
  cpufreq: intel_pstate: Drop struct cpu_defaults
  cpufreq: intel_pstate: Move cpu_defaults definitions
  cpufreq: intel_pstate: Add update_util callback to pstate_funcs
  cpufreq: intel_pstate: Use different utilization update callbacks
  cpufreq: intel_pstate: Modify check in intel_pstate_update_status()
  cpufreq: intel_pstate: Drop driver_registered variable
  cpufreq: intel_pstate: Skip unnecessary PID resets on init
  cpufreq: intel_pstate: Set HWP sampling interval once
  cpufreq: intel_pstate: Clean up intel_pstate_busy_pid_reset()
  cpufreq: intel_pstate: Fold intel_pstate_reset_all_pid() into the caller
  ...
commit 0807ee0f52
@@ -3463,6 +3463,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
T: git git://git.linaro.org/people/vireshk/linux.git (For ARM Updates)
B: https://bugzilla.kernel.org
F: Documentation/cpu-freq/
F: Documentation/devicetree/bindings/cpufreq/
F: drivers/cpufreq/
F: include/linux/cpufreq.h
F: tools/testing/selftests/cpufreq/
@@ -1189,11 +1189,6 @@
                        status = "disabled";
                };

                cpufreq-cooling {
                        compatible = "stericsson,db8500-cpufreq-cooling";
                        status = "disabled";
                };

                mcde@a0350000 {
                        compatible = "stericsson,mcde";
                        reg = <0xa0350000 0x1000>, /* MCDE */
@@ -247,6 +247,12 @@ config ARM_TEGRA124_CPUFREQ
        help
          This adds the CPUFreq driver support for Tegra124 SOCs.

config ARM_TEGRA186_CPUFREQ
        tristate "Tegra186 CPUFreq support"
        depends on ARCH_TEGRA && TEGRA_BPMP
        help
          This adds the CPUFreq driver support for Tegra186 SOCs.

config ARM_TI_CPUFREQ
        bool "Texas Instruments CPUFreq support"
        depends on ARCH_OMAP2PLUS
@@ -77,6 +77,7 @@ obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_STI_CPUFREQ) += sti-cpufreq.o
obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o
obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -18,6 +19,7 @@

static struct cpufreq_frequency_table *freq_table;
static struct clk *armss_clk;
static struct thermal_cooling_device *cdev;

static int dbx500_cpufreq_target(struct cpufreq_policy *policy,
                                 unsigned int index)
@@ -32,6 +34,22 @@ static int dbx500_cpufreq_init(struct cpufreq_policy *policy)
        return cpufreq_generic_init(policy, freq_table, 20 * 1000);
}

static int dbx500_cpufreq_exit(struct cpufreq_policy *policy)
{
        if (!IS_ERR(cdev))
                cpufreq_cooling_unregister(cdev);
        return 0;
}

static void dbx500_cpufreq_ready(struct cpufreq_policy *policy)
{
        cdev = cpufreq_cooling_register(policy->cpus);
        if (IS_ERR(cdev))
                pr_err("Failed to register cooling device %ld\n", PTR_ERR(cdev));
        else
                pr_info("Cooling device registered: %s\n", cdev->type);
}

static struct cpufreq_driver dbx500_cpufreq_driver = {
        .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
                        CPUFREQ_NEED_INITIAL_FREQ_CHECK,
@@ -39,6 +57,8 @@ static struct cpufreq_driver dbx500_cpufreq_driver = {
        .target_index = dbx500_cpufreq_target,
        .get = cpufreq_generic_get,
        .init = dbx500_cpufreq_init,
        .exit = dbx500_cpufreq_exit,
        .ready = dbx500_cpufreq_ready,
        .name = "DBX500",
        .attr = cpufreq_generic_attr,
};
@@ -161,8 +161,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)

static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
        int ret;

        policy->clk = arm_clk;
        return cpufreq_generic_init(policy, freq_table, transition_latency);
        ret = cpufreq_generic_init(policy, freq_table, transition_latency);
        policy->suspend_freq = policy->max;

        return ret;
}

static struct cpufreq_driver imx6q_cpufreq_driver = {
@@ -173,6 +178,7 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
        .init = imx6q_cpufreq_init,
        .name = "imx6q-cpufreq",
        .attr = cpufreq_generic_attr,
        .suspend = cpufreq_generic_suspend,
};

static int imx6q_cpufreq_probe(struct platform_device *pdev)
@@ -222,6 +228,13 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
        arm_reg = regulator_get(cpu_dev, "arm");
        pu_reg = regulator_get_optional(cpu_dev, "pu");
        soc_reg = regulator_get(cpu_dev, "soc");
        if (PTR_ERR(arm_reg) == -EPROBE_DEFER ||
                        PTR_ERR(soc_reg) == -EPROBE_DEFER ||
                        PTR_ERR(pu_reg) == -EPROBE_DEFER) {
                ret = -EPROBE_DEFER;
                dev_dbg(cpu_dev, "regulators not ready, defer\n");
                goto put_reg;
        }
        if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
                dev_err(cpu_dev, "failed to get regulators\n");
                ret = -ENOENT;
@@ -255,7 +268,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto put_reg;
                goto out_free_opp;
        }

        /* Make imx6_soc_volt array's size same as arm opp number */
File diff suppressed because it is too large
@@ -573,14 +573,33 @@ static struct platform_driver mt8173_cpufreq_platdrv = {
        .probe = mt8173_cpufreq_probe,
};

static int mt8173_cpufreq_driver_init(void)
/* List of machines supported by this driver */
static const struct of_device_id mt8173_cpufreq_machines[] __initconst = {
        { .compatible = "mediatek,mt817x", },
        { .compatible = "mediatek,mt8173", },
        { .compatible = "mediatek,mt8176", },

        { }
};

static int __init mt8173_cpufreq_driver_init(void)
{
        struct device_node *np;
        const struct of_device_id *match;
        struct platform_device *pdev;
        int err;

        if (!of_machine_is_compatible("mediatek,mt8173"))
        np = of_find_node_by_path("/");
        if (!np)
                return -ENODEV;

        match = of_match_node(mt8173_cpufreq_machines, np);
        of_node_put(np);
        if (!match) {
                pr_warn("Machine is not compatible with mt8173-cpufreq\n");
                return -ENODEV;
        }

        err = platform_driver_register(&mt8173_cpufreq_platdrv);
        if (err)
                return err;
@@ -52,17 +52,27 @@ static u32 get_bus_freq(void)
{
        struct device_node *soc;
        u32 sysfreq;
        struct clk *pltclk;
        int ret;

        /* get platform freq by searching bus-frequency property */
        soc = of_find_node_by_type(NULL, "soc");
        if (!soc)
                return 0;
        if (soc) {
                ret = of_property_read_u32(soc, "bus-frequency", &sysfreq);
                of_node_put(soc);
                if (!ret)
                        return sysfreq;
        }

        if (of_property_read_u32(soc, "bus-frequency", &sysfreq))
                sysfreq = 0;
        /* get platform freq by its clock name */
        pltclk = clk_get(NULL, "cg-pll0-div1");
        if (IS_ERR(pltclk)) {
                pr_err("%s: can't get bus frequency %ld\n",
                       __func__, PTR_ERR(pltclk));
                return PTR_ERR(pltclk);
        }

        of_node_put(soc);

        return sysfreq;
        return clk_get_rate(pltclk);
}

static struct clk *cpu_to_clk(int cpu)
@@ -0,0 +1,275 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>

#define EDVD_CORE_VOLT_FREQ(core)       (0x20 + (core) * 0x4)
#define EDVD_CORE_VOLT_FREQ_F_SHIFT     0
#define EDVD_CORE_VOLT_FREQ_V_SHIFT     16

struct tegra186_cpufreq_cluster_info {
        unsigned long offset;
        int cpus[4];
        unsigned int bpmp_cluster_id;
};

#define NO_CPU -1
static const struct tegra186_cpufreq_cluster_info tegra186_clusters[] = {
        /* Denver cluster */
        {
                .offset = SZ_64K * 7,
                .cpus = { 1, 2, NO_CPU, NO_CPU },
                .bpmp_cluster_id = 0,
        },
        /* A57 cluster */
        {
                .offset = SZ_64K * 6,
                .cpus = { 0, 3, 4, 5 },
                .bpmp_cluster_id = 1,
        },
};

struct tegra186_cpufreq_cluster {
        const struct tegra186_cpufreq_cluster_info *info;
        struct cpufreq_frequency_table *table;
};

struct tegra186_cpufreq_data {
        void __iomem *regs;

        size_t num_clusters;
        struct tegra186_cpufreq_cluster *clusters;
};

static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
        struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
        unsigned int i;

        for (i = 0; i < data->num_clusters; i++) {
                struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
                const struct tegra186_cpufreq_cluster_info *info =
                        cluster->info;
                int core;

                for (core = 0; core < ARRAY_SIZE(info->cpus); core++) {
                        if (info->cpus[core] == policy->cpu)
                                break;
                }
                if (core == ARRAY_SIZE(info->cpus))
                        continue;

                policy->driver_data =
                        data->regs + info->offset + EDVD_CORE_VOLT_FREQ(core);
                cpufreq_table_validate_and_show(policy, cluster->table);
        }

        policy->cpuinfo.transition_latency = 300 * 1000;

        return 0;
}

static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
                                       unsigned int index)
{
        struct cpufreq_frequency_table *tbl = policy->freq_table + index;
        void __iomem *edvd_reg = policy->driver_data;
        u32 edvd_val = tbl->driver_data;

        writel(edvd_val, edvd_reg);

        return 0;
}

static struct cpufreq_driver tegra186_cpufreq_driver = {
        .name = "tegra186",
        .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = tegra186_cpufreq_set_target,
        .init = tegra186_cpufreq_init,
        .attr = cpufreq_generic_attr,
};

static struct cpufreq_frequency_table *init_vhint_table(
        struct platform_device *pdev, struct tegra_bpmp *bpmp,
        unsigned int cluster_id)
{
        struct cpufreq_frequency_table *table;
        struct mrq_cpu_vhint_request req;
        struct tegra_bpmp_message msg;
        struct cpu_vhint_data *data;
        int err, i, j, num_rates = 0;
        dma_addr_t phys;
        void *virt;

        virt = dma_alloc_coherent(bpmp->dev, sizeof(*data), &phys,
                                  GFP_KERNEL | GFP_DMA32);
        if (!virt)
                return ERR_PTR(-ENOMEM);

        data = (struct cpu_vhint_data *)virt;

        memset(&req, 0, sizeof(req));
        req.addr = phys;
        req.cluster_id = cluster_id;

        memset(&msg, 0, sizeof(msg));
        msg.mrq = MRQ_CPU_VHINT;
        msg.tx.data = &req;
        msg.tx.size = sizeof(req);

        err = tegra_bpmp_transfer(bpmp, &msg);
        if (err) {
                table = ERR_PTR(err);
                goto free;
        }

        for (i = data->vfloor; i <= data->vceil; i++) {
                u16 ndiv = data->ndiv[i];

                if (ndiv < data->ndiv_min || ndiv > data->ndiv_max)
                        continue;

                /* Only store lowest voltage index for each rate */
                if (i > 0 && ndiv == data->ndiv[i - 1])
                        continue;

                num_rates++;
        }

        table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table),
                             GFP_KERNEL);
        if (!table) {
                table = ERR_PTR(-ENOMEM);
                goto free;
        }

        for (i = data->vfloor, j = 0; i <= data->vceil; i++) {
                struct cpufreq_frequency_table *point;
                u16 ndiv = data->ndiv[i];
                u32 edvd_val = 0;

                if (ndiv < data->ndiv_min || ndiv > data->ndiv_max)
                        continue;

                /* Only store lowest voltage index for each rate */
                if (i > 0 && ndiv == data->ndiv[i - 1])
                        continue;

                edvd_val |= i << EDVD_CORE_VOLT_FREQ_V_SHIFT;
                edvd_val |= ndiv << EDVD_CORE_VOLT_FREQ_F_SHIFT;

                point = &table[j++];
                point->driver_data = edvd_val;
                point->frequency = data->ref_clk_hz * ndiv / data->pdiv /
                        data->mdiv / 1000;
        }

        table[j].frequency = CPUFREQ_TABLE_END;

free:
        dma_free_coherent(bpmp->dev, sizeof(*data), virt, phys);

        return table;
}

static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
        struct tegra186_cpufreq_data *data;
        struct tegra_bpmp *bpmp;
        struct resource *res;
        unsigned int i = 0, err;

        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        data->clusters = devm_kcalloc(&pdev->dev, ARRAY_SIZE(tegra186_clusters),
                                      sizeof(*data->clusters), GFP_KERNEL);
        if (!data->clusters)
                return -ENOMEM;

        data->num_clusters = ARRAY_SIZE(tegra186_clusters);

        bpmp = tegra_bpmp_get(&pdev->dev);
        if (IS_ERR(bpmp))
                return PTR_ERR(bpmp);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(data->regs)) {
                err = PTR_ERR(data->regs);
                goto put_bpmp;
        }

        for (i = 0; i < data->num_clusters; i++) {
                struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];

                cluster->info = &tegra186_clusters[i];
                cluster->table = init_vhint_table(
                        pdev, bpmp, cluster->info->bpmp_cluster_id);
                if (IS_ERR(cluster->table)) {
                        err = PTR_ERR(cluster->table);
                        goto put_bpmp;
                }
        }

        tegra_bpmp_put(bpmp);

        tegra186_cpufreq_driver.driver_data = data;

        err = cpufreq_register_driver(&tegra186_cpufreq_driver);
        if (err)
                return err;

        return 0;

put_bpmp:
        tegra_bpmp_put(bpmp);

        return err;
}

static int tegra186_cpufreq_remove(struct platform_device *pdev)
{
        cpufreq_unregister_driver(&tegra186_cpufreq_driver);

        return 0;
}

static const struct of_device_id tegra186_cpufreq_of_match[] = {
        { .compatible = "nvidia,tegra186-ccplex-cluster", },
        { }
};
MODULE_DEVICE_TABLE(of, tegra186_cpufreq_of_match);

static struct platform_driver tegra186_cpufreq_platform_driver = {
        .driver = {
                .name = "tegra186-cpufreq",
                .of_match_table = tegra186_cpufreq_of_match,
        },
        .probe = tegra186_cpufreq_probe,
        .remove = tegra186_cpufreq_remove,
};
module_platform_driver(tegra186_cpufreq_platform_driver);

MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra186 cpufreq driver");
MODULE_LICENSE("GPL v2");
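For orientation, the EDVD write performed by tegra186_cpufreq_set_target() is simply the value that init_vhint_table() packed into driver_data. A small stand-alone sketch of that packing and of the frequency arithmetic, using invented vhint numbers since the real ones come from the BPMP firmware, looks like this:

#include <stdio.h>

#define EDVD_CORE_VOLT_FREQ_F_SHIFT     0
#define EDVD_CORE_VOLT_FREQ_V_SHIFT     16

int main(void)
{
        /* All numbers below are invented examples; in the driver they come
         * from the vhint data filled in by the BPMP firmware. */
        unsigned long long ref_clk_hz = 38400000;       /* assumed 38.4 MHz reference */
        unsigned int ndiv = 52;                         /* assumed frequency multiplier */
        unsigned int vindex = 10;                       /* assumed voltage index */
        unsigned int pdiv = 1, mdiv = 1;

        /* Pack the voltage index and NDIV the way init_vhint_table() builds
         * point->driver_data. */
        unsigned int edvd_val = (vindex << EDVD_CORE_VOLT_FREQ_V_SHIFT) |
                                (ndiv << EDVD_CORE_VOLT_FREQ_F_SHIFT);

        /* Table frequency in kHz, matching point->frequency above. */
        unsigned long long freq_khz = ref_clk_hz * ndiv / pdiv / mdiv / 1000;

        printf("edvd_val = 0x%08x, frequency = %llu kHz\n", edvd_val, freq_khz);
        return 0;
}

Compiled as plain C, this prints edvd_val = 0x000a0034 and a table frequency of 1996800 kHz for the assumed inputs.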
@@ -291,18 +291,6 @@ config ARMADA_THERMAL
          Enable this option if you want to have support for thermal management
          controller present in Armada 370 and Armada XP SoC.

config DB8500_CPUFREQ_COOLING
        tristate "DB8500 cpufreq cooling"
        depends on ARCH_U8500 || COMPILE_TEST
        depends on HAS_IOMEM
        depends on CPU_THERMAL
        default y
        help
          Adds DB8500 cpufreq cooling devices, and these cooling devices can be
          bound to thermal zone trip points. When a trip point reached, the
          bound cpufreq cooling device turns active to set CPU frequency low to
          cool down the CPU.

config INTEL_POWERCLAMP
        tristate "Intel PowerClamp idle injection driver"
        depends on THERMAL
@@ -41,7 +41,6 @@ obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o
obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
obj-$(CONFIG_MAX77620_THERMAL) += max77620_thermal.o
obj-$(CONFIG_QORIQ_THERMAL) += qoriq_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
obj-$(CONFIG_INTEL_SOC_DTS_IOSF_CORE) += intel_soc_dts_iosf.o
@@ -1,105 +0,0 @@
/*
 * db8500_cpufreq_cooling.c - DB8500 cpufreq works as cooling device.
 *
 * Copyright (C) 2012 ST-Ericsson
 * Copyright (C) 2012 Linaro Ltd.
 *
 * Author: Hongbo Zhang <hongbo.zhang@linaro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/cpu_cooling.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
{
        struct thermal_cooling_device *cdev;

        cdev = cpufreq_cooling_register(cpu_present_mask);
        if (IS_ERR(cdev)) {
                int ret = PTR_ERR(cdev);

                if (ret != -EPROBE_DEFER)
                        dev_err(&pdev->dev,
                                "Failed to register cooling device %d\n",
                                ret);

                return ret;
        }

        platform_set_drvdata(pdev, cdev);

        dev_info(&pdev->dev, "Cooling device registered: %s\n", cdev->type);

        return 0;
}

static int db8500_cpufreq_cooling_remove(struct platform_device *pdev)
{
        struct thermal_cooling_device *cdev = platform_get_drvdata(pdev);

        cpufreq_cooling_unregister(cdev);

        return 0;
}

static int db8500_cpufreq_cooling_suspend(struct platform_device *pdev,
                                          pm_message_t state)
{
        return -ENOSYS;
}

static int db8500_cpufreq_cooling_resume(struct platform_device *pdev)
{
        return -ENOSYS;
}

#ifdef CONFIG_OF
static const struct of_device_id db8500_cpufreq_cooling_match[] = {
        { .compatible = "stericsson,db8500-cpufreq-cooling" },
        {},
};
MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match);
#endif

static struct platform_driver db8500_cpufreq_cooling_driver = {
        .driver = {
                .name = "db8500-cpufreq-cooling",
                .of_match_table = of_match_ptr(db8500_cpufreq_cooling_match),
        },
        .probe = db8500_cpufreq_cooling_probe,
        .suspend = db8500_cpufreq_cooling_suspend,
        .resume = db8500_cpufreq_cooling_resume,
        .remove = db8500_cpufreq_cooling_remove,
};

static int __init db8500_cpufreq_cooling_init(void)
{
        return platform_driver_register(&db8500_cpufreq_cooling_driver);
}

static void __exit db8500_cpufreq_cooling_exit(void)
{
        platform_driver_unregister(&db8500_cpufreq_cooling_driver);
}

/* Should be later than db8500_cpufreq_register */
late_initcall(db8500_cpufreq_cooling_init);
module_exit(db8500_cpufreq_cooling_exit);

MODULE_AUTHOR("Hongbo Zhang <hongbo.zhang@stericsson.com>");
MODULE_DESCRIPTION("DB8500 cpufreq cooling driver");
MODULE_LICENSE("GPL");
@@ -120,6 +120,13 @@ struct cpufreq_policy {
        bool fast_switch_possible;
        bool fast_switch_enabled;

        /*
         * Preferred average time interval between consecutive invocations of
         * the driver to set the frequency for this policy. To be set by the
         * scaling driver (0, which is the default, means no preference).
         */
        unsigned int transition_delay_us;

        /* Cached frequency lookup from cpufreq_driver_resolve_freq. */
        unsigned int cached_target_freq;
        int cached_resolved_idx;
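As a rough illustration of how a scaling driver is expected to use the new field (this is not code from the merge; the driver name, frequency table and 500 us figure are invented), an ->init callback might hint its preferred update interval like so:

/* Hypothetical scaling driver fragment, for illustration only. */
static struct cpufreq_frequency_table *example_freq_table;      /* hypothetical */

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        /*
         * Assume the hardware completes a frequency change well within
         * 500 us, so ask governors such as schedutil not to re-evaluate
         * this policy more often than roughly every 500 us.
         */
        policy->transition_delay_us = 500;

        /* 300 us transition latency, given to the core in nanoseconds. */
        return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
}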
@@ -117,6 +117,7 @@ extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern unsigned long tick_nohz_get_idle_calls(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
@@ -61,6 +61,11 @@ struct sugov_cpu {
        unsigned long util;
        unsigned long max;
        unsigned int flags;

        /* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
@@ -93,22 +98,23 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
{
        struct cpufreq_policy *policy = sg_policy->policy;

        if (sg_policy->next_freq == next_freq)
                return;

        if (sg_policy->next_freq > next_freq)
                next_freq = (sg_policy->next_freq + next_freq) >> 1;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        if (policy->fast_switch_enabled) {
                if (sg_policy->next_freq == next_freq) {
                        trace_cpu_frequency(policy->cur, smp_processor_id());
                        return;
                }
                sg_policy->next_freq = next_freq;
                next_freq = cpufreq_driver_fast_switch(policy, next_freq);
                if (next_freq == CPUFREQ_ENTRY_INVALID)
                        return;

                policy->cur = next_freq;
                trace_cpu_frequency(next_freq, smp_processor_id());
        } else if (sg_policy->next_freq != next_freq) {
                sg_policy->next_freq = next_freq;
        } else {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
@@ -192,6 +198,19 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
        sg_cpu->iowait_boost >>= 1;
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls();
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
{
@@ -200,6 +219,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util, max;
        unsigned int next_f;
        bool busy;

        sugov_set_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;
@@ -207,40 +227,37 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
        if (!sugov_should_update_freq(sg_policy, time))
                return;

        busy = sugov_cpu_is_busy(sg_cpu);

        if (flags & SCHED_CPUFREQ_RT_DL) {
                next_f = policy->cpuinfo.max_freq;
        } else {
                sugov_get_util(&util, &max);
                sugov_iowait_boost(sg_cpu, &util, &max);
                next_f = get_next_freq(sg_policy, util, max);
                /*
                 * Do not reduce the frequency if the CPU has not been idle
                 * recently, as the reduction is likely to be premature then.
                 */
                if (busy && next_f < sg_policy->next_freq)
                        next_f = sg_policy->next_freq;
        }
        sugov_update_commit(sg_policy, time, next_f);
}

static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
                                           unsigned long util, unsigned long max,
                                           unsigned int flags)
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int max_f = policy->cpuinfo.max_freq;
        u64 last_freq_update_time = sg_policy->last_freq_update_time;
        unsigned long util = 0, max = 1;
        unsigned int j;

        if (flags & SCHED_CPUFREQ_RT_DL)
                return max_f;

        sugov_iowait_boost(sg_cpu, &util, &max);

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu;
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;
                s64 delta_ns;

                if (j == smp_processor_id())
                        continue;

                j_sg_cpu = &per_cpu(sugov_cpu, j);
                /*
                 * If the CPU utilization was last updated before the previous
                 * frequency update and the time elapsed between the last update
@@ -254,7 +271,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu,
                        continue;
                }
                if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
                        return max_f;
                        return policy->cpuinfo.max_freq;

                j_util = j_sg_cpu->util;
                j_max = j_sg_cpu->max;
@@ -289,7 +306,11 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
        sg_cpu->last_update = time;

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, util, max, flags);
                if (flags & SCHED_CPUFREQ_RT_DL)
                        next_f = sg_policy->policy->cpuinfo.max_freq;
                else
                        next_f = sugov_next_freq_shared(sg_cpu);

                sugov_update_commit(sg_policy, time, next_f);
        }

@@ -473,7 +494,6 @@ static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        unsigned int lat;
        int ret = 0;

        /* State should be equivalent to EXIT */
@@ -512,10 +532,16 @@ static int sugov_init(struct cpufreq_policy *policy)
                goto stop_kthread;
        }

        tunables->rate_limit_us = LATENCY_MULTIPLIER;
        lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
        if (lat)
                tunables->rate_limit_us *= lat;
        if (policy->transition_delay_us) {
                tunables->rate_limit_us = policy->transition_delay_us;
        } else {
                unsigned int lat;

                tunables->rate_limit_us = LATENCY_MULTIPLIER;
                lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
                if (lat)
                        tunables->rate_limit_us *= lat;
        }

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;
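To make the two behavioural changes concrete, the following stand-alone sketch (plain C, with invented latency and frequency numbers) mirrors how sugov_init() now picks the rate limit and how sugov_update_commit() halves the step when the frequency is being reduced:

#include <stdio.h>

#define LATENCY_MULTIPLIER      1000
#define NSEC_PER_USEC           1000

/* Mirror of the new rate_limit_us selection in sugov_init(). */
static unsigned int pick_rate_limit_us(unsigned int transition_delay_us,
                                       unsigned int transition_latency_ns)
{
        unsigned int rate_limit_us = LATENCY_MULTIPLIER;
        unsigned int lat;

        if (transition_delay_us)
                return transition_delay_us;

        lat = transition_latency_ns / NSEC_PER_USEC;
        if (lat)
                rate_limit_us *= lat;
        return rate_limit_us;
}

/* Mirror of the "reduce frequencies slower" step in sugov_update_commit(). */
static unsigned int next_freq_slow_down(unsigned int cur_next, unsigned int want)
{
        if (cur_next > want)
                want = (cur_next + want) >> 1;
        return want;
}

int main(void)
{
        /* Example numbers only (us, ns and kHz values are made up). */
        printf("driver hint 500 us      -> rate_limit_us = %u\n",
               pick_rate_limit_us(500, 300000));
        printf("no hint, 300 us latency -> rate_limit_us = %u\n",
               pick_rate_limit_us(0, 300000));
        printf("dropping 2000000 -> 1000000 kHz commits %u kHz first\n",
               next_freq_slow_down(2000000, 1000000));
        return 0;
}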
@@ -993,6 +993,18 @@ ktime_t tick_nohz_get_sleep_length(void)
        return ts->sleep_length;
}

/**
 * tick_nohz_get_idle_calls - return the current idle calls counter value
 *
 * Called from the schedutil frequency scaling governor in scheduler context.
 */
unsigned long tick_nohz_get_idle_calls(void)
{
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);

        return ts->idle_calls;
}

static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
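The new helper exists so that schedutil can tell whether a CPU has entered idle since the previous frequency evaluation. A minimal user-space model of that check, with invented counter values, is:

#include <stdbool.h>
#include <stdio.h>

/* Model of sugov_cpu_is_busy(): the CPU counts as busy when the idle-entry
 * counter has not advanced since the previous frequency evaluation. */
static unsigned long saved_idle_calls;

static bool cpu_is_busy(unsigned long idle_calls)
{
        bool busy = (idle_calls == saved_idle_calls);

        saved_idle_calls = idle_calls;
        return busy;
}

int main(void)
{
        printf("%d\n", cpu_is_busy(100));       /* counter moved from 0 to 100: not busy */
        printf("%d\n", cpu_is_busy(100));       /* no idle entries since last check: busy */
        printf("%d\n", cpu_is_busy(101));       /* CPU went idle again: not busy */
        return 0;
}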