From b3a826c05bd96122fd54c297062c495dbcc7511e Mon Sep 17 00:00:00 2001
From: James Geboski
Date: Thu, 16 Oct 2014 07:37:11 -0400
Subject: [PATCH 1/9] cpufreq: allow powersave governor as the default without
 expert mode

The intel_pstate driver only supports the performance and the powersave
governors. With the performance governor ensuring the highest possible
performance settings, userspace tools fail to make any lasting changes.
In order to allow userspace tools to make modifications to the settings,
the powersave governor must be in use. This makes having the powersave
governor as the default convenient for systems where the intel_pstate
driver is being employed. Having to enable expert mode in the kernel
configuration is just a headache for such a trivial task.

This patch applies to all kernel versions 2.6.38 or greater after the
migration from CONFIG_EMBEDDED to CONFIG_EXPERT (6a108a14fa35). Most
importantly, this applies to kernel versions 3.9 or greater, when the
intel_pstate driver was introduced.

Signed-off-by: James Geboski
Acked-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/Kconfig | 1 -
 1 file changed, 1 deletion(-)

diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 3489f8f5fada..73df7dbc250a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -63,7 +63,6 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE

 config CPU_FREQ_DEFAULT_GOV_POWERSAVE
 	bool "powersave"
-	depends on EXPERT
 	select CPU_FREQ_GOV_POWERSAVE
 	help
 	  Use the CPUFreq governor 'powersave' as default. This sets

From a0a22cf14472fa5cd44c8d107eba1a827df9549c Mon Sep 17 00:00:00 2001
From: Kelvin Cheung
Date: Fri, 17 Oct 2014 18:23:31 +0800
Subject: [PATCH 2/9] cpufreq: Loongson1: Add cpufreq driver for Loongson1B

This patch adds a cpufreq driver for Loongson1B, which is capable of
changing the CPU frequency dynamically.

Signed-off-by: Kelvin Cheung
Acked-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/Kconfig        |  10 ++
 drivers/cpufreq/Makefile       |   1 +
 drivers/cpufreq/ls1x-cpufreq.c | 223 +++++++++++++++++++++++++++++++++
 3 files changed, 234 insertions(+)
 create mode 100644 drivers/cpufreq/ls1x-cpufreq.c

diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 73df7dbc250a..4de4dfae4ccc 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -249,6 +249,16 @@ config LOONGSON2_CPUFREQ

 	  If in doubt, say N.

+config LOONGSON1_CPUFREQ
+	tristate "Loongson1 CPUFreq Driver"
+	help
+	  This option adds a CPUFreq driver for loongson1 processors which
+	  support software configurable cpu frequency.
+
+	  For details, take a look at <file:Documentation/cpu-freq/>.
+
+	  If in doubt, say N.
+
 endmenu

 menu "PowerPC CPU frequency scaling drivers"

diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 40c53dc1937e..215e447abec6 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_CRIS_MACH_ARTPEC3)	+= cris-artpec3-cpufreq.o
 obj-$(CONFIG_ETRAXFS)			+= cris-etraxfs-cpufreq.o
 obj-$(CONFIG_IA64_ACPI_CPUFREQ)		+= ia64-acpi-cpufreq.o
 obj-$(CONFIG_LOONGSON2_CPUFREQ)		+= loongson2_cpufreq.o
+obj-$(CONFIG_LOONGSON1_CPUFREQ)		+= ls1x-cpufreq.o
 obj-$(CONFIG_SH_CPU_FREQ)		+= sh-cpufreq.o
 obj-$(CONFIG_SPARC_US2E_CPUFREQ)	+= sparc-us2e-cpufreq.o
 obj-$(CONFIG_SPARC_US3_CPUFREQ)		+= sparc-us3-cpufreq.o

diff --git a/drivers/cpufreq/ls1x-cpufreq.c b/drivers/cpufreq/ls1x-cpufreq.c
new file mode 100644
index 000000000000..25fbd6a1374f
--- /dev/null
+++ b/drivers/cpufreq/ls1x-cpufreq.c
@@ -0,0 +1,223 @@
+/*
+ * CPU Frequency Scaling for Loongson 1 SoC
+ *
+ * Copyright (C) 2014 Zhang, Keguang
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/mach-loongson1/cpufreq.h>
+#include <asm/mach-loongson1/loongson1.h>
+
+static struct {
+	struct device *dev;
+	struct clk *clk;	/* CPU clk */
+	struct clk *mux_clk;	/* MUX of CPU clk */
+	struct clk *pll_clk;	/* PLL clk */
+	struct clk *osc_clk;	/* OSC clk */
+	unsigned int max_freq;
+	unsigned int min_freq;
+} ls1x_cpufreq;
+
+static int ls1x_cpufreq_notifier(struct notifier_block *nb,
+				 unsigned long val, void *data)
+{
+	if (val == CPUFREQ_POSTCHANGE)
+		current_cpu_data.udelay_val = loops_per_jiffy;
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ls1x_cpufreq_notifier_block = {
+	.notifier_call = ls1x_cpufreq_notifier
+};
+
+static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
+			       unsigned int index)
+{
+	unsigned int old_freq, new_freq;
+
+	old_freq = policy->cur;
+	new_freq = policy->freq_table[index].frequency;
+
+	/*
+	 * The procedure of reconfiguring CPU clk is as below.
+	 *
+	 *  - Reparent CPU clk to OSC clk
+	 *  - Reset CPU clock (very important)
+	 *  - Reconfigure CPU DIV
+	 *  - Reparent CPU clk back to CPU DIV clk
+	 */
+
+	dev_dbg(ls1x_cpufreq.dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
+	clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk);
+	__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
+		     LS1X_CLK_PLL_DIV);
+	__raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
+		     LS1X_CLK_PLL_DIV);
+	clk_set_rate(ls1x_cpufreq.mux_clk, new_freq * 1000);
+	clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk);
+
+	return 0;
+}
+
+static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
+{
+	struct cpufreq_frequency_table *freq_tbl;
+	unsigned int pll_freq, freq;
+	int steps, i, ret;
+
+	pll_freq = clk_get_rate(ls1x_cpufreq.pll_clk) / 1000;
+
+	steps = 1 << DIV_CPU_WIDTH;
+	freq_tbl = kzalloc(sizeof(*freq_tbl) * steps, GFP_KERNEL);
+	if (!freq_tbl) {
+		dev_err(ls1x_cpufreq.dev,
+			"failed to alloc cpufreq_frequency_table\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	for (i = 0; i < (steps - 1); i++) {
+		freq = pll_freq / (i + 1);
+		if ((freq < ls1x_cpufreq.min_freq) ||
+		    (freq > ls1x_cpufreq.max_freq))
+			freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
+		else
+			freq_tbl[i].frequency = freq;
+		dev_dbg(ls1x_cpufreq.dev,
+			"cpufreq table: index %d: frequency %d\n", i,
+			freq_tbl[i].frequency);
+	}
+	freq_tbl[i].frequency = CPUFREQ_TABLE_END;
+
+	policy->clk = ls1x_cpufreq.clk;
+	ret = cpufreq_generic_init(policy, freq_tbl, 0);
+	if (ret)
+		kfree(freq_tbl);
+out:
+	return ret;
+}
+
+static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
+{
+	kfree(policy->freq_table);
+	return 0;
+}
+
+static struct cpufreq_driver ls1x_cpufreq_driver = {
+	.name		= "cpufreq-ls1x",
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= ls1x_cpufreq_target,
+	.get		= cpufreq_generic_get,
+	.init		= ls1x_cpufreq_init,
+	.exit		= ls1x_cpufreq_exit,
+	.attr		= cpufreq_generic_attr,
+};
+
+static int ls1x_cpufreq_remove(struct platform_device *pdev)
+{
+	cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
+				    CPUFREQ_TRANSITION_NOTIFIER);
+	cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+
+	return 0;
+}
+
+static int ls1x_cpufreq_probe(struct platform_device *pdev)
+{
+	struct plat_ls1x_cpufreq *pdata = pdev->dev.platform_data;
+	struct clk *clk;
+	int ret;
+
+	if (!pdata || !pdata->clk_name || !pdata->osc_clk_name)
+		return -EINVAL;
+
+	ls1x_cpufreq.dev = &pdev->dev;
+
+	clk = devm_clk_get(&pdev->dev, pdata->clk_name);
+	if (IS_ERR(clk)) {
+		dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+			pdata->clk_name);
+		ret = PTR_ERR(clk);
+		goto out;
+	}
+	ls1x_cpufreq.clk = clk;
+
+	clk = clk_get_parent(clk);
+	if (IS_ERR(clk)) {
+		dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
+			__clk_get_name(ls1x_cpufreq.clk));
+		ret = PTR_ERR(clk);
+		goto out;
+	}
+	ls1x_cpufreq.mux_clk = clk;
+
+	clk = clk_get_parent(clk);
+	if (IS_ERR(clk)) {
+		dev_err(ls1x_cpufreq.dev, "unable to get parent of %s clock\n",
+			__clk_get_name(ls1x_cpufreq.mux_clk));
+		ret = PTR_ERR(clk);
+		goto out;
+	}
+	ls1x_cpufreq.pll_clk = clk;
+
+	clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
+	if (IS_ERR(clk)) {
+		dev_err(ls1x_cpufreq.dev, "unable to get %s clock\n",
+			pdata->osc_clk_name);
+		ret = PTR_ERR(clk);
+		goto out;
+	}
+	ls1x_cpufreq.osc_clk = clk;
+
+	ls1x_cpufreq.max_freq = pdata->max_freq;
+	ls1x_cpufreq.min_freq = pdata->min_freq;
+
+	ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
+	if (ret) {
+		dev_err(ls1x_cpufreq.dev,
+			"failed to register cpufreq driver: %d\n", ret);
+		goto out;
+	}
+
+	ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+
+	if (!ret)
+		goto out;
+
+	dev_err(ls1x_cpufreq.dev, "failed to register cpufreq notifier: %d\n",
+		ret);
+
+	cpufreq_unregister_driver(&ls1x_cpufreq_driver);
+out:
+	return ret;
+}
+
+static struct platform_driver ls1x_cpufreq_platdrv = {
+	.driver = {
+		.name	= "ls1x-cpufreq",
+		.owner	= THIS_MODULE,
+	},
+	.probe	= ls1x_cpufreq_probe,
+	.remove	= ls1x_cpufreq_remove,
+};
+
+module_platform_driver(ls1x_cpufreq_platdrv);
+
+MODULE_AUTHOR("Kelvin Cheung");
+MODULE_DESCRIPTION("Loongson 1 CPUFreq driver");
+MODULE_LICENSE("GPL");
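The probe routine above refuses to bind unless the board supplies platform data naming the CPU and OSC clocks plus the allowed frequency range. As a rough illustration (not part of the patch), board code might register the device as in the sketch below. The plat_ls1x_cpufreq layout is inferred from the fields the probe dereferences; the real definition lives in the mach-loongson1 headers, and the clock names and frequency limits here are made-up placeholders. Frequencies are in kHz, matching the driver's comparison against pll_freq / 1000 in ls1x_cpufreq_init().

    /* Hypothetical board-file sketch; names and values are assumptions. */
    #include <linux/platform_device.h>

    struct plat_ls1x_cpufreq {              /* layout inferred from the probe */
    	const char *clk_name;               /* CPU clk */
    	const char *osc_clk_name;           /* OSC clk */
    	unsigned int max_freq;              /* kHz */
    	unsigned int min_freq;              /* kHz */
    };

    static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
    	.clk_name     = "cpu_clk",          /* assumed clock names */
    	.osc_clk_name = "osc_33m_clk",
    	.max_freq     = 266 * 1000,         /* 266 MHz, example value */
    	.min_freq     = 33 * 1000,          /* 33 MHz, example value */
    };

    static struct platform_device ls1x_cpufreq_pdev = {
    	.name = "ls1x-cpufreq",             /* must match the platform_driver */
    	.dev  = {
    		.platform_data = &ls1x_cpufreq_pdata,
    	},
    };

    /* Registered from board init code with:
     *     platform_device_register(&ls1x_cpufreq_pdev);
     */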
From 0a1e879d353f6f6e661d14c6a0e42dd19efbc504 Mon Sep 17 00:00:00 2001
From: Stefan Wahren
Date: Fri, 17 Oct 2014 22:09:48 +0000
Subject: [PATCH 3/9] cpufreq: cpufreq-dt: Improve debug about matching OPP

When testing new DT OPPs it is very helpful to print the matching OPP
on every frequency change, which makes it easier to find frequency
rounding issues in the dts file.

Signed-off-by: Stefan Wahren
Acked-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq-dt.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f657c571b18e..725fb1d86578 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -58,6 +58,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
 	old_freq = clk_get_rate(cpu_clk) / 1000;

 	if (!IS_ERR(cpu_reg)) {
+		unsigned long opp_freq;
+
 		rcu_read_lock();
 		opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
 		if (IS_ERR(opp)) {
@@ -67,9 +69,12 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
 			return PTR_ERR(opp);
 		}
 		volt = dev_pm_opp_get_voltage(opp);
+		opp_freq = dev_pm_opp_get_freq(opp);
 		rcu_read_unlock();
 		tol = volt * priv->voltage_tolerance / 100;
 		volt_old = regulator_get_voltage(cpu_reg);
+		dev_dbg(cpu_dev, "Found OPP: %ld kHz, %ld uV\n",
+			opp_freq / 1000, volt);
 	}

 	dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",

From 8197bb1bffbee6a7678d20b4c1e77940cdaa8640 Mon Sep 17 00:00:00 2001
From: Stefan Wahren
Date: Fri, 17 Oct 2014 22:09:49 +0000
Subject: [PATCH 4/9] cpufreq: cpufreq-dt: Handle regulator_get_voltage() failure

In error cases regulator_get_voltage() returns a negative value, so
take care of that before using the result.

Signed-off-by: Stefan Wahren
Acked-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq-dt.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 725fb1d86578..8cba13df5f28 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -78,7 +78,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
 	}

 	dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
-		old_freq / 1000, volt_old ? volt_old / 1000 : -1,
+		old_freq / 1000, (volt_old > 0) ? volt_old / 1000 : -1,
 		new_freq / 1000, volt ? volt / 1000 : -1);

 	/* scaling up?  scale voltage before frequency */
@@ -94,7 +94,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
 	ret = clk_set_rate(cpu_clk, freq_exact);
 	if (ret) {
 		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
-		if (!IS_ERR(cpu_reg))
+		if (!IS_ERR(cpu_reg) && volt_old > 0)
 			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
 		return ret;
 	}

From 619c144c84bd240487204e91dff88247cde68d92 Mon Sep 17 00:00:00 2001
From: Vince Hsu
Date: Mon, 10 Nov 2014 14:14:50 +0800
Subject: [PATCH 5/9] cpufreq: respect the min/max settings from user space

When user space tries to set scaling_(max|min)_freq through sysfs,
cpufreq_set_policy() asks other drivers' opinions on the max/min
frequencies. Some device drivers, like Tegra CPU EDP (which is not
upstream yet), may constrain the maximum CPU frequency dynamically
because of board design. So if the user space access happens while some
driver is capping the CPU frequency, the user_policy->(max|min) is
overridden by the capped value, which is not what user space expects.
If user space is not invoked again, the CPU will remain capped by
user_policy->(max|min) even after the drivers stop limiting the CPU
frequency.

This patch preserves the user-specified min/max settings, so that every
time the cpufreq policy is updated, the new max/min can be re-evaluated
correctly based on the user's expectation and the present device
drivers' status.

Signed-off-by: Vince Hsu
Acked-by: Viresh Kumar
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/cpufreq.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 644b54e1e7d1..0721ab352e2a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -535,7 +535,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 static ssize_t store_##file_name					\
 (struct cpufreq_policy *policy, const char *buf, size_t count)		\
 {									\
-	int ret;							\
+	int ret, temp;							\
 	struct cpufreq_policy new_policy;				\
 									\
 	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
@@ -546,8 +546,10 @@ static ssize_t store_##file_name				\
 	if (ret != 1)							\
 		return -EINVAL;						\
 									\
+	temp = new_policy.object;					\
 	ret = cpufreq_set_policy(policy, &new_policy);			\
-	policy->user_policy.object = policy->object;			\
+	if (!ret)							\
+		policy->user_policy.object = temp;			\
 									\
 	return ret ? ret : count;					\
 }
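The macro context makes the change hard to read in isolation. Expanded by hand for the scaling_max_freq attribute, the store path after this patch behaves roughly like the sketch below (a simplification, not the literal preprocessor output):

    /* Rough expansion of store_one(scaling_max_freq, max) after this patch. */
    static ssize_t store_scaling_max_freq(struct cpufreq_policy *policy,
    					  const char *buf, size_t count)
    {
    	int ret, temp;
    	struct cpufreq_policy new_policy;

    	ret = cpufreq_get_policy(&new_policy, policy->cpu);
    	if (ret)
    		return -EINVAL;

    	ret = sscanf(buf, "%u", &new_policy.max);
    	if (ret != 1)
    		return -EINVAL;

    	/* Remember what the user asked for, not what other drivers
    	 * allowed: cpufreq_set_policy() may clamp new_policy.max. */
    	temp = new_policy.max;
    	ret = cpufreq_set_policy(policy, &new_policy);
    	if (!ret)
    		policy->user_policy.max = temp;

    	return ret ? ret : count;
    }

The key difference from the old code is that user_policy.max is taken from the value written by the user before any clamping, and only on success, so a transient cap imposed by another driver can no longer leak into the remembered user setting.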
From 77873887729aaddec5cd27203a6ce8c4987733e4 Mon Sep 17 00:00:00 2001
From: Dirk Brandewie
Date: Thu, 6 Nov 2014 09:40:46 -0800
Subject: [PATCH 6/9] x86: Add support for Intel HWP feature detection.

Add support for detecting Hardware Managed Performance States (HWP) as
described in Volume 3 section 14.4 of the SDM. One bit,
CPUID.06H:EAX[bit 7], expresses the presence of the HWP feature on the
processor. The remaining bits, CPUID.06H:EAX[bits 8-11], denote the
presence of various HWP features.

Signed-off-by: Dirk Brandewie
Signed-off-by: Rafael J. Wysocki
---
 arch/x86/include/asm/cpufeature.h | 5 +++++
 arch/x86/kernel/cpu/scattered.c   | 5 +++++
 2 files changed, 10 insertions(+)

diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 0bb1335313b2..aede2c347bde 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -189,6 +189,11 @@
 #define X86_FEATURE_DTHERM	( 7*32+ 7) /* Digital Thermal Sensor */
 #define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_HWP		( 7*32+ 10) /* "hwp" Intel HWP */
+#define X86_FEATURE_HWP_NOITFY	( 7*32+ 11) /* Intel HWP_NOTIFY */
+#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
+#define X86_FEATURE_HWP_EPP	( 7*32+13) /* Intel HWP_EPP */
+#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */

 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 4a8013d55947..60639093d536 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -36,6 +36,11 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 		{ X86_FEATURE_ARAT,		CR_EAX, 2, 0x00000006, 0 },
 		{ X86_FEATURE_PLN,		CR_EAX, 4, 0x00000006, 0 },
 		{ X86_FEATURE_PTS,		CR_EAX, 6, 0x00000006, 0 },
+		{ X86_FEATURE_HWP,		CR_EAX, 7, 0x00000006, 0 },
+		{ X86_FEATURE_HWP_NOITFY,	CR_EAX, 8, 0x00000006, 0 },
+		{ X86_FEATURE_HWP_ACT_WINDOW,	CR_EAX, 9, 0x00000006, 0 },
+		{ X86_FEATURE_HWP_EPP,		CR_EAX,10, 0x00000006, 0 },
+		{ X86_FEATURE_HWP_PKG_REQ,	CR_EAX,11, 0x00000006, 0 },
 		{ X86_FEATURE_APERFMPERF,	CR_ECX, 0, 0x00000006, 0 },
 		{ X86_FEATURE_EPB,		CR_ECX, 3, 0x00000006, 0 },
 		{ X86_FEATURE_HW_PSTATE,	CR_EDX, 7, 0x80000007, 0 },
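These are scattered feature bits taken straight from CPUID leaf 6. For a quick cross-check on a given machine, the same leaf can be read from user space; the standalone sketch below uses the compiler's <cpuid.h> helper rather than any kernel interface, and simply reports the five bits this patch scatters into the kernel's feature word:

    /* User-space sketch: query CPUID.06H:EAX and print the HWP bits.
     * Builds with GCC or clang on x86: cc hwp_check.c -o hwp_check */
    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned int eax, ebx, ecx, edx;

    	/* Leaf 6: thermal and power management. */
    	if (!__get_cpuid(6, &eax, &ebx, &ecx, &edx))
    		return 1;

    	printf("HWP base:                %d\n", !!(eax & (1 << 7)));
    	printf("HWP notifications:       %d\n", !!(eax & (1 << 8)));
    	printf("HWP activity window:     %d\n", !!(eax & (1 << 9)));
    	printf("HWP energy perf pref:    %d\n", !!(eax & (1 << 10)));
    	printf("HWP package level req:   %d\n", !!(eax & (1 << 11)));
    	return 0;
    }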
From 2f86dc4cddcb21290ca099e1dce2a53533c86e0b Mon Sep 17 00:00:00 2001
From: Dirk Brandewie
Date: Thu, 6 Nov 2014 09:40:47 -0800
Subject: [PATCH 7/9] intel_pstate: Add support for HWP

Add support for Hardware Managed Performance States (HWP) as described
in Volume 3 section 14.4 of the SDM.

With HWP enabled intel_pstate will no longer be responsible for
selecting P states for the processor. intel_pstate will continue to
register with the cpufreq core as the scaling driver for CPUs
implementing HWP. In HWP mode intel_pstate provides three functions:
reporting frequency to the cpufreq core, supporting the set_policy()
interface from the core, and maintaining the intel_pstate sysfs
interface in /sys/devices/system/cpu/intel_pstate. User preferences
expressed via the set_policy() interface or the sysfs interface are
forwarded to the CPU via the HWP MSR interface.

Signed-off-by: Dirk Brandewie
Signed-off-by: Rafael J. Wysocki
---
 Documentation/cpu-freq/intel-pstate.txt |  33 +++++---
 Documentation/kernel-parameters.txt     |   3 +
 arch/x86/include/uapi/asm/msr-index.h   |  41 ++++++++++
 drivers/cpufreq/intel_pstate.c          | 100 +++++++++++++++++++++++-
 4 files changed, 165 insertions(+), 12 deletions(-)

diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index a69ffe1d54d5..765d7fc0e692 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -1,17 +1,28 @@
 Intel P-state driver
 --------------------

-This driver implements a scaling driver with an internal governor for
-Intel Core processors. The driver follows the same model as the
-Transmeta scaling driver (longrun.c) and implements the setpolicy()
-instead of target(). Scaling drivers that implement setpolicy() are
-assumed to implement internal governors by the cpufreq core. All the
-logic for selecting the current P state is contained within the
-driver; no external governor is used by the cpufreq core.
+This driver provides an interface to control the P state selection for
+SandyBridge+ Intel processors. The driver can operate in two different
+modes based on the processor model: legacy and Hardware P state (HWP)
+mode.

-Intel SandyBridge+ processors are supported.
+In legacy mode the driver implements a scaling driver with an internal
+governor for Intel Core processors. The driver follows the same model
+as the Transmeta scaling driver (longrun.c) and implements the
+setpolicy() instead of target(). Scaling drivers that implement
+setpolicy() are assumed to implement internal governors by the cpufreq
+core. All the logic for selecting the current P state is contained
+within the driver; no external governor is used by the cpufreq core.

-New sysfs files for controlling P state selection have been added to
+In HWP mode P state selection is implemented in the processor
+itself. The driver provides the interfaces between the cpufreq core and
+the processor to control P state selection based on user preferences
+and reporting frequency to the cpufreq core. In this mode the
+internal governor code is disabled.
+
+In addition to the interfaces provided by the cpufreq core for
+controlling frequency the driver provides sysfs files for
+controlling P state selection. These files have been added to
 /sys/devices/system/cpu/intel_pstate/

 max_perf_pct: limits the maximum P state that will be requested by
@@ -33,7 +44,9 @@ frequency is fiction for Intel Core processors. Even if the scaling
 driver selects a single P state the actual frequency the processor
 will run at is selected by the processor itself.

-New debugfs files have also been added to /sys/kernel/debug/pstate_snb/
+For legacy mode debugfs files have also been added to allow tuning of
+the internal governor algorithm. These files are located at
+/sys/kernel/debug/pstate_snb/ These files are NOT present in HWP mode.

 deadband
 d_gain_pct
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4c81a860cc2b..907a0f119bee 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1446,6 +1446,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 		disable
 		  Do not enable intel_pstate as the default
 		  scaling driver for the supported processors
+		no_hwp
+		  Do not enable hardware P state control (HWP)
+		  if available.

 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index e21331ce368f..62838e54947d 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -152,6 +152,45 @@
 #define MSR_CC6_DEMOTION_POLICY_CONFIG	0x00000668
 #define MSR_MC6_DEMOTION_POLICY_CONFIG	0x00000669

+/* Hardware P state interface */
+#define MSR_PPERF			0x0000064e
+#define MSR_PERF_LIMIT_REASONS		0x0000064f
+#define MSR_PM_ENABLE			0x00000770
+#define MSR_HWP_CAPABILITIES		0x00000771
+#define MSR_HWP_REQUEST_PKG		0x00000772
+#define MSR_HWP_INTERRUPT		0x00000773
+#define MSR_HWP_REQUEST			0x00000774
+#define MSR_HWP_STATUS			0x00000777
+
+/* CPUID.6.EAX */
+#define HWP_BASE_BIT			(1<<7)
+#define HWP_NOTIFICATIONS_BIT		(1<<8)
+#define HWP_ACTIVITY_WINDOW_BIT		(1<<9)
+#define HWP_ENERGY_PERF_PREFERENCE_BIT	(1<<10)
+#define HWP_PACKAGE_LEVEL_REQUEST_BIT	(1<<11)
+
+/* IA32_HWP_CAPABILITIES */
+#define HWP_HIGHEST_PERF(x)		(x & 0xff)
+#define HWP_GUARANTEED_PERF(x)		((x & (0xff << 8)) >>8)
+#define HWP_MOSTEFFICIENT_PERF(x)	((x & (0xff << 16)) >>16)
+#define HWP_LOWEST_PERF(x)		((x & (0xff << 24)) >>24)
+
+/* IA32_HWP_REQUEST */
+#define HWP_MIN_PERF(x)			(x & 0xff)
+#define HWP_MAX_PERF(x)			((x & 0xff) << 8)
+#define HWP_DESIRED_PERF(x)		((x & 0xff) << 16)
+#define HWP_ENERGY_PERF_PREFERENCE(x)	((x & 0xff) << 24)
+#define HWP_ACTIVITY_WINDOW(x)		((x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x)		((x & 0x1) << 42)
+
+/* IA32_HWP_STATUS */
+#define HWP_GUARANTEED_CHANGE(x)	(x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM(x)	(x & 0x4)
+
+/* IA32_HWP_INTERRUPT */
+#define HWP_CHANGE_TO_GUARANTEED_INT(x)	(x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM_INT(x)	(x & 0x2)
+
 #define MSR_AMD64_MC0_MASK		0xc0010044

 #define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4*(x))
@@ -345,6 +384,8 @@

 #define MSR_IA32_TEMPERATURE_TARGET	0x000001a2

+#define MSR_MISC_PWR_MGMT		0x000001aa
+
 #define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
 #define ENERGY_PERF_BIAS_PERFORMANCE	0
 #define ENERGY_PERF_BIAS_NORMAL		6
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 27bb6d3877ed..ba35db092239 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -137,6 +137,7 @@ struct cpu_defaults {

 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
+static int hwp_active;

 struct perf_limits {
 	int no_turbo;
@@ -244,6 +245,34 @@ static inline void update_turbo_state(void)
 		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
 }

+#define PCT_TO_HWP(x) (x * 255 / 100)
+static void intel_pstate_hwp_set(void)
+{
+	int min, max, cpu;
+	u64 value, freq;
+
+	get_online_cpus();
+
+	for_each_online_cpu(cpu) {
+		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+		min = PCT_TO_HWP(limits.min_perf_pct);
+		value &= ~HWP_MIN_PERF(~0L);
+		value |= HWP_MIN_PERF(min);
+
+		max = PCT_TO_HWP(limits.max_perf_pct);
+		if (limits.no_turbo) {
+			rdmsrl( MSR_HWP_CAPABILITIES, freq);
+			max = HWP_GUARANTEED_PERF(freq);
+		}
+
+		value &= ~HWP_MAX_PERF(~0L);
+		value |= HWP_MAX_PERF(max);
+		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+	}
+
+	put_online_cpus();
+}
+
 /************************** debugfs begin ************************/
 static int pid_param_set(void *data, u64 val)
 {
@@ -279,6 +308,8 @@ static void __init intel_pstate_debug_expose_params(void)
 	struct dentry *debugfs_parent;
 	int i = 0;

+	if (hwp_active)
+		return;
 	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
 	if (IS_ERR_OR_NULL(debugfs_parent))
 		return;
@@ -329,8 +360,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
 		return -EPERM;
 	}
+
 	limits.no_turbo = clamp_t(int, input, 0, 1);

+	if (hwp_active)
+		intel_pstate_hwp_set();
+
 	return count;
 }

@@ -348,6 +383,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

+	if (hwp_active)
+		intel_pstate_hwp_set();
 	return count;
 }

@@ -363,6 +400,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
 	limits.min_perf_pct = clamp_t(int, input, 0 , 100);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

+	if (hwp_active)
+		intel_pstate_hwp_set();
 	return count;
 }

@@ -395,8 +434,16 @@ static void __init intel_pstate_sysfs_expose_params(void)
 	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
 	BUG_ON(rc);
 }
-
 /************************** sysfs end ************************/
+
+static void intel_pstate_hwp_enable(void)
+{
+	hwp_active++;
+	pr_info("intel_pstate HWP enabled\n");
+
+	wrmsrl( MSR_PM_ENABLE, 0x1);
+}
+
 static int byt_get_min_pstate(void)
 {
 	u64 value;
@@ -648,6 +695,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 	cpu->prev_mperf = mperf;
 }

+static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
+{
+	int delay;
+
+	delay = msecs_to_jiffies(50);
+	mod_timer_pinned(&cpu->timer, jiffies + delay);
+}
+
 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 {
 	int delay;
@@ -694,6 +749,14 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
 }

+static void intel_hwp_timer_func(unsigned long __data)
+{
+	struct cpudata *cpu = (struct cpudata *) __data;
+
+	intel_pstate_sample(cpu);
+	intel_hwp_set_sample_time(cpu);
+}
+
 static void intel_pstate_timer_func(unsigned long __data)
 {
 	struct cpudata *cpu = (struct cpudata *) __data;
@@ -737,6 +800,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

+static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
+	ICPU(0x56, core_params),
+	{}
+};
+
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
 	struct cpudata *cpu;
@@ -753,9 +821,14 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 	intel_pstate_get_cpu_pstates(cpu);

 	init_timer_deferrable(&cpu->timer);
-	cpu->timer.function = intel_pstate_timer_func;
 	cpu->timer.data = (unsigned long)cpu;
 	cpu->timer.expires = jiffies + HZ/100;
+
+	if (!hwp_active)
+		cpu->timer.function = intel_pstate_timer_func;
+	else
+		cpu->timer.function = intel_hwp_timer_func;
+
 	intel_pstate_busy_pid_reset(cpu);
 	intel_pstate_sample(cpu);
@@ -792,6 +865,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 		limits.no_turbo = 0;
 		return 0;
 	}
+
 	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
 	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
@@ -801,6 +875,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

+	if (hwp_active)
+		intel_pstate_hwp_set();
+
 	return 0;
 }

@@ -823,6 +900,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 	pr_info("intel_pstate CPU %d exiting\n", cpu_num);

 	del_timer_sync(&all_cpu_data[cpu_num]->timer);
+	if (hwp_active)
+		return;
+
 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 }

@@ -866,6 +946,7 @@ static struct cpufreq_driver intel_pstate_driver = {
 };

 static int __initdata no_load;
+static int __initdata no_hwp;

 static int intel_pstate_msrs_not_valid(void)
 {
@@ -959,6 +1040,15 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
 {
 	struct acpi_table_header hdr;
 	struct hw_vendor_info *v_info;
+	const struct x86_cpu_id *id;
+	u64 misc_pwr;
+
+	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
+	if (id) {
+		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
+		if ( misc_pwr & (1 << 8))
+			return true;
+	}

 	if (acpi_disabled ||
 	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
@@ -982,6 +1072,7 @@ static int __init intel_pstate_init(void)
 	int cpu, rc = 0;
 	const struct x86_cpu_id *id;
 	struct cpu_defaults *cpu_info;
+	struct cpuinfo_x86 *c = &boot_cpu_data;

 	if (no_load)
 		return -ENODEV;
@@ -1011,6 +1102,9 @@ static int __init intel_pstate_init(void)
 	if (!all_cpu_data)
 		return -ENOMEM;

+	if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
+		intel_pstate_hwp_enable();
+
 	rc = cpufreq_register_driver(&intel_pstate_driver);
 	if (rc)
 		goto out;
@@ -1041,6 +1135,8 @@ static int __init intel_pstate_setup(char *str)

 	if (!strcmp(str, "disable"))
 		no_load = 1;
+	if (!strcmp(str, "no_hwp"))
+		no_hwp = 1;
 	return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
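The read-modify-write in intel_pstate_hwp_set() is plain bit arithmetic on MSR_HWP_REQUEST. The standalone sketch below reproduces that arithmetic on the host so the resulting register image can be inspected; the macros are adapted (extra parentheses added) from the msr-index.h hunk above, and the initial register value is an arbitrary example, not real hardware state:

    /* Host-side sketch of the MSR_HWP_REQUEST update in intel_pstate_hwp_set(). */
    #include <stdint.h>
    #include <stdio.h>

    #define HWP_MIN_PERF(x)	((x) & 0xff)          /* bits 7:0  */
    #define HWP_MAX_PERF(x)	(((x) & 0xff) << 8)   /* bits 15:8 */
    #define PCT_TO_HWP(x)	((x) * 255 / 100)     /* percent -> 0..255 scale */

    int main(void)
    {
    	uint64_t value = 0x0a04;          /* example stale request value */
    	int min = PCT_TO_HWP(25);         /* min_perf_pct = 25  -> 63   */
    	int max = PCT_TO_HWP(100);        /* max_perf_pct = 100 -> 255  */

    	/* Clear then set the min-perf field, as the driver does. */
    	value &= ~(uint64_t)HWP_MIN_PERF(~0L);
    	value |= HWP_MIN_PERF(min);

    	/* Same for the max-perf field. */
    	value &= ~(uint64_t)HWP_MAX_PERF(~0L);
    	value |= HWP_MAX_PERF(max);

    	/* Prints 0xff3f: max perf 0xff in bits 15:8, min perf 0x3f in 7:0. */
    	printf("HWP_REQUEST = %#llx\n", (unsigned long long)value);
    	return 0;
    }

In the driver the resulting value is then written back with wrmsrl_on_cpu() for every online CPU, which is why a sysfs write to min_perf_pct or max_perf_pct takes effect immediately in HWP mode.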
From 43f8a966e91f387eabe85d2f2d12519c218f9dd0 Mon Sep 17 00:00:00 2001
From: Dirk Brandewie
Date: Thu, 6 Nov 2014 09:50:45 -0800
Subject: [PATCH 8/9] intel_pstate: Add CPUID for BDW-H CPU

Add BDW-H to the list of supported processors.

Signed-off-by: Dirk Brandewie
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/intel_pstate.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ba35db092239..ab2e100a1807 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -793,6 +793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x3f, core_params),
 	ICPU(0x45, core_params),
 	ICPU(0x46, core_params),
+	ICPU(0x47, core_params),
 	ICPU(0x4c, byt_params),
 	ICPU(0x4f, core_params),
 	ICPU(0x56, core_params),

From 7e7e8fe69820c6fa31395dbbd8e348e3c69cd2a9 Mon Sep 17 00:00:00 2001
From: Lenny Szubowicz
Date: Thu, 13 Nov 2014 13:51:52 -0500
Subject: [PATCH 9/9] cpufreq: pcc: Enable autoload of pcc-cpufreq for ACPI
 processors

The pcc-cpufreq driver is not automatically loaded on systems where
the platform's power management setting requires this driver. Instead,
on those systems no CPU frequency driver is registered and active.

Make the autoload matching criteria for the pcc-cpufreq driver the
same as done in acpi-cpufreq by commit c655affbd524d01 ("ACPI /
cpufreq: Add ACPI processor device IDs to acpi-cpufreq").

x86 CPU frequency drivers are now typically autoloaded by specifying
MODULE_DEVICE_TABLE entries and x86cpu model specific matching. But
pcc-cpufreq was omitted when acpi-cpufreq and other drivers were
changed to use this approach.

Both acpi-cpufreq and pcc-cpufreq depend on a distinct and mutually
exclusive set of ACPI methods which are not directly tied to specific
processor model numbers. Both of these drivers have init routines
which look for their required ACPI methods. As a result, only the
appropriate driver registers as the cpu frequency driver and the other
one ends up being unloaded.

Tested on various systems where acpi-cpufreq, intel_pstate, and
pcc-cpufreq are the expected cpu frequency drivers.

Signed-off-by: Lenny Szubowicz
Signed-off-by: Joseph Szczypek
Reported-by: Trinh Dao
Signed-off-by: Rafael J. Wysocki
---
 drivers/cpufreq/pcc-cpufreq.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 4d2c8e861089..2a0d58959acf 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -603,6 +603,13 @@ static void __exit pcc_cpufreq_exit(void)

 	free_percpu(pcc_cpu_info);
 }

+static const struct acpi_device_id processor_device_ids[] = {
+	{ACPI_PROCESSOR_OBJECT_HID, },
+	{ACPI_PROCESSOR_DEVICE_HID, },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
 MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
 MODULE_VERSION(PCC_VERSION);
 MODULE_DESCRIPTION("Processor Clocking Control interface driver");
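For contrast with the ACPI HID table above, the x86cpu-based autoload style that the changelog mentions looks roughly like the following in acpi-cpufreq (reproduced from memory of commit c655affbd524d01, so treat it as a sketch rather than a verbatim quote); pcc-cpufreq cannot use this style because its applicability depends on which ACPI methods the firmware exposes, not on CPU model or feature bits:

    /* Sketch of acpi-cpufreq's feature-bit-based autoload table. */
    static const struct x86_cpu_id acpi_cpufreq_ids[] = {
    	X86_FEATURE_MATCH(X86_FEATURE_ACPI),        /* Intel: ACPI P-states */
    	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),   /* AMD hardware P-states */
    	{}
    };
    MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

Either table causes udev to load the module via the generated modaliases; the runtime init routine then decides whether the driver actually registers, so loading both acpi-cpufreq and pcc-cpufreq on the same system is harmless.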