Merge branch 'pm-cpufreq'
Merge cpufreq changes for 6.1-rc1:

 - Add support for Tiger Lake in no-HWP mode to intel_pstate (Doug
   Smythies).

 - Update the AMD P-state driver (Perry Yuan):
   * Fix wrong lowest perf fetch.
   * Map desired perf into pstate scope for powersave governor.
   * Update pstate frequency transition delay time.
   * Fix initial highest_perf value.
   * Clean up.

 - Move max CPU capacity to sugov_policy in the schedutil cpufreq
   governor (Lukasz Luba).

 - Add SM6115 to cpufreq-dt blocklist (Adam Skladowski).

 - Add support for Tegra239 and minor cleanups (Sumit Gupta, ye
   xingchen, and Yang Yingliang).

 - Add freq qos for qcom cpufreq driver and minor cleanups (Xuewen
   Yan, and Viresh Kumar).

 - Minor cleanups around functions called at module_init() (Xiu
   Jianfeng).

 - Use module_init and add module_exit for bmips driver (Zhang
   Jianhua).

* pm-cpufreq:
  cpufreq: qcom-cpufreq-hw: Add cpufreq qos for LMh
  cpufreq: Add __init annotation to module init funcs
  cpufreq: tegra194: change tegra239_cpufreq_soc to static
  cpufreq: tegra194: Add support for Tegra239
  cpufreq: qcom-cpufreq-hw: Fix uninitialized throttled_freq warning
  cpufreq: intel_pstate: Add Tigerlake support in no-HWP mode
  cpufreq: amd-pstate: Fix initial highest_perf value
  cpufreq: tegra194: Remove the unneeded result variable
  cpufreq: amd-pstate: update pstate frequency transition delay time
  cpufreq: amd_pstate: map desired perf into pstate scope for powersave governor
  cpufreq: amd_pstate: fix wrong lowest perf fetch
  cpufreq: amd-pstate: fix white-space
  cpufreq: amd-pstate: simplify cpudata pointer assignment
  cpufreq: bmips-cpufreq: Use module_init and add module_exit
  cpufreq: schedutil: Move max CPU capacity to sugov_policy
  cpufreq: Add SM6115 to cpufreq-dt-platdev blocklist
commit 0766fa2e8a
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -46,8 +46,8 @@
 #include <asm/cpu_device_id.h>
 
 #include "amd-pstate-trace.h"
 
-#define AMD_PSTATE_TRANSITION_LATENCY	0x20000
-#define AMD_PSTATE_TRANSITION_DELAY	500
+#define AMD_PSTATE_TRANSITION_LATENCY	20000
+#define AMD_PSTATE_TRANSITION_DELAY	1000
 
 /*
  * TODO: We need more time to fine tune processors with shared memory solution
@@ -120,7 +120,7 @@ struct amd_cpudata {
 	struct amd_aperf_mperf cur;
 	struct amd_aperf_mperf prev;
 
-	u64 	freq;
+	u64	freq;
 	bool	boost_supported;
 };
 
@@ -152,6 +152,7 @@ static inline int amd_pstate_enable(bool enable)
 static int pstate_init_perf(struct amd_cpudata *cpudata)
 {
 	u64 cap1;
+	u32 highest_perf;
 
 	int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
 				     &cap1);
@@ -163,7 +164,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
 	 *
 	 * CPPC entry doesn't indicate the highest performance in some ASICs.
 	 */
-	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+	highest_perf = amd_get_highest_perf();
+	if (highest_perf > AMD_CPPC_HIGHEST_PERF(cap1))
+		highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
+
+	WRITE_ONCE(cpudata->highest_perf, highest_perf);
 
 	WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
@@ -175,12 +180,17 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
 static int cppc_init_perf(struct amd_cpudata *cpudata)
 {
 	struct cppc_perf_caps cppc_perf;
+	u32 highest_perf;
 
 	int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
 	if (ret)
 		return ret;
 
-	WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());
+	highest_perf = amd_get_highest_perf();
+	if (highest_perf > cppc_perf.highest_perf)
+		highest_perf = cppc_perf.highest_perf;
+
+	WRITE_ONCE(cpudata->highest_perf, highest_perf);
 
 	WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
 	WRITE_ONCE(cpudata->lowest_nonlinear_perf,
@@ -269,6 +279,7 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
 	u64 prev = READ_ONCE(cpudata->cppc_req_cached);
 	u64 value = prev;
 
+	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
 	value &= ~AMD_CPPC_MIN_PERF(~0L);
 	value |= AMD_CPPC_MIN_PERF(min_perf);
 
@@ -312,7 +323,7 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
 		return -ENODEV;
 
 	cap_perf = READ_ONCE(cpudata->highest_perf);
-	min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+	min_perf = READ_ONCE(cpudata->lowest_perf);
 	max_perf = cap_perf;
 
 	freqs.old = policy->cur;
@@ -357,8 +368,6 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
 	if (max_perf < min_perf)
 		max_perf = min_perf;
 
-	des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
-
 	amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
 }
 
@@ -555,9 +564,7 @@ free_cpudata1:
 
 static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct amd_cpudata *cpudata;
-
-	cpudata = policy->driver_data;
+	struct amd_cpudata *cpudata = policy->driver_data;
 
 	freq_qos_remove_request(&cpudata->req[1]);
 	freq_qos_remove_request(&cpudata->req[0]);
@@ -599,9 +606,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
 					char *buf)
 {
 	int max_freq;
-	struct amd_cpudata *cpudata;
-
-	cpudata = policy->driver_data;
+	struct amd_cpudata *cpudata = policy->driver_data;
 
 	max_freq = amd_get_max_freq(cpudata);
 	if (max_freq < 0)
@@ -614,9 +619,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
 					char *buf)
 {
 	int freq;
-	struct amd_cpudata *cpudata;
-
-	cpudata = policy->driver_data;
+	struct amd_cpudata *cpudata = policy->driver_data;
 
 	freq = amd_get_lowest_nonlinear_freq(cpudata);
 	if (freq < 0)
@@ -662,7 +665,7 @@ static struct cpufreq_driver amd_pstate_driver = {
 	.resume		= amd_pstate_cpu_resume,
 	.set_boost	= amd_pstate_set_boost,
 	.name		= "amd-pstate",
-	.attr           = amd_pstate_attr,
+	.attr		= amd_pstate_attr,
 };
 
 static int __init amd_pstate_init(void)
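Note on the amd-pstate hunks above: both perf-init backends (MSR and
shared-memory CPPC) now cap the value returned by amd_get_highest_perf()
at what the hardware capability reports, and the des_perf clamp moves
into amd_pstate_update() so every caller, including the powersave
governor path that now starts from lowest_perf, passes through the same
bound. A minimal userspace sketch of that logic (hypothetical names,
not the driver's code):

	#include <stdint.h>

	/* Cap the architecturally reported highest perf at the CPPC
	 * capability, mirroring pstate_init_perf()/cppc_init_perf(). */
	static uint32_t cap_highest_perf(uint32_t arch_highest,
					 uint32_t cppc_cap_highest)
	{
		return arch_highest > cppc_cap_highest ? cppc_cap_highest
						       : arch_highest;
	}

	/* Single clamp applied where all requests funnel through,
	 * as amd_pstate_update() now does with clamp_t(). */
	static uint32_t clamp_des_perf(uint32_t des, uint32_t lo, uint32_t hi)
	{
		return des < lo ? lo : (des > hi ? hi : des);
	}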
--- a/drivers/cpufreq/bmips-cpufreq.c
+++ b/drivers/cpufreq/bmips-cpufreq.c
@@ -156,7 +156,7 @@ static struct cpufreq_driver bmips_cpufreq_driver = {
 	.name		= BMIPS_CPUFREQ_PREFIX,
 };
 
-static int __init bmips_cpufreq_probe(void)
+static int __init bmips_cpufreq_driver_init(void)
 {
 	struct cpufreq_compat *cc;
 	struct device_node *np;
@@ -176,7 +176,13 @@ static int __init bmips_cpufreq_probe(void)
 
 	return cpufreq_register_driver(&bmips_cpufreq_driver);
 }
-device_initcall(bmips_cpufreq_probe);
+module_init(bmips_cpufreq_driver_init);
+
+static void __exit bmips_cpufreq_driver_exit(void)
+{
+	cpufreq_unregister_driver(&bmips_cpufreq_driver);
+}
+module_exit(bmips_cpufreq_driver_exit);
 
 MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
 MODULE_DESCRIPTION("CPUfreq driver for Broadcom BMIPS SoCs");
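The bmips change swaps device_initcall() for the module_init()/
module_exit() pair so the driver can also be built and unloaded as a
module. A minimal sketch of the pattern (illustrative names, not the
driver's):

	#include <linux/module.h>
	#include <linux/cpufreq.h>

	static struct cpufreq_driver example_driver; /* callbacks set elsewhere */

	static int __init example_driver_init(void)
	{
		/* Runs at boot when built in, or at insmod time. */
		return cpufreq_register_driver(&example_driver);
	}
	module_init(example_driver_init);

	static void __exit example_driver_exit(void)
	{
		/* Only reachable when built as a module (rmmod). */
		cpufreq_unregister_driver(&example_driver);
	}
	module_exit(example_driver_exit);

	MODULE_LICENSE("GPL");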
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -146,6 +146,7 @@ static const struct of_device_id blocklist[] __initconst = {
 	{ .compatible = "qcom,sc8180x", },
 	{ .compatible = "qcom,sc8280xp", },
 	{ .compatible = "qcom,sdm845", },
+	{ .compatible = "qcom,sm6115", },
 	{ .compatible = "qcom,sm6350", },
 	{ .compatible = "qcom,sm8150", },
 	{ .compatible = "qcom,sm8250", },
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -55,7 +55,7 @@ static struct notifier_block hb_cpufreq_clk_nb = {
 	.notifier_call = hb_cpufreq_clk_notify,
 };
 
-static int hb_cpufreq_driver_init(void)
+static int __init hb_cpufreq_driver_init(void)
 {
 	struct platform_device_info devinfo = { .name = "cpufreq-dt", };
 	struct device *cpu_dev;
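The highbank hunk above, like the sti and ti ones below, only adds
__init to a function that is referenced solely from an initcall. A
short sketch of what the annotation buys (hypothetical function name):

	#include <linux/init.h>

	/* __init places the function in .init.text, which the kernel
	 * frees once boot completes; it must never be called again. */
	static int __init example_once_init(void)
	{
		return 0;	/* one-shot setup only */
	}
	device_initcall(example_once_init);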
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2416,6 +2416,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	X86_MATCH(SKYLAKE_X,		core_funcs),
 	X86_MATCH(COMETLAKE,		core_funcs),
 	X86_MATCH(ICELAKE_X,		core_funcs),
+	X86_MATCH(TIGERLAKE,		core_funcs),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
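The new TIGERLAKE entry lets intel_pstate drive Tiger Lake parts that
boot with HWP disabled; with HWP active the driver does not rely on
this table. For orientation, the driver-local X86_MATCH() wrapper
expands roughly as below (quoted from memory, so treat the exact form
as an assumption):

	#define X86_MATCH(model, policy)				 \
		X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,		 \
			INTEL_FAM6_##model, X86_FEATURE_APERFMPERF,	 \
			&policy)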
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -13,6 +13,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/units.h>
@@ -56,6 +57,8 @@ struct qcom_cpufreq_data {
 	struct cpufreq_policy *policy;
 
 	bool per_core_dcvs;
+
+	struct freq_qos_request throttle_freq_req;
 };
 
 static unsigned long cpu_hw_rate, xo_rate;
@@ -316,14 +319,16 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
 	if (IS_ERR(opp)) {
 		dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
 	} else {
-		throttled_freq = freq_hz / HZ_PER_KHZ;
-
-		/* Update thermal pressure (the boost frequencies are accepted) */
-		arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
-
 		dev_pm_opp_put(opp);
 	}
 
+	throttled_freq = freq_hz / HZ_PER_KHZ;
+
+	freq_qos_update_request(&data->throttle_freq_req, throttled_freq);
+
+	/* Update thermal pressure (the boost frequencies are accepted) */
+	arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+
 	/*
 	 * In the unlikely case policy is unregistered do not enable
 	 * polling or h/w interrupt
@@ -413,6 +418,14 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
 	if (data->throttle_irq < 0)
 		return data->throttle_irq;
 
+	ret = freq_qos_add_request(&policy->constraints,
+				   &data->throttle_freq_req, FREQ_QOS_MAX,
+				   FREQ_QOS_MAX_DEFAULT_VALUE);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to add freq constraint (%d)\n", ret);
+		return ret;
+	}
+
 	data->cancel_throttle = false;
 	data->policy = policy;
 
@@ -479,6 +492,7 @@ static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
 	if (data->throttle_irq <= 0)
 		return;
 
+	freq_qos_remove_request(&data->throttle_freq_req);
 	free_irq(data->throttle_irq, data);
 }
 
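The LMh change gives thermal throttling a proper seat at the cpufreq
table: a FREQ_QOS_MAX request is registered per policy, lowered to the
throttled frequency whenever the LMh interrupt fires, and dropped on
teardown, so the effective max frequency is aggregated with every other
QoS user instead of being applied out of band. A hedged sketch of that
lifecycle (names illustrative, error handling trimmed):

	#include <linux/cpufreq.h>
	#include <linux/pm_qos.h>

	static struct freq_qos_request throttle_req;

	static int throttle_setup(struct cpufreq_policy *policy)
	{
		/* FREQ_QOS_MAX_DEFAULT_VALUE means "no cap yet". */
		return freq_qos_add_request(&policy->constraints,
					    &throttle_req, FREQ_QOS_MAX,
					    FREQ_QOS_MAX_DEFAULT_VALUE);
	}

	static void throttle_notify(unsigned int throttled_khz)
	{
		/* Aggregated with other MAX requests; the lowest wins. */
		freq_qos_update_request(&throttle_req, throttled_khz);
	}

	static void throttle_teardown(void)
	{
		freq_qos_remove_request(&throttle_req);
	}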
--- a/drivers/cpufreq/sti-cpufreq.c
+++ b/drivers/cpufreq/sti-cpufreq.c
@@ -252,7 +252,7 @@ static int sti_cpufreq_fetch_syscon_registers(void)
 	return 0;
 }
 
-static int sti_cpufreq_init(void)
+static int __init sti_cpufreq_init(void)
 {
 	int ret;
 
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -38,14 +38,6 @@
 /* cpufreq transisition latency */
 #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
 
-enum cluster {
-	CLUSTER0,
-	CLUSTER1,
-	CLUSTER2,
-	CLUSTER3,
-	MAX_CLUSTERS,
-};
-
 struct tegra_cpu_ctr {
 	u32 cpu;
 	u32 coreclk_cnt, last_coreclk_cnt;
@@ -67,12 +59,12 @@ struct tegra_cpufreq_ops {
 struct tegra_cpufreq_soc {
 	struct tegra_cpufreq_ops *ops;
 	int maxcpus_per_cluster;
+	unsigned int num_clusters;
 	phys_addr_t actmon_cntr_base;
 };
 
 struct tegra194_cpufreq_data {
 	void __iomem *regs;
-	size_t num_clusters;
 	struct cpufreq_frequency_table **tables;
 	const struct tegra_cpufreq_soc *soc;
 };
@@ -166,6 +158,14 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
 	.ops = &tegra234_cpufreq_ops,
 	.actmon_cntr_base = 0x9000,
 	.maxcpus_per_cluster = 4,
+	.num_clusters = 3,
 };
 
+static const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
+	.ops = &tegra234_cpufreq_ops,
+	.actmon_cntr_base = 0x4000,
+	.maxcpus_per_cluster = 8,
+	.num_clusters = 1,
+};
+
 static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
@@ -314,11 +314,7 @@ static void tegra194_get_cpu_ndiv_sysreg(void *ndiv)
 
 static int tegra194_get_cpu_ndiv(u32 cpu, u32 cpuid, u32 clusterid, u64 *ndiv)
 {
-	int ret;
-
-	ret = smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
-
-	return ret;
+	return smp_call_function_single(cpu, tegra194_get_cpu_ndiv_sysreg, &ndiv, true);
 }
 
 static void tegra194_set_cpu_ndiv_sysreg(void *data)
@@ -382,7 +378,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
 
 	data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
 
-	if (clusterid >= data->num_clusters || !data->tables[clusterid])
+	if (clusterid >= data->soc->num_clusters || !data->tables[clusterid])
 		return -EINVAL;
 
 	start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
@@ -433,6 +429,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
 static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
 	.ops = &tegra194_cpufreq_ops,
 	.maxcpus_per_cluster = 2,
+	.num_clusters = 4,
 };
 
 static void tegra194_cpufreq_free_resources(void)
@@ -525,15 +522,14 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
 
 	soc = of_device_get_match_data(&pdev->dev);
 
-	if (soc->ops && soc->maxcpus_per_cluster) {
+	if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
 		data->soc = soc;
 	} else {
 		dev_err(&pdev->dev, "soc data missing\n");
 		return -EINVAL;
 	}
 
-	data->num_clusters = MAX_CLUSTERS;
-	data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
+	data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
 				    sizeof(*data->tables), GFP_KERNEL);
 	if (!data->tables)
 		return -ENOMEM;
@@ -558,7 +554,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
 		goto put_bpmp;
 	}
 
-	for (i = 0; i < data->num_clusters; i++) {
+	for (i = 0; i < data->soc->num_clusters; i++) {
 		data->tables[i] = init_freq_table(pdev, bpmp, i);
 		if (IS_ERR(data->tables[i])) {
 			err = PTR_ERR(data->tables[i]);
@@ -590,6 +586,7 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
 static const struct of_device_id tegra194_cpufreq_of_match[] = {
 	{ .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
 	{ .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
+	{ .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
 	{ /* sentinel */ }
 };
 
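With the cluster count moved from a hard-coded enum into the per-SoC
descriptor, supporting Tegra239 becomes a data-only change: one new
descriptor plus one OF match entry. Sketch of the descriptor shape,
with the Tegra239 values taken from the hunks above (struct and
variable names altered to mark this as an illustration):

	/* Per-SoC topology: code stays generic, chips differ by data. */
	struct tegra_soc_desc {
		int maxcpus_per_cluster;
		unsigned int num_clusters;
		unsigned long actmon_cntr_base;
	};

	static const struct tegra_soc_desc tegra239_desc = {
		.maxcpus_per_cluster = 8,	/* one 8-CPU cluster */
		.num_clusters = 1,
		.actmon_cntr_base = 0x4000,
	};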
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -398,7 +398,7 @@ fail_put_node:
 	return ret;
 }
 
-static int ti_cpufreq_init(void)
+static int __init ti_cpufreq_init(void)
 {
 	const struct of_device_id *match;
 
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -25,6 +25,9 @@ struct sugov_policy {
 	unsigned int		next_freq;
 	unsigned int		cached_raw_freq;
 
+	/* max CPU capacity, which is equal for all CPUs in freq. domain */
+	unsigned long		max;
+
 	/* The next fields are only needed if fast switch cannot be used: */
 	struct irq_work		irq_work;
 	struct kthread_work	work;
@@ -48,7 +51,6 @@ struct sugov_cpu {
 
 	unsigned long		util;
 	unsigned long		bw_dl;
-	unsigned long		max;
 
 	/* The field below is for single-CPU policies only: */
 #ifdef CONFIG_NO_HZ_COMMON
@@ -158,7 +160,6 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
 
-	sg_cpu->max = arch_scale_cpu_capacity(sg_cpu->cpu);
 	sg_cpu->bw_dl = cpu_bw_dl(rq);
 	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu),
 					  FREQUENCY_UTIL, NULL);
@@ -253,6 +254,7 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
  */
 static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
 {
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	unsigned long boost;
 
 	/* No boost currently required */
@@ -280,7 +282,8 @@ static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
 	 * sg_cpu->util is already in capacity scale; convert iowait_boost
 	 * into the same scale so we can compare.
 	 */
-	boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
+	boost = sg_cpu->iowait_boost * sg_policy->max;
+	boost >>= SCHED_CAPACITY_SHIFT;
 	boost = uclamp_rq_util_with(cpu_rq(sg_cpu->cpu), boost, NULL);
 	if (sg_cpu->util < boost)
 		sg_cpu->util = boost;
@@ -337,7 +340,7 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
 	if (!sugov_update_single_common(sg_cpu, time, flags))
 		return;
 
-	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
+	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_policy->max);
 	/*
 	 * Do not reduce the frequency if the CPU has not been idle
 	 * recently, as the reduction is likely to be premature then.
@@ -373,6 +376,7 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
 					     unsigned int flags)
 {
 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	unsigned long prev_util = sg_cpu->util;
 
 	/*
@@ -399,7 +403,8 @@ static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
 		sg_cpu->util = prev_util;
 
 	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
-				   map_util_perf(sg_cpu->util), sg_cpu->max);
+				   map_util_perf(sg_cpu->util),
+				   sg_policy->max);
 
 	sg_cpu->sg_policy->last_freq_update_time = time;
 }
@@ -408,25 +413,19 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 {
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	struct cpufreq_policy *policy = sg_policy->policy;
-	unsigned long util = 0, max = 1;
+	unsigned long util = 0;
 	unsigned int j;
 
 	for_each_cpu(j, policy->cpus) {
 		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
-		unsigned long j_util, j_max;
 
 		sugov_get_util(j_sg_cpu);
 		sugov_iowait_apply(j_sg_cpu, time);
-		j_util = j_sg_cpu->util;
-		j_max = j_sg_cpu->max;
 
-		if (j_util * max > j_max * util) {
-			util = j_util;
-			max = j_max;
-		}
+		util = max(j_sg_cpu->util, util);
 	}
 
-	return get_next_freq(sg_policy, util, max);
+	return get_next_freq(sg_policy, util, sg_policy->max);
 }
 
 static void
@@ -752,7 +751,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;
 	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
-	unsigned int cpu;
+	unsigned int cpu = cpumask_first(policy->cpus);
 
 	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
 	sg_policy->last_freq_update_time = 0;
@@ -760,6 +759,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 	sg_policy->work_in_progress = false;
 	sg_policy->limits_changed = false;
 	sg_policy->cached_raw_freq = 0;
+	sg_policy->max = arch_scale_cpu_capacity(cpu);
 
 	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
 
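Rationale for the schedutil hunks: all CPUs in a cpufreq policy share
one frequency domain (and, per this patch, one capacity), so the
per-CPU max was redundant; it is now read once in sugov_start() and
kept in sugov_policy, which also lets sugov_next_freq_shared() reduce
to a plain max over utilizations. A small userspace sketch of the
resulting frequency mapping (mirrors map_util_freq()'s 1.25x headroom;
numbers are only an example):

	#include <stdio.h>

	/* freq = 1.25 * max_freq * util / capacity */
	static unsigned int next_freq(unsigned long util, unsigned long cap,
				      unsigned int max_freq_khz)
	{
		return (max_freq_khz + (max_freq_khz >> 2)) * util / cap;
	}

	int main(void)
	{
		/* util 512 of capacity 1024 on a 2 GHz policy -> 1.25 GHz */
		printf("%u kHz\n", next_freq(512, 1024, 2000000));
		return 0;
	}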