/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
2016-05-18 20:25:28 +08:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2016-04-02 07:09:12 +08:00
|
|
|
#include <linux/cpufreq.h>
|
2016-11-15 16:23:22 +08:00
|
|
|
#include <linux/kthread.h>
|
2017-02-02 01:07:51 +08:00
|
|
|
#include <uapi/linux/sched/types.h>
|
2016-04-02 07:09:12 +08:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <trace/events/power.h>
|
|
|
|
|
|
|
|
#include "sched.h"
|
|
|
|
|
|
|
|
struct sugov_tunables {
|
|
|
|
struct gov_attr_set attr_set;
|
|
|
|
unsigned int rate_limit_us;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct sugov_policy {
|
|
|
|
struct cpufreq_policy *policy;
|
|
|
|
|
|
|
|
struct sugov_tunables *tunables;
|
|
|
|
struct list_head tunables_hook;
|
|
|
|
|
|
|
|
raw_spinlock_t update_lock; /* For shared policies */
|
|
|
|
u64 last_freq_update_time;
|
|
|
|
s64 freq_update_delay_ns;
|
|
|
|
unsigned int next_freq;
|
2017-03-02 16:33:20 +08:00
|
|
|
unsigned int cached_raw_freq;
|
2016-04-02 07:09:12 +08:00
|
|
|
|
|
|
|
/* The next fields are only needed if fast switch cannot be used. */
|
|
|
|
struct irq_work irq_work;
|
2016-11-15 16:23:22 +08:00
|
|
|
struct kthread_work work;
|
2016-04-02 07:09:12 +08:00
|
|
|
struct mutex work_lock;
|
2016-11-15 16:23:22 +08:00
|
|
|
struct kthread_worker worker;
|
|
|
|
struct task_struct *thread;
|
2016-04-02 07:09:12 +08:00
|
|
|
bool work_in_progress;
|
|
|
|
|
|
|
|
bool need_freq_update;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct sugov_cpu {
|
|
|
|
struct update_util_data update_util;
|
|
|
|
struct sugov_policy *sg_policy;
|
sched: cpufreq: Allow remote cpufreq callbacks
With Android UI and benchmarks the latency of cpufreq response to
certain scheduling events can become very critical. Currently, callbacks
into cpufreq governors are only made from the scheduler if the target
CPU of the event is the same as the current CPU. This means there are
certain situations where a target CPU may not run the cpufreq governor
for some time.
One testcase to show this behavior is where a task starts running on
CPU0, then a new task is also spawned on CPU0 by a task on CPU1. If the
system is configured such that the new tasks should receive maximum
demand initially, this should result in CPU0 increasing frequency
immediately. But because of the above mentioned limitation though, this
does not occur.
This patch updates the scheduler core to call the cpufreq callbacks for
remote CPUs as well.
The schedutil, ondemand and conservative governors are updated to
process cpufreq utilization update hooks called for remote CPUs where
the remote CPU is managed by the cpufreq policy of the local CPU.
The intel_pstate driver is updated to always reject remote callbacks.
This is tested with couple of usecases (Android: hackbench, recentfling,
galleryfling, vellamo, Ubuntu: hackbench) on ARM hikey board (64 bit
octa-core, single policy). Only galleryfling showed minor improvements,
while others didn't had much deviation.
The reason being that this patch only targets a corner case, where
following are required to be true to improve performance and that
doesn't happen too often with these tests:
- Task is migrated to another CPU.
- The task has high demand, and should take the target CPU to higher
OPPs.
- And the target CPU doesn't call into the cpufreq governor until the
next tick.
Based on initial work from Steve Muckle.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Acked-by: Saravana Kannan <skannan@codeaurora.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2017-07-28 14:46:38 +08:00
|
|
|
unsigned int cpu;
|
2016-04-02 07:09:12 +08:00
|
|
|
|
2017-07-23 23:54:25 +08:00
|
|
|
bool iowait_boost_pending;
|
2017-07-23 23:54:26 +08:00
|
|
|
unsigned int iowait_boost;
|
|
|
|
unsigned int iowait_boost_max;
|
2016-09-10 06:00:31 +08:00
|
|
|
u64 last_update;
|
2016-07-14 04:25:26 +08:00
|
|
|
|
2016-04-02 07:09:12 +08:00
|
|
|
/* The fields below are only needed when sharing a policy. */
|
2017-12-04 18:23:21 +08:00
|
|
|
unsigned long util_cfs;
|
|
|
|
unsigned long util_dl;
|
2016-04-02 07:09:12 +08:00
|
|
|
unsigned long max;
|
2016-08-17 04:14:55 +08:00
|
|
|
unsigned int flags;
|
cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
The way the schedutil governor uses the PELT metric causes it to
underestimate the CPU utilization in some cases.
That can be easily demonstrated by running kernel compilation on
a Sandy Bridge Intel processor, running turbostat in parallel with
it and looking at the values written to the MSR_IA32_PERF_CTL
register. Namely, the expected result would be that when all CPUs
were 100% busy, all of them would be requested to run in the maximum
P-state, but observation shows that this clearly isn't the case.
The CPUs run in the maximum P-state for a while and then are
requested to run slower and go back to the maximum P-state after
a while again. That causes the actual frequency of the processor to
visibly oscillate below the sustainable maximum in a jittery fashion
which clearly is not desirable.
That has been attributed to CPU utilization metric updates on task
migration that cause the total utilization value for the CPU to be
reduced by the utilization of the migrated task. If that happens,
the schedutil governor may see a CPU utilization reduction and will
attempt to reduce the CPU frequency accordingly right away. That
may be premature, though, for example if the system is generally
busy and there are other runnable tasks waiting to be run on that
CPU already.
This is unlikely to be an issue on systems where cpufreq policies are
shared between multiple CPUs, because in those cases the policy
utilization is computed as the maximum of the CPU utilization values
over the whole policy and if that turns out to be low, reducing the
frequency for the policy most likely is a good idea anyway. On
systems with one CPU per policy, however, it may affect performance
adversely and even lead to increased energy consumption in some cases.
On those systems it may be addressed by taking another utilization
metric into consideration, like whether or not the CPU whose
frequency is about to be reduced has been idle recently, because if
that's not the case, the CPU is likely to be busy in the near future
and its frequency should not be reduced.
To that end, use the counter of idle calls in the timekeeping code.
Namely, make the schedutil governor look at that counter for the
current CPU every time before its frequency is about to be reduced.
If the counter has not changed since the previous iteration of the
governor computations for that CPU, the CPU has been busy for all
that time and its frequency should not be decreased, so if the new
frequency would be lower than the one set previously, the governor
will skip the frequency update.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes <joelaf@google.com>
2017-03-22 07:08:50 +08:00
|
|
|
|
|
|
|
/* The field below is for single-CPU policies only. */
|
|
|
|
#ifdef CONFIG_NO_HZ_COMMON
|
|
|
|
unsigned long saved_idle_calls;
|
|
|
|
#endif
|
2016-04-02 07:09:12 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
/* One governor state instance per CPU. */
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
|
|
|
|
|
|
|
|
/************************ Governor internals ***********************/
|
|
|
|
|
|
|
|
/*
 * Decide whether a frequency update should be carried out now.
 *
 * Returns true when an update is both possible (the driver can act on a
 * request from this CPU) and due (forced by a limits change, or the
 * rate-limit interval has elapsed).
 */
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * cpufreq_update_util() is called with rq->lock held for the target
	 * CPU, so all per-cpu data accessed here is fully serialized.
	 *
	 * Drivers, however, cannot in general deal with cross-cpu requests:
	 * get_next_freq() would work, but the sugov_update_commit() call may
	 * not on fast switching platforms.
	 *
	 * Hence bail out for remote requests when the hardware does not
	 * support them - computing a frequency we cannot act on is pointless.
	 *
	 * Slow switching platforms are fine: the kthread is always scheduled
	 * on the right set of CPUs, so any CPU can find the next frequency
	 * and schedule the kthread.
	 */
	if (sg_policy->policy->fast_switch_enabled &&
	    !cpufreq_can_do_remote_dvfs(sg_policy->policy))
		return false;

	/* A previously requested switch is still being processed. */
	if (sg_policy->work_in_progress)
		return false;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * Limits have changed: invalidate the previous next_freq
		 * value and force an update regardless of the rate limit.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	/* Otherwise only update once per freq_update_delay_ns. */
	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->freq_update_delay_ns;
}
|
|
|
|
|
|
|
|
/*
 * Commit a new frequency choice: either fast-switch from this context or
 * hand the request off to the governor kthread via irq_work.
 */
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	/* Nothing to do if the frequency would not actually change. */
	if (sg_policy->next_freq == next_freq)
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (!policy->fast_switch_enabled) {
		/* Defer the actual switch to the governor kthread. */
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
		return;
	}

	/* Fast switch: the driver applies the frequency right here. */
	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
	if (!next_freq)
		return;

	policy->cur = next_freq;
	trace_cpu_frequency(next_freq, smp_processor_id());
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* get_next_freq - Compute a new frequency for a given cpufreq policy.
|
2017-03-02 16:33:21 +08:00
|
|
|
* @sg_policy: schedutil policy object to compute the new frequency for.
|
2016-04-02 07:09:12 +08:00
|
|
|
* @util: Current CPU utilization.
|
|
|
|
* @max: CPU capacity.
|
|
|
|
*
|
|
|
|
* If the utilization is frequency-invariant, choose the new frequency to be
|
|
|
|
* proportional to it, that is
|
|
|
|
*
|
|
|
|
* next_freq = C * max_freq * util / max
|
|
|
|
*
|
|
|
|
* Otherwise, approximate the would-be frequency-invariant utilization by
|
|
|
|
* util_raw * (curr_freq / max_freq) which leads to
|
|
|
|
*
|
|
|
|
* next_freq = C * curr_freq * util_raw / max
|
|
|
|
*
|
|
|
|
* Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
|
2016-07-14 04:25:26 +08:00
|
|
|
*
|
|
|
|
* The lowest driver-supported frequency which is equal or greater than the raw
|
|
|
|
* next_freq (as calculated above) is returned, subject to policy min/max and
|
|
|
|
* cpufreq driver limitations.
|
2016-04-02 07:09:12 +08:00
|
|
|
*/
|
2017-03-02 16:33:21 +08:00
|
|
|
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
|
|
|
|
unsigned long util, unsigned long max)
|
2016-04-02 07:09:12 +08:00
|
|
|
{
|
2016-07-14 04:25:26 +08:00
|
|
|
struct cpufreq_policy *policy = sg_policy->policy;
|
2016-04-02 07:09:12 +08:00
|
|
|
unsigned int freq = arch_scale_freq_invariant() ?
|
|
|
|
policy->cpuinfo.max_freq : policy->cur;
|
|
|
|
|
2016-07-14 04:25:26 +08:00
|
|
|
freq = (freq + (freq >> 2)) * util / max;
|
|
|
|
|
2017-03-02 16:33:20 +08:00
|
|
|
if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
|
2016-07-14 04:25:26 +08:00
|
|
|
return sg_policy->next_freq;
|
2017-03-02 16:33:20 +08:00
|
|
|
sg_policy->cached_raw_freq = freq;
|
2016-07-14 04:25:26 +08:00
|
|
|
return cpufreq_driver_resolve_freq(policy, freq);
|
2016-04-02 07:09:12 +08:00
|
|
|
}
|
|
|
|
|
2017-12-04 18:23:21 +08:00
|
|
|
static void sugov_get_util(struct sugov_cpu *sg_cpu)
|
2016-08-17 04:14:55 +08:00
|
|
|
{
|
2017-12-04 18:23:21 +08:00
|
|
|
struct rq *rq = cpu_rq(sg_cpu->cpu);
|
2016-08-27 02:40:47 +08:00
|
|
|
|
2017-12-04 18:23:21 +08:00
|
|
|
sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
|
|
|
|
sg_cpu->util_cfs = cpu_util_cfs(rq);
|
|
|
|
sg_cpu->util_dl = cpu_util_dl(rq);
|
|
|
|
}
|
2016-08-17 04:14:55 +08:00
|
|
|
|
2017-12-04 18:23:21 +08:00
|
|
|
static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
|
|
|
|
{
|
2017-12-04 18:23:18 +08:00
|
|
|
/*
|
|
|
|
* Ideally we would like to set util_dl as min/guaranteed freq and
|
|
|
|
* util_cfs + util_dl as requested freq. However, cpufreq is not yet
|
|
|
|
* ready for such an interface. So, we only do the latter for now.
|
|
|
|
*/
|
2017-12-04 18:23:21 +08:00
|
|
|
return min(sg_cpu->util_cfs + sg_cpu->util_dl, sg_cpu->max);
|
2016-08-17 04:14:55 +08:00
|
|
|
}
|
|
|
|
|
2017-12-13 17:53:22 +08:00
|
|
|
/*
 * Track iowait boost state on a utilization update.
 *
 * On an iowait wakeup (SCHED_CPUFREQ_IOWAIT), start or double the boost, up
 * to iowait_boost_max. Otherwise, drop the boost entirely when the CPU
 * appears to have been idle for longer than a tick.
 */
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time)
{
	if (!(sg_cpu->flags & SCHED_CPUFREQ_IOWAIT)) {
		if (sg_cpu->iowait_boost) {
			s64 delta_ns = time - sg_cpu->last_update;

			/* Clear iowait_boost if the CPU appears to have been idle. */
			if (delta_ns > TICK_NSEC) {
				sg_cpu->iowait_boost = 0;
				sg_cpu->iowait_boost_pending = false;
			}
		}
		return;
	}

	/* A pending boost request has not been consumed yet - keep it. */
	if (sg_cpu->iowait_boost_pending)
		return;

	sg_cpu->iowait_boost_pending = true;

	if (sg_cpu->iowait_boost) {
		/* Double the boost, capped at the maximum. */
		sg_cpu->iowait_boost <<= 1;
		if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
			sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
	} else {
		/* First boost: start from the policy's minimum frequency. */
		sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
	}
}
|
|
|
|
|
|
|
|
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
|
|
|
|
unsigned long *max)
|
|
|
|
{
|
2017-07-23 23:54:26 +08:00
|
|
|
unsigned int boost_util, boost_max;
|
2016-09-10 06:00:31 +08:00
|
|
|
|
2017-07-23 23:54:25 +08:00
|
|
|
if (!sg_cpu->iowait_boost)
|
2016-09-10 06:00:31 +08:00
|
|
|
return;
|
|
|
|
|
2017-07-23 23:54:25 +08:00
|
|
|
if (sg_cpu->iowait_boost_pending) {
|
|
|
|
sg_cpu->iowait_boost_pending = false;
|
|
|
|
} else {
|
|
|
|
sg_cpu->iowait_boost >>= 1;
|
|
|
|
if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
|
|
|
|
sg_cpu->iowait_boost = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
boost_util = sg_cpu->iowait_boost;
|
|
|
|
boost_max = sg_cpu->iowait_boost_max;
|
|
|
|
|
2016-09-10 06:00:31 +08:00
|
|
|
if (*util * boost_max < *max * boost_util) {
|
|
|
|
*util = boost_util;
|
|
|
|
*max = boost_max;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
The way the schedutil governor uses the PELT metric causes it to
underestimate the CPU utilization in some cases.
That can be easily demonstrated by running kernel compilation on
a Sandy Bridge Intel processor, running turbostat in parallel with
it and looking at the values written to the MSR_IA32_PERF_CTL
register. Namely, the expected result would be that when all CPUs
were 100% busy, all of them would be requested to run in the maximum
P-state, but observation shows that this clearly isn't the case.
The CPUs run in the maximum P-state for a while and then are
requested to run slower and go back to the maximum P-state after
a while again. That causes the actual frequency of the processor to
visibly oscillate below the sustainable maximum in a jittery fashion
which clearly is not desirable.
That has been attributed to CPU utilization metric updates on task
migration that cause the total utilization value for the CPU to be
reduced by the utilization of the migrated task. If that happens,
the schedutil governor may see a CPU utilization reduction and will
attempt to reduce the CPU frequency accordingly right away. That
may be premature, though, for example if the system is generally
busy and there are other runnable tasks waiting to be run on that
CPU already.
This is unlikely to be an issue on systems where cpufreq policies are
shared between multiple CPUs, because in those cases the policy
utilization is computed as the maximum of the CPU utilization values
over the whole policy and if that turns out to be low, reducing the
frequency for the policy most likely is a good idea anyway. On
systems with one CPU per policy, however, it may affect performance
adversely and even lead to increased energy consumption in some cases.
On those systems it may be addressed by taking another utilization
metric into consideration, like whether or not the CPU whose
frequency is about to be reduced has been idle recently, because if
that's not the case, the CPU is likely to be busy in the near future
and its frequency should not be reduced.
To that end, use the counter of idle calls in the timekeeping code.
Namely, make the schedutil governor look at that counter for the
current CPU every time before its frequency is about to be reduced.
If the counter has not changed since the previous iteration of the
governor computations for that CPU, the CPU has been busy for all
that time and its frequency should not be decreased, so if the new
frequency would be lower than the one set previously, the governor
will skip the frequency update.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes <joelaf@google.com>
2017-03-22 07:08:50 +08:00
|
|
|
#ifdef CONFIG_NO_HZ_COMMON
|
|
|
|
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
|
|
|
|
{
|
2017-12-21 09:22:45 +08:00
|
|
|
unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
|
cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
The way the schedutil governor uses the PELT metric causes it to
underestimate the CPU utilization in some cases.
That can be easily demonstrated by running kernel compilation on
a Sandy Bridge Intel processor, running turbostat in parallel with
it and looking at the values written to the MSR_IA32_PERF_CTL
register. Namely, the expected result would be that when all CPUs
were 100% busy, all of them would be requested to run in the maximum
P-state, but observation shows that this clearly isn't the case.
The CPUs run in the maximum P-state for a while and then are
requested to run slower and go back to the maximum P-state after
a while again. That causes the actual frequency of the processor to
visibly oscillate below the sustainable maximum in a jittery fashion
which clearly is not desirable.
That has been attributed to CPU utilization metric updates on task
migration that cause the total utilization value for the CPU to be
reduced by the utilization of the migrated task. If that happens,
the schedutil governor may see a CPU utilization reduction and will
attempt to reduce the CPU frequency accordingly right away. That
may be premature, though, for example if the system is generally
busy and there are other runnable tasks waiting to be run on that
CPU already.
This is unlikely to be an issue on systems where cpufreq policies are
shared between multiple CPUs, because in those cases the policy
utilization is computed as the maximum of the CPU utilization values
over the whole policy and if that turns out to be low, reducing the
frequency for the policy most likely is a good idea anyway. On
systems with one CPU per policy, however, it may affect performance
adversely and even lead to increased energy consumption in some cases.
On those systems it may be addressed by taking another utilization
metric into consideration, like whether or not the CPU whose
frequency is about to be reduced has been idle recently, because if
that's not the case, the CPU is likely to be busy in the near future
and its frequency should not be reduced.
To that end, use the counter of idle calls in the timekeeping code.
Namely, make the schedutil governor look at that counter for the
current CPU every time before its frequency is about to be reduced.
If the counter has not changed since the previous iteration of the
governor computations for that CPU, the CPU has been busy for all
that time and its frequency should not be decreased, so if the new
frequency would be lower than the one set previously, the governor
will skip the frequency update.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes <joelaf@google.com>
2017-03-22 07:08:50 +08:00
|
|
|
bool ret = idle_calls == sg_cpu->saved_idle_calls;
|
|
|
|
|
|
|
|
sg_cpu->saved_idle_calls = idle_calls;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
|
|
|
|
#endif /* CONFIG_NO_HZ_COMMON */
|
|
|
|
|
2016-04-02 07:09:12 +08:00
|
|
|
static void sugov_update_single(struct update_util_data *hook, u64 time,
|
2016-08-17 04:14:55 +08:00
|
|
|
unsigned int flags)
|
2016-04-02 07:09:12 +08:00
|
|
|
{
|
|
|
|
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
|
|
|
|
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
|
|
|
|
struct cpufreq_policy *policy = sg_policy->policy;
|
2016-08-17 04:14:55 +08:00
|
|
|
unsigned long util, max;
|
2016-04-02 07:09:12 +08:00
|
|
|
unsigned int next_f;
|
cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
The way the schedutil governor uses the PELT metric causes it to
underestimate the CPU utilization in some cases.
That can be easily demonstrated by running kernel compilation on
a Sandy Bridge Intel processor, running turbostat in parallel with
it and looking at the values written to the MSR_IA32_PERF_CTL
register. Namely, the expected result would be that when all CPUs
were 100% busy, all of them would be requested to run in the maximum
P-state, but observation shows that this clearly isn't the case.
The CPUs run in the maximum P-state for a while and then are
requested to run slower and go back to the maximum P-state after
a while again. That causes the actual frequency of the processor to
visibly oscillate below the sustainable maximum in a jittery fashion
which clearly is not desirable.
That has been attributed to CPU utilization metric updates on task
migration that cause the total utilization value for the CPU to be
reduced by the utilization of the migrated task. If that happens,
the schedutil governor may see a CPU utilization reduction and will
attempt to reduce the CPU frequency accordingly right away. That
may be premature, though, for example if the system is generally
busy and there are other runnable tasks waiting to be run on that
CPU already.
This is unlikely to be an issue on systems where cpufreq policies are
shared between multiple CPUs, because in those cases the policy
utilization is computed as the maximum of the CPU utilization values
over the whole policy and if that turns out to be low, reducing the
frequency for the policy most likely is a good idea anyway. On
systems with one CPU per policy, however, it may affect performance
adversely and even lead to increased energy consumption in some cases.
On those systems it may be addressed by taking another utilization
metric into consideration, like whether or not the CPU whose
frequency is about to be reduced has been idle recently, because if
that's not the case, the CPU is likely to be busy in the near future
and its frequency should not be reduced.
To that end, use the counter of idle calls in the timekeeping code.
Namely, make the schedutil governor look at that counter for the
current CPU every time before its frequency is about to be reduced.
If the counter has not changed since the previous iteration of the
governor computations for that CPU, the CPU has been busy for all
that time and its frequency should not be decreased, so if the new
frequency would be lower than the one set previously, the governor
will skip the frequency update.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes <joelaf@google.com>
2017-03-22 07:08:50 +08:00
|
|
|
bool busy;
|
2016-04-02 07:09:12 +08:00
|
|
|
|
2017-12-13 17:53:22 +08:00
|
|
|
sugov_set_iowait_boost(sg_cpu, time);
|
2016-09-10 06:00:31 +08:00
|
|
|
sg_cpu->last_update = time;
|
|
|
|
|
2016-04-02 07:09:12 +08:00
|
|
|
if (!sugov_should_update_freq(sg_policy, time))
|
|
|
|
return;
|
|
|
|
|
cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
The way the schedutil governor uses the PELT metric causes it to
underestimate the CPU utilization in some cases.
That can be easily demonstrated by running kernel compilation on
a Sandy Bridge Intel processor, running turbostat in parallel with
it and looking at the values written to the MSR_IA32_PERF_CTL
register. Namely, the expected result would be that when all CPUs
were 100% busy, all of them would be requested to run in the maximum
P-state, but observation shows that this clearly isn't the case.
The CPUs run in the maximum P-state for a while and then are
requested to run slower and go back to the maximum P-state after
a while again. That causes the actual frequency of the processor to
visibly oscillate below the sustainable maximum in a jittery fashion
which clearly is not desirable.
That has been attributed to CPU utilization metric updates on task
migration that cause the total utilization value for the CPU to be
reduced by the utilization of the migrated task. If that happens,
the schedutil governor may see a CPU utilization reduction and will
attempt to reduce the CPU frequency accordingly right away. That
may be premature, though, for example if the system is generally
busy and there are other runnable tasks waiting to be run on that
CPU already.
This is unlikely to be an issue on systems where cpufreq policies are
shared between multiple CPUs, because in those cases the policy
utilization is computed as the maximum of the CPU utilization values
over the whole policy and if that turns out to be low, reducing the
frequency for the policy most likely is a good idea anyway. On
systems with one CPU per policy, however, it may affect performance
adversely and even lead to increased energy consumption in some cases.
On those systems it may be addressed by taking another utilization
metric into consideration, like whether or not the CPU whose
frequency is about to be reduced has been idle recently, because if
that's not the case, the CPU is likely to be busy in the near future
and its frequency should not be reduced.
To that end, use the counter of idle calls in the timekeeping code.
Namely, make the schedutil governor look at that counter for the
current CPU every time before its frequency is about to be reduced.
If the counter has not changed since the previous iteration of the
governor computations for that CPU, the CPU has been busy for all
that time and its frequency should not be decreased, so if the new
frequency would be lower than the one set previously, the governor
will skip the frequency update.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes <joelaf@google.com>
2017-03-22 07:08:50 +08:00
|
|
|
busy = sugov_cpu_is_busy(sg_cpu);
|
|
|
|
|
2017-12-04 18:23:18 +08:00
|
|
|
if (flags & SCHED_CPUFREQ_RT) {
|
2016-08-17 04:14:55 +08:00
|
|
|
next_f = policy->cpuinfo.max_freq;
|
|
|
|
} else {
|
2017-12-04 18:23:21 +08:00
|
|
|
sugov_get_util(sg_cpu);
|
|
|
|
max = sg_cpu->max;
|
|
|
|
util = sugov_aggregate_util(sg_cpu);
|
2016-09-10 06:00:31 +08:00
|
|
|
sugov_iowait_boost(sg_cpu, &util, &max);
|
2017-03-02 16:33:21 +08:00
|
|
|
next_f = get_next_freq(sg_policy, util, max);
|
cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
The way the schedutil governor uses the PELT metric causes it to
underestimate the CPU utilization in some cases.
That can be easily demonstrated by running kernel compilation on
a Sandy Bridge Intel processor, running turbostat in parallel with
it and looking at the values written to the MSR_IA32_PERF_CTL
register. Namely, the expected result would be that when all CPUs
were 100% busy, all of them would be requested to run in the maximum
P-state, but observation shows that this clearly isn't the case.
The CPUs run in the maximum P-state for a while and then are
requested to run slower and go back to the maximum P-state after
a while again. That causes the actual frequency of the processor to
visibly oscillate below the sustainable maximum in a jittery fashion
which clearly is not desirable.
That has been attributed to CPU utilization metric updates on task
migration that cause the total utilization value for the CPU to be
reduced by the utilization of the migrated task. If that happens,
the schedutil governor may see a CPU utilization reduction and will
attempt to reduce the CPU frequency accordingly right away. That
may be premature, though, for example if the system is generally
busy and there are other runnable tasks waiting to be run on that
CPU already.
This is unlikely to be an issue on systems where cpufreq policies are
shared between multiple CPUs, because in those cases the policy
utilization is computed as the maximum of the CPU utilization values
over the whole policy and if that turns out to be low, reducing the
frequency for the policy most likely is a good idea anyway. On
systems with one CPU per policy, however, it may affect performance
adversely and even lead to increased energy consumption in some cases.
On those systems it may be addressed by taking another utilization
metric into consideration, like whether or not the CPU whose
frequency is about to be reduced has been idle recently, because if
that's not the case, the CPU is likely to be busy in the near future
and its frequency should not be reduced.
To that end, use the counter of idle calls in the timekeeping code.
Namely, make the schedutil governor look at that counter for the
current CPU every time before its frequency is about to be reduced.
If the counter has not changed since the previous iteration of the
governor computations for that CPU, the CPU has been busy for all
that time and its frequency should not be decreased, so if the new
frequency would be lower than the one set previously, the governor
will skip the frequency update.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes <joelaf@google.com>
2017-03-22 07:08:50 +08:00
|
|
|
/*
|
|
|
|
* Do not reduce the frequency if the CPU has not been idle
|
|
|
|
* recently, as the reduction is likely to be premature then.
|
|
|
|
*/
|
2017-11-08 22:53:55 +08:00
|
|
|
if (busy && next_f < sg_policy->next_freq) {
|
cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
The way the schedutil governor uses the PELT metric causes it to
underestimate the CPU utilization in some cases.
That can be easily demonstrated by running kernel compilation on
a Sandy Bridge Intel processor, running turbostat in parallel with
it and looking at the values written to the MSR_IA32_PERF_CTL
register. Namely, the expected result would be that when all CPUs
were 100% busy, all of them would be requested to run in the maximum
P-state, but observation shows that this clearly isn't the case.
The CPUs run in the maximum P-state for a while and then are
requested to run slower and go back to the maximum P-state after
a while again. That causes the actual frequency of the processor to
visibly oscillate below the sustainable maximum in a jittery fashion
which clearly is not desirable.
That has been attributed to CPU utilization metric updates on task
migration that cause the total utilization value for the CPU to be
reduced by the utilization of the migrated task. If that happens,
the schedutil governor may see a CPU utilization reduction and will
attempt to reduce the CPU frequency accordingly right away. That
may be premature, though, for example if the system is generally
busy and there are other runnable tasks waiting to be run on that
CPU already.
This is unlikely to be an issue on systems where cpufreq policies are
shared between multiple CPUs, because in those cases the policy
utilization is computed as the maximum of the CPU utilization values
over the whole policy and if that turns out to be low, reducing the
frequency for the policy most likely is a good idea anyway. On
systems with one CPU per policy, however, it may affect performance
adversely and even lead to increased energy consumption in some cases.
On those systems it may be addressed by taking another utilization
metric into consideration, like whether or not the CPU whose
frequency is about to be reduced has been idle recently, because if
that's not the case, the CPU is likely to be busy in the near future
and its frequency should not be reduced.
To that end, use the counter of idle calls in the timekeeping code.
Namely, make the schedutil governor look at that counter for the
current CPU every time before its frequency is about to be reduced.
If the counter has not changed since the previous iteration of the
governor computations for that CPU, the CPU has been busy for all
that time and its frequency should not be decreased, so if the new
frequency would be lower than the one set previously, the governor
will skip the frequency update.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes <joelaf@google.com>
2017-03-22 07:08:50 +08:00
|
|
|
next_f = sg_policy->next_freq;
|
2017-11-08 22:53:55 +08:00
|
|
|
|
|
|
|
/* Reset cached freq as next_freq has changed */
|
|
|
|
sg_policy->cached_raw_freq = 0;
|
|
|
|
}
|
2016-08-17 04:14:55 +08:00
|
|
|
}
|
2016-04-02 07:09:12 +08:00
|
|
|
sugov_update_commit(sg_policy, time, next_f);
|
|
|
|
}
|
|
|
|
|
2017-05-03 21:30:48 +08:00
|
|
|
/*
 * sugov_next_freq_shared - compute the next frequency for a shared policy.
 * @sg_cpu: the CPU triggering this update.
 * @time: current timestamp (ns), used to age out stale per-CPU data.
 *
 * Walks every CPU in the policy, takes the maximum utilization/capacity
 * ratio over all of them, and maps it to a frequency via get_next_freq().
 * Returns policy->cpuinfo.max_freq immediately if any CPU in the policy
 * has RT activity pending.
 */
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CFS CPU utilization was last updated before the
		 * previous frequency update and the time elapsed between the
		 * last update of the CPU utilization and the last frequency
		 * update is long enough, reset iowait_boost and util_cfs, as
		 * they are now probably stale. However, still consider the
		 * CPU contribution if it has some DEADLINE utilization
		 * (util_dl).
		 */
		delta_ns = time - j_sg_cpu->last_update;
		if (delta_ns > TICK_NSEC) {
			j_sg_cpu->iowait_boost = 0;
			j_sg_cpu->iowait_boost_pending = false;
			j_sg_cpu->util_cfs = 0;
			if (j_sg_cpu->util_dl == 0)
				continue;
		}
		/* Any RT activity on any CPU forces the maximum frequency. */
		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
			return policy->cpuinfo.max_freq;

		j_max = j_sg_cpu->max;
		j_util = sugov_aggregate_util(j_sg_cpu);
		/* Keep the largest util/max ratio seen so far (cross-multiplied). */
		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}
|
|
|
|
|
|
|
|
/*
 * sugov_update_shared - scheduler update hook for policies shared by
 * multiple CPUs.
 * @hook: per-CPU update_util_data embedded in struct sugov_cpu.
 * @time: current timestamp (ns).
 * @flags: SCHED_CPUFREQ_* flags from the scheduler.
 *
 * Records this CPU's state and, if the rate limit allows, commits a new
 * frequency for the whole policy. The update_lock serializes concurrent
 * updates arriving from different CPUs of the same policy.
 */
static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_get_util(sg_cpu);
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time);
	sg_cpu->last_update = time;

	if (sugov_should_update_freq(sg_policy, time)) {
		/* RT on the triggering CPU short-circuits to max frequency. */
		if (flags & SCHED_CPUFREQ_RT)
			next_f = sg_policy->policy->cpuinfo.max_freq;
		else
			next_f = sugov_next_freq_shared(sg_cpu, time);

		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}
|
|
|
|
|
2016-11-15 16:23:22 +08:00
|
|
|
/*
 * sugov_work - slow-path frequency change, run from the sugov kthread.
 * @work: kthread_work embedded in struct sugov_policy.
 *
 * Used when the cpufreq driver cannot fast-switch from scheduler context.
 * work_lock serializes against sugov_limits(); clearing work_in_progress
 * afterwards re-enables queueing from sugov_irq_work().
 */
static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}
|
|
|
|
|
|
|
|
/*
 * sugov_irq_work - irq_work callback that hands the frequency change off
 * to the sugov kthread (slow path only).
 * @irq_work: irq_work embedded in struct sugov_policy.
 */
static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT tasks, the schedutil governor shoots the frequency to maximum.
	 * Special care must be taken to ensure that this kthread doesn't result
	 * in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}
|
|
|
|
|
|
|
|
/************************** sysfs interface ************************/
|
|
|
|
|
|
|
|
static struct sugov_tunables *global_tunables;
|
|
|
|
static DEFINE_MUTEX(global_tunables_lock);
|
|
|
|
|
|
|
|
/* Map a generic gov_attr_set back to its enclosing sugov_tunables. */
static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}
|
|
|
|
|
|
|
|
static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
|
|
|
|
return sprintf(buf, "%u\n", tunables->rate_limit_us);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
unsigned int rate_limit_us;
|
|
|
|
|
|
|
|
if (kstrtouint(buf, 10, &rate_limit_us))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
tunables->rate_limit_us = rate_limit_us;
|
|
|
|
|
|
|
|
list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
|
|
|
|
sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* sysfs plumbing: expose rate_limit_us read/write under the governor kobject. */
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attributes[] = {
	&rate_limit_us.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};
|
|
|
|
|
|
|
|
/********************** cpufreq governor interface *********************/
|
|
|
|
|
|
|
|
static struct cpufreq_governor schedutil_gov;
|
|
|
|
|
|
|
|
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
|
|
|
|
sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
|
|
|
|
if (!sg_policy)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
sg_policy->policy = policy;
|
|
|
|
raw_spin_lock_init(&sg_policy->update_lock);
|
|
|
|
return sg_policy;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release a sugov_policy obtained from sugov_policy_alloc(). */
static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}
|
|
|
|
|
2016-11-15 16:23:22 +08:00
|
|
|
/*
 * sugov_kthread_create - set up the slow-path worker kthread for @sg_policy.
 *
 * The kthread runs as SCHED_DEADLINE with SCHED_FLAG_SUGOV so that its own
 * activity does not recursively drive frequency requests. Returns 0 on
 * success (or when fast switching makes the kthread unnecessary), or a
 * negative error code.
 */
static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size = sizeof(struct sched_attr),
		.sched_policy = SCHED_DEADLINE,
		.sched_flags = SCHED_FLAG_SUGOV,
		.sched_nice = 0,
		.sched_priority = 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime = 1000000,
		.sched_deadline = 10000000,
		.sched_period = 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;

	/* Kthread is bound to all CPUs by default */
	if (!policy->dvfs_possible_from_any_cpu)
		kthread_bind_mask(thread, policy->related_cpus);

	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * sugov_kthread_stop - tear down the slow-path worker created by
 * sugov_kthread_create(); no-op when fast switching is enabled.
 */
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	/* Drain pending work before stopping the thread. */
	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}
|
|
|
|
|
2016-04-02 07:09:12 +08:00
|
|
|
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables;
|
|
|
|
|
|
|
|
tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
|
|
|
|
if (tunables) {
|
|
|
|
gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
|
|
|
|
if (!have_governor_per_policy())
|
|
|
|
global_tunables = tunables;
|
|
|
|
}
|
|
|
|
return tunables;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Free a tunables set; also clears the system-wide global_tunables
 * pointer when tunables are not per-policy.
 */
static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}
|
|
|
|
|
|
|
|
static int sugov_init(struct cpufreq_policy *policy)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
struct sugov_tunables *tunables;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* State should be equivalent to EXIT */
|
|
|
|
if (policy->governor_data)
|
|
|
|
return -EBUSY;
|
|
|
|
|
2016-11-15 16:23:21 +08:00
|
|
|
cpufreq_enable_fast_switch(policy);
|
|
|
|
|
2016-04-02 07:09:12 +08:00
|
|
|
sg_policy = sugov_policy_alloc(policy);
|
2016-11-15 16:23:21 +08:00
|
|
|
if (!sg_policy) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto disable_fast_switch;
|
|
|
|
}
|
2016-04-02 07:09:12 +08:00
|
|
|
|
2016-11-15 16:23:22 +08:00
|
|
|
ret = sugov_kthread_create(sg_policy);
|
|
|
|
if (ret)
|
|
|
|
goto free_sg_policy;
|
|
|
|
|
2016-04-02 07:09:12 +08:00
|
|
|
mutex_lock(&global_tunables_lock);
|
|
|
|
|
|
|
|
if (global_tunables) {
|
|
|
|
if (WARN_ON(have_governor_per_policy())) {
|
|
|
|
ret = -EINVAL;
|
2016-11-15 16:23:22 +08:00
|
|
|
goto stop_kthread;
|
2016-04-02 07:09:12 +08:00
|
|
|
}
|
|
|
|
policy->governor_data = sg_policy;
|
|
|
|
sg_policy->tunables = global_tunables;
|
|
|
|
|
|
|
|
gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
tunables = sugov_tunables_alloc(sg_policy);
|
|
|
|
if (!tunables) {
|
|
|
|
ret = -ENOMEM;
|
2016-11-15 16:23:22 +08:00
|
|
|
goto stop_kthread;
|
2016-04-02 07:09:12 +08:00
|
|
|
}
|
|
|
|
|
2017-07-19 18:12:42 +08:00
|
|
|
tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);
|
2016-04-02 07:09:12 +08:00
|
|
|
|
|
|
|
policy->governor_data = sg_policy;
|
|
|
|
sg_policy->tunables = tunables;
|
|
|
|
|
|
|
|
ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
|
|
|
|
get_governor_parent_kobj(policy), "%s",
|
|
|
|
schedutil_gov.name);
|
|
|
|
if (ret)
|
|
|
|
goto fail;
|
|
|
|
|
2016-11-15 16:23:20 +08:00
|
|
|
out:
|
2016-04-02 07:09:12 +08:00
|
|
|
mutex_unlock(&global_tunables_lock);
|
|
|
|
return 0;
|
|
|
|
|
2016-11-15 16:23:20 +08:00
|
|
|
fail:
|
2016-04-02 07:09:12 +08:00
|
|
|
policy->governor_data = NULL;
|
|
|
|
sugov_tunables_free(tunables);
|
|
|
|
|
2016-11-15 16:23:22 +08:00
|
|
|
stop_kthread:
|
|
|
|
sugov_kthread_stop(sg_policy);
|
|
|
|
|
2016-11-15 16:23:20 +08:00
|
|
|
free_sg_policy:
|
2016-04-02 07:09:12 +08:00
|
|
|
mutex_unlock(&global_tunables_lock);
|
|
|
|
|
|
|
|
sugov_policy_free(sg_policy);
|
2016-11-15 16:23:21 +08:00
|
|
|
|
|
|
|
disable_fast_switch:
|
|
|
|
cpufreq_disable_fast_switch(policy);
|
|
|
|
|
2016-05-18 20:25:28 +08:00
|
|
|
pr_err("initialization failed (error %d)\n", ret);
|
2016-04-02 07:09:12 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-06-03 05:24:15 +08:00
|
|
|
/*
 * sugov_exit - governor ->exit callback: detach from the tunables set
 * (freeing it when this was the last user), then release the kthread,
 * the per-policy state and fast-switch support, in reverse init order.
 */
static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	/* Last policy detached: the tunables set has no users left. */
	if (!count)
		sugov_tunables_free(tunables);

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}
|
|
|
|
|
|
|
|
/*
 * sugov_start - governor ->start callback: reset per-policy and per-CPU
 * state, then install the scheduler update hooks.
 *
 * Two passes over policy->cpus: all sugov_cpu state is fully initialized
 * before any hook is registered, so a hook firing on one CPU never
 * observes a partially-initialized sibling.
 */
static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	/* UINT_MAX forces the first evaluated frequency to be committed. */
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu = cpu;
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->flags = 0;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}
|
|
|
|
|
2016-06-03 05:24:15 +08:00
|
|
|
/*
 * sugov_stop - governor ->stop callback: remove the scheduler hooks,
 * wait for in-flight callbacks to finish, and cancel any pending
 * slow-path work.
 */
static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	/* Ensure no update hook is still executing before tearing down. */
	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}
|
|
|
|
|
2016-06-03 05:24:15 +08:00
|
|
|
/*
 * sugov_limits - governor ->limits callback: clamp the current frequency
 * into the new policy limits (slow path only; the fast path re-clamps on
 * the next update) and force a fresh frequency evaluation.
 */
static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		/* work_lock serializes against sugov_work(). */
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}
|
|
|
|
|
|
|
|
/* cpufreq governor descriptor wiring the schedutil callbacks together. */
static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.dynamic_switching = true,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};
|
|
|
|
|
|
|
|
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
|
|
|
|
struct cpufreq_governor *cpufreq_default_governor(void)
|
|
|
|
{
|
|
|
|
return &schedutil_gov;
|
|
|
|
}
|
|
|
|
#endif
|
2016-08-17 04:14:55 +08:00
|
|
|
|
|
|
|
/* Register the schedutil governor with the cpufreq core at boot. */
static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);
|