Merge branches 'pm-cpufreq-sched' and 'pm-cpuidle'
* pm-cpufreq-sched:
  cpufreq: schedutil: Avoid missing updates for one-CPU policies
  schedutil: Allow cpufreq requests to be made even when kthread kicked
  cpufreq: Rename cpufreq_can_do_remote_dvfs()
  cpufreq: schedutil: Cleanup and document iowait boost
  cpufreq: schedutil: Fix iowait boost reset
  cpufreq: schedutil: Don't set next_freq to UINT_MAX
  Revert "cpufreq: schedutil: Don't restrict kthread to related_cpus unnecessarily"

* pm-cpuidle:
  cpuidle: governors: Consolidate PM QoS handling
  cpuidle: governors: Drop redundant checks related to PM QoS
commit 601ef1f3c0

Changed directories: drivers, include/linux, kernel/sched
drivers/cpufreq/cpufreq_governor.c
@@ -278,7 +278,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
         struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
         u64 delta_ns, lst;
 
-        if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
+        if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
                 return;
 
         /*
drivers/cpuidle/governor.c
@@ -8,8 +8,10 @@
  * This code is licenced under the GPL.
  */
 
-#include <linux/mutex.h>
+#include <linux/cpu.h>
 #include <linux/cpuidle.h>
+#include <linux/mutex.h>
+#include <linux/pm_qos.h>
 
 #include "cpuidle.h"
 
@@ -93,3 +95,16 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
 
         return ret;
 }
+
+/**
+ * cpuidle_governor_latency_req - Compute a latency constraint for CPU
+ * @cpu: Target CPU
+ */
+int cpuidle_governor_latency_req(unsigned int cpu)
+{
+        int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+        struct device *device = get_cpu_device(cpu);
+        int device_req = dev_pm_qos_raw_read_value(device);
+
+        return device_req < global_req ? device_req : global_req;
+}
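Editor's note: the sketch below is not part of the commit. It only illustrates, under stated assumptions, how a cpuidle governor's ->select() callback could consume the new cpuidle_governor_latency_req() helper; the name example_select() and the simple backwards state walk are invented for the example.

/* Hedged sketch: a hypothetical governor callback using the helper added in
 * drivers/cpuidle/governor.c above. It picks the deepest enabled idle state
 * whose exit latency still fits the combined (global + per-CPU) constraint. */
#include <linux/cpuidle.h>

static int example_select(struct cpuidle_driver *drv,
                          struct cpuidle_device *dev, bool *stop_tick)
{
        int latency_req = cpuidle_governor_latency_req(dev->cpu);
        int i;

        for (i = drv->state_count - 1; i > 0; i--) {
                if (dev->states_usage[i].disable)
                        continue;
                if (drv->states[i].exit_latency <= latency_req)
                        return i;
        }

        return 0;       /* fall back to the shallowest state */
}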
drivers/cpuidle/governors/ladder.c
@@ -14,10 +14,8 @@
 
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
-#include <linux/pm_qos.h>
 #include <linux/jiffies.h>
 #include <linux/tick.h>
-#include <linux/cpu.h>
 
 #include <asm/io.h>
 #include <linux/uaccess.h>
@@ -69,16 +67,10 @@ static int ladder_select_state(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev, bool *dummy)
 {
         struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
-        struct device *device = get_cpu_device(dev->cpu);
         struct ladder_device_state *last_state;
         int last_residency, last_idx = ldev->last_state_idx;
         int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 1 : 0;
-        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
-        int resume_latency = dev_pm_qos_raw_read_value(device);
-
-        if (resume_latency < latency_req &&
-            resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
-                latency_req = resume_latency;
+        int latency_req = cpuidle_governor_latency_req(dev->cpu);
 
         /* Special case when user has set very strict latency requirement */
         if (unlikely(latency_req == 0)) {
drivers/cpuidle/governors/menu.c
@@ -12,7 +12,6 @@
 
 #include <linux/kernel.h>
 #include <linux/cpuidle.h>
-#include <linux/pm_qos.h>
 #include <linux/time.h>
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
@@ -21,7 +20,6 @@
 #include <linux/sched/loadavg.h>
 #include <linux/sched/stat.h>
 #include <linux/math64.h>
-#include <linux/cpu.h>
 
 /*
  * Please note when changing the tuning values:
@@ -286,15 +284,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                        bool *stop_tick)
 {
         struct menu_device *data = this_cpu_ptr(&menu_devices);
-        struct device *device = get_cpu_device(dev->cpu);
-        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
+        int latency_req = cpuidle_governor_latency_req(dev->cpu);
         int i;
         int first_idx;
         int idx;
         unsigned int interactivity_req;
         unsigned int expected_interval;
         unsigned long nr_iowaiters, cpu_load;
-        int resume_latency = dev_pm_qos_raw_read_value(device);
         ktime_t delta_next;
 
         if (data->needs_update) {
@@ -302,10 +298,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                 data->needs_update = 0;
         }
 
-        if (resume_latency < latency_req &&
-            resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
-                latency_req = resume_latency;
-
         /* Special case when user has set very strict latency requirement */
         if (unlikely(latency_req == 0)) {
                 *stop_tick = false;
include/linux/cpufreq.h
@@ -571,7 +571,7 @@ struct governor_attr {
                          size_t count);
 };
 
-static inline bool cpufreq_can_do_remote_dvfs(struct cpufreq_policy *policy)
+static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
 {
         /*
          * Allow remote callbacks if:
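Editor's note: the hunk above shows only the rename and the start of the comment. As a rough sketch of the intended semantics (an assumption, not copied from include/linux/cpufreq.h), the check amounts to allowing the update when the policy permits DVFS from any CPU or when the current CPU belongs to the policy:

/* Assumed reconstruction of the helper's intent; the real inline body in
 * include/linux/cpufreq.h may differ in detail. */
static inline bool example_this_cpu_can_update(struct cpufreq_policy *policy)
{
        return policy->dvfs_possible_from_any_cpu ||
               cpumask_test_cpu(smp_processor_id(), policy->cpus);
}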
include/linux/cpuidle.h
@@ -258,6 +258,7 @@ struct cpuidle_governor {
 
 #ifdef CONFIG_CPU_IDLE
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
+extern int cpuidle_governor_latency_req(unsigned int cpu);
 #else
 static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 {return 0;}
kernel/sched/cpufreq_schedutil.c
@@ -89,46 +89,52 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
          * schedule the kthread.
          */
         if (sg_policy->policy->fast_switch_enabled &&
-            !cpufreq_can_do_remote_dvfs(sg_policy->policy))
+            !cpufreq_this_cpu_can_update(sg_policy->policy))
                 return false;
 
-        if (sg_policy->work_in_progress)
-                return false;
-
-        if (unlikely(sg_policy->need_freq_update)) {
-                sg_policy->need_freq_update = false;
-                /*
-                 * This happens when limits change, so forget the previous
-                 * next_freq value and force an update.
-                 */
-                sg_policy->next_freq = UINT_MAX;
+        if (unlikely(sg_policy->need_freq_update))
                 return true;
-        }
 
         delta_ns = time - sg_policy->last_freq_update_time;
 
         return delta_ns >= sg_policy->freq_update_delay_ns;
 }
 
-static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
+static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                 unsigned int next_freq)
 {
-        struct cpufreq_policy *policy = sg_policy->policy;
-
         if (sg_policy->next_freq == next_freq)
-                return;
+                return false;
 
         sg_policy->next_freq = next_freq;
         sg_policy->last_freq_update_time = time;
 
-        if (policy->fast_switch_enabled) {
+        return true;
+}
+
+static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
+                              unsigned int next_freq)
+{
+        struct cpufreq_policy *policy = sg_policy->policy;
+
+        if (!sugov_update_next_freq(sg_policy, time, next_freq))
+                return;
+
         next_freq = cpufreq_driver_fast_switch(policy, next_freq);
         if (!next_freq)
                 return;
 
         policy->cur = next_freq;
         trace_cpu_frequency(next_freq, smp_processor_id());
-        } else {
+}
+
+static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
+                                  unsigned int next_freq)
+{
+        if (!sugov_update_next_freq(sg_policy, time, next_freq))
+                return;
+
         if (!sg_policy->work_in_progress) {
                 sg_policy->work_in_progress = true;
                 irq_work_queue(&sg_policy->irq_work);
         }
@@ -165,8 +171,10 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 
         freq = (freq + (freq >> 2)) * util / max;
 
-        if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
+        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                 return sg_policy->next_freq;
+
+        sg_policy->need_freq_update = false;
         sg_policy->cached_raw_freq = freq;
         return cpufreq_driver_resolve_freq(policy, freq);
 }
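Editor's note: a worked example of the frequency formula visible in get_next_freq() above, next_freq = 1.25 * freq * util / max. The numbers are arbitrary and only illustrate the arithmetic.

/* Standalone model of the formula shown in the hunk above; not kernel code. */
#include <stdio.h>

static unsigned int get_next_freq_model(unsigned int freq,
                                        unsigned long util, unsigned long max)
{
        return (freq + (freq >> 2)) * util / max;       /* freq * 5/4 * util/max */
}

int main(void)
{
        /* A 2 GHz (2000000 kHz) limit at 50% utilization maps to 1.25 GHz. */
        printf("%u kHz\n", get_next_freq_model(2000000, 512, 1024));
        return 0;
}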
@@ -201,43 +209,120 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
         return min(util, sg_cpu->max);
 }
 
-static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags)
+/**
+ * sugov_iowait_reset() - Reset the IO boost status of a CPU.
+ * @sg_cpu: the sugov data for the CPU to boost
+ * @time: the update time from the caller
+ * @set_iowait_boost: true if an IO boost has been requested
+ *
+ * The IO wait boost of a task is disabled after a tick since the last update
+ * of a CPU. If a new IO wait boost is requested after more than a tick, then
+ * we enable the boost starting from the minimum frequency, which improves
+ * energy efficiency by ignoring sporadic wakeups from IO.
+ */
+static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
+                               bool set_iowait_boost)
 {
-        if (flags & SCHED_CPUFREQ_IOWAIT) {
-                if (sg_cpu->iowait_boost_pending)
+        s64 delta_ns = time - sg_cpu->last_update;
 
+        /* Reset boost only if a tick has elapsed since last request */
+        if (delta_ns <= TICK_NSEC)
+                return false;
+
+        sg_cpu->iowait_boost = set_iowait_boost
+                               ? sg_cpu->sg_policy->policy->min : 0;
+        sg_cpu->iowait_boost_pending = set_iowait_boost;
+
+        return true;
+}
+
+/**
+ * sugov_iowait_boost() - Updates the IO boost status of a CPU.
+ * @sg_cpu: the sugov data for the CPU to boost
+ * @time: the update time from the caller
+ * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
+ *
+ * Each time a task wakes up after an IO operation, the CPU utilization can be
+ * boosted to a certain utilization which doubles at each "frequent and
+ * successive" wakeup from IO, ranging from the utilization of the minimum
+ * OPP to the utilization of the maximum OPP.
+ * To keep doubling, an IO boost has to be requested at least once per tick,
+ * otherwise we restart from the utilization of the minimum OPP.
+ */
+static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
+                               unsigned int flags)
+{
+        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;
+
+        /* Reset boost if the CPU appears to have been idle enough */
+        if (sg_cpu->iowait_boost &&
+            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                 return;
 
+        /* Boost only tasks waking up after IO */
+        if (!set_iowait_boost)
+                return;
+
+        /* Ensure boost doubles only one time at each request */
+        if (sg_cpu->iowait_boost_pending)
+                return;
         sg_cpu->iowait_boost_pending = true;
 
+        /* Double the boost at each request */
         if (sg_cpu->iowait_boost) {
                 sg_cpu->iowait_boost <<= 1;
                 if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
                         sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
-        } else {
-                sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
+                return;
         }
-        } else if (sg_cpu->iowait_boost) {
-                s64 delta_ns = time - sg_cpu->last_update;
-
-                /* Clear iowait_boost if the CPU appears to have been idle. */
-                if (delta_ns > TICK_NSEC) {
-                        sg_cpu->iowait_boost = 0;
-                        sg_cpu->iowait_boost_pending = false;
-                }
-        }
+
+        /* First wakeup after IO: start with minimum boost */
+        sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
 }
 
-static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
-                               unsigned long *max)
+/**
+ * sugov_iowait_apply() - Apply the IO boost to a CPU.
+ * @sg_cpu: the sugov data for the cpu to boost
+ * @time: the update time from the caller
+ * @util: the utilization to (eventually) boost
+ * @max: the maximum value the utilization can be boosted to
+ *
+ * A CPU running a task which woken up after an IO operation can have its
+ * utilization boosted to speed up the completion of those IO operations.
+ * The IO boost value is increased each time a task wakes up from IO, in
+ * sugov_iowait_boost(), and it's instead decreased by this function,
+ * each time an increase has not been requested (!iowait_boost_pending).
+ *
+ * A CPU which also appears to have been idle for at least one tick has also
+ * its IO boost utilization reset.
+ *
+ * This mechanism is designed to boost high frequently IO waiting tasks, while
+ * being more conservative on tasks which does sporadic IO operations.
+ */
+static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time,
+                               unsigned long *util, unsigned long *max)
 {
         unsigned int boost_util, boost_max;
 
+        /* No boost currently required */
         if (!sg_cpu->iowait_boost)
                 return;
 
+        /* Reset boost if the CPU appears to have been idle enough */
+        if (sugov_iowait_reset(sg_cpu, time, false))
+                return;
+
+        /*
+         * An IO waiting task has just woken up:
+         * allow to further double the boost value
+         */
         if (sg_cpu->iowait_boost_pending) {
                 sg_cpu->iowait_boost_pending = false;
         } else {
+                /*
+                 * Otherwise: reduce the boost value and disable it when we
+                 * reach the minimum.
+                 */
                 sg_cpu->iowait_boost >>= 1;
                 if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
                         sg_cpu->iowait_boost = 0;
@@ -245,9 +330,12 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
                 }
         }
 
+        /*
+         * Apply the current boost value: a CPU is boosted only if its current
+         * utilization is smaller than the current IO boost level.
+         */
         boost_util = sg_cpu->iowait_boost;
         boost_max = sg_cpu->iowait_boost_max;
-
         if (*util * boost_max < *max * boost_util) {
                 *util = boost_util;
                 *max = boost_max;
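Editor's note: to make the doubling/decay policy documented in the kernel-doc comments above concrete, here is a small standalone model (not kernel code). POLICY_MIN, POLICY_MAX and the update sequence are made-up numbers; boost_request() and boost_apply() mirror the roles of sugov_iowait_boost() and sugov_iowait_apply(), with the tick-based reset omitted for brevity.

/* Standalone model of the iowait boost policy: the boost starts at the
 * policy minimum, doubles on each successive IO wakeup, is clamped at the
 * maximum, and is halved (eventually dropping to zero) on updates without
 * an IO wakeup. Purely illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define POLICY_MIN      200000U         /* hypothetical minimum OPP */
#define POLICY_MAX      3200000U        /* hypothetical maximum OPP */

static unsigned int iowait_boost;       /* models sg_cpu->iowait_boost */
static bool boost_pending;              /* models sg_cpu->iowait_boost_pending */

/* Role of sugov_iowait_boost(): raise the boost once per IO-wakeup request. */
static void boost_request(bool io_wakeup)
{
        if (!io_wakeup || boost_pending)
                return;
        boost_pending = true;

        if (iowait_boost) {
                iowait_boost <<= 1;
                if (iowait_boost > POLICY_MAX)
                        iowait_boost = POLICY_MAX;
                return;
        }
        iowait_boost = POLICY_MIN;      /* first wakeup: start from the minimum */
}

/* Role of sugov_iowait_apply(): keep the boost if it was just requested,
 * otherwise halve it and drop it once it falls below the minimum. */
static void boost_apply(void)
{
        if (!iowait_boost)
                return;
        if (boost_pending) {
                boost_pending = false;
                return;
        }
        iowait_boost >>= 1;
        if (iowait_boost < POLICY_MIN)
                iowait_boost = 0;
}

int main(void)
{
        /* Three back-to-back IO wakeups, then five updates without IO. */
        bool updates[] = { true, true, true, false, false, false, false, false };
        unsigned int i;

        for (i = 0; i < sizeof(updates) / sizeof(updates[0]); i++) {
                boost_request(updates[i]);
                boost_apply();
                printf("update %u: io=%d boost=%u\n", i, updates[i], iowait_boost);
        }
        return 0;
}

Running it shows the boost climbing 200000 -> 400000 -> 800000 over the three IO wakeups, then halving on each update without IO until it drops to zero.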
@@ -286,7 +374,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
         unsigned int next_f;
         bool busy;
 
-        sugov_set_iowait_boost(sg_cpu, time, flags);
+        sugov_iowait_boost(sg_cpu, time, flags);
         sg_cpu->last_update = time;
 
         ignore_dl_rate_limit(sg_cpu, sg_policy);
@@ -299,21 +387,31 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
         sugov_get_util(sg_cpu);
         max = sg_cpu->max;
         util = sugov_aggregate_util(sg_cpu);
-        sugov_iowait_boost(sg_cpu, &util, &max);
+        sugov_iowait_apply(sg_cpu, time, &util, &max);
         next_f = get_next_freq(sg_policy, util, max);
         /*
          * Do not reduce the frequency if the CPU has not been idle
          * recently, as the reduction is likely to be premature then.
          */
-        if (busy && next_f < sg_policy->next_freq &&
-            sg_policy->next_freq != UINT_MAX) {
+        if (busy && next_f < sg_policy->next_freq) {
                 next_f = sg_policy->next_freq;
 
                 /* Reset cached freq as next_freq has changed */
                 sg_policy->cached_raw_freq = 0;
         }
 
-        sugov_update_commit(sg_policy, time, next_f);
+        /*
+         * This code runs under rq->lock for the target CPU, so it won't run
+         * concurrently on two different CPUs for the same target and it is not
+         * necessary to acquire the lock in the fast switch case.
+         */
+        if (sg_policy->policy->fast_switch_enabled) {
+                sugov_fast_switch(sg_policy, time, next_f);
+        } else {
+                raw_spin_lock(&sg_policy->update_lock);
+                sugov_deferred_update(sg_policy, time, next_f);
+                raw_spin_unlock(&sg_policy->update_lock);
+        }
 }
 
 static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
@@ -326,28 +424,12 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
         for_each_cpu(j, policy->cpus) {
                 struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                 unsigned long j_util, j_max;
-                s64 delta_ns;
 
                 sugov_get_util(j_sg_cpu);
-
-                /*
-                 * If the CFS CPU utilization was last updated before the
-                 * previous frequency update and the time elapsed between the
-                 * last update of the CPU utilization and the last frequency
-                 * update is long enough, reset iowait_boost and util_cfs, as
-                 * they are now probably stale. However, still consider the
-                 * CPU contribution if it has some DEADLINE utilization
-                 * (util_dl).
-                 */
-                delta_ns = time - j_sg_cpu->last_update;
-                if (delta_ns > TICK_NSEC) {
-                        j_sg_cpu->iowait_boost = 0;
-                        j_sg_cpu->iowait_boost_pending = false;
-                }
-
                 j_max = j_sg_cpu->max;
                 j_util = sugov_aggregate_util(j_sg_cpu);
-                sugov_iowait_boost(j_sg_cpu, &j_util, &j_max);
+                sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
 
                 if (j_util * max > j_max * util) {
                         util = j_util;
                         max = j_max;
@@ -366,14 +448,18 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 
         raw_spin_lock(&sg_policy->update_lock);
 
-        sugov_set_iowait_boost(sg_cpu, time, flags);
+        sugov_iowait_boost(sg_cpu, time, flags);
         sg_cpu->last_update = time;
 
         ignore_dl_rate_limit(sg_cpu, sg_policy);
 
         if (sugov_should_update_freq(sg_policy, time)) {
                 next_f = sugov_next_freq_shared(sg_cpu, time);
-                sugov_update_commit(sg_policy, time, next_f);
+
+                if (sg_policy->policy->fast_switch_enabled)
+                        sugov_fast_switch(sg_policy, time, next_f);
+                else
+                        sugov_deferred_update(sg_policy, time, next_f);
         }
 
         raw_spin_unlock(&sg_policy->update_lock);
@@ -382,13 +468,27 @@ sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
 static void sugov_work(struct kthread_work *work)
 {
         struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
+        unsigned int freq;
+        unsigned long flags;
+
+        /*
+         * Hold sg_policy->update_lock shortly to handle the case where:
+         * in case sg_policy->next_freq is read here, and then updated by
+         * sugov_deferred_update() just before work_in_progress is set to false
+         * here, we may miss queueing the new update.
+         *
+         * Note: If a work was queued after the update_lock is released,
+         * sugov_work() will just be called again by kthread_work code; and the
+         * request will be processed before the sugov thread sleeps.
+         */
+        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+        freq = sg_policy->next_freq;
+        sg_policy->work_in_progress = false;
+        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 
         mutex_lock(&sg_policy->work_lock);
-        __cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
-                                CPUFREQ_RELATION_L);
+        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
         mutex_unlock(&sg_policy->work_lock);
 
-        sg_policy->work_in_progress = false;
 }
 
 static void sugov_irq_work(struct irq_work *irq_work)
@@ -511,11 +611,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
         }
 
         sg_policy->thread = thread;
-
-        /* Kthread is bound to all CPUs by default */
-        if (!policy->dvfs_possible_from_any_cpu)
         kthread_bind_mask(thread, policy->related_cpus);
-
         init_irq_work(&sg_policy->irq_work, sugov_irq_work);
         mutex_init(&sg_policy->work_lock);
 
@@ -658,7 +754,7 @@ static int sugov_start(struct cpufreq_policy *policy)
 
         sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
         sg_policy->last_freq_update_time = 0;
-        sg_policy->next_freq = UINT_MAX;
+        sg_policy->next_freq = 0;
         sg_policy->work_in_progress = false;
         sg_policy->need_freq_update = false;
         sg_policy->cached_raw_freq = 0;