Merge branch 'pm-cpufreq'

* pm-cpufreq:
  cpufreq: Avoid leaving stale IRQ work items during CPU offline
This commit is contained in:
Rafael J. Wysocki 2019-12-19 16:10:52 +01:00
commit 505b308b69
4 changed files with 24 additions and 16 deletions

View File

@@ -595,17 +595,6 @@ struct governor_attr {
size_t count);
};
static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
{
/*
* Allow remote callbacks if:
* - dvfs_possible_from_any_cpu flag is set
* - the local and remote CPUs share cpufreq policy
*/
return policy->dvfs_possible_from_any_cpu ||
cpumask_test_cpu(smp_processor_id(), policy->cpus);
}
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/

View File

@@ -12,6 +12,8 @@
#define SCHED_CPUFREQ_MIGRATION (1U << 1)
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy;
struct update_util_data {
void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};
@@ -20,6 +22,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void (*func)(struct update_util_data *data, u64 time,
unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
static inline unsigned long map_util_freq(unsigned long util,
unsigned long freq, unsigned long cap)

View File

@@ -5,6 +5,8 @@
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
#include <linux/cpufreq.h>
#include "sched.h"
DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
@@ -57,3 +59,19 @@ void cpufreq_remove_update_util_hook(int cpu)
rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
}
EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
/**
* cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated.
* @policy: cpufreq policy to check.
*
* Return 'true' if:
* - the local and remote CPUs share @policy,
* - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going
* offline (in which case it is not expected to run cpufreq updates any more).
*/
bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
{
return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||
(policy->dvfs_possible_from_any_cpu &&
rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
}

View File

@@ -82,12 +82,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
* by the hardware, as calculating the frequency is pointless if
* we cannot in fact act on it.
*
* For the slow switching platforms, the kthread is always scheduled on
* the right set of CPUs and any CPU can find the next frequency and
* schedule the kthread.
* This is needed on the slow switching platforms too to prevent CPUs
* going offline from leaving stale IRQ work items behind.
*/
if (sg_policy->policy->fast_switch_enabled &&
!cpufreq_this_cpu_can_update(sg_policy->policy))
if (!cpufreq_this_cpu_can_update(sg_policy->policy))
return false;
if (unlikely(sg_policy->limits_changed)) {