Power management fixes for 5.10-rc3.
Merge tag 'pm-5.10-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "These fix the device links support in runtime PM, correct mistakes in
  the cpuidle documentation, fix the handling of policy limits changes
  in the schedutil cpufreq governor, fix assorted issues in the OPP
  (operating performance points) framework and make one janitorial
  change.

  Specifics:

   - Unify the handling of managed and stateless device links in the
     runtime PM framework and prevent runtime PM references to devices
     from being leaked after device link removal (Rafael Wysocki).

   - Fix two mistakes in the cpuidle documentation (Julia Lawall).

   - Prevent the schedutil cpufreq governor from missing policy limits
     updates in some cases (Viresh Kumar).

   - Prevent static OPPs from being dropped by mistake (Viresh Kumar).

   - Prevent a helper function in the OPP framework from returning
     prematurely (Viresh Kumar).

   - Prevent opp_table_lock from being held too long during removal of
     OPP tables with no more active references (Viresh Kumar).

   - Drop a redundant semicolon from the Intel RAPL power capping
     driver (Tom Rix)"

* tag 'pm-5.10-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM: runtime: Resume the device earlier in __device_release_driver()
  PM: runtime: Drop pm_runtime_clean_up_links()
  PM: runtime: Drop runtime PM references to supplier on link removal
  powercap/intel_rapl: remove unneeded semicolon
  Documentation: PM: cpuidle: correct path name
  Documentation: PM: cpuidle: correct typo
  cpufreq: schedutil: Don't skip freq update if need_freq_update is set
  opp: Reduce the size of critical section in _opp_table_kref_release()
  opp: Fix early exit from dev_pm_opp_register_set_opp_helper()
  opp: Don't always remove static OPPs in _of_add_opp_table_v1()
commit f786dfa374
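The first three patches all concern runtime PM handling of device links. For context, a consumer driver typically creates the kind of link these fixes cover with device_link_add() and DL_FLAG_PM_RUNTIME, which is what makes the supplier accumulate rpm_active references that must be dropped again when the link goes away. A minimal sketch under that assumption (hypothetical probe function, not from this series):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Hypothetical consumer probe: tie our runtime PM state to a supplier. */
static int example_probe(struct device *consumer, struct device *supplier)
{
        struct device_link *link;

        /*
         * DL_FLAG_PM_RUNTIME propagates runtime PM from consumer to
         * supplier; DL_FLAG_RPM_ACTIVE starts the link with the supplier
         * resumed and one rpm_active reference already taken.
         */
        link = device_link_add(consumer, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
        if (!link)
                return -ENODEV;

        /*
         * When the link is deleted later, pm_runtime_drop_link() (reworked
         * in the diffs below) releases the supplier references that the
         * link accumulated.
         */
        return 0;
}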
@@ -478,7 +478,7 @@ order to ask the hardware to enter that state. Also, for each
 statistics of the given idle state. That information is exposed by the kernel
 via ``sysfs``.
 
-For each CPU in the system, there is a :file:`/sys/devices/system/cpu<N>/cpuidle/`
+For each CPU in the system, there is a :file:`/sys/devices/system/cpu/cpu<N>/cpuidle/`
 directory in ``sysfs``, where the number ``<N>`` is assigned to the given
 CPU at the initialization time. That directory contains a set of subdirectories
 called :file:`state0`, :file:`state1` and so on, up to the number of idle state
@@ -494,7 +494,7 @@ object corresponding to it, as follows:
         residency.
 
 ``below``
-        Total number of times this idle state had been asked for, but cerainly
+        Total number of times this idle state had been asked for, but certainly
         a deeper idle state would have been a better match for the observed idle
         duration.
 
@@ -773,8 +773,7 @@ static void __device_link_del(struct kref *kref)
         dev_dbg(link->consumer, "Dropping the link to %s\n",
                 dev_name(link->supplier));
 
-        if (link->flags & DL_FLAG_PM_RUNTIME)
-                pm_runtime_drop_link(link->consumer);
+        pm_runtime_drop_link(link);
 
         list_del_rcu(&link->s_node);
         list_del_rcu(&link->c_node);
@@ -788,8 +787,7 @@ static void __device_link_del(struct kref *kref)
         dev_info(link->consumer, "Dropping the link to %s\n",
                  dev_name(link->supplier));
 
-        if (link->flags & DL_FLAG_PM_RUNTIME)
-                pm_runtime_drop_link(link->consumer);
+        pm_runtime_drop_link(link);
 
         list_del(&link->s_node);
         list_del(&link->c_node);
@@ -1117,6 +1117,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
 
         drv = dev->driver;
         if (drv) {
+                pm_runtime_get_sync(dev);
+
                 while (device_links_busy(dev)) {
                         __device_driver_unlock(dev, parent);
 
@@ -1128,13 +1130,12 @@ static void __device_release_driver(struct device *dev, struct device *parent)
                          * have released the driver successfully while this one
                          * was waiting, so check for that.
                          */
-                        if (dev->driver != drv)
+                        if (dev->driver != drv) {
+                                pm_runtime_put(dev);
                                 return;
+                        }
                 }
 
-                pm_runtime_get_sync(dev);
-                pm_runtime_clean_up_links(dev);
-
                 driver_sysfs_remove(dev);
 
                 if (dev->bus)
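The hunk above moves pm_runtime_get_sync() ahead of the device_links_busy() loop, so the new early-return path needs a matching pm_runtime_put(). A minimal sketch of that get/put balancing rule, with hypothetical names:

/* Hypothetical sketch of the get/put rule applied above. */
static void example_release(struct device *dev)
{
        pm_runtime_get_sync(dev);       /* resume and take a usage reference */

        if (!dev->driver) {
                pm_runtime_put(dev);    /* every early exit balances the get */
                return;
        }

        /* ... teardown work runs with the device resumed ... */

        pm_runtime_put(dev);            /* the normal path balances it too */
}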
@@ -1642,42 +1642,6 @@ void pm_runtime_remove(struct device *dev)
         pm_runtime_reinit(dev);
 }
 
-/**
- * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
- * @dev: Device whose driver is going to be removed.
- *
- * Check links from this device to any consumers and if any of them have active
- * runtime PM references to the device, drop the usage counter of the device
- * (as many times as needed).
- *
- * Links with the DL_FLAG_MANAGED flag unset are ignored.
- *
- * Since the device is guaranteed to be runtime-active at the point this is
- * called, nothing else needs to be done here.
- *
- * Moreover, this is called after device_links_busy() has returned 'false', so
- * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
- * therefore rpm_active can't be manipulated concurrently.
- */
-void pm_runtime_clean_up_links(struct device *dev)
-{
-        struct device_link *link;
-        int idx;
-
-        idx = device_links_read_lock();
-
-        list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
-                                device_links_read_lock_held()) {
-                if (!(link->flags & DL_FLAG_MANAGED))
-                        continue;
-
-                while (refcount_dec_not_one(&link->rpm_active))
-                        pm_runtime_put_noidle(dev);
-        }
-
-        device_links_read_unlock(idx);
-}
-
 /**
  * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
  * @dev: Consumer device.
@@ -1729,7 +1693,7 @@ void pm_runtime_new_link(struct device *dev)
         spin_unlock_irq(&dev->power.lock);
 }
 
-void pm_runtime_drop_link(struct device *dev)
+static void pm_runtime_drop_link_count(struct device *dev)
 {
         spin_lock_irq(&dev->power.lock);
         WARN_ON(dev->power.links_count == 0);
@@ -1737,6 +1701,25 @@ void pm_runtime_drop_link(struct device *dev)
         spin_unlock_irq(&dev->power.lock);
 }
 
+/**
+ * pm_runtime_drop_link - Prepare for device link removal.
+ * @link: Device link going away.
+ *
+ * Drop the link count of the consumer end of @link and decrement the supplier
+ * device's runtime PM usage counter as many times as needed to drop all of the
+ * PM runtime reference to it from the consumer.
+ */
+void pm_runtime_drop_link(struct device_link *link)
+{
+        if (!(link->flags & DL_FLAG_PM_RUNTIME))
+                return;
+
+        pm_runtime_drop_link_count(link->consumer);
+
+        while (refcount_dec_not_one(&link->rpm_active))
+                pm_runtime_put(link->supplier);
+}
+
 static bool pm_runtime_need_not_resume(struct device *dev)
 {
         return atomic_read(&dev->power.usage_count) <= 1 &&
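In the new pm_runtime_drop_link() above, refcount_dec_not_one() decrements rpm_active only while it is greater than one, so the loop releases every extra supplier reference while leaving the initial one intact. A standalone toy model of that loop (plain C, hypothetical names, just to illustrate the exit condition):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for refcount_dec_not_one(): decrement unless the count is 1. */
static bool dec_not_one(int *count)
{
        if (*count == 1)
                return false;
        (*count)--;
        return true;
}

int main(void)
{
        int rpm_active = 4;     /* initial reference plus three extra ones */
        int drops = 0;

        while (dec_not_one(&rpm_active))
                drops++;        /* one pm_runtime_put() per extra reference */

        printf("dropped %d references, %d left\n", drops, rpm_active);
        /* prints: dropped 3 references, 1 left */
        return 0;
}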
@@ -1181,6 +1181,10 @@ static void _opp_table_kref_release(struct kref *kref)
         struct opp_device *opp_dev, *temp;
         int i;
 
+        /* Drop the lock as soon as we can */
+        list_del(&opp_table->node);
+        mutex_unlock(&opp_table_lock);
+
         _of_clear_opp_table(opp_table);
 
         /* Release clk */
@@ -1208,10 +1212,7 @@ static void _opp_table_kref_release(struct kref *kref)
 
         mutex_destroy(&opp_table->genpd_virt_dev_lock);
         mutex_destroy(&opp_table->lock);
-        list_del(&opp_table->node);
         kfree(opp_table);
-
-        mutex_unlock(&opp_table_lock);
 }
 
 void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
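Taken together, the two _opp_table_kref_release() hunks shrink the critical section: the table is unlinked from the global list while opp_table_lock is held, and the remaining teardown runs unlocked. A generic userspace sketch of the same unlink-then-free pattern (hypothetical names, pthreads):

#include <pthread.h>
#include <stdlib.h>

struct node {
        struct node *next;
        /* ... payload ... */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Unlink under the lock, then do the slow teardown without holding it. */
static void release_node(struct node *n)
{
        struct node **p;

        pthread_mutex_lock(&list_lock);
        for (p = &head; *p; p = &(*p)->next) {
                if (*p == n) {
                        *p = n->next;   /* n is now unreachable by others */
                        break;
                }
        }
        pthread_mutex_unlock(&list_lock);       /* drop the lock early */

        /* Expensive cleanup no longer blocks other list users. */
        free(n);
}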
@@ -1930,7 +1931,7 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev,
                 return ERR_PTR(-EINVAL);
 
         opp_table = dev_pm_opp_get_opp_table(dev);
-        if (!IS_ERR(opp_table))
+        if (IS_ERR(opp_table))
                 return opp_table;
 
         /* This should be called before OPPs are initialized */
@@ -944,6 +944,8 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
                 nr -= 2;
         }
 
+        return 0;
+
 remove_static_opp:
         _opp_remove_all_static(opp_table);
 
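Without the added "return 0;", the success path of _of_add_opp_table_v1() fell through into the cleanup label and removed the static OPPs it had just registered. A minimal standalone illustration of that fall-through bug pattern (hypothetical names):

#include <stdio.h>

/* Toy model: the success path must return before the cleanup label. */
static int build_table(int fail)
{
        /* ... populate the table ... */
        if (fail)
                goto remove_entries;

        return 0;       /* without this, success falls through below */

remove_entries:
        printf("removing the entries that were just added\n");
        return -1;
}

int main(void)
{
        return build_table(0);  /* succeeds and keeps the entries */
}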
@@ -620,7 +620,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
         case ARBITRARY_UNIT:
         default:
                 return value;
-        };
+        }
 
         if (to_raw)
                 return div64_u64(value, units) * scale;
@@ -54,11 +54,10 @@ extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
 extern void pm_runtime_update_max_time_suspended(struct device *dev,
                                                  s64 delta_ns);
 extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
-extern void pm_runtime_clean_up_links(struct device *dev);
 extern void pm_runtime_get_suppliers(struct device *dev);
 extern void pm_runtime_put_suppliers(struct device *dev);
 extern void pm_runtime_new_link(struct device *dev);
-extern void pm_runtime_drop_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device_link *link);
 
 /**
  * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
@@ -276,11 +275,10 @@ static inline u64 pm_runtime_autosuspend_expiration(
                                 struct device *dev) { return 0; }
 static inline void pm_runtime_set_memalloc_noio(struct device *dev,
                                                 bool enable){}
-static inline void pm_runtime_clean_up_links(struct device *dev) {}
 static inline void pm_runtime_get_suppliers(struct device *dev) {}
 static inline void pm_runtime_put_suppliers(struct device *dev) {}
 static inline void pm_runtime_new_link(struct device *dev) {}
-static inline void pm_runtime_drop_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device_link *link) {}
 
 #endif /* !CONFIG_PM */
@@ -102,9 +102,12 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                    unsigned int next_freq)
 {
-        if (sg_policy->next_freq == next_freq &&
-            !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
-                return false;
+        if (!sg_policy->need_freq_update) {
+                if (sg_policy->next_freq == next_freq)
+                        return false;
+        } else {
+                sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
+        }
 
         sg_policy->next_freq = next_freq;
         sg_policy->last_freq_update_time = time;
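With the rework above, a set need_freq_update flag can no longer short-circuit the update: the frequency is committed even when it equals next_freq, and the flag stays set afterwards only for drivers advertising CPUFREQ_NEED_UPDATE_LIMITS. A standalone sketch of the new decision logic (plain C, hypothetical names):

#include <stdbool.h>

struct policy {
        unsigned int next_freq;
        bool need_freq_update;  /* set when the policy limits change */
};

/* Mirrors the reworked sugov_update_next_freq() decision. */
static bool should_commit(struct policy *p, unsigned int next_freq,
                          bool driver_needs_update_limits)
{
        if (!p->need_freq_update) {
                if (p->next_freq == next_freq)
                        return false;   /* nothing changed, skip the update */
        } else {
                /*
                 * Limits changed: push the update through even if the
                 * frequency is unchanged; keep the flag set only for
                 * drivers that want to see every limits update.
                 */
                p->need_freq_update = driver_needs_update_limits;
        }

        p->next_freq = next_freq;
        return true;
}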
@@ -162,11 +165,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 
         freq = map_util_freq(util, freq, max);
 
-        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update &&
-            !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS))
+        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                 return sg_policy->next_freq;
 
-        sg_policy->need_freq_update = false;
         sg_policy->cached_raw_freq = freq;
         return cpufreq_driver_resolve_freq(policy, freq);
 }
@@ -442,7 +443,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
         struct sugov_policy *sg_policy = sg_cpu->sg_policy;
         unsigned long util, max;
         unsigned int next_f;
-        bool busy;
         unsigned int cached_freq = sg_policy->cached_raw_freq;
 
         sugov_iowait_boost(sg_cpu, time, flags);
@@ -453,9 +453,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
         if (!sugov_should_update_freq(sg_policy, time))
                 return;
 
-        /* Limits may have changed, don't skip frequency update */
-        busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);
-
         util = sugov_get_util(sg_cpu);
         max = sg_cpu->max;
         util = sugov_iowait_apply(sg_cpu, time, util, max);
@@ -464,7 +461,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
          * Do not reduce the frequency if the CPU has not been idle
          * recently, as the reduction is likely to be premature then.
          */
-        if (busy && next_f < sg_policy->next_freq) {
+        if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
                 next_f = sg_policy->next_freq;
 
                 /* Restore cached freq as next_freq has changed */
@@ -829,9 +826,10 @@ static int sugov_start(struct cpufreq_policy *policy)
         sg_policy->next_freq = 0;
         sg_policy->work_in_progress = false;
         sg_policy->limits_changed = false;
-        sg_policy->need_freq_update = false;
         sg_policy->cached_raw_freq = 0;
 
+        sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
+
         for_each_cpu(cpu, policy->cpus) {
                 struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 