Merge branches 'pm-core', 'pm-qos', 'pm-domains' and 'pm-opp'
* pm-core:
  PM / runtime: Fix some typos

* pm-qos:
  PM / QoS: Remove global notifiers

* pm-domains:
  PM / Domains: Power off masters immediately in the power off sequence
  PM / Domains: Rename is_async to one_dev_on for genpd_power_off()
  PM / Domains: Move genpd_power_off() above genpd_power_on()

* pm-opp:
  PM / OPP: Documentation: Fix opp-microvolt in examples
  PM / OPP: fix off-by-one bug in dev_pm_opp_get_max_volt_latency loop
commit 21ff03c484
@@ -188,14 +188,14 @@ Example 1: Single cluster Dual-core ARM cortex A9, switch DVFS states together.
 		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = <970000 975000 985000>;
+			opp-microvolt = <975000 970000 985000>;
 			opp-microamp = <70000>;
 			clock-latency-ns = <300000>;
 			opp-suspend;
 		};
 		opp@1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
-			opp-microvolt = <980000 1000000 1010000>;
+			opp-microvolt = <1000000 980000 1010000>;
 			opp-microamp = <80000>;
 			clock-latency-ns = <310000>;
 		};
@@ -267,14 +267,14 @@ independently.
 		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = <970000 975000 985000>;
+			opp-microvolt = <975000 970000 985000>;
 			opp-microamp = <70000>;
 			clock-latency-ns = <300000>;
 			opp-suspend;
 		};
 		opp@1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
-			opp-microvolt = <980000 1000000 1010000>;
+			opp-microvolt = <1000000 980000 1010000>;
 			opp-microamp = <80000>;
 			clock-latency-ns = <310000>;
 		};
@@ -343,14 +343,14 @@ DVFS state together.
 		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = <970000 975000 985000>;
+			opp-microvolt = <975000 970000 985000>;
 			opp-microamp = <70000>;
 			clock-latency-ns = <300000>;
 			opp-suspend;
 		};
 		opp@1100000000 {
 			opp-hz = /bits/ 64 <1100000000>;
-			opp-microvolt = <980000 1000000 1010000>;
+			opp-microvolt = <1000000 980000 1010000>;
 			opp-microamp = <80000>;
 			clock-latency-ns = <310000>;
 		};
@@ -369,7 +369,7 @@ DVFS state together.
 		opp@1300000000 {
 			opp-hz = /bits/ 64 <1300000000>;
-			opp-microvolt = <1045000 1050000 1055000>;
+			opp-microvolt = <1050000 1045000 1055000>;
 			opp-microamp = <95000>;
 			clock-latency-ns = <400000>;
 			opp-suspend;
@@ -382,7 +382,7 @@ DVFS state together.
 		};
 		opp@1500000000 {
 			opp-hz = /bits/ 64 <1500000000>;
-			opp-microvolt = <1010000 1100000 1110000>;
+			opp-microvolt = <1100000 1010000 1110000>;
 			opp-microamp = <95000>;
 			clock-latency-ns = <400000>;
 			turbo-mode;
@@ -424,9 +424,9 @@ Example 4: Handling multiple regulators
 		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-					<960000 965000 975000>, /* Supply 1 */
-					<960000 965000 975000>; /* Supply 2 */
+			opp-microvolt = <975000 970000 985000>, /* Supply 0 */
+					<965000 960000 975000>, /* Supply 1 */
+					<965000 960000 975000>; /* Supply 2 */
 			opp-microamp = <70000>, /* Supply 0 */
 					<70000>, /* Supply 1 */
 					<70000>; /* Supply 2 */
@@ -437,9 +437,9 @@ Example 4: Handling multiple regulators
 		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt = <970000 975000 985000>, /* Supply 0 */
-					<960000 965000 975000>, /* Supply 1 */
-					<960000 965000 975000>; /* Supply 2 */
+			opp-microvolt = <975000 970000 985000>, /* Supply 0 */
+					<965000 960000 975000>, /* Supply 1 */
+					<965000 960000 975000>; /* Supply 2 */
 			opp-microamp = <70000>, /* Supply 0 */
 					<0>, /* Supply 1 doesn't need this */
 					<70000>; /* Supply 2 */
@@ -474,7 +474,7 @@ Example 5: opp-supported-hw
 			 */
 			opp-supported-hw = <0xF 0xFFFFFFFF 0xFFFFFFFF>
 			opp-hz = /bits/ 64 <600000000>;
-			opp-microvolt = <900000 915000 925000>;
+			opp-microvolt = <915000 900000 925000>;
 			...
 		};
 
@@ -487,7 +487,7 @@ Example 5: opp-supported-hw
 			 */
 			opp-supported-hw = <0x20 0xff0000ff 0x0000f4f0>
 			opp-hz = /bits/ 64 <800000000>;
-			opp-microvolt = <900000 915000 925000>;
+			opp-microvolt = <915000 900000 925000>;
 			...
 		};
 	};
@@ -512,18 +512,18 @@ Example 6: opp-microvolt-<name>, opp-microamp-<name>:
 		opp@1000000000 {
 			opp-hz = /bits/ 64 <1000000000>;
-			opp-microvolt-slow = <900000 915000 925000>;
-			opp-microvolt-fast = <970000 975000 985000>;
+			opp-microvolt-slow = <915000 900000 925000>;
+			opp-microvolt-fast = <975000 970000 985000>;
 			opp-microamp-slow = <70000>;
 			opp-microamp-fast = <71000>;
 		};
 
 		opp@1200000000 {
 			opp-hz = /bits/ 64 <1200000000>;
-			opp-microvolt-slow = <900000 915000 925000>, /* Supply vcc0 */
-					     <910000 925000 935000>; /* Supply vcc1 */
-			opp-microvolt-fast = <970000 975000 985000>, /* Supply vcc0 */
-					     <960000 965000 975000>; /* Supply vcc1 */
+			opp-microvolt-slow = <915000 900000 925000>, /* Supply vcc0 */
+					     <925000 910000 935000>; /* Supply vcc1 */
+			opp-microvolt-fast = <975000 970000 985000>, /* Supply vcc0 */
+					     <965000 960000 975000>; /* Supply vcc1 */
 			opp-microamp = <70000>; /* Will be used for both slow/fast */
 		};
 	};
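The corrected ordering in the examples above follows the opp-microvolt format, in which the first cell of each triplet is the target voltage and the remaining two are the minimum and maximum. As a purely illustrative sketch (not part of this commit; the helper name and array layout are assumptions), this is roughly how such a triplet maps onto the regulator consumer API:

#include <linux/regulator/consumer.h>
#include <linux/types.h>

/* Hypothetical helper: apply one opp-microvolt triplet <target min max>. */
static int apply_opp_triplet(struct regulator *reg, const u32 uv[3])
{
	/* uv[0] = target, uv[1] = minimum, uv[2] = maximum (microvolts) */
	return regulator_set_voltage_triplet(reg, uv[1], uv[0], uv[2]);
}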
@@ -163,8 +163,7 @@ of flags and remove sysfs attributes pm_qos_no_power_off and pm_qos_remote_wakeu
 under the device's power directory.
 
 Notification mechanisms:
-The per-device PM QoS framework has 2 different and distinct notification trees:
-a per-device notification tree and a global notification tree.
+The per-device PM QoS framework has a per-device notification tree.
 
 int dev_pm_qos_add_notifier(device, notifier):
 Adds a notification callback function for the device.
@@ -174,16 +173,6 @@ is changed (for resume latency device PM QoS only).
 int dev_pm_qos_remove_notifier(device, notifier):
 Removes the notification callback function for the device.
 
-int dev_pm_qos_add_global_notifier(notifier):
-Adds a notification callback function in the global notification tree of the
-framework.
-The callback is called when the aggregated value for any device is changed
-(for resume latency device PM QoS only).
-
-int dev_pm_qos_remove_global_notifier(notifier):
-Removes the notification callback function from the global notification tree
-of the framework.
-
 
 Active state latency tolerance
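With the global notifiers removed by this series, watchers are left with the per-device interface described above. A minimal sketch of its use, assuming a hypothetical driver (callback and variable names are illustrative; only dev_pm_qos_add_notifier() and dev_pm_qos_remove_notifier() come from the documented interface):

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>

/* Runs when the aggregated resume-latency constraint of the watched
 * device changes; "value" is the new aggregated constraint. */
static int foo_qos_notify(struct notifier_block *nb, unsigned long value,
			  void *unused)
{
	pr_info("new resume latency constraint: %lu\n", value);
	return NOTIFY_OK;
}

static struct notifier_block foo_qos_nb = {
	.notifier_call = foo_qos_notify,
};

static int foo_start_watching(struct device *dev)
{
	/* Per-device registration; there is no global variant any more. */
	return dev_pm_qos_add_notifier(dev, &foo_qos_nb);
}

static void foo_stop_watching(struct device *dev)
{
	dev_pm_qos_remove_notifier(dev, &foo_qos_nb);
}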
@@ -100,7 +100,7 @@ knows what to do to handle the device).
   * If the suspend callback returns an error code different from -EBUSY and
     -EAGAIN, the PM core regards this as a fatal error and will refuse to run
     the helper functions described in Section 4 for the device until its status
-    is directly set to either'active', or 'suspended' (the PM core provides
+    is directly set to either 'active', or 'suspended' (the PM core provides
     special helper functions for this purpose).
 
 In particular, if the driver requires remote wakeup capability (i.e. hardware
@@ -217,7 +217,7 @@ defined in include/linux/pm.h:
     one to complete
 
   spinlock_t lock;
-    - lock used for synchronisation
+    - lock used for synchronization
 
   atomic_t usage_count;
     - the usage counter of the device
@@ -565,7 +565,7 @@ appropriate to ensure that the device is not put back to sleep during the
 probe. This can happen with systems such as the network device layer.
 
 It may be desirable to suspend the device once ->probe() has finished.
-Therefore the driver core uses the asyncronous pm_request_idle() to submit a
+Therefore the driver core uses the asynchronous pm_request_idle() to submit a
 request to execute the subsystem-level idle callback for the device at that
 time. A driver that makes use of the runtime autosuspend feature, may want to
 update the last busy mark before returning from ->probe().
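A minimal sketch of the pattern described in this paragraph, assuming a hypothetical platform driver that opts in to runtime autosuspend (the driver name and the 2000 ms delay are illustrative; the runtime PM calls themselves are the standard interfaces):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/* ... hardware setup ... */

	/* Opt in to autosuspend with an example 2000 ms inactivity delay. */
	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/*
	 * Update the last busy mark before returning, so the asynchronous
	 * pm_request_idle() issued by the driver core right after ->probe()
	 * does not put the device back to sleep before the autosuspend
	 * delay has expired.
	 */
	pm_runtime_mark_last_busy(dev);
	return 0;
}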
@@ -273,6 +273,93 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 	queue_work(pm_wq, &genpd->power_off_work);
 }
 
+/**
+ * genpd_power_off - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ * @one_dev_on: If invoked from genpd's ->runtime_suspend|resume() callback, the
+ * RPM status of the releated device is in an intermediate state, not yet turned
+ * into RPM_SUSPENDED. This means genpd_power_off() must allow one device to not
+ * be RPM_SUSPENDED, while it tries to power off the PM domain.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, remove power from @genpd.
+ */
+static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
+			   unsigned int depth)
+{
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
+	unsigned int not_suspended = 0;
+
+	/*
+	 * Do not try to power off the domain in the following situations:
+	 * (1) The domain is already in the "power off" state.
+	 * (2) System suspend is in progress.
+	 */
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    || genpd->prepared_count > 0)
+		return 0;
+
+	if (atomic_read(&genpd->sd_count) > 0)
+		return -EBUSY;
+
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		enum pm_qos_flags_status stat;
+
+		stat = dev_pm_qos_flags(pdd->dev,
+					PM_QOS_FLAG_NO_POWER_OFF
+						| PM_QOS_FLAG_REMOTE_WAKEUP);
+		if (stat > PM_QOS_FLAGS_NONE)
+			return -EBUSY;
+
+		/*
+		 * Do not allow PM domain to be powered off, when an IRQ safe
+		 * device is part of a non-IRQ safe domain.
+		 */
+		if (!pm_runtime_suspended(pdd->dev) ||
+			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
+			not_suspended++;
+	}
+
+	if (not_suspended > 1 || (not_suspended == 1 && !one_dev_on))
+		return -EBUSY;
+
+	if (genpd->gov && genpd->gov->power_down_ok) {
+		if (!genpd->gov->power_down_ok(&genpd->domain))
+			return -EAGAIN;
+	}
+
+	if (genpd->power_off) {
+		int ret;
+
+		if (atomic_read(&genpd->sd_count) > 0)
+			return -EBUSY;
+
+		/*
+		 * If sd_count > 0 at this point, one of the subdomains hasn't
+		 * managed to call genpd_power_on() for the master yet after
+		 * incrementing it. In that case genpd_power_on() will wait
+		 * for us to drop the lock, so we can call .power_off() and let
+		 * the genpd_power_on() restore power for us (this shouldn't
+		 * happen very often).
+		 */
+		ret = _genpd_power_off(genpd, true);
+		if (ret)
+			return ret;
+	}
+
+	genpd->status = GPD_STATE_POWER_OFF;
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_lock_nested(link->master, depth + 1);
+		genpd_power_off(link->master, false, depth + 1);
+		genpd_unlock(link->master);
+	}
+
+	return 0;
+}
+
 /**
  * genpd_power_on - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
@@ -321,7 +408,9 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
 					&genpd->slave_links,
 					slave_node) {
 		genpd_sd_counter_dec(link->master);
-		genpd_queue_power_off_work(link->master);
+		genpd_lock_nested(link->master, depth + 1);
+		genpd_power_off(link->master, false, depth + 1);
+		genpd_unlock(link->master);
 	}
 
 	return ret;
@@ -367,87 +456,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
 	return NOTIFY_DONE;
 }
 
-/**
- * genpd_power_off - Remove power from a given PM domain.
- * @genpd: PM domain to power down.
- * @is_async: PM domain is powered down from a scheduled work
- *
- * If all of the @genpd's devices have been suspended and all of its subdomains
- * have been powered down, remove power from @genpd.
- */
-static int genpd_power_off(struct generic_pm_domain *genpd, bool is_async)
-{
-	struct pm_domain_data *pdd;
-	struct gpd_link *link;
-	unsigned int not_suspended = 0;
-
-	/*
-	 * Do not try to power off the domain in the following situations:
-	 * (1) The domain is already in the "power off" state.
-	 * (2) System suspend is in progress.
-	 */
-	if (genpd->status == GPD_STATE_POWER_OFF
-	    || genpd->prepared_count > 0)
-		return 0;
-
-	if (atomic_read(&genpd->sd_count) > 0)
-		return -EBUSY;
-
-	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
-		enum pm_qos_flags_status stat;
-
-		stat = dev_pm_qos_flags(pdd->dev,
-					PM_QOS_FLAG_NO_POWER_OFF
-						| PM_QOS_FLAG_REMOTE_WAKEUP);
-		if (stat > PM_QOS_FLAGS_NONE)
-			return -EBUSY;
-
-		/*
-		 * Do not allow PM domain to be powered off, when an IRQ safe
-		 * device is part of a non-IRQ safe domain.
-		 */
-		if (!pm_runtime_suspended(pdd->dev) ||
-			irq_safe_dev_in_no_sleep_domain(pdd->dev, genpd))
-			not_suspended++;
-	}
-
-	if (not_suspended > 1 || (not_suspended == 1 && is_async))
-		return -EBUSY;
-
-	if (genpd->gov && genpd->gov->power_down_ok) {
-		if (!genpd->gov->power_down_ok(&genpd->domain))
-			return -EAGAIN;
-	}
-
-	if (genpd->power_off) {
-		int ret;
-
-		if (atomic_read(&genpd->sd_count) > 0)
-			return -EBUSY;
-
-		/*
-		 * If sd_count > 0 at this point, one of the subdomains hasn't
-		 * managed to call genpd_power_on() for the master yet after
-		 * incrementing it. In that case genpd_power_on() will wait
-		 * for us to drop the lock, so we can call .power_off() and let
-		 * the genpd_power_on() restore power for us (this shouldn't
-		 * happen very often).
-		 */
-		ret = _genpd_power_off(genpd, true);
-		if (ret)
-			return ret;
-	}
-
-	genpd->status = GPD_STATE_POWER_OFF;
-
-	list_for_each_entry(link, &genpd->slave_links, slave_node) {
-		genpd_sd_counter_dec(link->master);
-		genpd_queue_power_off_work(link->master);
-	}
-
-	return 0;
-}
-
 /**
  * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
  * @work: Work structure used for scheduling the execution of this function.
@@ -459,7 +467,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 	genpd = container_of(work, struct generic_pm_domain, power_off_work);
 
 	genpd_lock(genpd);
-	genpd_power_off(genpd, true);
+	genpd_power_off(genpd, false, 0);
 	genpd_unlock(genpd);
 }
 
@@ -578,7 +586,7 @@ static int genpd_runtime_suspend(struct device *dev)
 		return 0;
 
 	genpd_lock(genpd);
-	genpd_power_off(genpd, false);
+	genpd_power_off(genpd, true, 0);
 	genpd_unlock(genpd);
 
 	return 0;
@@ -658,7 +666,7 @@ err_poweroff:
 	if (!pm_runtime_is_irq_safe(dev) ||
 		(pm_runtime_is_irq_safe(dev) && genpd_is_irq_safe(genpd))) {
 		genpd_lock(genpd);
-		genpd_power_off(genpd, 0);
+		genpd_power_off(genpd, true, 0);
 		genpd_unlock(genpd);
 	}
 
@@ -231,7 +231,8 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 	 * The caller needs to ensure that opp_table (and hence the regulator)
 	 * isn't freed, while we are executing this routine.
 	 */
-	for (i = 0; reg = regulators[i], i < count; i++) {
+	for (i = 0; i < count; i++) {
+		reg = regulators[i];
 		ret = regulator_set_voltage_time(reg, uV[i].min, uV[i].max);
 		if (ret > 0)
 			latency_ns += ret * 1000;
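The bug in the removed line is that the comma expression evaluates regulators[i] before testing i < count, so the final loop-condition check reads one element past the end of the array even though the body never runs for it. A standalone sketch of the same pattern and its fix (plain user-space C, not kernel code; names are illustrative):

#include <stdio.h>

int main(void)
{
	int regulators[3] = { 1, 2, 3 };
	int count = 3, i, reg;

	/*
	 * Buggy pattern: "reg = regulators[i]" runs before "i < count" is
	 * tested, so when i == count the array is read out of bounds:
	 *
	 *	for (i = 0; reg = regulators[i], i < count; i++) { ... }
	 *
	 * Fixed pattern: index the array only after the bounds check passes.
	 */
	for (i = 0; i < count; i++) {
		reg = regulators[i];
		printf("regulators[%d] = %d\n", i, reg);
	}

	return 0;
}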
@@ -17,12 +17,9 @@
  *
  * This QoS design is best effort based. Dependents register their QoS needs.
  * Watchers register to keep track of the current QoS needs of the system.
- * Watchers can register different types of notification callbacks:
- *  . a per-device notification callback using the dev_pm_qos_*_notifier API.
- *    The notification chain data is stored in the per-device constraint
- *    data struct.
- *  . a system-wide notification callback using the dev_pm_qos_*_global_notifier
- *    API. The notification chain data is stored in a static variable.
+ * Watchers can register a per-device notification callback using the
+ * dev_pm_qos_*_notifier API. The notification chain data is stored in the
+ * per-device constraint data struct.
  *
  * Note about the per-device constraint data struct allocation:
  * . The per-device constraints data struct ptr is tored into the device
@@ -49,8 +46,6 @@
 static DEFINE_MUTEX(dev_pm_qos_mtx);
 static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
 
-static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
-
 /**
  * __dev_pm_qos_flags - Check PM QoS flags for a given device.
  * @dev: Device to check the PM QoS flags for.
@@ -135,8 +130,7 @@ s32 dev_pm_qos_read_value(struct device *dev)
  * @value: Value to assign to the QoS request.
  *
  * Internal function to update the constraints list using the PM QoS core
- * code and if needed call the per-device and the global notification
- * callbacks
+ * code and if needed call the per-device callbacks.
  */
 static int apply_constraint(struct dev_pm_qos_request *req,
 			    enum pm_qos_req_action action, s32 value)
@@ -148,12 +142,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 	case DEV_PM_QOS_RESUME_LATENCY:
 		ret = pm_qos_update_target(&qos->resume_latency,
 					   &req->data.pnode, action, value);
-		if (ret) {
-			value = pm_qos_read_value(&qos->resume_latency);
-			blocking_notifier_call_chain(&dev_pm_notifiers,
-						     (unsigned long)value,
-						     req);
-		}
 		break;
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		ret = pm_qos_update_target(&qos->latency_tolerance,
@@ -535,36 +523,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);
 
-/**
- * dev_pm_qos_add_global_notifier - sets notification entry for changes to
- * target value of the PM QoS constraints for any device
- *
- * @notifier: notifier block managed by caller.
- *
- * Will register the notifier into a notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier)
-{
-	return blocking_notifier_chain_register(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier);
-
-/**
- * dev_pm_qos_remove_global_notifier - deletes notification for changes to
- * target value of PM QoS constraints for any device
- *
- * @notifier: notifier block to be removed.
- *
- * Will remove the notifier from the notification chain that gets called
- * upon changes to the target value for any device.
- */
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
-{
-	return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
-}
-EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
-
 /**
  * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
  * @dev: Device whose ancestor to add the request for.
@@ -146,8 +146,6 @@ int dev_pm_qos_add_notifier(struct device *dev,
 			    struct notifier_block *notifier);
 int dev_pm_qos_remove_notifier(struct device *dev,
 			       struct notifier_block *notifier);
-int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
-int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
 void dev_pm_qos_constraints_init(struct device *dev);
 void dev_pm_qos_constraints_destroy(struct device *dev);
 int dev_pm_qos_add_ancestor_request(struct device *dev,
@@ -199,12 +197,6 @@ static inline int dev_pm_qos_add_notifier(struct device *dev,
 static inline int dev_pm_qos_remove_notifier(struct device *dev,
 					     struct notifier_block *notifier)
 			{ return 0; }
-static inline int dev_pm_qos_add_global_notifier(
-					struct notifier_block *notifier)
-			{ return 0; }
-static inline int dev_pm_qos_remove_global_notifier(
-					struct notifier_block *notifier)
-			{ return 0; }
 static inline void dev_pm_qos_constraints_init(struct device *dev)
 {
 	dev->power.power_state = PMSG_ON;