Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/suspend-2.6:
  spi / PM: Support dev_pm_ops
  PM: Prototype the pm_generic_ operations
  PM / Runtime: Generic resume shouldn't set RPM_ACTIVE unconditionally
  PM: Use dev_name() in core device suspend and resume routines
  PM: Permit registration of parentless devices during system suspend
  PM: Replace the device power.status field with a bit field
  PM: Remove redundant checks from core device resume routines
  PM: Use a different list of devices for each stage of device suspend
  PM: Avoid compiler warning in pm_noirq_op()
  PM: Use pm_wakeup_pending() in __device_suspend()
  PM / Wakeup: Replace pm_check_wakeup_events() with pm_wakeup_pending()
  PM: Prevent dpm_prepare() from returning errors unnecessarily
  PM: Fix references to basic-pm-debugging.txt in drivers-testing.txt
  PM / Runtime: Add synchronous runtime interface for interrupt handlers (v3)
  PM / Hibernate: When failed, in_suspend should be reset
  PM / Hibernate: hibernation_ops->leave should be checked too
  Freezer: Fix a race during freezing of TASK_STOPPED tasks
  PM: Use proper ccflag flag in kernel/power/Makefile
  PM / Runtime: Fix comments to match runtime callback code
commit c8940eca75
@@ -23,10 +23,10 @@ Once you have resolved the suspend/resume-related problems with your test system
without the new driver, you are ready to test it:

a) Build the driver as a module, load it and try the test modes of hibernation
(see: Documents/power/basic-pm-debugging.txt, 1).
(see: Documentation/power/basic-pm-debugging.txt, 1).

b) Load the driver and attempt to hibernate in the "reboot", "shutdown" and
"platform" modes (see: Documents/power/basic-pm-debugging.txt, 1).
"platform" modes (see: Documentation/power/basic-pm-debugging.txt, 1).

c) Compile the driver directly into the kernel and try the test modes of
hibernation.

@@ -34,12 +34,12 @@ c) Compile the driver directly into the kernel and try the test modes of
d) Attempt to hibernate with the driver compiled directly into the kernel
in the "reboot", "shutdown" and "platform" modes.

e) Try the test modes of suspend (see: Documents/power/basic-pm-debugging.txt,
e) Try the test modes of suspend (see: Documentation/power/basic-pm-debugging.txt,
2). [As far as the STR tests are concerned, it should not matter whether or
not the driver is built as a module.]

f) Attempt to suspend to RAM using the s2ram tool with the driver loaded
(see: Documents/power/basic-pm-debugging.txt, 2).
(see: Documentation/power/basic-pm-debugging.txt, 2).

Each of the above tests should be repeated several times and the STD tests
should be mixed with the STR tests. If any of them fails, the driver cannot be
@@ -50,6 +50,15 @@ type's callbacks are not defined) of given device. The bus type, device type
and device class callbacks are referred to as subsystem-level callbacks in what
follows.

By default, the callbacks are always invoked in process context with interrupts
enabled. However, subsystems can use the pm_runtime_irq_safe() helper function
to tell the PM core that a device's ->runtime_suspend() and ->runtime_resume()
callbacks should be invoked in atomic context with interrupts disabled
(->runtime_idle() is still invoked the default way). This implies that these
callback routines must not block or sleep, but it also means that the
synchronous helper functions listed at the end of Section 4 can be used within
an interrupt handler or in an atomic context.
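A minimal sketch of how a driver might take advantage of this; the device, handler and helper choices below are illustrative assumptions, not part of this patch:

  #include <linux/interrupt.h>
  #include <linux/platform_device.h>
  #include <linux/pm_runtime.h>

  static int foo_probe(struct platform_device *pdev)
  {
          /* Ask the PM core to run ->runtime_suspend()/->runtime_resume()
           * with interrupts disabled; this also keeps the parent powered. */
          pm_runtime_irq_safe(&pdev->dev);
          pm_runtime_enable(&pdev->dev);
          return 0;
  }

  static irqreturn_t foo_irq(int irq, void *data)
  {
          struct device *dev = data;

          /* The synchronous helpers are now legal in interrupt context. */
          pm_runtime_get_sync(dev);
          /* ... service the interrupt while the device is powered ... */
          pm_runtime_put_sync_suspend(dev);
          return IRQ_HANDLED;
  }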
The subsystem-level suspend callback is _entirely_ _responsible_ for handling
the suspend of the device as appropriate, which may, but need not include
executing the device driver's own ->runtime_suspend() callback (from the

@@ -237,6 +246,10 @@ defined in include/linux/pm.h:
Section 8); it may be modified only by the pm_runtime_no_callbacks()
helper function

unsigned int irq_safe;
- indicates that the ->runtime_suspend() and ->runtime_resume() callbacks
will be invoked with the spinlock held and interrupts disabled

unsigned int use_autosuspend;
- indicates that the device's driver supports delayed autosuspend (see
Section 9); it may be modified only by the

@@ -344,6 +357,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
- decrement the device's usage counter; if the result is 0 then run
pm_runtime_idle(dev) and return its result

int pm_runtime_put_sync_suspend(struct device *dev);
- decrement the device's usage counter; if the result is 0 then run
pm_runtime_suspend(dev) and return its result

int pm_runtime_put_sync_autosuspend(struct device *dev);
- decrement the device's usage counter; if the result is 0 then run
pm_runtime_autosuspend(dev) and return its result
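These put_sync_suspend/put_sync_autosuspend helpers pair with the usual usage-counter idiom; a hedged sketch of the autosuspend pattern (the driver names and the 50 ms delay are made up):

  #include <linux/pm_runtime.h>

  static void foo_pm_setup(struct device *dev)
  {
          pm_runtime_set_autosuspend_delay(dev, 50);   /* hypothetical delay */
          pm_runtime_use_autosuspend(dev);
          pm_runtime_enable(dev);
  }

  static int foo_do_io(struct device *dev)
  {
          int ret = pm_runtime_get_sync(dev);          /* resume if needed */

          if (ret < 0) {
                  pm_runtime_put_noidle(dev);
                  return ret;
          }
          /* ... talk to the hardware ... */
          pm_runtime_mark_last_busy(dev);              /* restart idle timer */
          pm_runtime_put_sync_autosuspend(dev);        /* suspend after delay */
          return 0;
  }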
@@ -397,6 +414,11 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
PM attributes from /sys/devices/.../power (or prevent them from being
added when the device is registered)

void pm_runtime_irq_safe(struct device *dev);
- set the power.irq_safe flag for the device, causing the runtime-PM
suspend and resume callbacks (but not the idle callback) to be invoked
with interrupts disabled

void pm_runtime_mark_last_busy(struct device *dev);
- set the power.last_busy field to the current time

@@ -438,6 +460,15 @@ pm_runtime_suspended()
pm_runtime_mark_last_busy()
pm_runtime_autosuspend_expiration()

If pm_runtime_irq_safe() has been called for a device then the following helper
functions may also be used in interrupt context:

pm_runtime_suspend()
pm_runtime_autosuspend()
pm_runtime_resume()
pm_runtime_get_sync()
pm_runtime_put_sync_suspend()

5. Run-time PM Initialization, Device Probing and Removal

Initially, the run-time PM is disabled for all devices, which means that the
@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
*
* If PM operations are defined for the @dev's driver and they include
* ->runtime_suspend(), execute it and return its error code. Otherwise,
* return -EINVAL.
* return 0.
*/
int pm_generic_runtime_suspend(struct device *dev)
{

@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
*
* If PM operations are defined for the @dev's driver and they include
* ->runtime_resume(), execute it and return its error code. Otherwise,
* return -EINVAL.
* return 0.
*/
int pm_generic_runtime_resume(struct device *dev)
{

@@ -185,7 +185,7 @@ static int __pm_generic_resume(struct device *dev, int event)
return 0;

ret = callback(dev);
if (!ret) {
if (!ret && pm_runtime_enabled(dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
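These generic helpers simply dispatch to the bound driver's dev_pm_ops (and now report success when a driver supplies no callback), so a subsystem can plug them straight into its bus-level PM operations. A sketch under an assumed bus name; the SPI hunk later in this commit uses the same pattern:

  #include <linux/device.h>
  #include <linux/pm.h>
  #include <linux/pm_runtime.h>

  /* Hypothetical bus type reusing the generic runtime PM helpers. */
  static const struct dev_pm_ops foo_bus_pm_ops = {
          SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
                             pm_generic_runtime_resume,
                             pm_generic_runtime_idle)
  };

  struct bus_type foo_bus_type = {
          .name = "foo",
          .pm   = &foo_bus_pm_ops,
  };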
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>

#include "../base.h"
#include "power.h"

@@ -41,16 +42,13 @@
*/

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_noirq_list);

static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

/*
* Set once the preparation of devices for a PM transition has started, reset
* before starting to resume devices. Protected by dpm_list_mtx.
*/
static bool transition_started;

static int async_error;

/**
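The hunk above replaces the single dpm_list bookkeeping with one list per suspend/resume stage; the way devices migrate between them can be read off the dpm_* hunks further down. A commented sketch of that flow (the stage mapping is inferred from those hunks):

  #include <linux/list.h>

  /* One list per phase of a system sleep transition: */
  LIST_HEAD(dpm_list);            /* registered, not yet prepared   */
  LIST_HEAD(dpm_prepared_list);   /* filled by dpm_prepare()        */
  LIST_HEAD(dpm_suspended_list);  /* filled by dpm_suspend()        */
  LIST_HEAD(dpm_noirq_list);      /* filled by dpm_suspend_noirq()  */

  /* Resume unwinds the same way: dpm_resume_noirq() moves devices back to
   * dpm_suspended_list, dpm_resume() to dpm_prepared_list, and
   * dpm_complete() splices them back onto dpm_list. */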
@@ -59,7 +57,7 @@ static int async_error;
*/
void device_pm_init(struct device *dev)
{
dev->power.status = DPM_ON;
dev->power.in_suspend = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;

@@ -90,22 +88,11 @@ void device_pm_unlock(void)
void device_pm_add(struct device *dev)
{
pr_debug("PM: Adding info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
mutex_lock(&dpm_list_mtx);
if (dev->parent) {
if (dev->parent->power.status >= DPM_SUSPENDING)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
} else if (transition_started) {
/*
* We refuse to register parentless devices while a PM
* transition is in progress in order to avoid leaving them
* unhandled down the road
*/
dev_WARN(dev, "Parentless device registered during a PM transaction\n");
}

if (dev->parent && dev->parent->power.in_suspend)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
mutex_unlock(&dpm_list_mtx);
}
@@ -117,8 +104,7 @@ void device_pm_add(struct device *dev)
void device_pm_remove(struct device *dev)
{
pr_debug("PM: Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);

@@ -135,10 +121,8 @@ void device_pm_remove(struct device *dev)
void device_pm_move_before(struct device *deva, struct device *devb)
{
pr_debug("PM: Moving %s:%s before %s:%s\n",
deva->bus ? deva->bus->name : "No Bus",
kobject_name(&deva->kobj),
devb->bus ? devb->bus->name : "No Bus",
kobject_name(&devb->kobj));
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert before devb. */
list_move_tail(&deva->power.entry, &devb->power.entry);
}

@@ -151,10 +135,8 @@ void device_pm_move_before(struct device *deva, struct device *devb)
void device_pm_move_after(struct device *deva, struct device *devb)
{
pr_debug("PM: Moving %s:%s after %s:%s\n",
deva->bus ? deva->bus->name : "No Bus",
kobject_name(&deva->kobj),
devb->bus ? devb->bus->name : "No Bus",
kobject_name(&devb->kobj));
deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
/* Delete deva from dpm_list and reinsert after devb. */
list_move(&deva->power.entry, &devb->power.entry);
}

@@ -166,8 +148,7 @@ void device_pm_move_after(struct device *deva, struct device *devb)
void device_pm_move_last(struct device *dev)
{
pr_debug("PM: Moving %s:%s to end of list\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
list_move_tail(&dev->power.entry, &dpm_list);
}
@@ -303,7 +284,7 @@ static int pm_noirq_op(struct device *dev,
pm_message_t state)
{
int error = 0;
ktime_t calltime, delta, rettime;
ktime_t calltime = ktime_set(0, 0), delta, rettime;

if (initcall_debug) {
pr_info("calling %s+ @ %i, parent: %s\n",

@@ -405,7 +386,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
int error)
{
printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
kobject_name(&dev->kobj), pm_verb(state.event), info, error);
dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)

@@ -475,33 +456,24 @@ End:
*/
void dpm_resume_noirq(pm_message_t state)
{
struct list_head list;
ktime_t starttime = ktime_get();

INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
transition_started = false;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);
while (!list_empty(&dpm_noirq_list)) {
struct device *dev = to_device(dpm_noirq_list.next);
int error;

get_device(dev);
if (dev->power.status > DPM_OFF) {
int error;
list_move_tail(&dev->power.entry, &dpm_suspended_list);
mutex_unlock(&dpm_list_mtx);

dev->power.status = DPM_OFF;
mutex_unlock(&dpm_list_mtx);
error = device_resume_noirq(dev, state);
if (error)
pm_dev_err(dev, state, " early", error);

error = device_resume_noirq(dev, state);

mutex_lock(&dpm_list_mtx);
if (error)
pm_dev_err(dev, state, " early", error);
}
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &list);
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "early");
resume_device_irqs();
@@ -544,7 +516,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
dpm_wait(dev->parent, async);
device_lock(dev);

dev->power.status = DPM_RESUMING;
dev->power.in_suspend = false;

if (dev->bus) {
if (dev->bus->pm) {

@@ -610,19 +582,14 @@ static bool is_async(struct device *dev)
*/
static void dpm_resume(pm_message_t state)
{
struct list_head list;
struct device *dev;
ktime_t starttime = ktime_get();

INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;

list_for_each_entry(dev, &dpm_list, power.entry) {
if (dev->power.status < DPM_OFF)
continue;

list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
INIT_COMPLETION(dev->power.completion);
if (is_async(dev)) {
get_device(dev);

@@ -630,28 +597,24 @@ static void dpm_resume(pm_message_t state)
}
}

while (!list_empty(&dpm_list)) {
dev = to_device(dpm_list.next);
while (!list_empty(&dpm_suspended_list)) {
dev = to_device(dpm_suspended_list.next);
get_device(dev);
if (dev->power.status >= DPM_OFF && !is_async(dev)) {
if (!is_async(dev)) {
int error;

mutex_unlock(&dpm_list_mtx);

error = device_resume(dev, state, false);

mutex_lock(&dpm_list_mtx);
if (error)
pm_dev_err(dev, state, "", error);
} else if (dev->power.status == DPM_SUSPENDING) {
/* Allow new children of the device to be registered */
dev->power.status = DPM_RESUMING;

mutex_lock(&dpm_list_mtx);
}
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &list);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);

@@ -697,22 +660,18 @@ static void dpm_complete(pm_message_t state)

INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
transition_started = false;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.prev);
while (!list_empty(&dpm_prepared_list)) {
struct device *dev = to_device(dpm_prepared_list.prev);

get_device(dev);
if (dev->power.status > DPM_ON) {
dev->power.status = DPM_ON;
mutex_unlock(&dpm_list_mtx);
dev->power.in_suspend = false;
list_move(&dev->power.entry, &list);
mutex_unlock(&dpm_list_mtx);

device_complete(dev, state);
pm_runtime_put_sync(dev);
device_complete(dev, state);
pm_runtime_put_sync(dev);

mutex_lock(&dpm_list_mtx);
}
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &list);
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
list_splice(&list, &dpm_list);
@@ -802,15 +761,13 @@ End:
*/
int dpm_suspend_noirq(pm_message_t state)
{
struct list_head list;
ktime_t starttime = ktime_get();
int error = 0;

INIT_LIST_HEAD(&list);
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.prev);
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);

get_device(dev);
mutex_unlock(&dpm_list_mtx);

@@ -823,12 +780,10 @@ int dpm_suspend_noirq(pm_message_t state)
put_device(dev);
break;
}
dev->power.status = DPM_OFF_IRQ;
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &list);
list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
}
list_splice_tail(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
if (error)
dpm_resume_noirq(resume_event(state));

@@ -876,6 +831,11 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (async_error)
goto End;

if (pm_wakeup_pending()) {
async_error = -EBUSY;
goto End;
}

if (dev->class) {
if (dev->class->pm) {
pm_dev_dbg(dev, state, "class ");
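The new pm_wakeup_pending() check above aborts the suspend of further devices once a wakeup event has been reported. A sketch of the reporting side, with a made-up wakeup-capable device:

  #include <linux/interrupt.h>
  #include <linux/pm_wakeup.h>

  static irqreturn_t foo_wake_irq(int irq, void *data)
  {
          struct device *dev = data;

          /* Register a wakeup event; a system suspend that is underway will
           * then see pm_wakeup_pending() == true and back out with -EBUSY. */
          pm_wakeup_event(dev, 0);
          return IRQ_HANDLED;
  }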
@@ -907,9 +867,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
}

if (!error)
dev->power.status = DPM_OFF;

End:
device_unlock(dev);
complete_all(&dev->power.completion);

@@ -951,16 +908,14 @@ static int device_suspend(struct device *dev)
*/
static int dpm_suspend(pm_message_t state)
{
struct list_head list;
ktime_t starttime = ktime_get();
int error = 0;

INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.prev);
while (!list_empty(&dpm_prepared_list)) {
struct device *dev = to_device(dpm_prepared_list.prev);

get_device(dev);
mutex_unlock(&dpm_list_mtx);

@@ -974,12 +929,11 @@ static int dpm_suspend(pm_message_t state)
break;
}
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &list);
list_move(&dev->power.entry, &dpm_suspended_list);
put_device(dev);
if (async_error)
break;
}
list_splice(&list, dpm_list.prev);
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
if (!error)

@@ -1038,22 +992,20 @@ static int device_prepare(struct device *dev, pm_message_t state)
*/
static int dpm_prepare(pm_message_t state)
{
struct list_head list;
int error = 0;

INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
transition_started = true;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);

get_device(dev);
dev->power.status = DPM_PREPARING;
mutex_unlock(&dpm_list_mtx);

pm_runtime_get_noresume(dev);
if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
/* Wake-up requested during system sleep transition. */
if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
pm_wakeup_event(dev, 0);

if (pm_wakeup_pending()) {
pm_runtime_put_sync(dev);
error = -EBUSY;
} else {

@@ -1062,24 +1014,22 @@ static int dpm_prepare(pm_message_t state)

mutex_lock(&dpm_list_mtx);
if (error) {
dev->power.status = DPM_ON;
if (error == -EAGAIN) {
put_device(dev);
error = 0;
continue;
}
printk(KERN_ERR "PM: Failed to prepare device %s "
"for power transition: error %d\n",
kobject_name(&dev->kobj), error);
printk(KERN_INFO "PM: Device %s not prepared "
"for power transition: code %d\n",
dev_name(dev), error);
put_device(dev);
break;
}
dev->power.status = DPM_SUSPENDING;
dev->power.in_suspend = true;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &list);
list_move_tail(&dev->power.entry, &dpm_prepared_list);
put_device(dev);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
return error;
}
@@ -250,13 +250,16 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
if (!cb)
return -ENOSYS;

spin_unlock_irq(&dev->power.lock);
if (dev->power.irq_safe) {
retval = cb(dev);
} else {
spin_unlock_irq(&dev->power.lock);

retval = cb(dev);
retval = cb(dev);

spin_lock_irq(&dev->power.lock);
spin_lock_irq(&dev->power.lock);
}
dev->power.runtime_error = retval;

return retval;
}

@@ -404,7 +407,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}

if (parent && !parent->power.ignore_children) {
if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
spin_unlock_irq(&dev->power.lock);

pm_request_idle(parent);

@@ -527,10 +530,13 @@ static int rpm_resume(struct device *dev, int rpmflags)

if (!parent && dev->parent) {
/*
* Increment the parent's resume counter and resume it if
* necessary.
* Increment the parent's usage counter and resume it if
* necessary. Not needed if dev is irq-safe; then the
* parent is permanently resumed.
*/
parent = dev->parent;
if (dev->power.irq_safe)
goto skip_parent;
spin_unlock(&dev->power.lock);

pm_runtime_get_noresume(parent);

@@ -553,6 +559,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
goto out;
goto repeat;
}
skip_parent:

if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */

@@ -584,7 +591,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
rpm_idle(dev, RPM_ASYNC);

out:
if (parent) {
if (parent && !dev->power.irq_safe) {
spin_unlock_irq(&dev->power.lock);

pm_runtime_put(parent);

@@ -1065,7 +1072,6 @@ EXPORT_SYMBOL_GPL(pm_runtime_allow);
* Set the power.no_callbacks flag, which tells the PM core that this
* device is power-managed through its parent and has no run-time PM
* callbacks of its own. The run-time sysfs attributes will be removed.
*
*/
void pm_runtime_no_callbacks(struct device *dev)
{

@@ -1077,6 +1083,27 @@ void pm_runtime_no_callbacks(struct device *dev)
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
* pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
* @dev: Device to handle
*
* Set the power.irq_safe flag, which tells the PM core that the
* ->runtime_suspend() and ->runtime_resume() callbacks for this device should
* always be invoked with the spinlock held and interrupts disabled. It also
* causes the parent's usage counter to be permanently incremented, preventing
* the parent from runtime suspending -- otherwise an irq-safe child might have
* to wait for a non-irq-safe parent.
*/
void pm_runtime_irq_safe(struct device *dev)
{
if (dev->parent)
pm_runtime_get_sync(dev->parent);
spin_lock_irq(&dev->power.lock);
dev->power.irq_safe = 1;
spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);

/**
* update_autosuspend - Handle a change to a device's autosuspend settings.
* @dev: Device to handle.
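Because rpm_callback() now keeps dev->power.lock held (with interrupts off) for irq-safe devices, the callbacks themselves must not block. A minimal sketch of such a pair; the private struct, register offsets and values are invented for illustration:

  #include <linux/device.h>
  #include <linux/io.h>

  struct foo_priv {
          void __iomem *base;
  };
  #define FOO_PWR_REG     0x10
  #define FOO_CLK_GATE    0x0
  #define FOO_CLK_UNGATE  0x1

  /* Irq-safe runtime PM callbacks: MMIO writes only, no mutexes,
   * no clk_prepare(), no msleep(). */
  static int foo_runtime_suspend(struct device *dev)
  {
          struct foo_priv *priv = dev_get_drvdata(dev);

          writel(FOO_CLK_GATE, priv->base + FOO_PWR_REG);
          return 0;
  }

  static int foo_runtime_resume(struct device *dev)
  {
          struct foo_priv *priv = dev_get_drvdata(dev);

          writel(FOO_CLK_UNGATE, priv->base + FOO_PWR_REG);
          return 0;
  }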
@@ -1199,4 +1226,6 @@ void pm_runtime_remove(struct device *dev)
/* Change the status back to 'suspended' to match the initial status. */
if (dev->power.runtime_status == RPM_ACTIVE)
pm_runtime_set_suspended(dev);
if (dev->power.irq_safe && dev->parent)
pm_runtime_put_sync(dev->parent);
}
@@ -542,26 +542,26 @@ static void pm_wakeup_update_hit_counts(void)
}

/**
* pm_check_wakeup_events - Check for new wakeup events.
* pm_wakeup_pending - Check if power transition in progress should be aborted.
*
* Compare the current number of registered wakeup events with its preserved
* value from the past to check if new wakeup events have been registered since
* the old value was stored. Check if the current number of wakeup events being
* processed is zero.
* value from the past and return true if new wakeup events have been registered
* since the old value was stored. Also return true if the current number of
* wakeup events being processed is different from zero.
*/
bool pm_check_wakeup_events(void)
bool pm_wakeup_pending(void)
{
unsigned long flags;
bool ret = true;
bool ret = false;

spin_lock_irqsave(&events_lock, flags);
if (events_check_enabled) {
ret = ((unsigned int)atomic_read(&event_count) == saved_count)
&& !atomic_read(&events_in_progress);
events_check_enabled = ret;
ret = ((unsigned int)atomic_read(&event_count) != saved_count)
|| atomic_read(&events_in_progress);
events_check_enabled = !ret;
}
spin_unlock_irqrestore(&events_lock, flags);
if (!ret)
if (ret)
pm_wakeup_update_hit_counts();
return ret;
}
@@ -28,6 +28,7 @@
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_spi.h>
#include <linux/pm_runtime.h>

static void spidev_release(struct device *dev)
{

@@ -100,9 +101,8 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}

#ifdef CONFIG_PM

static int spi_suspend(struct device *dev, pm_message_t message)
#ifdef CONFIG_PM_SLEEP
static int spi_legacy_suspend(struct device *dev, pm_message_t message)
{
int value = 0;
struct spi_driver *drv = to_spi_driver(dev->driver);

@@ -117,7 +117,7 @@ static int spi_suspend(struct device *dev, pm_message_t message)
return value;
}

static int spi_resume(struct device *dev)
static int spi_legacy_resume(struct device *dev)
{
int value = 0;
struct spi_driver *drv = to_spi_driver(dev->driver);

@@ -132,18 +132,94 @@ static int spi_resume(struct device *dev)
return value;
}

static int spi_pm_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

if (pm)
return pm_generic_suspend(dev);
else
return spi_legacy_suspend(dev, PMSG_SUSPEND);
}

static int spi_pm_resume(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

if (pm)
return pm_generic_resume(dev);
else
return spi_legacy_resume(dev);
}

static int spi_pm_freeze(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

if (pm)
return pm_generic_freeze(dev);
else
return spi_legacy_suspend(dev, PMSG_FREEZE);
}

static int spi_pm_thaw(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

if (pm)
return pm_generic_thaw(dev);
else
return spi_legacy_resume(dev);
}

static int spi_pm_poweroff(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

if (pm)
return pm_generic_poweroff(dev);
else
return spi_legacy_suspend(dev, PMSG_HIBERNATE);
}

static int spi_pm_restore(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

if (pm)
return pm_generic_restore(dev);
else
return spi_legacy_resume(dev);
}
#else
#define spi_suspend NULL
#define spi_resume NULL
#define spi_pm_suspend NULL
#define spi_pm_resume NULL
#define spi_pm_freeze NULL
#define spi_pm_thaw NULL
#define spi_pm_poweroff NULL
#define spi_pm_restore NULL
#endif

static const struct dev_pm_ops spi_pm = {
.suspend = spi_pm_suspend,
.resume = spi_pm_resume,
.freeze = spi_pm_freeze,
.thaw = spi_pm_thaw,
.poweroff = spi_pm_poweroff,
.restore = spi_pm_restore,
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
pm_generic_runtime_idle
)
};

struct bus_type spi_bus_type = {
.name = "spi",
.dev_attrs = spi_dev_attrs,
.match = spi_match_device,
.uevent = spi_uevent,
.suspend = spi_suspend,
.resume = spi_resume,
.pm = &spi_pm,
};
EXPORT_SYMBOL_GPL(spi_bus_type);
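With the SPI bus type now exporting dev_pm_ops, a protocol driver can provide its PM callbacks through .driver.pm instead of the legacy spi_driver suspend/resume methods. A minimal, hypothetical example (all "fooclient" names are made up):

  #include <linux/pm.h>
  #include <linux/spi/spi.h>

  static int fooclient_probe(struct spi_device *spi)
  {
          return 0;
  }

  static int fooclient_remove(struct spi_device *spi)
  {
          return 0;
  }

  static int fooclient_suspend(struct device *dev)
  {
          /* quiesce the chip */
          return 0;
  }

  static int fooclient_resume(struct device *dev)
  {
          /* reinitialize the chip */
          return 0;
  }

  /* The SPI core's spi_pm_suspend()/spi_pm_resume() dispatch here via
   * pm_generic_suspend()/pm_generic_resume(). */
  static const struct dev_pm_ops fooclient_pm_ops = {
          .suspend = fooclient_suspend,
          .resume  = fooclient_resume,
  };

  static struct spi_driver fooclient_driver = {
          .driver = {
                  .name = "fooclient",
                  .pm   = &fooclient_pm_ops,
          },
          .probe  = fooclient_probe,
          .remove = fooclient_remove,
  };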
@@ -375,7 +375,7 @@ static int usb_unbind_interface(struct device *dev)
* Just re-enable it without affecting the endpoint toggles.
*/
usb_enable_interface(udev, intf, false);
} else if (!error && intf->dev.power.status == DPM_ON) {
} else if (!error && !intf->dev.power.in_suspend) {
r = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
if (r < 0)

@@ -960,7 +960,7 @@ void usb_rebind_intf(struct usb_interface *intf)
}

/* Try to rebind the interface */
if (intf->dev.power.status == DPM_ON) {
if (!intf->dev.power.in_suspend) {
intf->needs_binding = 0;
rc = device_attach(&intf->dev);
if (rc < 0)

@@ -1107,8 +1107,7 @@ static int usb_resume_interface(struct usb_device *udev,
if (intf->condition == USB_INTERFACE_UNBOUND) {

/* Carry out a deferred switch to altsetting 0 */
if (intf->needs_altsetting0 &&
intf->dev.power.status == DPM_ON) {
if (intf->needs_altsetting0 && !intf->dev.power.in_suspend) {
usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
intf->needs_altsetting0 = 0;
@@ -508,13 +508,13 @@ static inline int device_is_registered(struct device *dev)

static inline void device_enable_async_suspend(struct device *dev)
{
if (dev->power.status == DPM_ON)
if (!dev->power.in_suspend)
dev->power.async_suspend = true;
}

static inline void device_disable_async_suspend(struct device *dev)
{
if (dev->power.status == DPM_ON)
if (!dev->power.in_suspend)
dev->power.async_suspend = false;
}
@@ -366,45 +366,6 @@ extern struct dev_pm_ops generic_subsys_pm_ops;
#define PMSG_AUTO_RESUME ((struct pm_message) \
{ .event = PM_EVENT_AUTO_RESUME, })

/**
* Device power management states
*
* These state labels are used internally by the PM core to indicate the current
* status of a device with respect to the PM core operations.
*
* DPM_ON Device is regarded as operational. Set this way
* initially and when ->complete() is about to be called.
* Also set when ->prepare() fails.
*
* DPM_PREPARING Device is going to be prepared for a PM transition. Set
* when ->prepare() is about to be called.
*
* DPM_RESUMING Device is going to be resumed. Set when ->resume(),
* ->thaw(), or ->restore() is about to be called.
*
* DPM_SUSPENDING Device has been prepared for a power transition. Set
* when ->prepare() has just succeeded.
*
* DPM_OFF Device is regarded as inactive. Set immediately after
* ->suspend(), ->freeze(), or ->poweroff() has succeeded.
* Also set when ->resume()_noirq, ->thaw_noirq(), or
* ->restore_noirq() is about to be called.
*
* DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after
* ->suspend_noirq(), ->freeze_noirq(), or
* ->poweroff_noirq() has just succeeded.
*/

enum dpm_state {
DPM_INVALID,
DPM_ON,
DPM_PREPARING,
DPM_RESUMING,
DPM_SUSPENDING,
DPM_OFF,
DPM_OFF_IRQ,
};

/**
* Device run-time power management status.
*

@@ -463,8 +424,8 @@ struct wakeup_source;
struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned async_suspend:1;
enum dpm_state status; /* Owned by the PM core */
unsigned int async_suspend:1;
unsigned int in_suspend:1; /* Owned by the PM core */
spinlock_t lock;
#ifdef CONFIG_PM_SLEEP
struct list_head entry;

@@ -486,6 +447,7 @@ struct dev_pm_info {
unsigned int run_wake:1;
unsigned int runtime_auto:1;
unsigned int no_callbacks:1;
unsigned int irq_safe:1;
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
enum rpm_request request;

@@ -610,4 +572,11 @@ extern unsigned int pm_flags;
#define PM_APM 1
#define PM_ACPI 2

extern int pm_generic_suspend(struct device *dev);
extern int pm_generic_resume(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
extern int pm_generic_restore(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);

#endif /* _LINUX_PM_H */
@@ -40,6 +40,7 @@ extern int pm_generic_runtime_idle(struct device *dev);
extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);

@@ -81,6 +82,11 @@ static inline bool pm_runtime_suspended(struct device *dev)
&& !dev->power.disable_depth;
}

static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
}

static inline void pm_runtime_mark_last_busy(struct device *dev)
{
ACCESS_ONCE(dev->power.last_busy) = jiffies;

@@ -119,11 +125,13 @@ static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool device_run_wake(struct device *dev) { return false; }
static inline void device_set_run_wake(struct device *dev, bool enable) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }

static inline int pm_generic_runtime_idle(struct device *dev) { return 0; }
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}

static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,

@@ -196,6 +204,11 @@ static inline int pm_runtime_put_sync(struct device *dev)
return __pm_runtime_idle(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
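pm_runtime_enabled() is added above so that system-resume code can re-synchronize the runtime PM status only when runtime PM is actually in use, as __pm_generic_resume() now does. A sketch of that pattern in a hypothetical driver's ->resume() callback:

  #include <linux/pm_runtime.h>

  /* Hypothetical system-resume callback: the hardware was powered up by the
   * resume path, so reflect that in the runtime PM status -- but only if
   * runtime PM is enabled for the device. */
  static int foo_resume(struct device *dev)
  {
          /* ... restore registers, the hardware is active again ... */

          if (pm_runtime_enabled(dev)) {
                  pm_runtime_disable(dev);   /* avoid races while updating */
                  pm_runtime_set_active(dev);
                  pm_runtime_enable(dev);
          }
          return 0;
  }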
@@ -292,7 +292,7 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;

extern bool pm_check_wakeup_events(void);
extern bool pm_wakeup_pending(void);
extern bool pm_get_wakeup_count(unsigned int *count);
extern bool pm_save_wakeup_count(unsigned int count);
#else /* !CONFIG_PM_SLEEP */

@@ -309,7 +309,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)

#define pm_notifier(fn, pri) do { (void)(fn); } while (0)

static inline bool pm_check_wakeup_events(void) { return true; }
static inline bool pm_wakeup_pending(void) { return false; }
#endif /* !CONFIG_PM_SLEEP */

extern struct mutex pm_mutex;
@@ -104,8 +104,13 @@ bool freeze_task(struct task_struct *p, bool sig_only)
}

if (should_send_signal(p)) {
if (!signal_pending(p))
fake_signal_wake_up(p);
fake_signal_wake_up(p);
/*
* fake_signal_wake_up() goes through p's scheduler
* lock and guarantees that TASK_STOPPED/TRACED ->
* TASK_RUNNING transition can't race with task state
* testing in try_to_freeze_tasks().
*/
} else if (sig_only) {
return false;
} else {
@@ -1,7 +1,4 @@

ifeq ($(CONFIG_PM_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif
ccflags-$(CONFIG_PM_DEBUG) := -DDEBUG

obj-$(CONFIG_PM) += main.o
obj-$(CONFIG_PM_SLEEP) += console.o
@@ -62,7 +62,7 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
{
if (ops && !(ops->begin && ops->end && ops->pre_snapshot
&& ops->prepare && ops->finish && ops->enter && ops->pre_restore
&& ops->restore_cleanup)) {
&& ops->restore_cleanup && ops->leave)) {
WARN_ON(1);
return;
}
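hibernation_set_ops() now also insists on a ->leave callback. A sketch of a platform registering a complete set of callbacks; the "foo" implementations are trivial stubs and the callback signatures are assumed to match struct platform_hibernation_ops in this kernel's include/linux/suspend.h:

  #include <linux/suspend.h>

  static int  foo_hib_begin(void)            { return 0; }
  static void foo_hib_end(void)              { }
  static int  foo_hib_pre_snapshot(void)     { return 0; }
  static void foo_hib_finish(void)           { }
  static int  foo_hib_prepare(void)          { return 0; }
  static int  foo_hib_enter(void)            { return 0; }
  static void foo_hib_leave(void)            { }
  static int  foo_hib_pre_restore(void)      { return 0; }
  static void foo_hib_restore_cleanup(void)  { }

  /* Every callback checked by hibernation_set_ops() -- now including
   * ->leave -- must be non-NULL, or the registration is rejected. */
  static struct platform_hibernation_ops foo_hibernation_ops = {
          .begin           = foo_hib_begin,
          .end             = foo_hib_end,
          .pre_snapshot    = foo_hib_pre_snapshot,
          .finish          = foo_hib_finish,
          .prepare         = foo_hib_prepare,
          .enter           = foo_hib_enter,
          .leave           = foo_hib_leave,
          .pre_restore     = foo_hib_pre_restore,
          .restore_cleanup = foo_hib_restore_cleanup,
  };

  static int __init foo_pm_init(void)
  {
          hibernation_set_ops(&foo_hibernation_ops);
          return 0;
  }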
@@ -278,7 +278,7 @@ static int create_image(int platform_mode)
goto Enable_irqs;
}

if (hibernation_test(TEST_CORE) || !pm_check_wakeup_events())
if (hibernation_test(TEST_CORE) || pm_wakeup_pending())
goto Power_up;

in_suspend = 1;

@@ -516,7 +516,7 @@ int hibernation_platform_enter(void)

local_irq_disable();
sysdev_suspend(PMSG_HIBERNATE);
if (!pm_check_wakeup_events()) {
if (pm_wakeup_pending()) {
error = -EAGAIN;
goto Power_up;
}

@@ -647,6 +647,7 @@ int hibernate(void)
swsusp_free();
if (!error)
power_down();
in_suspend = 0;
pm_restore_gfp_mask();
} else {
pr_debug("PM: Image restored successfully.\n");
@@ -64,6 +64,12 @@ static int try_to_freeze_tasks(bool sig_only)
* perturb a task in TASK_STOPPED or TASK_TRACED.
* It is "frozen enough". If the task does wake
* up, it will immediately call try_to_freeze.
*
* Because freeze_task() goes through p's
* scheduler lock after setting TIF_FREEZE, it's
* guaranteed that either we see TASK_RUNNING or
* try_to_stop() after schedule() in ptrace/signal
* stop sees TIF_FREEZE.
*/
if (!task_is_stopped_or_traced(p) &&
!freezer_should_skip(p))

@@ -79,7 +85,7 @@ static int try_to_freeze_tasks(bool sig_only)
if (!todo || time_after(jiffies, end_time))
break;

if (!pm_check_wakeup_events()) {
if (pm_wakeup_pending()) {
wakeup = true;
break;
}
@@ -164,7 +164,7 @@ static int suspend_enter(suspend_state_t state)

error = sysdev_suspend(PMSG_SUSPEND);
if (!error) {
if (!suspend_test(TEST_CORE) && pm_check_wakeup_events()) {
if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
error = suspend_ops->enter(state);
events_check_enabled = false;
}