Merge branch 'pm-sleep'
* pm-sleep:
  PM: sleep: Drop dev_pm_skip_next_resume_phases()
  ACPI: PM: Drop unused function and function header
  ACPI: PM: Introduce "poweroff" callbacks for ACPI PM domain and LPSS
  ACPI: PM: Simplify and fix PM domain hibernation callbacks
  PCI: PM: Simplify bus-level hibernation callbacks
  PM: ACPI/PCI: Resume all devices during hibernation
  kernel: power: swap: use kzalloc() instead of kmalloc() followed by memset()
  PM: sleep: Update struct wakeup_source documentation
  drivers: base: power: remove wakeup_sources_stats_dentry variable
  PM: suspend: Rename pm_suspend_via_s2idle()
  PM: sleep: Show how long dpm_suspend_start() and dpm_suspend_end() take
  PM: hibernate: powerpc: Expose pfn_is_nosave() prototype
commit 3dbeb44854
@@ -7,6 +7,7 @@
  */

 #include <linux/mm.h>
+#include <linux/suspend.h>
 #include <asm/page.h>
 #include <asm/sections.h>

@@ -63,7 +63,6 @@ void __init startup_init(void);
 void die(struct pt_regs *regs, const char *str);
 int setup_profiling_timer(unsigned int multiplier);
 void __init time_init(void);
-int pfn_is_nosave(unsigned long);
 void s390_early_resume(void);
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long sp, unsigned long ip);

@@ -1061,6 +1061,13 @@ static int acpi_lpss_suspend_noirq(struct device *dev)
         int ret;

         if (pdata->dev_desc->resume_from_noirq) {
+                /*
+                 * The driver's ->suspend_late callback will be invoked by
+                 * acpi_lpss_do_suspend_late(), with the assumption that the
+                 * driver really wanted to run that code in ->suspend_noirq, but
+                 * it could not run after acpi_dev_suspend() and the driver
+                 * expected the latter to be called in the "late" phase.
+                 */
                 ret = acpi_lpss_do_suspend_late(dev);
                 if (ret)
                         return ret;
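The comment added in this hunk describes a phase-reordering trick: when resume_from_noirq is set, the work a driver exposes as ->suspend_late is deliberately run from the "noirq" callback so that it still happens before the ACPI-level suspend step. Below is a small userspace model of that dispatch; it is a sketch only, every name in it is invented for illustration, and none of it is kernel API.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for a driver callback and the ACPI-level step. */
static int driver_suspend_late(void) { puts("driver ->suspend_late"); return 0; }
static int firmware_suspend(void)    { puts("ACPI-level suspend step"); return 0; }

struct model_dev {
        bool resume_from_noirq;  /* models pdata->dev_desc->resume_from_noirq */
};

/* "late" phase: skip the driver callback when its work was deferred. */
static int model_suspend_late(struct model_dev *d)
{
        if (d->resume_from_noirq)
                return 0;                    /* deferred to the noirq phase */
        if (driver_suspend_late())
                return -1;
        return firmware_suspend();
}

/* "noirq" phase: run the deferred "late" work first, then the noirq work. */
static int model_suspend_noirq(struct model_dev *d)
{
        if (d->resume_from_noirq) {
                if (driver_suspend_late())
                        return -1;
                if (firmware_suspend())
                        return -1;
        }
        puts("generic ->suspend_noirq");
        return 0;
}

int main(void)
{
        struct model_dev d = { .resume_from_noirq = true };

        model_suspend_late(&d);   /* prints nothing: the work was deferred */
        model_suspend_noirq(&d);  /* deferred late work runs here, then noirq */
        return 0;
}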
@@ -1091,16 +1098,99 @@ static int acpi_lpss_resume_noirq(struct device *dev)
         struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
         int ret;

-        ret = acpi_subsys_resume_noirq(dev);
+        /* Follow acpi_subsys_resume_noirq(). */
+        if (dev_pm_may_skip_resume(dev))
+                return 0;
+
+        if (dev_pm_smart_suspend_and_suspended(dev))
+                pm_runtime_set_active(dev);
+
+        ret = pm_generic_resume_noirq(dev);
         if (ret)
                 return ret;

-        if (!dev_pm_may_skip_resume(dev) && pdata->dev_desc->resume_from_noirq)
-                ret = acpi_lpss_do_resume_early(dev);
+        if (!pdata->dev_desc->resume_from_noirq)
+                return 0;

-        return ret;
+        /*
+         * The driver's ->resume_early callback will be invoked by
+         * acpi_lpss_do_resume_early(), with the assumption that the driver
+         * really wanted to run that code in ->resume_noirq, but it could not
+         * run before acpi_dev_resume() and the driver expected the latter to be
+         * called in the "early" phase.
+         */
+        return acpi_lpss_do_resume_early(dev);
+}
+
+static int acpi_lpss_do_restore_early(struct device *dev)
+{
+        int ret = acpi_lpss_resume(dev);
+
+        return ret ? ret : pm_generic_restore_early(dev);
+}
+
+static int acpi_lpss_restore_early(struct device *dev)
+{
+        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+        if (pdata->dev_desc->resume_from_noirq)
+                return 0;
+
+        return acpi_lpss_do_restore_early(dev);
+}
+
+static int acpi_lpss_restore_noirq(struct device *dev)
+{
+        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+        int ret;
+
+        ret = pm_generic_restore_noirq(dev);
+        if (ret)
+                return ret;
+
+        if (!pdata->dev_desc->resume_from_noirq)
+                return 0;
+
+        /* This is analogous to what happens in acpi_lpss_resume_noirq(). */
+        return acpi_lpss_do_restore_early(dev);
+}
+
+static int acpi_lpss_do_poweroff_late(struct device *dev)
+{
+        int ret = pm_generic_poweroff_late(dev);
+
+        return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+}
+
+static int acpi_lpss_poweroff_late(struct device *dev)
+{
+        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+        if (dev_pm_smart_suspend_and_suspended(dev))
+                return 0;
+
+        if (pdata->dev_desc->resume_from_noirq)
+                return 0;
+
+        return acpi_lpss_do_poweroff_late(dev);
+}
+
+static int acpi_lpss_poweroff_noirq(struct device *dev)
+{
+        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+
+        if (dev_pm_smart_suspend_and_suspended(dev))
+                return 0;
+
+        if (pdata->dev_desc->resume_from_noirq) {
+                /* This is analogous to the acpi_lpss_suspend_noirq() case. */
+                int ret = acpi_lpss_do_poweroff_late(dev);
+
+                if (ret)
+                        return ret;
+        }
+
+        return pm_generic_poweroff_noirq(dev);
 }

 #endif /* CONFIG_PM_SLEEP */

 static int acpi_lpss_runtime_suspend(struct device *dev)
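All of the new acpi_lpss_do_*() helpers above share one shape: run the generic PM step first and fall through to the ACPI-specific step only when it succeeded. A standalone sketch of that shape is below; the function names are invented and only the control flow mirrors the helpers.

#include <stdio.h>

/* Invented stand-ins for a generic PM step and an ACPI-specific step. */
static int generic_poweroff_late(void) { puts("generic late poweroff"); return 0; }
static int acpi_low_power_entry(void)  { puts("ACPI low-power entry");  return 0; }

/*
 * Mirrors the "return ret ? ret : next_step();" idiom: the second step runs
 * only if the first one succeeded, and the first error wins.
 */
static int do_poweroff_late(void)
{
        int ret = generic_poweroff_late();

        return ret ? ret : acpi_low_power_entry();
}

int main(void)
{
        return do_poweroff_late() ? 1 : 0;
}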
@@ -1134,14 +1224,11 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
                 .resume_noirq = acpi_lpss_resume_noirq,
                 .resume_early = acpi_lpss_resume_early,
                 .freeze = acpi_subsys_freeze,
-                .freeze_late = acpi_subsys_freeze_late,
-                .freeze_noirq = acpi_subsys_freeze_noirq,
-                .thaw_noirq = acpi_subsys_thaw_noirq,
-                .poweroff = acpi_subsys_suspend,
-                .poweroff_late = acpi_lpss_suspend_late,
-                .poweroff_noirq = acpi_lpss_suspend_noirq,
-                .restore_noirq = acpi_lpss_resume_noirq,
-                .restore_early = acpi_lpss_resume_early,
+                .poweroff = acpi_subsys_poweroff,
+                .poweroff_late = acpi_lpss_poweroff_late,
+                .poweroff_noirq = acpi_lpss_poweroff_noirq,
+                .restore_noirq = acpi_lpss_restore_noirq,
+                .restore_early = acpi_lpss_restore_early,
 #endif
                 .runtime_suspend = acpi_lpss_runtime_suspend,
                 .runtime_resume = acpi_lpss_runtime_resume,
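The table above rewires the hibernation "poweroff" and "restore" phases of the LPSS PM domain to the new callbacks. As a rough model of how such a callback table is consumed, the sketch below walks three poweroff phases in order and stops at the first error; the types and names are invented and heavily reduced compared with the real dev_pm_domain.

#include <stdio.h>

/* Invented, much-reduced analogue of a PM domain's sleep callbacks. */
struct model_pm_ops {
        int (*poweroff)(void);
        int (*poweroff_late)(void);
        int (*poweroff_noirq)(void);
};

static int lpss_model_poweroff(void)       { puts("poweroff");       return 0; }
static int lpss_model_poweroff_late(void)  { puts("poweroff_late");  return 0; }
static int lpss_model_poweroff_noirq(void) { puts("poweroff_noirq"); return 0; }

static const struct model_pm_ops lpss_model_domain = {
        .poweroff       = lpss_model_poweroff,
        .poweroff_late  = lpss_model_poweroff_late,
        .poweroff_noirq = lpss_model_poweroff_noirq,
};

/* The phases run in a fixed order; the first error stops the sequence. */
static int run_poweroff_phases(const struct model_pm_ops *ops)
{
        int ret = ops->poweroff();

        if (ret)
                return ret;
        ret = ops->poweroff_late();
        if (ret)
                return ret;
        return ops->poweroff_noirq();
}

int main(void)
{
        return run_poweroff_phases(&lpss_model_domain) ? 1 : 0;
}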
@@ -1120,7 +1120,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
  * acpi_subsys_resume_noirq - Run the device driver's "noirq" resume callback.
  * @dev: Device to handle.
  */
-int acpi_subsys_resume_noirq(struct device *dev)
+static int acpi_subsys_resume_noirq(struct device *dev)
 {
         if (dev_pm_may_skip_resume(dev))
                 return 0;
@@ -1135,7 +1135,6 @@ int acpi_subsys_resume_noirq(struct device *dev)

         return pm_generic_resume_noirq(dev);
 }
-EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq);

 /**
  * acpi_subsys_resume_early - Resume device using ACPI.
|
|||
* generic early resume procedure for it during system transition into the
|
||||
* working state.
|
||||
*/
|
||||
int acpi_subsys_resume_early(struct device *dev)
|
||||
static int acpi_subsys_resume_early(struct device *dev)
|
||||
{
|
||||
int ret = acpi_dev_resume(dev);
|
||||
return ret ? ret : pm_generic_resume_early(dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
|
||||
|
||||
/**
|
||||
* acpi_subsys_freeze - Run the device driver's freeze callback.
|
||||
|
@@ -1159,65 +1157,81 @@ EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
 int acpi_subsys_freeze(struct device *dev)
 {
         /*
-         * This used to be done in acpi_subsys_prepare() for all devices and
-         * some drivers may depend on it, so do it here. Ideally, however,
-         * runtime-suspended devices should not be touched during freeze/thaw
-         * transitions.
+         * Resume all runtime-suspended devices before creating a snapshot
+         * image of system memory, because the restore kernel generally cannot
+         * be expected to always handle them consistently and they need to be
+         * put into the runtime-active metastate during system resume anyway,
+         * so it is better to ensure that the state saved in the image will be
+         * always consistent with that.
          */
-        if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
-                pm_runtime_resume(dev);
+        pm_runtime_resume(dev);

         return pm_generic_freeze(dev);
 }
 EXPORT_SYMBOL_GPL(acpi_subsys_freeze);

 /**
- * acpi_subsys_freeze_late - Run the device driver's "late" freeze callback.
- * @dev: Device to handle.
+ * acpi_subsys_restore_early - Restore device using ACPI.
+ * @dev: Device to restore.
  */
-int acpi_subsys_freeze_late(struct device *dev)
+int acpi_subsys_restore_early(struct device *dev)
 {
+        int ret = acpi_dev_resume(dev);
+        return ret ? ret : pm_generic_restore_early(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_restore_early);
+
+/**
+ * acpi_subsys_poweroff - Run the device driver's poweroff callback.
+ * @dev: Device to handle.
+ *
+ * Follow PCI and resume devices from runtime suspend before running their
+ * system poweroff callbacks, unless the driver can cope with runtime-suspended
+ * devices during system suspend and there are no ACPI-specific reasons for
+ * resuming them.
+ */
+int acpi_subsys_poweroff(struct device *dev)
+{
+        if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+            acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
+                pm_runtime_resume(dev);
+
+        return pm_generic_poweroff(dev);
+}
+EXPORT_SYMBOL_GPL(acpi_subsys_poweroff);
+
+/**
+ * acpi_subsys_poweroff_late - Run the device driver's poweroff callback.
+ * @dev: Device to handle.
+ *
+ * Carry out the generic late poweroff procedure for @dev and use ACPI to put
+ * it into a low-power state during system transition into a sleep state.
+ */
+static int acpi_subsys_poweroff_late(struct device *dev)
+{
+        int ret;
+
         if (dev_pm_smart_suspend_and_suspended(dev))
                 return 0;

-        return pm_generic_freeze_late(dev);
+        ret = pm_generic_poweroff_late(dev);
+        if (ret)
+                return ret;
+
+        return acpi_dev_suspend(dev, device_may_wakeup(dev));
 }
-EXPORT_SYMBOL_GPL(acpi_subsys_freeze_late);

 /**
- * acpi_subsys_freeze_noirq - Run the device driver's "noirq" freeze callback.
- * @dev: Device to handle.
+ * acpi_subsys_poweroff_noirq - Run the driver's "noirq" poweroff callback.
+ * @dev: Device to suspend.
  */
-int acpi_subsys_freeze_noirq(struct device *dev)
+static int acpi_subsys_poweroff_noirq(struct device *dev)
 {
         if (dev_pm_smart_suspend_and_suspended(dev))
                 return 0;

-        return pm_generic_freeze_noirq(dev);
+        return pm_generic_poweroff_noirq(dev);
 }
-EXPORT_SYMBOL_GPL(acpi_subsys_freeze_noirq);
-
-/**
- * acpi_subsys_thaw_noirq - Run the device driver's "noirq" thaw callback.
- * @dev: Device to handle.
- */
-int acpi_subsys_thaw_noirq(struct device *dev)
-{
-        /*
-         * If the device is in runtime suspend, the "thaw" code may not work
-         * correctly with it, so skip the driver callback and make the PM core
-         * skip all of the subsequent "thaw" callbacks for the device.
-         */
-        if (dev_pm_smart_suspend_and_suspended(dev)) {
-                dev_pm_skip_next_resume_phases(dev);
-                return 0;
-        }
-
-        return pm_generic_thaw_noirq(dev);
-}
-EXPORT_SYMBOL_GPL(acpi_subsys_thaw_noirq);
 #endif /* CONFIG_PM_SLEEP */

 static struct dev_pm_domain acpi_general_pm_domain = {
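acpi_subsys_poweroff() above resumes a runtime-suspended device before its poweroff callback unless the driver set DPM_FLAG_SMART_SUSPEND and ACPI has no reason to wake the device first. The userspace sketch below models only that decision; the struct and field names are made up for illustration and do not exist in the kernel.

#include <stdbool.h>
#include <stdio.h>

/* Toy device model; the fields are invented for illustration only. */
struct model_dev {
        bool smart_suspend;      /* driver set a DPM_FLAG_SMART_SUSPEND-like flag */
        bool acpi_needs_resume;  /* platform-specific reason to wake it first */
        bool runtime_suspended;
};

/* Resume the device first unless the driver opted in to staying suspended
 * and nothing on the platform side needs it awake. */
static void model_poweroff(struct model_dev *d)
{
        if (!d->smart_suspend || d->acpi_needs_resume) {
                d->runtime_suspended = false;  /* plays the pm_runtime_resume() role */
                puts("device resumed before the poweroff callback");
        }
        puts("driver ->poweroff runs");
}

int main(void)
{
        struct model_dev normal   = { 0 };
        struct model_dev opted_in = { .smart_suspend = true, .runtime_suspended = true };

        model_poweroff(&normal);    /* resumed first */
        model_poweroff(&opted_in);  /* left runtime-suspended */
        return 0;
}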
@@ -1233,14 +1247,10 @@ static struct dev_pm_domain acpi_general_pm_domain = {
                 .resume_noirq = acpi_subsys_resume_noirq,
                 .resume_early = acpi_subsys_resume_early,
                 .freeze = acpi_subsys_freeze,
-                .freeze_late = acpi_subsys_freeze_late,
-                .freeze_noirq = acpi_subsys_freeze_noirq,
-                .thaw_noirq = acpi_subsys_thaw_noirq,
-                .poweroff = acpi_subsys_suspend,
-                .poweroff_late = acpi_subsys_suspend_late,
-                .poweroff_noirq = acpi_subsys_suspend_noirq,
-                .restore_noirq = acpi_subsys_resume_noirq,
-                .restore_early = acpi_subsys_resume_early,
+                .poweroff = acpi_subsys_poweroff,
+                .poweroff_late = acpi_subsys_poweroff_late,
+                .poweroff_noirq = acpi_subsys_poweroff_noirq,
+                .restore_early = acpi_subsys_restore_early,
 #endif
         },
 };
@@ -529,21 +529,6 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)

 /*------------------------- Resume routines -------------------------*/

-/**
- * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
- * @dev: Target device.
- *
- * Make the core skip the "early resume" and "resume" phases for @dev.
- *
- * This function can be called by middle-layer code during the "noirq" phase of
- * system resume if necessary, but not by device drivers.
- */
-void dev_pm_skip_next_resume_phases(struct device *dev)
-{
-        dev->power.is_late_suspended = false;
-        dev->power.is_suspended = false;
-}
-
 /**
  * suspend_event - Return a "suspend" message for given "resume" one.
  * @resume_msg: PM message representing a system-wide resume transition.
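The helper removed above only cleared two pieces of per-device bookkeeping, and a later hunk open-codes exactly that at its single remaining call site. The toy model below shows how clearing such flags makes later resume phases skip a device; the names are invented and this is not the PM core.

#include <stdbool.h>
#include <stdio.h>

/* Invented per-device bookkeeping, loosely modeled on the dev->power flags. */
struct model_power {
        bool is_late_suspended;
        bool is_suspended;
};

/* A later resume phase only acts on devices whose flag is still set. */
static void model_resume_early(struct model_power *p)
{
        if (!p->is_late_suspended)
                return;                 /* phase skipped for this device */
        puts("early resume work");
        p->is_late_suspended = false;
}

int main(void)
{
        struct model_power p = { .is_late_suspended = true, .is_suspended = true };

        /* "noirq" code decides to leave the device suspended: clearing the
         * flags is all it takes for the next phases to skip it. */
        p.is_late_suspended = false;
        p.is_suspended = false;

        model_resume_early(&p);         /* prints nothing */
        return 0;
}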
@@ -681,6 +666,9 @@ Skip:
         dev->power.is_noirq_suspended = false;

         if (skip_resume) {
+                /* Make the next phases of resume skip the device. */
+                dev->power.is_late_suspended = false;
+                dev->power.is_suspended = false;
                 /*
                  * The device is going to be left in suspend, but it might not
                  * have been in runtime suspend before the system suspended, so
@@ -689,7 +677,6 @@ Skip:
                  * device again.
                  */
                 pm_runtime_set_suspended(dev);
-                dev_pm_skip_next_resume_phases(dev);
         }

 Out:
@@ -1631,17 +1618,20 @@ int dpm_suspend_late(pm_message_t state)
  */
 int dpm_suspend_end(pm_message_t state)
 {
-        int error = dpm_suspend_late(state);
+        ktime_t starttime = ktime_get();
+        int error;
+
+        error = dpm_suspend_late(state);
         if (error)
-                return error;
+                goto out;

         error = dpm_suspend_noirq(state);
-        if (error) {
+        if (error)
                 dpm_resume_early(resume_event(state));
-                return error;
-        }

-        return 0;
+out:
+        dpm_show_time(starttime, state, error, "end");
+        return error;
 }
 EXPORT_SYMBOL_GPL(dpm_suspend_end);

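The rework above brackets dpm_suspend_end() with a ktime_get()/dpm_show_time() pair so the duration of the "end" phase is reported even when it fails. A userspace analogue of that pairing, using a POSIX monotonic clock instead of ktime, is sketched below; the function names are invented.

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Userspace analogue of the ktime_get()/dpm_show_time() pairing. */
static struct timespec phase_start(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts;
}

static void phase_show_time(struct timespec start, int error, const char *info)
{
        struct timespec end;
        long usecs;

        clock_gettime(CLOCK_MONOTONIC, &end);
        usecs = (end.tv_sec - start.tv_sec) * 1000000L +
                (end.tv_nsec - start.tv_nsec) / 1000L;
        printf("phase \"%s\" complete after %ld.%03ld msecs (error %d)\n",
               info, usecs / 1000, usecs % 1000, error);
}

int main(void)
{
        struct timespec start = phase_start();

        usleep(2000);  /* stand-in for the real suspend work */
        phase_show_time(start, 0, "end");
        return 0;
}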
@@ -2034,6 +2024,7 @@ int dpm_prepare(pm_message_t state)
  */
 int dpm_suspend_start(pm_message_t state)
 {
+        ktime_t starttime = ktime_get();
         int error;

         error = dpm_prepare(state);
@@ -2042,6 +2033,7 @@ int dpm_suspend_start(pm_message_t state)
                 dpm_save_failed_step(SUSPEND_PREPARE);
         } else
                 error = dpm_suspend(state);
+        dpm_show_time(starttime, state, error, "start");
         return error;
 }
 EXPORT_SYMBOL_GPL(dpm_suspend_start);
@@ -968,8 +968,6 @@ void pm_wakep_autosleep_enabled(bool set)
 }
 #endif /* CONFIG_PM_AUTOSLEEP */

-static struct dentry *wakeup_sources_stats_dentry;
-
 /**
  * print_wakeup_source_stats - Print wakeup source statistics information.
  * @m: seq_file to print the statistics into.
|
|||
|
||||
static int __init wakeup_sources_debugfs_init(void)
|
||||
{
|
||||
wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
|
||||
S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
|
||||
debugfs_create_file("wakeup_sources", S_IRUGO, NULL, NULL,
|
||||
&wakeup_sources_stats_fops);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -1406,7 +1406,7 @@ static void __init i8042_register_ports(void)
                  * behavior on many platforms using suspend-to-RAM (ACPI S3)
                  * by default.
                  */
-                if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO)
+                if (pm_suspend_default_s2idle() && i == I8042_KBD_PORT_NO)
                         device_set_wakeup_enable(&serio->dev, true);
         }
 }
@@ -1012,15 +1012,15 @@ static int pci_pm_freeze(struct device *dev)
         }

         /*
-         * This used to be done in pci_pm_prepare() for all devices and some
-         * drivers may depend on it, so do it here. Ideally, runtime-suspended
-         * devices should not be touched during freeze/thaw transitions,
-         * however.
+         * Resume all runtime-suspended devices before creating a snapshot
+         * image of system memory, because the restore kernel generally cannot
+         * be expected to always handle them consistently and they need to be
+         * put into the runtime-active metastate during system resume anyway,
+         * so it is better to ensure that the state saved in the image will be
+         * always consistent with that.
          */
-        if (!dev_pm_smart_suspend_and_suspended(dev)) {
-                pm_runtime_resume(dev);
-                pci_dev->state_saved = false;
-        }
+        pm_runtime_resume(dev);
+        pci_dev->state_saved = false;

         if (pm->freeze) {
                 int error;
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int pci_pm_freeze_late(struct device *dev)
|
||||
{
|
||||
if (dev_pm_smart_suspend_and_suspended(dev))
|
||||
return 0;
|
||||
|
||||
return pm_generic_freeze_late(dev);
|
||||
}
|
||||
|
||||
static int pci_pm_freeze_noirq(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pci_dev = to_pci_dev(dev);
|
||||
struct device_driver *drv = dev->driver;
|
||||
|
||||
if (dev_pm_smart_suspend_and_suspended(dev))
|
||||
return 0;
|
||||
|
||||
if (pci_has_legacy_pm_support(pci_dev))
|
||||
return pci_legacy_suspend_late(dev, PMSG_FREEZE);
|
||||
|
||||
|
@@ -1079,16 +1068,6 @@ static int pci_pm_thaw_noirq(struct device *dev)
         struct device_driver *drv = dev->driver;
         int error = 0;

-        /*
-         * If the device is in runtime suspend, the code below may not work
-         * correctly with it, so skip that code and make the PM core skip all of
-         * the subsequent "thaw" callbacks for the device.
-         */
-        if (dev_pm_smart_suspend_and_suspended(dev)) {
-                dev_pm_skip_next_resume_phases(dev);
-                return 0;
-        }
-
         if (pcibios_pm_ops.thaw_noirq) {
                 error = pcibios_pm_ops.thaw_noirq(dev);
                 if (error)
@@ -1226,10 +1205,6 @@ static int pci_pm_restore_noirq(struct device *dev)
         struct device_driver *drv = dev->driver;
         int error = 0;

-        /* This is analogous to the pci_pm_resume_noirq() case. */
-        if (dev_pm_smart_suspend_and_suspended(dev))
-                pm_runtime_set_active(dev);
-
         if (pcibios_pm_ops.restore_noirq) {
                 error = pcibios_pm_ops.restore_noirq(dev);
                 if (error)
@@ -1279,7 +1254,6 @@ static int pci_pm_restore(struct device *dev)
 #else /* !CONFIG_HIBERNATE_CALLBACKS */

 #define pci_pm_freeze           NULL
-#define pci_pm_freeze_late      NULL
 #define pci_pm_freeze_noirq     NULL
 #define pci_pm_thaw             NULL
 #define pci_pm_thaw_noirq       NULL
@@ -1405,7 +1379,6 @@ static const struct dev_pm_ops pci_dev_pm_ops = {
         .suspend_late = pci_pm_suspend_late,
         .resume = pci_pm_resume,
         .freeze = pci_pm_freeze,
-        .freeze_late = pci_pm_freeze_late,
         .thaw = pci_pm_thaw,
         .poweroff = pci_pm_poweroff,
         .poweroff_late = pci_pm_poweroff_late,
@@ -913,31 +913,21 @@ static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
 #endif

 #if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
-int acpi_dev_suspend_late(struct device *dev);
 int acpi_subsys_prepare(struct device *dev);
 void acpi_subsys_complete(struct device *dev);
 int acpi_subsys_suspend_late(struct device *dev);
 int acpi_subsys_suspend_noirq(struct device *dev);
-int acpi_subsys_resume_noirq(struct device *dev);
-int acpi_subsys_resume_early(struct device *dev);
 int acpi_subsys_suspend(struct device *dev);
 int acpi_subsys_freeze(struct device *dev);
-int acpi_subsys_freeze_late(struct device *dev);
-int acpi_subsys_freeze_noirq(struct device *dev);
-int acpi_subsys_thaw_noirq(struct device *dev);
+int acpi_subsys_poweroff(struct device *dev);
 #else
-static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
 static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
 static inline void acpi_subsys_complete(struct device *dev) {}
 static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
 static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
-static inline int acpi_subsys_resume_noirq(struct device *dev) { return 0; }
-static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
 static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
 static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
-static inline int acpi_subsys_freeze_late(struct device *dev) { return 0; }
-static inline int acpi_subsys_freeze_noirq(struct device *dev) { return 0; }
-static inline int acpi_subsys_thaw_noirq(struct device *dev) { return 0; }
+static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
 #endif

 #ifdef CONFIG_ACPI
@@ -760,7 +760,6 @@ extern int pm_generic_poweroff_late(struct device *dev);
 extern int pm_generic_poweroff(struct device *dev);
 extern void pm_generic_complete(struct device *dev);

-extern void dev_pm_skip_next_resume_phases(struct device *dev);
 extern bool dev_pm_may_skip_resume(struct device *dev);
 extern bool dev_pm_smart_suspend_and_suspended(struct device *dev);

@@ -36,7 +36,7 @@ struct wake_irq;
  * @expire_count: Number of times the wakeup source's timeout has expired.
  * @wakeup_count: Number of times the wakeup source might abort suspend.
  * @active: Status of the wakeup source.
- * @has_timeout: The wakeup source has been activated with a timeout.
+ * @autosleep_enabled: Autosleep is active, so update @prevent_sleep_time.
  */
 struct wakeup_source {
         const char *name;
@@ -304,7 +304,7 @@ static inline bool idle_should_enter_s2idle(void)
         return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
 }

-extern bool pm_suspend_via_s2idle(void);
+extern bool pm_suspend_default_s2idle(void);
 extern void __init pm_states_init(void);
 extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
 extern void s2idle_wake(void);
@@ -336,7 +336,7 @@ static inline void pm_set_suspend_via_firmware(void) {}
 static inline void pm_set_resume_via_firmware(void) {}
 static inline bool pm_suspend_via_firmware(void) { return false; }
 static inline bool pm_resume_via_firmware(void) { return false; }
-static inline bool pm_suspend_via_s2idle(void) { return false; }
+static inline bool pm_suspend_default_s2idle(void) { return false; }

 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
@@ -448,6 +448,7 @@ extern bool system_entering_hibernation(void);
 extern bool hibernation_available(void);
 asmlinkage int swsusp_save(void);
 extern struct pbe *restore_pblist;
+int pfn_is_nosave(unsigned long pfn);
 #else /* CONFIG_HIBERNATION */
 static inline void register_nosave_region(unsigned long b, unsigned long e) {}
 static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
@@ -75,8 +75,6 @@ static inline void hibernate_reserved_size_init(void) {}
 static inline void hibernate_image_size_init(void) {}
 #endif /* !CONFIG_HIBERNATION */

-extern int pfn_is_nosave(unsigned long);
-
 #define power_attr(_name) \
 static struct kobj_attribute _name##_attr = { \
         .attr = { \
@@ -62,16 +62,16 @@ enum s2idle_states __read_mostly s2idle_state;
 static DEFINE_RAW_SPINLOCK(s2idle_lock);

 /**
- * pm_suspend_via_s2idle - Check if suspend-to-idle is the default suspend.
+ * pm_suspend_default_s2idle - Check if suspend-to-idle is the default suspend.
  *
  * Return 'true' if suspend-to-idle has been selected as the default system
  * suspend method.
  */
-bool pm_suspend_via_s2idle(void)
+bool pm_suspend_default_s2idle(void)
 {
         return mem_sleep_current == PM_SUSPEND_TO_IDLE;
 }
-EXPORT_SYMBOL_GPL(pm_suspend_via_s2idle);
+EXPORT_SYMBOL_GPL(pm_suspend_default_s2idle);

 void s2idle_set_ops(const struct platform_s2idle_ops *ops)
 {
@@ -974,12 +974,11 @@ static int get_swap_reader(struct swap_map_handle *handle,
         last = handle->maps = NULL;
         offset = swsusp_header->image;
         while (offset) {
-                tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
+                tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
                 if (!tmp) {
                         release_swap_reader(handle);
                         return -ENOMEM;
                 }
-                memset(tmp, 0, sizeof(*tmp));
                 if (!handle->maps)
                         handle->maps = tmp;
                 if (last)
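kzalloc() hands back already-zeroed memory, so the separate memset() in the old code was redundant. The userspace analogue below shows the same simplification with calloc() standing in for kzalloc(); the node type is invented for illustration.

#include <stdlib.h>
#include <string.h>

/* Invented node type, standing in for the list element being allocated. */
struct map_node {
        void *map;
        struct map_node *next;
};

/* Before: allocate, then zero in a separate step. */
static struct map_node *alloc_zeroed_two_steps(void)
{
        struct map_node *tmp = malloc(sizeof(*tmp));

        if (!tmp)
                return NULL;
        memset(tmp, 0, sizeof(*tmp));
        return tmp;
}

/* After: ask the allocator for zeroed memory up front; calloc() plays the
 * role kzalloc() plays in the kernel, so the memset() goes away. */
static struct map_node *alloc_zeroed_one_step(void)
{
        return calloc(1, sizeof(struct map_node));
}

int main(void)
{
        struct map_node *a = alloc_zeroed_two_steps();
        struct map_node *b = alloc_zeroed_one_step();

        free(a);
        free(b);
        return 0;
}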