Merge branches 'pm-core', 'pm-sleep' and 'acpi-pm'
* pm-core:
  driver core: Introduce device links reference counting
  PM / wakeirq: Add wakeup name to dedicated wake irqs

* pm-sleep:
  PM / hibernate: Change message when writing to /sys/power/resume
  PM / hibernate: Make passing hibernate offsets more friendly
  PCMCIA / PM: Avoid noirq suspend aborts during suspend-to-idle

* acpi-pm:
  ACPI / PM: Fix keyboard wakeup from suspend-to-idle on ASUS UX331UA
  ACPI / PM: Allow deeper wakeup power states with no _SxD nor _SxW
  ACPI / PM: Reduce LPI constraints logging noise
  ACPI / PM: Do not reconfigure GPEs for suspend-to-idle
commit e3a495c4ee
@@ -287,3 +287,17 @@ Description:
 		Writing a "1" to this file enables the debug messages and
 		writing a "0" (default) to it disables them. Reads from
 		this file return the current value.
+
+What:		/sys/power/resume_offset
+Date:		April 2018
+Contact:	Mario Limonciello <mario.limonciello@dell.com>
+Description:
+		This file is used for telling the kernel an offset into a disk
+		to use when hibernating the system such as with a swap file.
+
+		Reads from this file will display the current offset
+		the kernel will be using on the next hibernation
+		attempt.
+
+		Using this sysfs file will override any values that were
+		set using the kernel command line for disk offset.
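The new ABI entry above is a plain read/write integer file. As a rough illustration of how user space might drive it (this sketch is not part of the commit; the offset value is a made-up example, and a real value is determined by where the swap file sits on the disk that holds it):

/*
 * Illustrative sketch, not part of this commit: program the new
 * /sys/power/resume_offset file from user space and read the value back.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long offset = 38912, readback = 0;	/* example value */
	FILE *f;

	f = fopen("/sys/power/resume_offset", "w");
	if (!f) {
		perror("/sys/power/resume_offset");
		return 1;
	}
	fprintf(f, "%llu\n", offset);
	fclose(f);

	/* Reads report the offset the kernel will use on the next attempt. */
	f = fopen("/sys/power/resume_offset", "r");
	if (!f) {
		perror("/sys/power/resume_offset");
		return 1;
	}
	if (fscanf(f, "%llu", &readback) == 1)
		printf("next hibernation offset: %llu\n", readback);
	fclose(f);
	return 0;
}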
@@ -24,8 +24,16 @@ Some warnings, first.
 * see the FAQ below for details. (This is not true for more traditional
 * power states like "standby", which normally don't turn USB off.)
 
+Swap partition:
 You need to append resume=/dev/your_swap_partition to kernel command
-line. Then you suspend by
+line or specify it using /sys/power/resume.
+
+Swap file:
+If using a swapfile you can also specify a resume offset using
+resume_offset=<number> on the kernel command line or specify it
+in /sys/power/resume_offset.
+
+After preparing then you suspend by
 
 echo shutdown > /sys/power/disk; echo disk > /sys/power/state
 
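The echo sequence documented above can equally be driven from a small C helper; a sketch under the assumption that it runs as root on a kernel built with hibernation support (not part of this commit):

/*
 * Illustrative sketch, not part of this commit: the same two-step suspend
 * sequence as the echo commands above, done from C.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	if (write_str("/sys/power/disk", "shutdown"))
		return 1;
	/* This write only returns once the system has resumed from disk. */
	if (write_str("/sys/power/state", "disk"))
		return 1;
	return 0;
}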
@@ -543,6 +543,7 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
 	unsigned long long ret;
 	int d_min, d_max;
 	bool wakeup = false;
+	bool has_sxd = false;
 	acpi_status status;
 
 	/*
@@ -581,6 +582,10 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
 		else
 			return -ENODATA;
 	}
+
+	if (status == AE_OK)
+		has_sxd = true;
+
 	d_min = ret;
 	wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
 		&& adev->wakeup.sleep_state >= target_state;
@@ -599,7 +604,11 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
 		method[3] = 'W';
 		status = acpi_evaluate_integer(handle, method, NULL, &ret);
 		if (status == AE_NOT_FOUND) {
-			if (target_state > ACPI_STATE_S0)
+			/* No _SxW. In this case, the ACPI spec says that we
+			 * must not go into any power state deeper than the
+			 * value returned from _SxD.
+			 */
+			if (has_sxd && target_state > ACPI_STATE_S0)
 				d_max = d_min;
 		} else if (ACPI_SUCCESS(status) && ret <= ACPI_STATE_D3_COLD) {
 			/* Fall back to D3cold if ret is not a valid state. */
@@ -851,23 +851,25 @@ static void lpi_check_constraints(void)
 	int i;
 
 	for (i = 0; i < lpi_constraints_table_size; ++i) {
+		acpi_handle handle = lpi_constraints_table[i].handle;
 		struct acpi_device *adev;
 
-		if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev))
+		if (!handle || acpi_bus_get_device(handle, &adev))
 			continue;
 
-		acpi_handle_debug(adev->handle,
+		acpi_handle_debug(handle,
 			"LPI: required min power state:%s current power state:%s\n",
 			acpi_power_state_string(lpi_constraints_table[i].min_dstate),
 			acpi_power_state_string(adev->power.state));
 
 		if (!adev->flags.power_manageable) {
-			acpi_handle_info(adev->handle, "LPI: Device not power manageble\n");
+			acpi_handle_info(handle, "LPI: Device not power manageable\n");
+			lpi_constraints_table[i].handle = NULL;
 			continue;
 		}
 
 		if (adev->power.state < lpi_constraints_table[i].min_dstate)
-			acpi_handle_info(adev->handle,
+			acpi_handle_info(handle,
 				"LPI: Constraint not met; min power state:%s current power state:%s\n",
 				acpi_power_state_string(lpi_constraints_table[i].min_dstate),
 				acpi_power_state_string(adev->power.state));
@@ -953,15 +955,8 @@ static int acpi_s2idle_prepare(void)
 	if (lps0_device_handle) {
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
-	} else {
-		/*
-		 * The configuration of GPEs is changed here to avoid spurious
-		 * wakeups, but that should not be necessary if this is a
-		 * "low-power S0" platform and the low-power S0 _DSM is present.
-		 */
-		acpi_enable_all_wakeup_gpes();
-		acpi_os_wait_events_complete();
 	}
+
 	if (acpi_sci_irq_valid())
 		enable_irq_wake(acpi_sci_irq);
 
@@ -994,8 +989,9 @@ static void acpi_s2idle_sync(void)
 	 * The EC driver uses the system workqueue and an additional special
 	 * one, so those need to be flushed too.
 	 */
+	acpi_os_wait_events_complete(); /* synchronize SCI IRQ handling */
 	acpi_ec_flush_work();
-	acpi_os_wait_events_complete();
+	acpi_os_wait_events_complete(); /* synchronize Notify handling */
 	s2idle_wakeup = false;
 }
 
@@ -1007,8 +1003,6 @@ static void acpi_s2idle_restore(void)
 	if (lps0_device_handle) {
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
 		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
-	} else {
-		acpi_enable_all_runtime_gpes();
 	}
 }
 
@@ -196,8 +196,10 @@ struct device_link *device_link_add(struct device *consumer,
 	}
 
 	list_for_each_entry(link, &supplier->links.consumers, s_node)
-		if (link->consumer == consumer)
+		if (link->consumer == consumer) {
+			kref_get(&link->kref);
 			goto out;
+		}
 
 	link = kzalloc(sizeof(*link), GFP_KERNEL);
 	if (!link)
@@ -222,6 +224,7 @@ struct device_link *device_link_add(struct device *consumer,
 	link->consumer = consumer;
 	INIT_LIST_HEAD(&link->c_node);
 	link->flags = flags;
+	kref_init(&link->kref);
 
 	/* Determine the initial link state. */
 	if (flags & DL_FLAG_STATELESS) {
@@ -292,8 +295,10 @@ static void __device_link_free_srcu(struct rcu_head *rhead)
 	device_link_free(container_of(rhead, struct device_link, rcu_head));
 }
 
-static void __device_link_del(struct device_link *link)
+static void __device_link_del(struct kref *kref)
 {
+	struct device_link *link = container_of(kref, struct device_link, kref);
+
 	dev_info(link->consumer, "Dropping the link to %s\n",
 		 dev_name(link->supplier));
 
@@ -305,8 +310,10 @@ static void __device_link_del(struct device_link *link)
 	call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
 }
 #else /* !CONFIG_SRCU */
-static void __device_link_del(struct device_link *link)
+static void __device_link_del(struct kref *kref)
 {
+	struct device_link *link = container_of(kref, struct device_link, kref);
+
 	dev_info(link->consumer, "Dropping the link to %s\n",
 		 dev_name(link->supplier));
 
@@ -324,13 +331,15 @@ static void __device_link_del(struct device_link *link)
  * @link: Device link to delete.
  *
  * The caller must ensure proper synchronization of this function with runtime
- * PM.
+ * PM. If the link was added multiple times, it needs to be deleted as often.
+ * Care is required for hotplugged devices: Their links are purged on removal
+ * and calling device_link_del() is then no longer allowed.
  */
 void device_link_del(struct device_link *link)
 {
 	device_links_write_lock();
 	device_pm_lock();
-	__device_link_del(link);
+	kref_put(&link->kref, __device_link_del);
 	device_pm_unlock();
 	device_links_write_unlock();
 }
@@ -444,7 +453,7 @@ static void __device_links_no_driver(struct device *dev)
 			continue;
 
 		if (link->flags & DL_FLAG_AUTOREMOVE)
-			__device_link_del(link);
+			kref_put(&link->kref, __device_link_del);
 		else if (link->status != DL_STATE_SUPPLIER_UNBIND)
 			WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
 	}
@@ -597,13 +606,13 @@ static void device_links_purge(struct device *dev)
 
 	list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
 		WARN_ON(link->status == DL_STATE_ACTIVE);
-		__device_link_del(link);
+		__device_link_del(&link->kref);
 	}
 
 	list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
 		WARN_ON(link->status != DL_STATE_DORMANT &&
 			link->status != DL_STATE_NONE);
-		__device_link_del(link);
+		__device_link_del(&link->kref);
 	}
 
 	device_links_write_unlock();
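Taken together, the driver-core changes above turn device_link_add()/device_link_del() into a balanced, reference-counted pair: a second add of the same consumer/supplier link now only bumps the kref, and the link is torn down on the last delete. A hypothetical consumer-side sketch (not from this commit; the devices, the flag choice and the omitted error handling are illustrative only):

/*
 * Illustrative sketch, not part of this commit: two independent code paths
 * in some hypothetical driver request the same consumer/supplier link.
 */
#include <linux/device.h>

static struct device_link *link_a, *link_b;

static void example_bind(struct device *consumer, struct device *supplier)
{
	link_a = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	link_b = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
	/* link_a == link_b here; NULL checks omitted for brevity. */
}

static void example_unbind(void)
{
	/* Each add is paired with a delete; the link (and its "Dropping the
	 * link" message) only goes away on the second device_link_del().
	 */
	device_link_del(link_a);
	device_link_del(link_b);
}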
@@ -31,6 +31,7 @@ struct wake_irq {
 	struct device *dev;
 	unsigned int status;
 	int irq;
+	const char *name;
 };
 
 extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
@@ -112,6 +112,7 @@ void dev_pm_clear_wake_irq(struct device *dev)
 		free_irq(wirq->irq, wirq);
 		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
 	}
+	kfree(wirq->name);
 	kfree(wirq);
 }
 EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
@@ -184,6 +185,12 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	if (!wirq)
 		return -ENOMEM;
 
+	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
+	if (!wirq->name) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
 	wirq->dev = dev;
 	wirq->irq = irq;
 	irq_set_status_flags(irq, IRQ_NOAUTOEN);
@@ -196,9 +203,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	 * so we use a threaded irq.
 	 */
 	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
-				   IRQF_ONESHOT, dev_name(dev), wirq);
+				   IRQF_ONESHOT, wirq->name, wirq);
 	if (err)
-		goto err_free;
+		goto err_free_name;
 
 	err = dev_pm_attach_wake_irq(dev, irq, wirq);
 	if (err)
@@ -210,6 +217,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 
 err_free_irq:
 	free_irq(irq, wirq);
+err_free_name:
+	kfree(wirq->name);
 err_free:
 	kfree(wirq);
 
@@ -452,10 +452,12 @@ static int socket_insert(struct pcmcia_socket *skt)
 
 static int socket_suspend(struct pcmcia_socket *skt)
 {
-	if (skt->state & SOCKET_SUSPEND)
+	if ((skt->state & SOCKET_SUSPEND) && !(skt->state & SOCKET_IN_RESUME))
 		return -EBUSY;
 
 	mutex_lock(&skt->ops_mutex);
-	skt->suspended_state = skt->state;
+	/* store state on first suspend, but not after spurious wakeups */
+	if (!(skt->state & SOCKET_IN_RESUME))
+		skt->suspended_state = skt->state;
 
 	skt->socket = dead_socket;
@@ -463,6 +465,7 @@ static int socket_suspend(struct pcmcia_socket *skt)
 	if (skt->ops->suspend)
 		skt->ops->suspend(skt);
 	skt->state |= SOCKET_SUSPEND;
+	skt->state &= ~SOCKET_IN_RESUME;
 	mutex_unlock(&skt->ops_mutex);
 	return 0;
 }
@@ -475,6 +478,7 @@ static int socket_early_resume(struct pcmcia_socket *skt)
 	skt->ops->set_socket(skt, &skt->socket);
 	if (skt->state & SOCKET_PRESENT)
 		skt->resume_status = socket_setup(skt, resume_delay);
+	skt->state |= SOCKET_IN_RESUME;
 	mutex_unlock(&skt->ops_mutex);
 	return 0;
 }
@@ -484,7 +488,7 @@ static int socket_late_resume(struct pcmcia_socket *skt)
 	int ret = 0;
 
 	mutex_lock(&skt->ops_mutex);
-	skt->state &= ~SOCKET_SUSPEND;
+	skt->state &= ~(SOCKET_SUSPEND | SOCKET_IN_RESUME);
 	mutex_unlock(&skt->ops_mutex);
 
 	if (!(skt->state & SOCKET_PRESENT)) {
@@ -70,6 +70,7 @@ struct pccard_resource_ops {
 /* Flags in socket state */
 #define SOCKET_PRESENT		0x0008
 #define SOCKET_INUSE		0x0010
+#define SOCKET_IN_RESUME	0x0040
 #define SOCKET_SUSPEND		0x0080
 #define SOCKET_WIN_REQ(i)	(0x0100<<(i))
 #define SOCKET_CARDBUS		0x8000
@@ -769,6 +769,7 @@ enum device_link_state {
 * @status: The state of the link (with respect to the presence of drivers).
 * @flags: Link flags.
 * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @kref: Count repeated addition of the same link.
 * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
 */
 struct device_link {
@@ -779,6 +780,7 @@ struct device_link {
 	enum device_link_state status;
 	u32 flags;
 	bool rpm_active;
+	struct kref kref;
 #ifdef CONFIG_SRCU
 	struct rcu_head rcu_head;
 #endif
@@ -1053,7 +1053,7 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 	lock_system_sleep();
 	swsusp_resume_device = res;
 	unlock_system_sleep();
-	pr_info("Starting manual resume from disk\n");
+	pm_pr_dbg("Configured resume from disk to %u\n", swsusp_resume_device);
 	noresume = 0;
 	software_resume();
 	return n;
@@ -1061,6 +1061,29 @@ static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
 
 power_attr(resume);
 
+static ssize_t resume_offset_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%llu\n", (unsigned long long)swsusp_resume_block);
+}
+
+static ssize_t resume_offset_store(struct kobject *kobj,
+				   struct kobj_attribute *attr, const char *buf,
+				   size_t n)
+{
+	unsigned long long offset;
+	int rc;
+
+	rc = kstrtoull(buf, 0, &offset);
+	if (rc)
+		return rc;
+	swsusp_resume_block = offset;
+
+	return n;
+}
+
+power_attr(resume_offset);
+
 static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,
 			       char *buf)
 {
@@ -1106,6 +1129,7 @@ power_attr(reserved_size);
 
 static struct attribute * g[] = {
 	&disk_attr.attr,
+	&resume_offset_attr.attr,
 	&resume_attr.attr,
 	&image_size_attr.attr,
 	&reserved_size_attr.attr,