// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/user.c
 *
 * This file provides the user space interface for software suspend/resume.
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>

#include <linux/uaccess.h>

#include "power.h"

static struct snapshot_data {
	struct snapshot_handle handle;
	int swap;
	int mode;
	bool frozen;
	bool ready;
	bool platform_support;
	bool free_bitmaps;
	dev_t dev;
} snapshot_state;

int is_hibernate_resume_dev(dev_t dev)
{
	return hibernation_available() && snapshot_state.dev == dev;
}

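/*
 * The device may be opened either read-only, to create a hibernation image,
 * or write-only, to load an image back for restore; O_RDWR is rejected.
 * hibernate_acquire() limits the device to a single opener at a time.
 */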
static int snapshot_open(struct inode *inode, struct file *filp)
{
	struct snapshot_data *data;
	int error;

	if (!hibernation_available())
		return -EPERM;

	lock_system_sleep();

	if (!hibernate_acquire()) {
		error = -EBUSY;
		goto Unlock;
	}

	if ((filp->f_flags & O_ACCMODE) == O_RDWR) {
		hibernate_release();
		error = -ENOSYS;
		goto Unlock;
	}

	nonseekable_open(inode, filp);
	data = &snapshot_state;
	filp->private_data = data;
	memset(&data->handle, 0, sizeof(struct snapshot_handle));
	if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
		/* Hibernating.  The image device should be accessible. */
		data->swap = swap_type_of(swsusp_resume_device, 0);
		data->mode = O_RDONLY;
		data->free_bitmaps = false;
		error = pm_notifier_call_chain_robust(PM_HIBERNATION_PREPARE, PM_POST_HIBERNATION);
	} else {
		/*
		 * Resuming.  We may need to wait for the image device to
		 * appear.
		 */
		wait_for_device_probe();

		data->swap = -1;
		data->mode = O_WRONLY;
		error = pm_notifier_call_chain_robust(PM_RESTORE_PREPARE, PM_POST_RESTORE);
		if (!error) {
			error = create_basic_memory_bitmaps();
			data->free_bitmaps = !error;
		}
	}
	if (error)
		hibernate_release();

	data->frozen = false;
	data->ready = false;
	data->platform_support = false;
	data->dev = 0;

 Unlock:
	unlock_system_sleep();

	return error;
}

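/*
 * Releasing the device frees the image and any swap pages allocated for it,
 * thaws tasks if they are still frozen, and runs the PM_POST_* notifier
 * matching the mode the device was opened in.
 */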
static int snapshot_release(struct inode *inode, struct file *filp)
{
	struct snapshot_data *data;

	lock_system_sleep();

	swsusp_free();
	data = filp->private_data;
	data->dev = 0;
	free_all_swap_pages(data->swap);
	if (data->frozen) {
		pm_restore_gfp_mask();
		free_basic_memory_bitmaps();
		thaw_processes();
	} else if (data->free_bitmaps) {
		free_basic_memory_bitmaps();
	}
	pm_notifier_call_chain(data->mode == O_RDONLY ?
			PM_POST_HIBERNATION : PM_POST_RESTORE);
	hibernate_release();

	unlock_system_sleep();

	return 0;
}

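/*
 * Reads return the image created by SNAPSHOT_CREATE_IMAGE one page at a
 * time; partial reads within a page are handled via the page offset kept
 * in *offp.
 */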
static ssize_t snapshot_read(struct file *filp, char __user *buf,
                             size_t count, loff_t *offp)
{
	struct snapshot_data *data;
	ssize_t res;
	loff_t pg_offp = *offp & ~PAGE_MASK;

	lock_system_sleep();

	data = filp->private_data;
	if (!data->ready) {
		res = -ENODATA;
		goto Unlock;
	}
	if (!pg_offp) {	/* on page boundary? */
		res = snapshot_read_next(&data->handle);
		if (res <= 0)
			goto Unlock;
	} else {
		res = PAGE_SIZE - pg_offp;
	}

	res = simple_read_from_buffer(buf, count, &pg_offp,
			data_of(data->handle), res);
	if (res > 0)
		*offp += res;

 Unlock:
	unlock_system_sleep();

	return res;
}

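/*
 * Writes feed a saved image back to the kernel for restore, again one page
 * at a time; snapshot_write_next() provides the buffer for each page.
 */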
static ssize_t snapshot_write(struct file *filp, const char __user *buf,
                              size_t count, loff_t *offp)
{
	struct snapshot_data *data;
	ssize_t res;
	loff_t pg_offp = *offp & ~PAGE_MASK;

	lock_system_sleep();

	data = filp->private_data;

	if (!pg_offp) {
		res = snapshot_write_next(&data->handle);
		if (res <= 0)
			goto unlock;
	} else {
		res = PAGE_SIZE - pg_offp;
	}

	if (!data_of(data->handle)) {
		res = -EINVAL;
		goto unlock;
	}

	res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
			buf, count);
	if (res > 0)
		*offp += res;
unlock:
	unlock_system_sleep();

	return res;
}

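/*
 * SNAPSHOT_SET_SWAP_AREA takes a struct resume_swap_area from user space;
 * 32-bit callers pass a packed layout with a compat_loff_t offset, so both
 * layouts are decoded below.
 */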
struct compat_resume_swap_area {
	compat_loff_t offset;
	u32 dev;
} __packed;

static int snapshot_set_swap_area(struct snapshot_data *data,
		void __user *argp)
{
	sector_t offset;
	dev_t swdev;

	if (swsusp_swap_in_use())
		return -EPERM;

	if (in_compat_syscall()) {
		struct compat_resume_swap_area swap_area;

		if (copy_from_user(&swap_area, argp, sizeof(swap_area)))
			return -EFAULT;
		swdev = new_decode_dev(swap_area.dev);
		offset = swap_area.offset;
	} else {
		struct resume_swap_area swap_area;

		if (copy_from_user(&swap_area, argp, sizeof(swap_area)))
			return -EFAULT;
		swdev = new_decode_dev(swap_area.dev);
		offset = swap_area.offset;
	}

	/*
	 * User space encodes device types as two-byte values,
	 * so we need to recode them
	 */
	data->swap = swap_type_of(swdev, offset);
	if (data->swap < 0)
		return swdev ? -ENODEV : -EINVAL;
	data->dev = swdev;
	return 0;
}

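/*
 * Main ioctl dispatcher.  All commands require CAP_SYS_ADMIN, and the
 * handler bails out with -EBUSY if system_transition_mutex is already held
 * by another sleep transition.
 */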
static long snapshot_ioctl(struct file *filp, unsigned int cmd,
							unsigned long arg)
{
	int error = 0;
	struct snapshot_data *data;
	loff_t size;
	sector_t offset;

	if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
		return -ENOTTY;
	if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
		return -ENOTTY;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!mutex_trylock(&system_transition_mutex))
		return -EBUSY;

	lock_device_hotplug();
	data = filp->private_data;

	switch (cmd) {

	case SNAPSHOT_FREEZE:
		if (data->frozen)
			break;

		ksys_sync_helper();

		error = freeze_processes();
		if (error)
			break;

		error = create_basic_memory_bitmaps();
		if (error)
			thaw_processes();
		else
			data->frozen = true;

		break;

	case SNAPSHOT_UNFREEZE:
		if (!data->frozen || data->ready)
			break;
		pm_restore_gfp_mask();
		free_basic_memory_bitmaps();
		data->free_bitmaps = false;
		thaw_processes();
		data->frozen = false;
		break;

	case SNAPSHOT_CREATE_IMAGE:
		if (data->mode != O_RDONLY || !data->frozen || data->ready) {
			error = -EPERM;
			break;
		}
		pm_restore_gfp_mask();
		error = hibernation_snapshot(data->platform_support);
		if (!error) {
			error = put_user(in_suspend, (int __user *)arg);
			data->ready = !freezer_test_done && !error;
			freezer_test_done = false;
		}
		break;

	case SNAPSHOT_ATOMIC_RESTORE:
		snapshot_write_finalize(&data->handle);
		if (data->mode != O_WRONLY || !data->frozen ||
		    !snapshot_image_loaded(&data->handle)) {
			error = -EPERM;
			break;
		}
		error = hibernation_restore(data->platform_support);
		break;

	case SNAPSHOT_FREE:
		swsusp_free();
		memset(&data->handle, 0, sizeof(struct snapshot_handle));
		data->ready = false;
		/*
		 * It is necessary to thaw kernel threads here, because
		 * SNAPSHOT_CREATE_IMAGE may be invoked directly after
		 * SNAPSHOT_FREE.  In that case, if kernel threads were not
		 * thawed, the preallocation of memory carried out by
		 * hibernation_snapshot() might run into problems (i.e. it
		 * might fail or even deadlock).
		 */
		thaw_kernel_threads();
		break;

	case SNAPSHOT_PREF_IMAGE_SIZE:
		image_size = arg;
		break;

	case SNAPSHOT_GET_IMAGE_SIZE:
		if (!data->ready) {
			error = -ENODATA;
			break;
		}
		size = snapshot_get_image_size();
		size <<= PAGE_SHIFT;
		error = put_user(size, (loff_t __user *)arg);
		break;

	case SNAPSHOT_AVAIL_SWAP_SIZE:
		size = count_swap_pages(data->swap, 1);
		size <<= PAGE_SHIFT;
		error = put_user(size, (loff_t __user *)arg);
		break;

	case SNAPSHOT_ALLOC_SWAP_PAGE:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		offset = alloc_swapdev_block(data->swap);
		if (offset) {
			offset <<= PAGE_SHIFT;
			error = put_user(offset, (loff_t __user *)arg);
		} else {
			error = -ENOSPC;
		}
		break;

	case SNAPSHOT_FREE_SWAP_PAGES:
		if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
			error = -ENODEV;
			break;
		}
		free_all_swap_pages(data->swap);
		break;

	case SNAPSHOT_S2RAM:
		if (!data->frozen) {
			error = -EPERM;
			break;
		}
		/*
		 * Tasks are frozen and the notifiers have been called with
		 * PM_HIBERNATION_PREPARE
		 */
		error = suspend_devices_and_enter(PM_SUSPEND_MEM);
		data->ready = false;
		break;

	case SNAPSHOT_PLATFORM_SUPPORT:
		data->platform_support = !!arg;
		break;

	case SNAPSHOT_POWER_OFF:
		if (data->platform_support)
			error = hibernation_platform_enter();
		break;

	case SNAPSHOT_SET_SWAP_AREA:
		error = snapshot_set_swap_area(data, (void __user *)arg);
		break;

	default:
		error = -ENOTTY;

	}

	unlock_device_hotplug();
	mutex_unlock(&system_transition_mutex);

	return error;
}

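/*
 * For 32-bit callers, commands that take a pointer argument are converted
 * with compat_ptr() before being passed on to snapshot_ioctl().
 */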
#ifdef CONFIG_COMPAT
static long
snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));

	switch (cmd) {
	case SNAPSHOT_GET_IMAGE_SIZE:
	case SNAPSHOT_AVAIL_SWAP_SIZE:
	case SNAPSHOT_ALLOC_SWAP_PAGE:
	case SNAPSHOT_CREATE_IMAGE:
	case SNAPSHOT_SET_SWAP_AREA:
		return snapshot_ioctl(file, cmd,
				      (unsigned long) compat_ptr(arg));
	default:
		return snapshot_ioctl(file, cmd, arg);
	}
}
#endif /* CONFIG_COMPAT */

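/* The device is registered as a misc character device named "snapshot". */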
static const struct file_operations snapshot_fops = {
	.open = snapshot_open,
	.release = snapshot_release,
	.read = snapshot_read,
	.write = snapshot_write,
	.llseek = no_llseek,
	.unlocked_ioctl = snapshot_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = snapshot_compat_ioctl,
#endif
};

static struct miscdevice snapshot_device = {
	.minor = SNAPSHOT_MINOR,
	.name = "snapshot",
	.fops = &snapshot_fops,
};

static int __init snapshot_device_init(void)
{
	return misc_register(&snapshot_device);
};

device_initcall(snapshot_device_init);