commit 3f858ae02c

Merge branches 'acpi-pm' and 'pm-sleep'

* acpi-pm:
  ACPI / PM: LPIT: Register sysfs attributes based on FADT

* pm-sleep:
  x86-32, hibernate: Adjust in_suspend after resumed on 32bit system
  x86-32, hibernate: Set up temporary text mapping for 32bit system
  x86-32, hibernate: Switch to relocated restore code during resume on 32bit system
  x86-32, hibernate: Switch to original page table after resumed
  x86-32, hibernate: Use the page size macro instead of constant value
  x86-32, hibernate: Use temp_pgt as the temporary page table
  x86, hibernate: Rename temp_level4_pgt to temp_pgt
  x86-32, hibernate: Enable CONFIG_ARCH_HIBERNATION_HEADER on 32bit system
  x86, hibernate: Extract the common code of 64/32 bit system
  x86-32/asm/power: Create stack frames in hibernate_asm_32.S
  PM / hibernate: Check the success of generating md5 digest before hibernation
  x86, hibernate: Fix nosave_regions setup for hibernation
  PM / sleep: Show freezing tasks that caused a suspend abort
  PM / hibernate: Documentation: fix image_size default value
@@ -99,7 +99,7 @@ Description:
 		this file, the suspend image will be as small as possible.
 
 		Reading from this file will display the current image size
-		limit, which is set to 500 MB by default.
+		limit, which is set to around 2/5 of available RAM by default.
 
 What:		/sys/power/pm_trace
 Date:		August 2006
@@ -56,7 +56,7 @@ If you want to limit the suspend image size to N bytes, do
 
 	echo N > /sys/power/image_size
 
-before suspend (it is limited to 500 MB by default).
+before suspend (it is limited to around 2/5 of available RAM by default).
 
 . The resume process checks for the presence of the resume device,
 if found, it then checks the contents for the hibernation image signature.
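For context, the "around 2/5 of available RAM" default described above is what the hibernation core picks when nothing is written to /sys/power/image_size. A minimal userspace sketch of that arithmetic (illustration only, not part of this merge; the sysconf()-based probe of total RAM is an assumption about how to read it from userspace):

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long pages = sysconf(_SC_PHYS_PAGES);
	long page_size = sysconf(_SC_PAGESIZE);
	unsigned long long def;

	if (pages < 0 || page_size < 0)
		return 1;
	/* roughly 2/5 of total RAM, mirroring the documented default */
	def = (unsigned long long)pages * 2 / 5 * (unsigned long long)page_size;
	printf("default image_size would be about %llu bytes\n", def);
	return 0;
}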
@@ -2422,7 +2422,7 @@ menu "Power management and ACPI options"
 
 config ARCH_HIBERNATION_HEADER
 	def_bool y
-	depends on X86_64 && HIBERNATION
+	depends on HIBERNATION
 
 source "kernel/power/Kconfig"
 
@@ -4,3 +4,11 @@
 #else
 # include <asm/suspend_64.h>
 #endif
+extern unsigned long restore_jump_address __visible;
+extern unsigned long jump_address_phys;
+extern unsigned long restore_cr3 __visible;
+extern unsigned long temp_pgt __visible;
+extern unsigned long relocated_restore_code __visible;
+extern int relocate_restore_code(void);
+/* Defined in hibernate_asm_32/64.S */
+extern asmlinkage __visible int restore_image(void);
@@ -32,4 +32,8 @@ struct saved_context {
 	unsigned long return_address;
 } __attribute__((packed));
 
+/* routines for saving/restoring kernel state */
+extern char core_restore_code[];
+extern char restore_registers[];
+
 #endif /* _ASM_X86_SUSPEND_32_H */
@@ -1251,7 +1251,7 @@ void __init setup_arch(char **cmdline_p)
 	x86_init.hyper.guest_late_init();
 
 	e820__reserve_resources();
-	e820__register_nosave_regions(max_low_pfn);
+	e820__register_nosave_regions(max_pfn);
 
 	x86_init.resources.reserve_resources();
 
@@ -7,4 +7,4 @@ nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_cpu.o := $(nostackp)
 
 obj-$(CONFIG_PM_SLEEP) += cpu.o
-obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o
+obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o hibernate.o
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hibernation support for x86
+ *
+ * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
+ * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
+ * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
+ */
+#include <linux/gfp.h>
+#include <linux/smp.h>
+#include <linux/suspend.h>
+#include <linux/scatterlist.h>
+#include <linux/kdebug.h>
+
+#include <crypto/hash.h>
+
+#include <asm/e820/api.h>
+#include <asm/init.h>
+#include <asm/proto.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mtrr.h>
+#include <asm/sections.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Address to jump to in the last phase of restore in order to get to the image
+ * kernel's text (this value is passed in the image header).
+ */
+unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
+
+/*
+ * Value of the cr3 register from before the hibernation (this value is passed
+ * in the image header).
+ */
+unsigned long restore_cr3 __visible;
+unsigned long temp_pgt __visible;
+unsigned long relocated_restore_code __visible;
+
+/**
+ * pfn_is_nosave - check if given pfn is in the 'nosave' section
+ */
+int pfn_is_nosave(unsigned long pfn)
+{
+	unsigned long nosave_begin_pfn;
+	unsigned long nosave_end_pfn;
+
+	nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
+	nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
+
+	return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
+}
+
+
+#define MD5_DIGEST_SIZE 16
+
+struct restore_data_record {
+	unsigned long jump_address;
+	unsigned long jump_address_phys;
+	unsigned long cr3;
+	unsigned long magic;
+	u8 e820_digest[MD5_DIGEST_SIZE];
+};
+
+#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
+/**
+ * get_e820_md5 - calculate md5 according to given e820 table
+ *
+ * @table: the e820 table to be calculated
+ * @buf: the md5 result to be stored to
+ */
+static int get_e820_md5(struct e820_table *table, void *buf)
+{
+	struct crypto_shash *tfm;
+	struct shash_desc *desc;
+	int size;
+	int ret = 0;
+
+	tfm = crypto_alloc_shash("md5", 0, 0);
+	if (IS_ERR(tfm))
+		return -ENOMEM;
+
+	desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+		       GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto free_tfm;
+	}
+
+	desc->tfm = tfm;
+	desc->flags = 0;
+
+	size = offsetof(struct e820_table, entries) +
+		sizeof(struct e820_entry) * table->nr_entries;
+
+	if (crypto_shash_digest(desc, (u8 *)table, size, buf))
+		ret = -EINVAL;
+
+	kzfree(desc);
+
+free_tfm:
+	crypto_free_shash(tfm);
+	return ret;
+}
+
+static int hibernation_e820_save(void *buf)
+{
+	return get_e820_md5(e820_table_firmware, buf);
+}
+
+static bool hibernation_e820_mismatch(void *buf)
+{
+	int ret;
+	u8 result[MD5_DIGEST_SIZE];
+
+	memset(result, 0, MD5_DIGEST_SIZE);
+	/* If there is no digest in suspend kernel, let it go. */
+	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
+		return false;
+
+	ret = get_e820_md5(e820_table_firmware, result);
+	if (ret)
+		return true;
+
+	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
+}
+#else
+static int hibernation_e820_save(void *buf)
+{
+	return 0;
+}
+
+static bool hibernation_e820_mismatch(void *buf)
+{
+	/* If md5 is not builtin for restore kernel, let it go. */
+	return false;
+}
+#endif
+
+#ifdef CONFIG_X86_64
+#define RESTORE_MAGIC	0x23456789ABCDEF01UL
+#else
+#define RESTORE_MAGIC	0x12345678UL
+#endif
+
+/**
+ * arch_hibernation_header_save - populate the architecture specific part
+ *	of a hibernation image header
+ * @addr: address to save the data at
+ */
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+	struct restore_data_record *rdr = addr;
+
+	if (max_size < sizeof(struct restore_data_record))
+		return -EOVERFLOW;
+	rdr->magic = RESTORE_MAGIC;
+	rdr->jump_address = (unsigned long)restore_registers;
+	rdr->jump_address_phys = __pa_symbol(restore_registers);
+
+	/*
+	 * The restore code fixes up CR3 and CR4 in the following sequence:
+	 *
+	 * [in hibernation asm]
+	 * 1. CR3 <= temporary page tables
+	 * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
+	 * 3. CR3 <= rdr->cr3
+	 * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
+	 * [in restore_processor_state()]
+	 * 5. CR4 <= saved CR4
+	 * 6. CR3 <= saved CR3
+	 *
+	 * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
	 * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
+	 * rdr->cr3 needs to point to valid page tables but must not
+	 * have any of the PCID bits set.
+	 */
+	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
+
+	return hibernation_e820_save(rdr->e820_digest);
+}
+
+/**
+ * arch_hibernation_header_restore - read the architecture specific data
+ *	from the hibernation image header
+ * @addr: address to read the data from
+ */
+int arch_hibernation_header_restore(void *addr)
+{
+	struct restore_data_record *rdr = addr;
+
+	if (rdr->magic != RESTORE_MAGIC) {
+		pr_crit("Unrecognized hibernate image header format!\n");
+		return -EINVAL;
+	}
+
+	restore_jump_address = rdr->jump_address;
+	jump_address_phys = rdr->jump_address_phys;
+	restore_cr3 = rdr->cr3;
+
+	if (hibernation_e820_mismatch(rdr->e820_digest)) {
+		pr_crit("Hibernate inconsistent memory map detected!\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+int relocate_restore_code(void)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	relocated_restore_code = get_safe_page(GFP_ATOMIC);
+	if (!relocated_restore_code)
+		return -ENOMEM;
+
+	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);
+
+	/* Make the page containing the relocated code executable */
+	pgd = (pgd_t *)__va(read_cr3_pa()) +
+		pgd_index(relocated_restore_code);
+	p4d = p4d_offset(pgd, relocated_restore_code);
+	if (p4d_large(*p4d)) {
+		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
+		goto out;
+	}
+	pud = pud_offset(p4d, relocated_restore_code);
+	if (pud_large(*pud)) {
+		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+		goto out;
+	}
+	pmd = pmd_offset(pud, relocated_restore_code);
+	if (pmd_large(*pmd)) {
+		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+		goto out;
+	}
+	pte = pte_offset_kernel(pmd, relocated_restore_code);
+	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+out:
+	__flush_tlb_all();
+	return 0;
+}
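A note on the digest handling above: an all-zero e820_digest in the image header is treated as "no digest recorded", so resume is not blocked when the image kernel lacked builtin MD5. A small standalone sketch of that comparison convention (illustration only; digest_blocks_resume() is a hypothetical helper, not a kernel function):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MD5_DIGEST_SIZE 16

/* Mirrors the hibernation_e820_mismatch() convention: an all-zero saved
 * digest means "no digest recorded" and never blocks resume. */
static bool digest_blocks_resume(const unsigned char *saved,
				 const unsigned char *current_md5)
{
	static const unsigned char zero[MD5_DIGEST_SIZE];

	if (!memcmp(saved, zero, MD5_DIGEST_SIZE))
		return false;
	return memcmp(saved, current_md5, MD5_DIGEST_SIZE) != 0;
}

int main(void)
{
	unsigned char none[MD5_DIGEST_SIZE] = { 0 };
	unsigned char a[MD5_DIGEST_SIZE] = { 1, 2, 3 };
	unsigned char b[MD5_DIGEST_SIZE] = { 9, 9, 9 };

	printf("%d %d %d\n",
	       digest_blocks_resume(none, a),	/* 0: no digest, allowed */
	       digest_blocks_resume(a, a),	/* 0: digests match */
	       digest_blocks_resume(a, b));	/* 1: mismatch, abort resume */
	return 0;
}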
@@ -14,9 +14,7 @@
 #include <asm/pgtable.h>
 #include <asm/mmzone.h>
 #include <asm/sections.h>
-
-/* Defined in hibernate_asm_32.S */
-extern int restore_image(void);
+#include <asm/suspend.h>
 
 /* Pointer to the temporary resume page tables */
 pgd_t *resume_pg_dir;
@@ -145,6 +143,32 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
 #endif
 }
 
+static int set_up_temporary_text_mapping(pgd_t *pgd_base)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pgd = pgd_base + pgd_index(restore_jump_address);
+
+	pmd = resume_one_md_table_init(pgd);
+	if (!pmd)
+		return -ENOMEM;
+
+	if (boot_cpu_has(X86_FEATURE_PSE)) {
+		set_pmd(pmd + pmd_index(restore_jump_address),
+			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
+	} else {
+		pte = resume_one_page_table_init(pmd);
+		if (!pte)
+			return -ENOMEM;
+		set_pte(pte + pte_index(restore_jump_address),
+			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
+	}
+
+	return 0;
+}
+
 asmlinkage int swsusp_arch_resume(void)
 {
 	int error;
@@ -154,22 +178,22 @@ asmlinkage int swsusp_arch_resume(void)
 		return -ENOMEM;
 
 	resume_init_first_level_page_table(resume_pg_dir);
+
+	error = set_up_temporary_text_mapping(resume_pg_dir);
+	if (error)
+		return error;
+
 	error = resume_physical_mapping_init(resume_pg_dir);
 	if (error)
 		return error;
 
+	temp_pgt = __pa(resume_pg_dir);
+
+	error = relocate_restore_code();
+	if (error)
+		return error;
+
 	/* We have got enough memory and from now on we cannot recover */
 	restore_image();
 	return 0;
 }
-
-/*
- * pfn_is_nosave - check if given pfn is in the 'nosave' section
- */
-
-int pfn_is_nosave(unsigned long pfn)
-{
-	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
-	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
-	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
-}
@@ -26,26 +26,6 @@
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-/* Defined in hibernate_asm_64.S */
-extern asmlinkage __visible int restore_image(void);
-
-/*
- * Address to jump to in the last phase of restore in order to get to the image
- * kernel's text (this value is passed in the image header).
- */
-unsigned long restore_jump_address __visible;
-unsigned long jump_address_phys;
-
-/*
- * Value of the cr3 register from before the hibernation (this value is passed
- * in the image header).
- */
-unsigned long restore_cr3 __visible;
-
-unsigned long temp_level4_pgt __visible;
-
-unsigned long relocated_restore_code __visible;
-
 static int set_up_temporary_text_mapping(pgd_t *pgd)
 {
 	pmd_t *pmd;
@@ -141,46 +121,7 @@ static int set_up_temporary_mappings(void)
 			return result;
 	}
 
-	temp_level4_pgt = __pa(pgd);
-	return 0;
-}
-
-static int relocate_restore_code(void)
-{
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-
-	relocated_restore_code = get_safe_page(GFP_ATOMIC);
-	if (!relocated_restore_code)
-		return -ENOMEM;
-
-	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);
-
-	/* Make the page containing the relocated code executable */
-	pgd = (pgd_t *)__va(read_cr3_pa()) +
-		pgd_index(relocated_restore_code);
-	p4d = p4d_offset(pgd, relocated_restore_code);
-	if (p4d_large(*p4d)) {
-		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
-		goto out;
-	}
-	pud = pud_offset(p4d, relocated_restore_code);
-	if (pud_large(*pud)) {
-		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
-		goto out;
-	}
-	pmd = pmd_offset(pud, relocated_restore_code);
-	if (pmd_large(*pmd)) {
-		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
-		goto out;
-	}
-	pte = pte_offset_kernel(pmd, relocated_restore_code);
-	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
-out:
-	__flush_tlb_all();
+	temp_pgt = __pa(pgd);
 	return 0;
 }
 
@@ -200,166 +141,3 @@ asmlinkage int swsusp_arch_resume(void)
 	restore_image();
 	return 0;
 }
-
-/*
- * pfn_is_nosave - check if given pfn is in the 'nosave' section
- */
-
-int pfn_is_nosave(unsigned long pfn)
-{
-	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
-	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
-	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
-}
-
-#define MD5_DIGEST_SIZE 16
-
-struct restore_data_record {
-	unsigned long jump_address;
-	unsigned long jump_address_phys;
-	unsigned long cr3;
-	unsigned long magic;
-	u8 e820_digest[MD5_DIGEST_SIZE];
-};
-
-#define RESTORE_MAGIC	0x23456789ABCDEF01UL
-
-#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
-/**
- * get_e820_md5 - calculate md5 according to given e820 table
- *
- * @table: the e820 table to be calculated
- * @buf: the md5 result to be stored to
- */
-static int get_e820_md5(struct e820_table *table, void *buf)
-{
-	struct crypto_shash *tfm;
-	struct shash_desc *desc;
-	int size;
-	int ret = 0;
-
-	tfm = crypto_alloc_shash("md5", 0, 0);
-	if (IS_ERR(tfm))
-		return -ENOMEM;
-
-	desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
-		       GFP_KERNEL);
-	if (!desc) {
-		ret = -ENOMEM;
-		goto free_tfm;
-	}
-
-	desc->tfm = tfm;
-	desc->flags = 0;
-
-	size = offsetof(struct e820_table, entries) +
-		sizeof(struct e820_entry) * table->nr_entries;
-
-	if (crypto_shash_digest(desc, (u8 *)table, size, buf))
-		ret = -EINVAL;
-
-	kzfree(desc);
-
-free_tfm:
-	crypto_free_shash(tfm);
-	return ret;
-}
-
-static void hibernation_e820_save(void *buf)
-{
-	get_e820_md5(e820_table_firmware, buf);
-}
-
-static bool hibernation_e820_mismatch(void *buf)
-{
-	int ret;
-	u8 result[MD5_DIGEST_SIZE];
-
-	memset(result, 0, MD5_DIGEST_SIZE);
-	/* If there is no digest in suspend kernel, let it go. */
-	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
-		return false;
-
-	ret = get_e820_md5(e820_table_firmware, result);
-	if (ret)
-		return true;
-
-	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
-}
-#else
-static void hibernation_e820_save(void *buf)
-{
-}
-
-static bool hibernation_e820_mismatch(void *buf)
-{
-	/* If md5 is not builtin for restore kernel, let it go. */
-	return false;
-}
-#endif
-
-/**
- * arch_hibernation_header_save - populate the architecture specific part
- *	of a hibernation image header
- * @addr: address to save the data at
- */
-int arch_hibernation_header_save(void *addr, unsigned int max_size)
-{
-	struct restore_data_record *rdr = addr;
-
-	if (max_size < sizeof(struct restore_data_record))
-		return -EOVERFLOW;
-	rdr->jump_address = (unsigned long)restore_registers;
-	rdr->jump_address_phys = __pa_symbol(restore_registers);
-
-	/*
-	 * The restore code fixes up CR3 and CR4 in the following sequence:
-	 *
-	 * [in hibernation asm]
-	 * 1. CR3 <= temporary page tables
-	 * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
-	 * 3. CR3 <= rdr->cr3
-	 * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
-	 * [in restore_processor_state()]
-	 * 5. CR4 <= saved CR4
-	 * 6. CR3 <= saved CR3
-	 *
-	 * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
-	 * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
-	 * rdr->cr3 needs to point to valid page tables but must not
-	 * have any of the PCID bits set.
-	 */
-	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
-
-	rdr->magic = RESTORE_MAGIC;
-
-	hibernation_e820_save(rdr->e820_digest);
-
-	return 0;
-}
-
-/**
- * arch_hibernation_header_restore - read the architecture specific data
- *	from the hibernation image header
- * @addr: address to read the data from
- */
-int arch_hibernation_header_restore(void *addr)
-{
-	struct restore_data_record *rdr = addr;
-
-	restore_jump_address = rdr->jump_address;
-	jump_address_phys = rdr->jump_address_phys;
-	restore_cr3 = rdr->cr3;
-
-	if (rdr->magic != RESTORE_MAGIC) {
-		pr_crit("Unrecognized hibernate image header format!\n");
-		return -EINVAL;
-	}
-
-	if (hibernation_e820_mismatch(rdr->e820_digest)) {
-		pr_crit("Hibernate inconsistent memory map detected!\n");
-		return -ENODEV;
-	}
-
-	return 0;
-}
@@ -12,6 +12,7 @@
 #include <asm/page_types.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
+#include <asm/frame.h>
 
 .text
 
@@ -24,13 +25,30 @@ ENTRY(swsusp_arch_suspend)
 	pushfl
 	popl saved_context_eflags
 
+	/* save cr3 */
+	movl	%cr3, %eax
+	movl	%eax, restore_cr3
+
+	FRAME_BEGIN
 	call swsusp_save
+	FRAME_END
 	ret
+ENDPROC(swsusp_arch_suspend)
 
 ENTRY(restore_image)
+	/* prepare to jump to the image kernel */
+	movl	restore_jump_address, %ebx
+	movl	restore_cr3, %ebp
+
 	movl	mmu_cr4_features, %ecx
-	movl	resume_pg_dir, %eax
-	subl	$__PAGE_OFFSET, %eax
+
+	/* jump to relocated restore code */
+	movl	relocated_restore_code, %eax
+	jmpl	*%eax
+
+/* code below has been relocated to a safe page */
+ENTRY(core_restore_code)
+	movl	temp_pgt, %eax
 	movl	%eax, %cr3
 
 	jecxz	1f	# cr4 Pentium and higher, skip if zero
@@ -49,7 +67,7 @@ copy_loop:
 	movl	pbe_address(%edx), %esi
 	movl	pbe_orig_address(%edx), %edi
 
-	movl	$1024, %ecx
+	movl	$(PAGE_SIZE >> 2), %ecx
 	rep
 	movsl
 
@@ -58,10 +76,13 @@ copy_loop:
 	.p2align 4,,7
 
 done:
+	jmpl	*%ebx
+
+/* code below belongs to the image kernel */
+	.align PAGE_SIZE
+ENTRY(restore_registers)
 	/* go back to the original page tables */
-	movl	$swapper_pg_dir, %eax
-	subl	$__PAGE_OFFSET, %eax
-	movl	%eax, %cr3
+	movl	%ebp, %cr3
 	movl	mmu_cr4_features, %ecx
 	jecxz	1f	# cr4 Pentium and higher, skip if zero
 	movl	%ecx, %cr4;	# turn PGE back on
@@ -82,4 +103,8 @@ done:
 
 	xorl	%eax, %eax
 
+	/* tell the hibernation core that we've just restored the memory */
+	movl	%eax, in_suspend
+
 	ret
+ENDPROC(restore_registers)
@@ -59,7 +59,7 @@ ENTRY(restore_image)
 	movq	restore_cr3(%rip), %r9
 
 	/* prepare to switch to temporary page tables */
-	movq	temp_level4_pgt(%rip), %rax
+	movq	temp_pgt(%rip), %rax
 	movq	mmu_cr4_features(%rip), %rbx
 
 	/* prepare to copy image data to their original locations */
@@ -117,11 +117,17 @@ static void lpit_update_residency(struct lpit_residency_info *info,
 		if (!info->iomem_addr)
 			return;
 
+		if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+			return;
+
 		/* Silently fail, if cpuidle attribute group is not present */
 		sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
 					&dev_attr_low_power_idle_system_residency_us.attr,
 					"cpuidle");
 	} else if (info->gaddr.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
+		if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+			return;
+
 		/* Silently fail, if cpuidle attribute group is not present */
 		sysfs_add_file_to_group(&cpu_subsys.dev_root->kobj,
 					&dev_attr_low_power_idle_cpu_residency_us.attr,
@@ -96,7 +96,7 @@ static int try_to_freeze_tasks(bool user_only)
 	if (wq_busy)
 		show_workqueue_state();
 
-	if (!wakeup) {
+	if (!wakeup || pm_debug_messages_on) {
 		read_lock(&tasklist_lock);
 		for_each_process_thread(g, p) {
 			if (p != current && !freezer_should_skip(p)
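With this change the tasks that refused to freeze are also listed when the abort came from a wakeup event, as long as PM debug messages are enabled. A hedged usage sketch (not from the patch; it assumes the pm_debug_messages_on flag is exposed as /sys/power/pm_debug_messages):

#include <stdio.h>

int main(void)
{
	/* Assumed sysfs knob backing pm_debug_messages_on. */
	FILE *f = fopen("/sys/power/pm_debug_messages", "w");

	if (!f) {
		perror("pm_debug_messages");
		return 1;
	}
	fputs("1\n", f);	/* enable PM debug messages before suspending */
	fclose(f);
	return 0;
}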