Merge branch 'apei' into release
commit 037d76f404
@@ -714,7 +714,7 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
 }
 #endif
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_ACPI
 /**
  * Mark ACPI NVS memory region, so that we can save/restore it during
  * hibernation and the subsequent resume.
@@ -727,7 +727,7 @@ static int __init e820_mark_nvs_memory(void)
 		struct e820entry *ei = &e820.map[i];
 
 		if (ei->type == E820_NVS)
-			suspend_nvs_register(ei->addr, ei->size);
+			acpi_nvs_register(ei->addr, ei->size);
 	}
 
 	return 0;
@@ -20,11 +20,12 @@ obj-y += acpi.o \
 # All the builtin files are in the "acpi." module_param namespace.
 acpi-y += osl.o utils.o reboot.o
 acpi-y += atomicio.o
+acpi-y += nvs.o
 
 # sleep related files
 acpi-y += wakeup.o
 acpi-y += sleep.o
-acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o
+acpi-$(CONFIG_ACPI_SLEEP) += proc.o
 
 
 #
@@ -421,6 +421,17 @@ static int apei_resources_merge(struct apei_resources *resources1,
 	return 0;
 }
 
+int apei_resources_add(struct apei_resources *resources,
+		       unsigned long start, unsigned long size,
+		       bool iomem)
+{
+	if (iomem)
+		return apei_res_add(&resources->iomem, start, size);
+	else
+		return apei_res_add(&resources->ioport, start, size);
+}
+EXPORT_SYMBOL_GPL(apei_resources_add);
+
 /*
  * EINJ has two groups of GARs (EINJ table entry and trigger table
  * entry), so common resources are subtracted from the trigger table
@@ -438,8 +449,19 @@ int apei_resources_sub(struct apei_resources *resources1,
 }
 EXPORT_SYMBOL_GPL(apei_resources_sub);
 
+static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
+{
+	struct apei_resources *resources = data;
+	return apei_res_add(&resources->iomem, start, size);
+}
+
+static int apei_get_nvs_resources(struct apei_resources *resources)
+{
+	return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
+}
+
 /*
- * IO memory/port rersource management mechanism is used to check
+ * IO memory/port resource management mechanism is used to check
  * whether memory/port area used by GARs conflicts with normal memory
  * or IO memory/port of devices.
  */
@@ -448,21 +470,35 @@ int apei_resources_request(struct apei_resources *resources,
 {
 	struct apei_res *res, *res_bak = NULL;
 	struct resource *r;
+	struct apei_resources nvs_resources;
 	int rc;
 
 	rc = apei_resources_sub(resources, &apei_resources_all);
 	if (rc)
 		return rc;
 
+	/*
+	 * Some firmware uses ACPI NVS region, that has been marked as
+	 * busy, so exclude it from APEI resources to avoid false
+	 * conflict.
+	 */
+	apei_resources_init(&nvs_resources);
+	rc = apei_get_nvs_resources(&nvs_resources);
+	if (rc)
+		goto res_fini;
+	rc = apei_resources_sub(resources, &nvs_resources);
+	if (rc)
+		goto res_fini;
+
 	rc = -EINVAL;
 	list_for_each_entry(res, &resources->iomem, list) {
 		r = request_mem_region(res->start, res->end - res->start,
 				       desc);
 		if (!r) {
 			pr_err(APEI_PFX
-		"Can not request iomem region <%016llx-%016llx> for GARs.\n",
+		"Can not request [mem %#010llx-%#010llx] for %s registers\n",
 			       (unsigned long long)res->start,
-			       (unsigned long long)res->end);
+			       (unsigned long long)res->end - 1, desc);
 			res_bak = res;
 			goto err_unmap_iomem;
 		}
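
Note: the hunk above excludes ACPI NVS windows from the APEI resource list before request_mem_region() runs, so firmware-reserved ranges do not produce false conflicts. The exclusion is plain interval subtraction. The following is a standalone userspace sketch of that operation only, not the kernel's apei_res list code; the names range_sub and struct range are made up for illustration.

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* [start, end), end exclusive */

/* Subtract "cut" from "r"; at most two pieces survive. */
static int range_sub(struct range r, struct range cut, struct range out[2])
{
	int n = 0;

	if (cut.end <= r.start || cut.start >= r.end) {
		out[n++] = r;			/* no overlap, keep everything */
		return n;
	}
	if (cut.start > r.start)
		out[n++] = (struct range){ r.start, cut.start };
	if (cut.end < r.end)
		out[n++] = (struct range){ cut.end, r.end };
	return n;
}

int main(void)
{
	/* cut an NVS-like window out of a larger register window */
	struct range gar = { 0x1000, 0x9000 };
	struct range nvs = { 0x3000, 0x5000 };
	struct range out[2];
	int i, n = range_sub(gar, nvs, out);

	for (i = 0; i < n; i++)
		printf("keep [%#llx-%#llx)\n",
		       (unsigned long long)out[i].start,
		       (unsigned long long)out[i].end);
	return 0;
}
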
@@ -472,9 +508,9 @@ int apei_resources_request(struct apei_resources *resources,
 		r = request_region(res->start, res->end - res->start, desc);
 		if (!r) {
 			pr_err(APEI_PFX
-		"Can not request ioport region <%016llx-%016llx> for GARs.\n",
+		"Can not request [io %#06llx-%#06llx] for %s registers\n",
 			       (unsigned long long)res->start,
-			       (unsigned long long)res->end);
+			       (unsigned long long)res->end - 1, desc);
 			res_bak = res;
 			goto err_unmap_ioport;
 		}
@@ -500,6 +536,8 @@ err_unmap_iomem:
 			break;
 		release_mem_region(res->start, res->end - res->start);
 	}
+res_fini:
+	apei_resources_fini(&nvs_resources);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(apei_resources_request);
@@ -95,6 +95,9 @@ static inline void apei_resources_init(struct apei_resources *resources)
 }
 
 void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_add(struct apei_resources *resources,
+		       unsigned long start, unsigned long size,
+		       bool iomem);
 int apei_resources_sub(struct apei_resources *resources1,
 		       struct apei_resources *resources2);
 int apei_resources_request(struct apei_resources *resources,
@@ -194,8 +194,29 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
 	return 0;
 }
 
+static struct acpi_generic_address *einj_get_trigger_parameter_region(
+	struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
+{
+	int i;
+	struct acpi_whea_header *entry;
+
+	entry = (struct acpi_whea_header *)
+		((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+	for (i = 0; i < trigger_tab->entry_count; i++) {
+		if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
+		entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
+		entry->register_region.space_id ==
+				ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+		(entry->register_region.address & param2) == (param1 & param2))
+			return &entry->register_region;
+		entry++;
+	}
+
+	return NULL;
+}
 /* Execute instructions in trigger error action table */
-static int __einj_error_trigger(u64 trigger_paddr)
+static int __einj_error_trigger(u64 trigger_paddr, u32 type,
+				u64 param1, u64 param2)
 {
 	struct acpi_einj_trigger *trigger_tab = NULL;
 	struct apei_exec_context trigger_ctx;
@@ -204,14 +225,16 @@ static int __einj_error_trigger(u64 trigger_paddr)
 	struct resource *r;
 	u32 table_size;
 	int rc = -EIO;
+	struct acpi_generic_address *trigger_param_region = NULL;
 
 	r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
 			       "APEI EINJ Trigger Table");
 	if (!r) {
 		pr_err(EINJ_PFX
-	"Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+	"Can not request [mem %#010llx-%#010llx] for Trigger table\n",
 		       (unsigned long long)trigger_paddr,
-		       (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+		       (unsigned long long)trigger_paddr +
+		       sizeof(*trigger_tab) - 1);
 		goto out;
 	}
 	trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
@@ -232,9 +255,9 @@ static int __einj_error_trigger(u64 trigger_paddr)
 			       "APEI EINJ Trigger Table");
 	if (!r) {
 		pr_err(EINJ_PFX
-"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
-		       (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
-		       (unsigned long long)trigger_paddr + table_size);
+"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
+		       (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+		       (unsigned long long)trigger_paddr + table_size - 1);
 		goto out_rel_header;
 	}
 	iounmap(trigger_tab);
@@ -255,6 +278,30 @@ static int __einj_error_trigger(u64 trigger_paddr)
 	rc = apei_resources_sub(&trigger_resources, &einj_resources);
 	if (rc)
 		goto out_fini;
+	/*
+	 * Some firmware will access target address specified in
+	 * param1 to trigger the error when injecting memory error.
+	 * This will cause resource conflict with regular memory. So
+	 * remove it from trigger table resources.
+	 */
+	if (param_extension && (type & 0x0038) && param2) {
+		struct apei_resources addr_resources;
+		apei_resources_init(&addr_resources);
+		trigger_param_region = einj_get_trigger_parameter_region(
+			trigger_tab, param1, param2);
+		if (trigger_param_region) {
+			rc = apei_resources_add(&addr_resources,
+				trigger_param_region->address,
+				trigger_param_region->bit_width/8, true);
+			if (rc)
+				goto out_fini;
+			rc = apei_resources_sub(&trigger_resources,
+					&addr_resources);
+		}
+		apei_resources_fini(&addr_resources);
+		if (rc)
+			goto out_fini;
+	}
 	rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
 	if (rc)
 		goto out_fini;
@@ -324,7 +371,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
 	if (rc)
 		return rc;
 	trigger_paddr = apei_exec_ctx_get_output(&ctx);
-	rc = __einj_error_trigger(trigger_paddr);
+	rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
 	if (rc)
 		return rc;
 	rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
@@ -465,10 +512,9 @@ static int __init einj_init(void)
 
 	status = acpi_get_table(ACPI_SIG_EINJ, 0,
 				(struct acpi_table_header **)&einj_tab);
-	if (status == AE_NOT_FOUND) {
-		pr_info(EINJ_PFX "Table is not found!\n");
+	if (status == AE_NOT_FOUND)
 		return -ENODEV;
-	} else if (ACPI_FAILURE(status)) {
+	else if (ACPI_FAILURE(status)) {
 		const char *msg = acpi_format_exception(status);
 		pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
 		return -EINVAL;
@@ -1125,10 +1125,9 @@ static int __init erst_init(void)
 
 	status = acpi_get_table(ACPI_SIG_ERST, 0,
 				(struct acpi_table_header **)&erst_tab);
-	if (status == AE_NOT_FOUND) {
-		pr_info(ERST_PFX "Table is not found!\n");
+	if (status == AE_NOT_FOUND)
 		goto err;
-	} else if (ACPI_FAILURE(status)) {
+	else if (ACPI_FAILURE(status)) {
 		const char *msg = acpi_format_exception(status);
 		pr_err(ERST_PFX "Failed to get table, %s\n", msg);
 		rc = -EINVAL;
@@ -45,6 +45,8 @@
 #include <linux/irq_work.h>
 #include <linux/llist.h>
 #include <linux/genalloc.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
 #include <acpi/apei.h>
 #include <acpi/atomicio.h>
 #include <acpi/hed.h>
@@ -476,6 +478,27 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
 			}
 #endif
 		}
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+				      CPER_SEC_PCIE)) {
+			struct cper_sec_pcie *pcie_err;
+			pcie_err = (struct cper_sec_pcie *)(gdata+1);
+			if (sev == GHES_SEV_RECOVERABLE &&
+			    sec_sev == GHES_SEV_RECOVERABLE &&
+			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+				unsigned int devfn;
+				int aer_severity;
+				devfn = PCI_DEVFN(pcie_err->device_id.device,
+						  pcie_err->device_id.function);
+				aer_severity = cper_severity_to_aer(sev);
+				aer_recover_queue(pcie_err->device_id.segment,
+						  pcie_err->device_id.bus,
+						  devfn, aer_severity);
+			}
+
+		}
+#endif
 	}
 }
 
@@ -483,16 +506,22 @@ static void __ghes_print_estatus(const char *pfx,
 				 const struct acpi_hest_generic *generic,
 				 const struct acpi_hest_generic_status *estatus)
 {
+	static atomic_t seqno;
+	unsigned int curr_seqno;
+	char pfx_seq[64];
+
 	if (pfx == NULL) {
 		if (ghes_severity(estatus->error_severity) <=
 		    GHES_SEV_CORRECTED)
-			pfx = KERN_WARNING HW_ERR;
+			pfx = KERN_WARNING;
 		else
-			pfx = KERN_ERR HW_ERR;
+			pfx = KERN_ERR;
 	}
+	curr_seqno = atomic_inc_return(&seqno);
+	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
 	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
-	       pfx, generic->header.source_id);
-	apei_estatus_print(pfx, estatus);
+	       pfx_seq, generic->header.source_id);
+	apei_estatus_print(pfx_seq, estatus);
 }
 
 static int ghes_print_estatus(const char *pfx,
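
Note: __ghes_print_estatus() now stamps every report with a monotonically increasing sequence number folded into the printk prefix, so interleaved dumps from different error sources can be told apart. Below is a minimal userspace sketch of the same prefix construction only; it uses C11 <stdatomic.h> instead of the kernel's atomic_t, and HW_ERR is redefined locally as a stand-in.

#include <stdatomic.h>
#include <stdio.h>

#define HW_ERR "[Hardware Error]: "	/* stand-in for the kernel's HW_ERR */

static atomic_uint seqno;

static void print_estatus(const char *pfx, int source_id)
{
	char pfx_seq[64];
	unsigned int curr_seqno = atomic_fetch_add(&seqno, 1) + 1;

	/* "<prefix>{<seq>}[Hardware Error]: " -- same shape as pfx_seq above */
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printf("%sHardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, source_id);
}

int main(void)
{
	print_estatus("<4>", 1);	/* successive reports get {1}, {2}, ... */
	print_estatus("<2>", 1);
	return 0;
}
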
@@ -711,26 +740,34 @@ static int ghes_notify_sci(struct notifier_block *this,
 	return ret;
 }
 
-static void ghes_proc_in_irq(struct irq_work *irq_work)
+static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
 {
-	struct llist_node *llnode, *next, *tail = NULL;
-	struct ghes_estatus_node *estatus_node;
-	struct acpi_hest_generic *generic;
-	struct acpi_hest_generic_status *estatus;
-	u32 len, node_len;
+	struct llist_node *next, *tail = NULL;
 
-	/*
-	 * Because the time order of estatus in list is reversed,
-	 * revert it back to proper order.
-	 */
-	llnode = llist_del_all(&ghes_estatus_llist);
 	while (llnode) {
 		next = llnode->next;
 		llnode->next = tail;
 		tail = llnode;
 		llnode = next;
 	}
-	llnode = tail;
+
+	return tail;
+}
+
+static void ghes_proc_in_irq(struct irq_work *irq_work)
+{
+	struct llist_node *llnode, *next;
+	struct ghes_estatus_node *estatus_node;
+	struct acpi_hest_generic *generic;
+	struct acpi_hest_generic_status *estatus;
+	u32 len, node_len;
+
+	llnode = llist_del_all(&ghes_estatus_llist);
+	/*
+	 * Because the time order of estatus in list is reversed,
+	 * revert it back to proper order.
+	 */
+	llnode = llist_nodes_reverse(llnode);
 	while (llnode) {
 		next = llnode->next;
 		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
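
Note: llist_nodes_reverse() is a textbook in-place reversal of a singly linked list -- one pass, re-pointing each node at the previously visited one -- factored out so both the IRQ path and the NMI panic path can restore chronological order. A standalone userspace version of the same loop, with a hypothetical struct node standing in for struct llist_node:

#include <stdio.h>
#include <stddef.h>

struct node {
	int val;
	struct node *next;
};

/* Same loop as llist_nodes_reverse(): walk once, re-point each node backwards. */
static struct node *reverse(struct node *head)
{
	struct node *next, *tail = NULL;

	while (head) {
		next = head->next;
		head->next = tail;
		tail = head;
		head = next;
	}
	return tail;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *p = reverse(&a);	/* list was 1->2->3, now 3->2->1 */

	for (; p; p = p->next)
		printf("%d\n", p->val);
	return 0;
}
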
@@ -750,6 +787,32 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
 	}
 }
 
+static void ghes_print_queued_estatus(void)
+{
+	struct llist_node *llnode;
+	struct ghes_estatus_node *estatus_node;
+	struct acpi_hest_generic *generic;
+	struct acpi_hest_generic_status *estatus;
+	u32 len, node_len;
+
+	llnode = llist_del_all(&ghes_estatus_llist);
+	/*
+	 * Because the time order of estatus in list is reversed,
+	 * revert it back to proper order.
+	 */
+	llnode = llist_nodes_reverse(llnode);
+	while (llnode) {
+		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+					   llnode);
+		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+		len = apei_estatus_len(estatus);
+		node_len = GHES_ESTATUS_NODE_LEN(len);
+		generic = estatus_node->generic;
+		ghes_print_estatus(NULL, generic, estatus);
+		llnode = llnode->next;
+	}
+}
+
 static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 {
 	struct ghes *ghes, *ghes_global = NULL;
@@ -775,7 +838,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 
 	if (sev_global >= GHES_SEV_PANIC) {
 		oops_begin();
-		__ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+		ghes_print_queued_estatus();
+		__ghes_print_estatus(KERN_EMERG, ghes_global->generic,
 				     ghes_global->estatus);
 		/* reboot to log the error! */
 		if (panic_timeout == 0)
@@ -221,10 +221,9 @@ void __init acpi_hest_init(void)
 
 	status = acpi_get_table(ACPI_SIG_HEST, 0,
 				(struct acpi_table_header **)&hest_tab);
-	if (status == AE_NOT_FOUND) {
-		pr_info(HEST_PFX "Table not found.\n");
+	if (status == AE_NOT_FOUND)
 		goto err;
-	} else if (ACPI_FAILURE(status)) {
+	else if (ACPI_FAILURE(status)) {
 		const char *msg = acpi_format_exception(status);
 		pr_err(HEST_PFX "Failed to get table, %s\n", msg);
 		rc = -EINVAL;
@@ -32,6 +32,8 @@
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
 #include <acpi/atomicio.h>
 
 #define ACPI_PFX "ACPI: "
@@ -97,6 +99,37 @@ static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
 	return NULL;
 }
 
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn)	page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn)	0
+#endif
+
+static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (should_use_kmap(pfn)) {
+		if (pg_sz > PAGE_SIZE)
+			return NULL;
+		return (void __iomem __force *)kmap(pfn_to_page(pfn));
+	} else
+		return ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (page_is_ram(pfn))
+		kunmap(pfn_to_page(pfn));
+	else
+		iounmap(vaddr);
+}
+
 /*
  * Used to pre-map the specified IO memory area. First try to find
  * whether the area is already pre-mapped, if it is, increase the
@@ -119,7 +152,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 
 	pg_off = paddr & PAGE_MASK;
 	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
-	vaddr = ioremap(pg_off, pg_sz);
+	vaddr = acpi_map(pg_off, pg_sz);
 	if (!vaddr)
 		return NULL;
 	map = kmalloc(sizeof(*map), GFP_KERNEL);
@@ -135,7 +168,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 	vaddr = __acpi_try_ioremap(paddr, size);
 	if (vaddr) {
 		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-		iounmap(map->vaddr);
+		acpi_unmap(pg_off, map->vaddr);
 		kfree(map);
 		return vaddr;
 	}
@@ -144,7 +177,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 
 	return map->vaddr + (paddr - map->paddr);
 err_unmap:
-	iounmap(vaddr);
+	acpi_unmap(pg_off, vaddr);
 	return NULL;
 }
 
@@ -177,7 +210,7 @@ static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
 		return;
 
 	synchronize_rcu();
-	iounmap(map->vaddr);
+	acpi_unmap(map->paddr, map->vaddr);
 	kfree(map);
 }
 
@@ -260,6 +293,21 @@ int acpi_post_unmap_gar(struct acpi_generic_address *reg)
 }
 EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
 
+#ifdef readq
+static inline u64 read64(const volatile void __iomem *addr)
+{
+	return readq(addr);
+}
+#else
+static inline u64 read64(const volatile void __iomem *addr)
+{
+	u64 l, h;
+	l = readl(addr);
+	h = readl(addr+4);
+	return l | (h << 32);
+}
+#endif
+
 /*
  * Can be used in atomic (including NMI) or process context. RCU read
  * lock can only be released after the IO memory area accessing.
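
Note: on targets without a native readq(), the new read64() helper composes the 64-bit value from two 32-bit loads -- low word at the register address, high word 4 bytes above it -- so the access is no longer a single atomic load. A userspace sketch of just that composition over a plain buffer (read64_split is a made-up name; the kernel version reads device registers through readl()):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Fallback composition: low 32 bits at addr, high 32 bits at addr + 4. */
static uint64_t read64_split(const void *addr)
{
	uint32_t l, h;

	memcpy(&l, addr, sizeof(l));
	memcpy(&h, (const uint8_t *)addr + 4, sizeof(h));
	return (uint64_t)l | ((uint64_t)h << 32);
}

int main(void)
{
	uint64_t reg = 0x1122334455667788ULL;

	/* on a little-endian host this reproduces the original value */
	printf("%#llx\n", (unsigned long long)read64_split(&reg));
	return 0;
}
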
@@ -280,11 +328,9 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
 	case 32:
 		*val = readl(addr);
 		break;
-#ifdef readq
 	case 64:
-		*val = readq(addr);
+		*val = read64(addr);
 		break;
-#endif
 	default:
 		return -EINVAL;
 	}
@@ -293,6 +339,19 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
 	return 0;
 }
 
+#ifdef writeq
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+	writeq(val, addr);
+}
+#else
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val>>32, addr+4);
+}
+#endif
+
 static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
 {
 	void __iomem *addr;
@@ -309,11 +368,9 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
 	case 32:
 		writel(val, addr);
 		break;
-#ifdef writeq
 	case 64:
-		writeq(val, addr);
+		write64(val, addr);
 		break;
-#endif
 	default:
 		return -EINVAL;
 	}
@@ -15,6 +15,56 @@
 #include <linux/acpi_io.h>
 #include <acpi/acpiosxf.h>
 
+/* ACPI NVS regions, APEI may use it */
+
+struct nvs_region {
+	__u64 phys_start;
+	__u64 size;
+	struct list_head node;
+};
+
+static LIST_HEAD(nvs_region_list);
+
+#ifdef CONFIG_ACPI_SLEEP
+static int suspend_nvs_register(unsigned long start, unsigned long size);
+#else
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+{
+	return 0;
+}
+#endif
+
+int acpi_nvs_register(__u64 start, __u64 size)
+{
+	struct nvs_region *region;
+
+	region = kmalloc(sizeof(*region), GFP_KERNEL);
+	if (!region)
+		return -ENOMEM;
+	region->phys_start = start;
+	region->size = size;
+	list_add_tail(&region->node, &nvs_region_list);
+
+	return suspend_nvs_register(start, size);
+}
+
+int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
+			     void *data)
+{
+	int rc;
+	struct nvs_region *region;
+
+	list_for_each_entry(region, &nvs_region_list, node) {
+		rc = func(region->phys_start, region->size, data);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+
+
+#ifdef CONFIG_ACPI_SLEEP
 /*
  * Platforms, like ACPI, may want us to save some memory used by them during
  * suspend and to restore the contents of this memory during the subsequent
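
Note: acpi_nvs_register() just records each NVS window in a list, and acpi_nvs_for_each_region() replays that list through a caller-supplied callback, stopping at the first nonzero return -- the same pattern apei_get_nvs_resources() relies on earlier in this commit. Below is a self-contained userspace sketch of that record-then-iterate pattern only; it uses a fixed-size array instead of a kernel list_head, and all names (nvs_register, nvs_for_each_region, print_region) are illustrative.

#include <stdio.h>
#include <stdint.h>

struct nvs_region {
	uint64_t phys_start;
	uint64_t size;
};

static struct nvs_region nvs_regions[16];
static int nvs_count;

static int nvs_register(uint64_t start, uint64_t size)
{
	if (nvs_count >= 16)
		return -1;
	nvs_regions[nvs_count++] = (struct nvs_region){ start, size };
	return 0;
}

/* Walk all recorded regions; stop early if the callback reports an error. */
static int nvs_for_each_region(int (*func)(uint64_t, uint64_t, void *), void *data)
{
	int i, rc;

	for (i = 0; i < nvs_count; i++) {
		rc = func(nvs_regions[i].phys_start, nvs_regions[i].size, data);
		if (rc)
			return rc;
	}
	return 0;
}

static int print_region(uint64_t start, uint64_t size, void *data)
{
	printf("NVS [%#llx, +%#llx)\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}

int main(void)
{
	nvs_register(0xd0000000, 0x10000);
	nvs_register(0xd8000000, 0x4000);
	return nvs_for_each_region(print_region, NULL);
}
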
@@ -41,7 +91,7 @@ static LIST_HEAD(nvs_list);
  * things so that the data from page-aligned addresses in this region will
  * be copied into separate RAM pages.
  */
-int suspend_nvs_register(unsigned long start, unsigned long size)
+static int suspend_nvs_register(unsigned long start, unsigned long size)
 {
 	struct nvs_page *entry, *next;
 
@@ -159,3 +209,4 @@ void suspend_nvs_restore(void)
 		if (entry->data)
 			memcpy(entry->kaddr, entry->data, entry->size);
 }
+#endif
@@ -306,6 +306,11 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
 					     u32 *mask, u32 req);
 extern void acpi_early_init(void);
 
+extern int acpi_nvs_register(__u64 start, __u64 size);
+
+extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
+				    void *data);
+
 #else	/* !CONFIG_ACPI */
 
 #define acpi_disabled 1
@@ -348,15 +353,18 @@ static inline int acpi_table_parse(char *id,
 {
 	return -1;
 }
-#endif	/* !CONFIG_ACPI */
 
-#ifdef CONFIG_ACPI_SLEEP
-int suspend_nvs_register(unsigned long start, unsigned long size);
-#else
-static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+static inline int acpi_nvs_register(__u64 start, __u64 size)
 {
 	return 0;
 }
-#endif
+
+static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
+					    void *data)
+{
+	return 0;
+}
+
+#endif	/* !CONFIG_ACPI */
 
 #endif	/*_LINUX_ACPI_H*/