OpenCloudOS-Kernel/drivers/acpi/acpica/evgpe.c

// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
* Copyright (C) 2000 - 2018, Intel Corp.
*
*****************************************************************************/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE) /* Entire module */
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);
/*******************************************************************************
*
* FUNCTION: acpi_ev_update_gpe_enable_mask
*
* PARAMETERS: gpe_event_info - GPE to update
*
* RETURN: Status
*
* DESCRIPTION: Updates GPE register enable mask based upon whether there are
* runtime references to this GPE
*
******************************************************************************/
acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the mask bit only if there are references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run,
			     (u8)register_bit);
	}

	gpe_register_info->enable_mask = gpe_register_info->enable_for_run;
	return_ACPI_STATUS(AE_OK);
}
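/*
 * Editorial note (illustrative, not part of the original source):
 * acpi_hw_get_gpe_register_bit() is assumed here to return a one-bit mask
 * positioned for this GPE within its 8-bit register. For example, a GPE at
 * offset 2 from base_gpe_number would yield register_bit = 0x04, which the
 * code above then sets in or clears from enable_for_run.
 */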
/*******************************************************************************
*
* FUNCTION: acpi_ev_enable_gpe
*
* PARAMETERS: gpe_event_info - GPE to enable
*
* RETURN: Status
*
* DESCRIPTION: Enable a GPE.
*
******************************************************************************/
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/* Enable the requested GPE */

	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_mask_gpe
*
* PARAMETERS: gpe_event_info - GPE to be blocked/unblocked
* is_masked - Whether the GPE is masked or not
*
* RETURN: Status
*
* DESCRIPTION: Unconditionally mask/unmask a GPE during runtime.
*
******************************************************************************/
acpi_status
acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_mask_gpe);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Perform the action */

	if (is_masked) {
		if (register_bit & gpe_register_info->mask_for_run) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
		ACPI_SET_BIT(gpe_register_info->mask_for_run, (u8)register_bit);
	} else {
		if (!(register_bit & gpe_register_info->mask_for_run)) {
			return_ACPI_STATUS(AE_BAD_PARAMETER);
		}

		ACPI_CLEAR_BIT(gpe_register_info->mask_for_run,
			       (u8)register_bit);
		if (gpe_event_info->runtime_count
		    && !gpe_event_info->disable_for_dispatch) {
			(void)acpi_hw_low_set_gpe(gpe_event_info,
						  ACPI_GPE_ENABLE);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
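/*
 * Illustrative sketch (editorial assumption, not in the original file):
 * masking is symmetric, so a caller that masks a GPE around a critical
 * region is expected to unmask it with the same routine:
 *
 *     (void)acpi_ev_mask_gpe(gpe_event_info, TRUE);   - disable and mark masked
 *     ...critical region...
 *     (void)acpi_ev_mask_gpe(gpe_event_info, FALSE);  - unmask; re-enable if still referenced
 */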
/*******************************************************************************
*
* FUNCTION: acpi_ev_add_gpe_reference
*
* PARAMETERS: gpe_event_info - Add a reference to this GPE
*
* RETURN: Status
*
* DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
* hardware-enabled.
*
******************************************************************************/
acpi_status
acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);
	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count++;
	if (gpe_event_info->runtime_count == 1) {

		/* Enable on first reference */
		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_ev_enable_gpe(gpe_event_info);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count--;
		}
	}

	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_remove_gpe_reference
*
* PARAMETERS: gpe_event_info - Remove a reference to this GPE
*
* RETURN: Status
*
* DESCRIPTION: Remove a reference to a GPE. When the last reference is
* removed, the GPE is hardware-disabled.
*
******************************************************************************/
acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);
	if (!gpe_event_info->runtime_count) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count--;
	if (!gpe_event_info->runtime_count) {

		/* Disable on last reference */
		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status =
			    acpi_hw_low_set_gpe(gpe_event_info,
						ACPI_GPE_DISABLE);
		}

		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count++;
		}
	}

	return_ACPI_STATUS(status);
}
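/*
 * Usage sketch (editorial, illustrative only): the two reference-counting
 * interfaces above are meant to be used in matched pairs by callers that
 * already hold the appropriate GPE locks. The first add enables the GPE in
 * hardware, the last remove disables it:
 *
 *     status = acpi_ev_add_gpe_reference(gpe_event_info);
 *     if (ACPI_SUCCESS(status)) {
 *             ...GPE remains hardware-enabled while referenced...
 *             status = acpi_ev_remove_gpe_reference(gpe_event_info);
 *     }
 */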
/*******************************************************************************
*
* FUNCTION: acpi_ev_low_get_gpe_info
*
* PARAMETERS: gpe_number - Raw GPE number
* gpe_block - A GPE info block
*
* RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number
* is not within the specified GPE block)
*
* DESCRIPTION: Returns the event_info struct associated with this GPE. This is
* the low-level implementation of ev_get_gpe_event_info.
*
******************************************************************************/
struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block)
{
	u32 gpe_index;

	/*
	 * Validate that the gpe_number is within the specified gpe_block.
	 * (Two steps)
	 */
	if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
		return (NULL);
	}

	gpe_index = gpe_number - gpe_block->block_base_number;
	if (gpe_index >= gpe_block->gpe_count) {
		return (NULL);
	}

	return (&gpe_block->event_info[gpe_index]);
}
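/*
 * Worked example (editorial, not from the original source): for a GPE block
 * with block_base_number = 0x10 and gpe_count = 16, gpe_number 0x1A yields
 * gpe_index = 0x0A and a valid event_info pointer, while gpe_number 0x20
 * fails the gpe_index >= gpe_count check above and returns NULL.
 */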
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_event_info
*
* PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
* gpe_number - Raw GPE number
*
* RETURN: A GPE event_info struct. NULL if not a valid GPE
*
* DESCRIPTION: Returns the event_info struct associated with this GPE.
* Validates the gpe_block and the gpe_number
*
* Should be called only when the GPE lists are semaphore locked
* and not subject to change.
*
******************************************************************************/
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_event_info *gpe_info;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_device means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Block 0 and 1 (These blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
							    acpi_gbl_gpe_fadt_blocks
							    [i]);
			if (gpe_info) {
				return (gpe_info);
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	obj_desc =
	    acpi_ns_get_attached_object((struct acpi_namespace_node *)
					gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	return (acpi_ev_low_get_gpe_info
		(gpe_number, obj_desc->device.gpe_block));
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_gpe_detect
*
* PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt.
* Can have multiple GPE blocks attached.
*
* RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
*
* DESCRIPTION: Detect if any GP events have occurred. This function is
* executed at interrupt level.
*
******************************************************************************/
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
{
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_namespace_node *gpe_device;
	struct acpi_gpe_register_info *gpe_register_info;
	struct acpi_gpe_event_info *gpe_event_info;
	u32 gpe_number;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;
	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}
	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		gpe_device = gpe_block->node;

		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE %02X-%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->
						  base_gpe_number,
						  gpe_register_info->
						  base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->
						  enable_for_run,
						  gpe_register_info->
						  enable_for_wake));
				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
				/* Detect and dispatch one GPE bit */

				gpe_event_info =
				    &gpe_block->
				    event_info[((acpi_size)i *
						ACPI_GPE_REGISTER_WIDTH) + j];
				gpe_number =
				    j + gpe_register_info->base_gpe_number;
				acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
				int_status |=
				    acpi_ev_detect_gpe(gpe_device,
						       gpe_event_info,
						       gpe_number);
				flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
			}
		}

		gpe_block = gpe_block->next;
	}

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
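/*
 * Editorial note (not in the original source): the inner loop above drops
 * acpi_gbl_gpe_lock around each acpi_ev_detect_gpe() call and re-acquires
 * it afterwards, so each GPE bit is detected and dispatched individually
 * rather than from a single snapshot of the whole register block taken
 * under one lock hold.
 */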
/*******************************************************************************
*
* FUNCTION: acpi_ev_asynch_execute_gpe_method
*
* PARAMETERS: Context (gpe_event_info) - Info for this GPE
*
* RETURN: None
*
* DESCRIPTION: Perform the actual execution of a GPE control method. This
* function is called from an invocation of acpi_os_execute and
* therefore does NOT execute at interrupt level - so that
* the control method itself is not executed in the context of
* an interrupt handler.
*
******************************************************************************/
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status = AE_OK;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_info *notify;
	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Do the correct dispatch - normal method or implicit notify */
switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
case ACPI_GPE_DISPATCH_NOTIFY:
/*
* Implicit notify.
* Dispatch a DEVICE_WAKE notify to the appropriate handler.
* NOTE: the request is queued for execution after this method
* completes. The notify handlers are NOT invoked synchronously
* from this thread -- because handlers may in turn run other
* control methods.
*
* June 2012: Expand implicit notify mechanism to support
* notifies on multiple device objects.
*/
notify = gpe_event_info->dispatch.notify_list;
while (ACPI_SUCCESS(status) && notify) {
status =
acpi_ev_queue_notify_request(notify->device_node,
ACPI_NOTIFY_DEVICE_WAKE);
notify = notify->next;
}
break;
case ACPI_GPE_DISPATCH_METHOD:
/* Allocate the evaluation information block */
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
status = AE_NO_MEMORY;
} else {
/*
* Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the
* _Lxx/_Exx control method that corresponds to this GPE
*/
info->prefix_node =
gpe_event_info->dispatch.method_node;
info->flags = ACPI_IGNORE_RETURN_VALUE;
status = acpi_ns_evaluate(info);
ACPI_FREE(info);
}
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"while evaluating GPE method [%4.4s]",
acpi_ut_get_node_name(gpe_event_info->
dispatch.
method_node)));
}
break;
default:
goto error_exit; /* Should never happen */
}
/* Defer enabling of GPE until all notify handlers are done */
status = acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_ev_asynch_enable_gpe, gpe_event_info);
if (ACPI_SUCCESS(status)) {
return_VOID;
}
error_exit:
acpi_ev_asynch_enable_gpe(gpe_event_info);
return_VOID;
}
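/*
 * Illustrative sketch (not part of this file): a host-side notify handler
 * that would receive the ACPI_NOTIFY_DEVICE_WAKE value queued by the
 * implicit-notify path above. The handler name, the device_handle, and the
 * ev_example_resume_device() helper are hypothetical; the installation call
 * assumes the public acpi_install_notify_handler() interface.
 *
 *   static void ev_example_wake_notify(acpi_handle device, u32 value,
 *                                      void *context)
 *   {
 *       if (value != ACPI_NOTIFY_DEVICE_WAKE) {
 *           return;
 *       }
 *       ev_example_resume_device(device);      (hypothetical helper)
 *   }
 *
 *   (void)acpi_install_notify_handler(device_handle, ACPI_DEVICE_NOTIFY,
 *                                     ev_example_wake_notify, NULL);
 */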
/*******************************************************************************
*
* FUNCTION: acpi_ev_asynch_enable_gpe
*
* PARAMETERS: Context (gpe_event_info) - Info for this GPE
* Callback from acpi_os_execute
*
* RETURN: None
*
* DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
* complete (i.e., finish execution of Notify)
*
******************************************************************************/
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
struct acpi_gpe_event_info *gpe_event_info = context;
acpi_cpu_flags flags;
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
(void)acpi_ev_finish_gpe(gpe_event_info);
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return;
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_finish_gpe
*
* PARAMETERS: gpe_event_info - Info for this GPE
*
* RETURN: Status
*
* DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
* of a GPE method or a synchronous or asynchronous GPE handler.
*
******************************************************************************/
acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
ACPI_GPE_LEVEL_TRIGGERED) {
/*
* GPE is level-triggered, we clear the GPE status bit after
* handling the event.
*/
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
return (status);
}
}
/*
* Enable this GPE, conditionally. This means that the GPE will
* only be physically enabled if the enable_mask bit is set
* in the event_info.
*/
(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
gpe_event_info->disable_for_dispatch = FALSE;
return (AE_OK);
}
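/*
 * Illustrative sketch (not part of this file): a GPE handler that defers
 * the clear/re-enable sequence to the core. Returning ACPI_REENABLE_GPE
 * from the handler causes acpi_ev_gpe_dispatch() below to invoke
 * acpi_ev_finish_gpe() on the handler's behalf. The handler name, the
 * GPE number 0x10, and the ev_example_service_event() helper are
 * assumptions; the registration assumes the public
 * acpi_install_gpe_handler() interface.
 *
 *   static u32 ev_example_gpe_handler(acpi_handle gpe_device,
 *                                     u32 gpe_number, void *context)
 *   {
 *       ev_example_service_event(context);     (hypothetical helper)
 *
 *       return (ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE);
 *   }
 *
 *   (void)acpi_install_gpe_handler(NULL, 0x10, ACPI_GPE_LEVEL_TRIGGERED,
 *                                  ev_example_gpe_handler, NULL);
 */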
/*******************************************************************************
*
* FUNCTION: acpi_ev_detect_gpe
*
* PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
* gpe_event_info - Info for this GPE
* gpe_number - Number relative to the parent GPE block
*
* RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
*
* DESCRIPTION: Detect and dispatch a General Purpose Event to either a function
* (e.g. EC) or method (e.g. _Lxx/_Exx) handler.
* NOTE: GPE is W1C, so it is possible to handle a single GPE from both
* task and irq context in parallel as long as the process to
* detect and mask the GPE is atomic.
* However the atomicity of ACPI_GPE_DISPATCH_RAW_HANDLER is
* dependent on the raw handler itself.
*
******************************************************************************/
u32
acpi_ev_detect_gpe(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
u8 enabled_status_byte;
u64 status_reg;
u64 enable_reg;
u32 register_bit;
struct acpi_gpe_register_info *gpe_register_info;
struct acpi_gpe_handler_info *gpe_handler_info;
acpi_cpu_flags flags;
acpi_status status;
ACPI_FUNCTION_TRACE(ev_gpe_detect);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Get the info block for the entire GPE register */
gpe_register_info = gpe_event_info->register_info;
/* Get the register bitmask for this GPE */
register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
/* GPE currently enabled (enable bit == 1)? */
status = acpi_hw_read(&enable_reg, &gpe_register_info->enable_address);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
/* GPE currently active (status bit == 1)? */
status = acpi_hw_read(&status_reg, &gpe_register_info->status_address);
if (ACPI_FAILURE(status)) {
goto error_exit;
}
/* Check if there is anything active at all in this GPE */
ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
"Read registers for GPE %02X: Status=%02X, Enable=%02X, "
"RunEnable=%02X, WakeEnable=%02X\n",
gpe_number,
(u32)(status_reg & register_bit),
(u32)(enable_reg & register_bit),
gpe_register_info->enable_for_run,
gpe_register_info->enable_for_wake));
enabled_status_byte = (u8)(status_reg & enable_reg);
if (!(enabled_status_byte & register_bit)) {
goto error_exit;
}
/* Invoke global event handler if present */
acpi_gpe_count++;
if (acpi_gbl_global_event_handler) {
acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE,
gpe_device, gpe_number,
acpi_gbl_global_event_handler_context);
}
/* Found an active GPE */
if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
ACPI_GPE_DISPATCH_RAW_HANDLER) {
/* Dispatch the event to a raw handler */
gpe_handler_info = gpe_event_info->dispatch.handler;
/*
* There is no protection around the namespace node
* and the GPE handler to ensure a safe destruction
* because:
* 1. The namespace node is expected to always
* exist after loading a table.
* 2. The GPE handler is expected to be flushed by
* acpi_os_wait_events_complete() before the
* destruction.
*/
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
int_status |=
gpe_handler_info->address(gpe_device, gpe_number,
gpe_handler_info->context);
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
} else {
/* Dispatch the event to a standard handler or method. */
int_status |= acpi_ev_gpe_dispatch(gpe_device,
gpe_event_info, gpe_number);
}
error_exit:
acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
return (int_status);
}
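/*
 * Illustrative sketch (not part of this file): a raw GPE handler of the
 * kind dispatched directly above. For ACPI_GPE_DISPATCH_RAW_HANDLER the
 * core only drops acpi_gbl_gpe_lock around the call; it does not disable,
 * clear, or re-enable the GPE, so the handler must manage the hardware
 * itself. The handler name and the ev_example_poll_hardware() helper are
 * hypothetical, registration is assumed to go through
 * acpi_install_gpe_raw_handler(), and acpi_clear_gpe() is shown as one
 * possible way to acknowledge the event.
 *
 *   static u32 ev_example_raw_handler(acpi_handle gpe_device,
 *                                     u32 gpe_number, void *context)
 *   {
 *       ev_example_poll_hardware(context);     (hypothetical helper)
 *       (void)acpi_clear_gpe(gpe_device, gpe_number);
 *       return (ACPI_INTERRUPT_HANDLED);
 *   }
 */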
/*******************************************************************************
*
* FUNCTION: acpi_ev_gpe_dispatch
*
* PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
* gpe_event_info - Info for this GPE
* gpe_number - Number relative to the parent GPE block
*
* RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
*
* DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
* or method (e.g. _Lxx/_Exx) handler.
*
******************************************************************************/
u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
acpi_status status;
u32 return_value;
ACPI_FUNCTION_TRACE(ev_gpe_dispatch);
/*
* Always disable the GPE so that it does not keep firing before
* any asynchronous activity completes (either from the execution
* of a GPE method or an asynchronous GPE handler.)
*
* If there is no handler or method to run, just disable the
* GPE and leave it disabled permanently to prevent further such
* pointless events from firing.
*/
status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to disable GPE %02X", gpe_number));
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
/*
* If edge-triggered, clear the GPE status bit now. Note that
* level-triggered events are cleared after the GPE is serviced.
*/
if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
ACPI_GPE_EDGE_TRIGGERED) {
status = acpi_hw_clear_gpe(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to clear GPE %02X",
gpe_number));
(void)acpi_hw_low_set_gpe(gpe_event_info,
ACPI_GPE_CONDITIONAL_ENABLE);
return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
}
}
gpe_event_info->disable_for_dispatch = TRUE;
/*
* Dispatch the GPE to either an installed handler or the control
* method associated with this GPE (_Lxx or _Exx). If a handler
* exists, we invoke it and do not attempt to run the method.
* If there is neither a handler nor a method, leave the GPE
* disabled.
*/
switch (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)) {
case ACPI_GPE_DISPATCH_HANDLER:
/* Invoke the installed handler (at interrupt level) */
return_value =
gpe_event_info->dispatch.handler->address(gpe_device,
gpe_number,
gpe_event_info->
dispatch.handler->
context);
/* If requested, clear (if level-triggered) and reenable the GPE */
if (return_value & ACPI_REENABLE_GPE) {
(void)acpi_ev_finish_gpe(gpe_event_info);
}
break;
case ACPI_GPE_DISPATCH_METHOD:
case ACPI_GPE_DISPATCH_NOTIFY:
/*
* Execute the method associated with the GPE
* NOTE: Level-triggered GPEs are cleared after the method completes.
*/
status = acpi_os_execute(OSL_GPE_HANDLER,
acpi_ev_asynch_execute_gpe_method,
gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Unable to queue handler for GPE %02X - event disabled",
gpe_number));
}
break;
default:
/*
* No handler or method to run!
* 03/2010: This case should no longer be possible. We will not allow
* a GPE to be enabled if it has no handler or method.
*/
ACPI_ERROR((AE_INFO,
"No handler or method for GPE %02X, disabling event",
gpe_number));
break;
}
return_UINT32(ACPI_INTERRUPT_HANDLED);
}
#endif /* !ACPI_REDUCED_HARDWARE */