Power management fixes for 5.6-rc2
Fix three issues related to the handling of wakeup events signaled
through the ACPI SCI while suspended to idle (Rafael Wysocki) and
unexport an internal cpufreq variable (Yangtao Li).

Merge tag 'pm-5.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
 "Fix three issues related to the handling of wakeup events signaled
  through the ACPI SCI while suspended to idle (Rafael Wysocki) and
  unexport an internal cpufreq variable (Yangtao Li)"

* tag 'pm-5.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI: PM: s2idle: Prevent spurious SCIs from waking up the system
  ACPICA: Introduce acpi_any_gpe_status_set()
  ACPI: PM: s2idle: Avoid possible race related to the EC GPE
  ACPI: EC: Fix flushing of pending work
  cpufreq: Make cpufreq_global_kobject static
commit 4e03e4e6d2
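As an orientation aid, the stand-alone sketch below models the reworked suspend-to-idle
wakeup decision that the diffs below implement across drivers/acpi/sleep.c and
kernel/power/suspend.c. It is a simplified illustration only: the stub variables and the
names ending in _stub/_model are invented for this example and merely stand in for the
real kernel helpers (pm_wakeup_pending(), irqd_is_wakeup_armed(), acpi_any_gpe_status_set(),
acpi_ec_dispatch_gpe()); the actual logic, including EC work flushing and SCI rearming,
is what the diff itself shows.

#include <stdbool.h>
#include <stdio.h>

/* Stubbed platform state; in the kernel these are read from the IRQ core and GPE hardware. */
static bool sci_still_armed;     /* IRQD_WAKEUP_ARMED still set => the SCI did not fire */
static bool any_gpe_status_set;  /* some enabled GPE has its status bit set */
static bool ec_event_pending;    /* dispatching the EC GPE produced work to process */
static int wakeup_events = 1;    /* number of pending system wakeup events */

static bool pm_wakeup_pending_stub(void)
{
        return wakeup_events > 0;
}

/* Decide whether a pending wakeup is genuine (true) or spurious (false). */
static bool s2idle_wake_model(void)
{
        while (pm_wakeup_pending_stub()) {
                /* The SCI did not trigger, so something else woke us up. */
                if (sci_still_armed)
                        return true;

                /* No EC events and another enabled GPE is active: genuine wakeup. */
                if (any_gpe_status_set && !ec_event_pending)
                        return true;

                /*
                 * Otherwise cancel the wakeup, let EC event processing run,
                 * and loop to see whether that produced a real wakeup.
                 */
                wakeup_events = 0;
                ec_event_pending = false;
        }
        return false; /* nothing genuine: go back to suspend-to-idle */
}

int main(void)
{
        sci_still_armed = false;
        any_gpe_status_set = true;
        ec_event_pending = true; /* looks like a spurious EC-originated SCI */
        printf("genuine wakeup: %s\n", s2idle_wake_model() ? "yes" : "no");
        return 0;
}

Built with any C99 compiler, this prints "no" for the spurious-EC scenario set up in
main(), mirroring the case the merge handles by going back to sleep instead of waking up.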
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
 
 acpi_status acpi_hw_enable_all_wakeup_gpes(void);
 
+u8 acpi_hw_check_all_gpes(void);
+
 acpi_status
 acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                                  struct acpi_gpe_block_info *gpe_block,

--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void)
 
 ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
 
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_any_gpe_status_set
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Whether or not the status bit is set for any GPE
+ *
+ * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
+ *              of them is set or FALSE otherwise.
+ *
+ ******************************************************************************/
+u32 acpi_any_gpe_status_set(void)
+{
+        acpi_status status;
+        u8 ret;
+
+        ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
+
+        status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+        if (ACPI_FAILURE(status)) {
+                return (FALSE);
+        }
+
+        ret = acpi_hw_check_all_gpes();
+        (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+        return (ret);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set)
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_install_gpe_block

--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -444,6 +444,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
         return (AE_OK);
 }
 
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_get_gpe_block_status
+ *
+ * PARAMETERS:  gpe_xrupt_info      - GPE Interrupt info
+ *              gpe_block           - Gpe Block info
+ *
+ * RETURN:      Success
+ *
+ * DESCRIPTION: Produce a combined GPE status bits mask for the given block.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+                             struct acpi_gpe_block_info *gpe_block,
+                             void *ret_ptr)
+{
+        struct acpi_gpe_register_info *gpe_register_info;
+        u64 in_enable, in_status;
+        acpi_status status;
+        u8 *ret = ret_ptr;
+        u32 i;
+
+        /* Examine each GPE Register within the block */
+
+        for (i = 0; i < gpe_block->register_count; i++) {
+                gpe_register_info = &gpe_block->register_info[i];
+
+                status = acpi_hw_read(&in_enable,
+                                      &gpe_register_info->enable_address);
+                if (ACPI_FAILURE(status)) {
+                        continue;
+                }
+
+                status = acpi_hw_read(&in_status,
+                                      &gpe_register_info->status_address);
+                if (ACPI_FAILURE(status)) {
+                        continue;
+                }
+
+                *ret |= in_enable & in_status;
+        }
+
+        return (AE_OK);
+}
+
 /******************************************************************************
  *
  * FUNCTION:    acpi_hw_disable_all_gpes
@@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
         return_ACPI_STATUS(status);
 }
 
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_hw_check_all_gpes
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      Combined status of all GPEs
+ *
+ * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
+ *              status bit is set for at least one of them of FALSE otherwise.
+ *
+ ******************************************************************************/
+
+u8 acpi_hw_check_all_gpes(void)
+{
+        u8 ret = 0;
+
+        ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
+
+        (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
+
+        return (ret != 0);
+}
+
 #endif /* !ACPI_REDUCED_HARDWARE */

--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec);
 
 static struct acpi_ec *boot_ec;
 static bool boot_ec_is_ecdt = false;
+static struct workqueue_struct *ec_wq;
 static struct workqueue_struct *ec_query_wq;
 
 static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
@@ -469,7 +470,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec)
                 ec_dbg_evt("Command(%s) submitted/blocked",
                            acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
                 ec->nr_pending_queries++;
-                schedule_work(&ec->work);
+                queue_work(ec_wq, &ec->work);
         }
 }
 
@@ -535,7 +536,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
 #ifdef CONFIG_PM_SLEEP
 static void __acpi_ec_flush_work(void)
 {
-        flush_scheduled_work(); /* flush ec->work */
+        drain_workqueue(ec_wq); /* flush ec->work */
         flush_workqueue(ec_query_wq); /* flush queries */
 }
 
@@ -556,8 +557,8 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
 
 void acpi_ec_flush_work(void)
 {
-        /* Without ec_query_wq there is nothing to flush. */
-        if (!ec_query_wq)
+        /* Without ec_wq there is nothing to flush. */
+        if (!ec_wq)
                 return;
 
         __acpi_ec_flush_work();
@@ -2107,25 +2108,33 @@ static struct acpi_driver acpi_ec_driver = {
         .drv.pm = &acpi_ec_pm,
 };
 
-static inline int acpi_ec_query_init(void)
+static void acpi_ec_destroy_workqueues(void)
 {
-        if (!ec_query_wq) {
-                ec_query_wq = alloc_workqueue("kec_query", 0,
-                                              ec_max_queries);
-                if (!ec_query_wq)
-                        return -ENODEV;
+        if (ec_wq) {
+                destroy_workqueue(ec_wq);
+                ec_wq = NULL;
         }
-        return 0;
-}
-
-static inline void acpi_ec_query_exit(void)
-{
         if (ec_query_wq) {
                 destroy_workqueue(ec_query_wq);
                 ec_query_wq = NULL;
         }
 }
 
+static int acpi_ec_init_workqueues(void)
+{
+        if (!ec_wq)
+                ec_wq = alloc_ordered_workqueue("kec", 0);
+
+        if (!ec_query_wq)
+                ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
+
+        if (!ec_wq || !ec_query_wq) {
+                acpi_ec_destroy_workqueues();
+                return -ENODEV;
+        }
+        return 0;
+}
+
 static const struct dmi_system_id acpi_ec_no_wakeup[] = {
         {
                 .ident = "Thinkpad X1 Carbon 6th",
@@ -2156,8 +2165,7 @@ int __init acpi_ec_init(void)
         int result;
         int ecdt_fail, dsdt_fail;
 
-        /* register workqueue for _Qxx evaluations */
-        result = acpi_ec_query_init();
+        result = acpi_ec_init_workqueues();
         if (result)
                 return result;
 
@@ -2188,6 +2196,6 @@ static void __exit acpi_ec_exit(void)
 {
 
         acpi_bus_unregister_driver(&acpi_ec_driver);
-        acpi_ec_query_exit();
+        acpi_ec_destroy_workqueues();
 }
 #endif /* 0 */

--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -990,21 +990,34 @@ static void acpi_s2idle_sync(void)
         acpi_os_wait_events_complete(); /* synchronize Notify handling */
 }
 
-static void acpi_s2idle_wake(void)
+static bool acpi_s2idle_wake(void)
 {
-        /*
-         * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has
-         * not triggered while suspended, so bail out.
-         */
-        if (!acpi_sci_irq_valid() ||
-            irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
-                return;
+        if (!acpi_sci_irq_valid())
+                return pm_wakeup_pending();
+
+        while (pm_wakeup_pending()) {
+                /*
+                 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
+                 * SCI has not triggered while suspended, so bail out (the
+                 * wakeup is pending anyway and the SCI is not the source of
+                 * it).
+                 */
+                if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+                        return true;
+
+                /*
+                 * If there are no EC events to process and at least one of the
+                 * other enabled GPEs is active, the wakeup is regarded as a
+                 * genuine one.
+                 *
+                 * Note that the checks below must be carried out in this order
+                 * to avoid returning prematurely due to a change of the EC GPE
+                 * status bit from unset to set between the checks with the
+                 * status bits of all the other GPEs unset.
+                 */
+                if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
+                        return true;
 
-        /*
-         * If there are EC events to process, the wakeup may be a spurious one
-         * coming from the EC.
-         */
-        if (acpi_ec_dispatch_gpe()) {
                 /*
                  * Cancel the wakeup and process all pending events in case
                  * there are any wakeup ones in there.
@@ -1017,8 +1030,19 @@ static void acpi_s2idle_wake(void)
 
                 acpi_s2idle_sync();
 
+                /*
+                 * The SCI is in the "suspended" state now and it cannot produce
+                 * new wakeup events till the rearming below, so if any of them
+                 * are pending here, they must be resulting from the processing
+                 * of EC events above or coming from somewhere else.
+                 */
+                if (pm_wakeup_pending())
+                        return true;
+
                 rearm_wake_irq(acpi_sci_irq);
         }
+
+        return false;
 }
 
 static void acpi_s2idle_restore_early(void)

--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -105,6 +105,8 @@ bool have_governor_per_policy(void)
 }
 EXPORT_SYMBOL_GPL(have_governor_per_policy);
 
+static struct kobject *cpufreq_global_kobject;
+
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
 {
         if (have_governor_per_policy())
@@ -2745,9 +2747,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
-struct kobject *cpufreq_global_kobject;
-EXPORT_SYMBOL(cpufreq_global_kobject);
-
 static int __init cpufreq_core_init(void)
 {
         if (cpufreq_disabled())

--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -752,6 +752,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
+ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
 
 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
                                 acpi_get_gpe_device(u32 gpe_index,

--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -201,9 +201,6 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy)
         return cpumask_weight(policy->cpus) > 1;
 }
 
-/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
-extern struct kobject *cpufreq_global_kobject;
-
 #ifdef CONFIG_CPU_FREQ
 unsigned int cpufreq_get(unsigned int cpu);
 unsigned int cpufreq_quick_get(unsigned int cpu);

--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -191,7 +191,7 @@ struct platform_s2idle_ops {
         int (*begin)(void);
         int (*prepare)(void);
         int (*prepare_late)(void);
-        void (*wake)(void);
+        bool (*wake)(void);
         void (*restore_early)(void);
         void (*restore)(void);
         void (*end)(void);

--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -131,11 +131,12 @@ static void s2idle_loop(void)
          * to avoid them upfront.
          */
         for (;;) {
-                if (s2idle_ops && s2idle_ops->wake)
-                        s2idle_ops->wake();
-
-                if (pm_wakeup_pending())
+                if (s2idle_ops && s2idle_ops->wake) {
+                        if (s2idle_ops->wake())
+                                break;
+                } else if (pm_wakeup_pending()) {
                         break;
+                }
 
                 pm_wakeup_clear(false);
 