Merge branches 'intel_pstate' and 'pm-sleep'
* intel_pstate:
  cpufreq: intel_pstate: Avoid division by 0 in min_perf_pct_min()

* pm-sleep:
  Revert "ACPI / sleep: Ignore spurious SCI wakeups from suspend-to-idle"
commit fbd78afe34
drivers/acpi/battery.c

@@ -782,7 +782,7 @@ static int acpi_battery_update(struct acpi_battery *battery, bool resume)
 	if ((battery->state & ACPI_BATTERY_STATE_CRITICAL) ||
 	    (test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags) &&
 	    (battery->capacity_now <= battery->alarm)))
-		pm_wakeup_hard_event(&battery->device->dev);
+		pm_wakeup_event(&battery->device->dev, 0);
 
 	return result;
 }
drivers/acpi/button.c

@@ -217,7 +217,7 @@ static int acpi_lid_notify_state(struct acpi_device *device, int state)
 	}
 
 	if (state)
-		pm_wakeup_hard_event(&device->dev);
+		pm_wakeup_event(&device->dev, 0);
 
 	ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device);
 	if (ret == NOTIFY_DONE)
@@ -402,7 +402,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
 	} else {
 		int keycode;
 
-		pm_wakeup_hard_event(&device->dev);
+		pm_wakeup_event(&device->dev, 0);
 		if (button->suspended)
 			break;
 
@@ -534,7 +534,6 @@ static int acpi_button_add(struct acpi_device *device)
 		lid_device = device;
 	}
 
-	device_init_wakeup(&device->dev, true);
 	printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device));
 	return 0;
 
drivers/acpi/device_pm.c

@@ -24,7 +24,6 @@
 #include <linux/pm_qos.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
-#include <linux/suspend.h>
 
 #include "internal.h"
 
@@ -400,7 +399,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
 	mutex_lock(&acpi_pm_notifier_lock);
 
 	if (adev->wakeup.flags.notifier_present) {
-		pm_wakeup_ws_event(adev->wakeup.ws, 0, true);
+		__pm_wakeup_event(adev->wakeup.ws, 0);
 		if (adev->wakeup.context.work.func)
 			queue_pm_work(&adev->wakeup.context.work);
 	}
drivers/acpi/sleep.c

@@ -663,40 +663,14 @@ static int acpi_freeze_prepare(void)
 	acpi_os_wait_events_complete();
 	if (acpi_sci_irq_valid())
 		enable_irq_wake(acpi_sci_irq);
 
 	return 0;
 }
 
-static void acpi_freeze_wake(void)
-{
-	/*
-	 * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
-	 * that the SCI has triggered while suspended, so cancel the wakeup in
-	 * case it has not been a wakeup event (the GPEs will be checked later).
-	 */
-	if (acpi_sci_irq_valid() &&
-	    !irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
-		pm_system_cancel_wakeup();
-}
-
-static void acpi_freeze_sync(void)
-{
-	/*
-	 * Process all pending events in case there are any wakeup ones.
-	 *
-	 * The EC driver uses the system workqueue, so that one needs to be
-	 * flushed too.
-	 */
-	acpi_os_wait_events_complete();
-	flush_scheduled_work();
-}
-
 static void acpi_freeze_restore(void)
 {
 	acpi_disable_wakeup_devices(ACPI_STATE_S0);
 	if (acpi_sci_irq_valid())
 		disable_irq_wake(acpi_sci_irq);
 
 	acpi_enable_all_runtime_gpes();
 }
@@ -708,8 +682,6 @@ static void acpi_freeze_end(void)
 static const struct platform_freeze_ops acpi_freeze_ops = {
 	.begin = acpi_freeze_begin,
 	.prepare = acpi_freeze_prepare,
-	.wake = acpi_freeze_wake,
-	.sync = acpi_freeze_sync,
 	.restore = acpi_freeze_restore,
 	.end = acpi_freeze_end,
 };
drivers/base/power/main.c

@@ -1091,6 +1091,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
 	if (async_error)
 		goto Complete;
 
+	if (pm_wakeup_pending()) {
+		async_error = -EBUSY;
+		goto Complete;
+	}
+
 	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
drivers/base/power/wakeup.c

@@ -28,8 +28,8 @@ bool events_check_enabled __read_mostly;
 /* First wakeup IRQ seen by the kernel in the last cycle. */
 unsigned int pm_wakeup_irq __read_mostly;
 
-/* If greater than 0 and the system is suspending, terminate the suspend. */
-static atomic_t pm_abort_suspend __read_mostly;
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
 
 /*
  * Combined counters of registered wakeup events and wakeup events in progress.
@@ -855,26 +855,20 @@ bool pm_wakeup_pending(void)
 		pm_print_active_wakeup_sources();
 	}
 
-	return ret || atomic_read(&pm_abort_suspend) > 0;
+	return ret || pm_abort_suspend;
 }
 
 void pm_system_wakeup(void)
 {
-	atomic_inc(&pm_abort_suspend);
+	pm_abort_suspend = true;
 	freeze_wake();
 }
 EXPORT_SYMBOL_GPL(pm_system_wakeup);
 
-void pm_system_cancel_wakeup(void)
-{
-	atomic_dec(&pm_abort_suspend);
-}
-
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(void)
 {
+	pm_abort_suspend = false;
 	pm_wakeup_irq = 0;
-	if (reset)
-		atomic_set(&pm_abort_suspend, 0);
 }
 
 void pm_system_irq_wakeup(unsigned int irq_number)
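The wakeup.c hunks above revert pm_abort_suspend from an atomic counter back to a plain flag: with the counter, pm_system_cancel_wakeup() could balance out a wakeup so that pm_wakeup_pending() stopped reporting it, while with the bool any wakeup sticks until pm_wakeup_clear(). A rough userspace sketch of that difference, using C11 atomics in place of the kernel's atomic_t and entirely made-up function names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Counter variant (what the revert removes): each wakeup increments,
 * each cancellation decrements, so a cancelled (spurious) wakeup no
 * longer counts as pending.
 */
static atomic_int abort_count;

static void counted_wakeup(void)  { atomic_fetch_add(&abort_count, 1); }
static void counted_cancel(void)  { atomic_fetch_sub(&abort_count, 1); }
static bool counted_pending(void) { return atomic_load(&abort_count) > 0; }

/*
 * Flag variant (what the revert restores): any wakeup sets the flag,
 * and it stays set until it is explicitly cleared.
 */
static bool abort_flag;

static void flag_wakeup(void)  { abort_flag = true; }
static bool flag_pending(void) { return abort_flag; }
static void flag_clear(void)   { abort_flag = false; }

int main(void)
{
	counted_wakeup();
	counted_cancel();                       /* spurious wakeup withdrawn */
	printf("counted pending: %d\n", counted_pending()); /* 0 */

	flag_wakeup();
	printf("flag pending: %d\n", flag_pending());       /* 1 */
	flag_clear();
	printf("flag pending: %d\n", flag_pending());       /* 0 */
	return 0;
}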
drivers/cpufreq/intel_pstate.c

@@ -571,9 +571,10 @@ static inline void update_turbo_state(void)
 static int min_perf_pct_min(void)
 {
 	struct cpudata *cpu = all_cpu_data[0];
+	int turbo_pstate = cpu->pstate.turbo_pstate;
 
-	return DIV_ROUND_UP(cpu->pstate.min_pstate * 100,
-			    cpu->pstate.turbo_pstate);
+	return turbo_pstate ?
+		DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
 }
 
 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
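For reference, a minimal userspace sketch of the guard introduced above: DIV_ROUND_UP is reimplemented with the usual (n + d - 1) / d rounding, and struct pstate_sample with its sample values is purely illustrative, not the driver's real cpudata layout:

#include <stdio.h>

/* Same round-up division the kernel macro performs. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Illustrative stand-in for the driver's per-CPU P-state data. */
struct pstate_sample {
	int min_pstate;
	int turbo_pstate;
};

/*
 * Minimum performance percentage: min_pstate as a percentage of
 * turbo_pstate, or 0 when turbo_pstate has not been populated yet,
 * which is the division-by-zero case the fix guards against.
 */
static int min_perf_pct_min(const struct pstate_sample *p)
{
	int turbo_pstate = p->turbo_pstate;

	return turbo_pstate ?
		DIV_ROUND_UP(p->min_pstate * 100, turbo_pstate) : 0;
}

int main(void)
{
	struct pstate_sample populated = { .min_pstate = 8, .turbo_pstate = 36 };
	struct pstate_sample empty = { 0 };

	printf("%d%%\n", min_perf_pct_min(&populated)); /* prints 23% */
	printf("%d%%\n", min_perf_pct_min(&empty));     /* prints 0%, no crash */
	return 0;
}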
include/linux/suspend.h

@@ -189,8 +189,6 @@
 struct platform_freeze_ops {
 	int (*begin)(void);
 	int (*prepare)(void);
-	void (*wake)(void);
-	void (*sync)(void);
 	void (*restore)(void);
 	void (*end)(void);
 };
@@ -430,8 +428,7 @@ extern unsigned int pm_wakeup_irq;
 
 extern bool pm_wakeup_pending(void);
 extern void pm_system_wakeup(void);
-extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(void);
 extern void pm_system_irq_wakeup(unsigned int irq_number);
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
@@ -481,7 +478,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 
 static inline bool pm_wakeup_pending(void) { return false; }
 static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(bool reset) {}
+static inline void pm_wakeup_clear(void) {}
 static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
 
 static inline void lock_system_sleep(void) {}
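The .wake and .sync members dropped from struct platform_freeze_ops above were optional hooks; the s2idle_loop() code removed further down only invoked them after checking both the ops pointer and the member. A small standalone sketch of that optional-callback pattern, with a made-up ops table:

#include <stdio.h>

/*
 * Illustrative ops table with optional hooks, in the spirit of
 * struct platform_freeze_ops; all names here are made up.
 */
struct demo_freeze_ops {
	int  (*prepare)(void);
	void (*wake)(void);   /* optional, may be NULL */
	void (*sync)(void);   /* optional, may be NULL */
};

static int demo_prepare(void)
{
	printf("prepare\n");
	return 0;
}

/* This platform registers no wake/sync hooks. */
static const struct demo_freeze_ops demo_ops = {
	.prepare = demo_prepare,
};

static const struct demo_freeze_ops *freeze_ops = &demo_ops;

int main(void)
{
	if (freeze_ops && freeze_ops->prepare)
		freeze_ops->prepare();

	/* Unset optional hooks are skipped safely. */
	if (freeze_ops && freeze_ops->wake)
		freeze_ops->wake();
	if (freeze_ops && freeze_ops->sync)
		freeze_ops->sync();

	return 0;
}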
kernel/power/process.c

@@ -132,7 +132,7 @@ int freeze_processes(void)
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
-	pm_wakeup_clear(true);
+	pm_wakeup_clear();
 	pr_info("Freezing user space processes ... ");
 	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
kernel/power/suspend.c

@@ -72,8 +72,6 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
-	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true);
-
 	spin_lock_irq(&suspend_freeze_lock);
 	if (pm_wakeup_pending())
 		goto out;
@@ -100,27 +98,6 @@ static void freeze_enter(void)
 out:
 	suspend_freeze_state = FREEZE_STATE_NONE;
 	spin_unlock_irq(&suspend_freeze_lock);
-
-	trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false);
-}
-
-static void s2idle_loop(void)
-{
-	do {
-		freeze_enter();
-
-		if (freeze_ops && freeze_ops->wake)
-			freeze_ops->wake();
-
-		dpm_resume_noirq(PMSG_RESUME);
-		if (freeze_ops && freeze_ops->sync)
-			freeze_ops->sync();
-
-		if (pm_wakeup_pending())
-			break;
-
-		pm_wakeup_clear(false);
-	} while (!dpm_suspend_noirq(PMSG_SUSPEND));
 }
 
 void freeze_wake(void)
@@ -394,8 +371,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	 * all the devices are suspended.
	 */
 	if (state == PM_SUSPEND_FREEZE) {
-		s2idle_loop();
-		goto Platform_early_resume;
+		trace_suspend_resume(TPS("machine_suspend"), state, true);
+		freeze_enter();
+		trace_suspend_resume(TPS("machine_suspend"), state, false);
+		goto Platform_wake;
 	}
 
 	error = disable_nonboot_cpus();