Merge branch 'pm-assorted'

* pm-assorted:
  suspend: enable freeze timeout configuration through sys
  ACPI: enable ACPI SCI during suspend
  PM: Introduce suspend state PM_SUSPEND_FREEZE
  PM / Runtime: Add new helper function: pm_runtime_active()
  PM / tracing: remove deprecated power trace API
  PM: don't use [delayed_]work_pending()
  PM / Domains: don't use [delayed_]work_pending()
commit 7113fe74c1
Rafael J. Wysocki, 2013-02-15 13:58:54 +01:00
21 changed files with 125 additions and 174 deletions

Documentation/power/freezing-of-tasks.txt

@@ -223,3 +223,8 @@ since they ask the freezer to skip freezing this task, since it is anyway
 only after the entire suspend/hibernation sequence is complete.
 So, to summarize, use [un]lock_system_sleep() instead of directly using
 mutex_[un]lock(&pm_mutex). That would prevent freezing failures.
+
+V. Miscellaneous
+/sys/power/pm_freeze_timeout controls how long it will cost at most to freeze
+all user space processes or all freezable kernel threads, in unit of millisecond.
+The default value is 20000, with range of unsigned integer.
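An example, not part of this commit: since pm_freeze_timeout is a plain sysfs
attribute, it can be driven from user space. A minimal C sketch, assuming only
the path and millisecond units documented above (the value 30000 is arbitrary):

    #include <stdio.h>

    int main(void)
    {
            unsigned int msecs;
            FILE *f = fopen("/sys/power/pm_freeze_timeout", "r+");

            if (!f)
                    return 1;
            if (fscanf(f, "%u", &msecs) == 1)
                    printf("current freeze timeout: %u ms\n", msecs);
            rewind(f);
            fprintf(f, "30000\n");  /* raise the timeout to 30 seconds */
            fclose(f);
            return 0;
    }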

Documentation/power/runtime_pm.txt

@@ -426,6 +426,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
       'power.runtime_error' is set or 'power.disable_depth' is greater than
       zero)
 
+  bool pm_runtime_active(struct device *dev);
+    - return true if the device's runtime PM status is 'active' or its
+      'power.disable_depth' field is not equal to zero, or false otherwise
+
   bool pm_runtime_suspended(struct device *dev);
     - return true if the device's runtime PM status is 'suspended' and its
       'power.disable_depth' field is equal to zero, or false otherwise
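An illustrative sketch, not part of this commit, of how a driver might use the
new helper; foo_hw_readl() and FOO_STATUS are invented placeholders, not kernel
APIs:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Illustrative only: skip register access unless pm_runtime_active()
     * holds, i.e. the status is RPM_ACTIVE or runtime PM is disabled for
     * the device (disable_depth > 0), as described above. */
    static int foo_get_status(struct device *dev, u32 *status)
    {
            if (!pm_runtime_active(dev))
                    return -EAGAIN;         /* hardware may be powered off */
            *status = foo_hw_readl(dev, FOO_STATUS);
            return 0;
    }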

Documentation/trace/events-power.txt

@@ -17,7 +17,7 @@ Cf. include/trace/events/power.h for the events definitions.
 1. Power state switch events
 ============================
 
-1.1 New trace API
+1.1 Trace API
 -----------------
 
 A 'cpu' event class gathers the CPU-related events: cpuidle and
@@ -41,31 +41,6 @@ The event which has 'state=4294967295' in the trace is very important to the user
 space tools which are using it to detect the end of the current state, and so to
 correctly draw the states diagrams and to calculate accurate statistics etc.
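For illustration (not part of this commit), the entry/exit pairing described
above looks like this at a call site; PWR_EVENT_EXIT is the -1 marker that
user space sees as state=4294967295:

    #include <linux/smp.h>
    #include <trace/events/power.h>

    /* Minimal idle-hook sketch: entry carries the target C-state, exit
     * carries PWR_EVENT_EXIT, which closes the interval in the trace. */
    static void example_idle(unsigned int target_state)
    {
            trace_cpu_idle(target_state, smp_processor_id());
            /* ... enter the hardware idle state here ... */
            trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
    }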
-1.2 DEPRECATED trace API
-------------------------
-
-A new Kconfig option CONFIG_EVENT_POWER_TRACING_DEPRECATED with the default value of
-'y' has been created. This allows the legacy trace power API to be used conjointly
-with the new trace API.
-The Kconfig option, the old trace API (in include/trace/events/power.h) and the
-old trace points will disappear in a future release (namely 2.6.41).
-
-power_start            "type=%lu state=%lu cpu_id=%lu"
-power_frequency        "type=%lu state=%lu cpu_id=%lu"
-power_end              "cpu_id=%lu"
-
-The 'type' parameter takes one of those macros:
- . POWER_NONE = 0,
- . POWER_CSTATE = 1,   /* C-State */
- . POWER_PSTATE = 2,   /* Frequency change or DVFS */
-
-The 'state' parameter is set depending on the type:
- . Target C-state for type=POWER_CSTATE,
- . Target frequency for type=POWER_PSTATE,
-
-power_end is used to indicate the exit of a state, corresponding to the latest
-power_start event.
-
 2. Clocks events
 ================
 
 The clock events are used for clock enable/disable and for

arch/arm/mach-omap2/pm34xx.c

@@ -351,12 +351,10 @@ static void omap3_pm_idle(void)
        if (omap_irq_pending())
                goto out;
 
-       trace_power_start(POWER_CSTATE, 1, smp_processor_id());
        trace_cpu_idle(1, smp_processor_id());
 
        omap_sram_idle();
 
-       trace_power_end(smp_processor_id());
        trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 
 out:

arch/x86/kernel/process.c

@@ -375,7 +375,6 @@ void cpu_idle(void)
  */
 void default_idle(void)
 {
-       trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
        trace_cpu_idle_rcuidle(1, smp_processor_id());
        current_thread_info()->status &= ~TS_POLLING;
        /*
@@ -389,7 +388,6 @@ void default_idle(void)
        else
                local_irq_enable();
        current_thread_info()->status |= TS_POLLING;
-       trace_power_end_rcuidle(smp_processor_id());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #ifdef CONFIG_APM_MODULE
@@ -423,7 +421,6 @@ void stop_this_cpu(void *dummy)
 static void mwait_idle(void)
 {
        if (!need_resched()) {
-               trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);
@@ -434,7 +431,6 @@ static void mwait_idle(void)
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
-               trace_power_end_rcuidle(smp_processor_id());
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else
                local_irq_enable();
@@ -447,12 +443,10 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
-       trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
-       trace_power_end_rcuidle(smp_processor_id());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }

drivers/acpi/osl.c

@@ -787,7 +787,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
        acpi_irq_handler = handler;
        acpi_irq_context = context;
-       if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+       if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
                acpi_irq_handler = NULL;
                return AE_NOT_ACQUIRED;

drivers/base/power/domain.c

@@ -433,8 +433,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
  */
 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 {
-       if (!work_pending(&genpd->power_off_work))
-               queue_work(pm_wq, &genpd->power_off_work);
+       queue_work(pm_wq, &genpd->power_off_work);
 }
 
 /**

drivers/base/power/wakeup.c

@@ -382,6 +382,12 @@ static void wakeup_source_activate(struct wakeup_source *ws)
 {
        unsigned int cec;
 
+       /*
+        * active wakeup source should bring the system
+        * out of PM_SUSPEND_FREEZE state
+        */
+       freeze_wake();
+
        ws->active = true;
        ws->active_count++;
        ws->last_time = ktime_get();

drivers/cpufreq/cpufreq.c

@@ -294,7 +294,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
                        (unsigned long)freqs->cpu);
-               trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);

drivers/cpuidle/cpuidle.c

@@ -144,7 +144,6 @@ int cpuidle_idle_call(void)
                return 0;
        }
 
-       trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
        trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
        if (cpuidle_state_is_coupled(dev, drv, next_state))
@@ -153,7 +152,6 @@ int cpuidle_idle_call(void)
        else
                entered_state = cpuidle_enter_state(dev, drv, next_state);
 
-       trace_power_end_rcuidle(dev->cpu);
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
        /* give the governor an opportunity to reflect on the outcome */

include/linux/freezer.h

@@ -12,6 +12,11 @@ extern atomic_t system_freezing_cnt;   /* nr of freezing conds in effect */
 extern bool pm_freezing;               /* PM freezing in effect */
 extern bool pm_nosig_freezing;         /* PM nosig freezing in effect */
 
+/*
+ * Timeout for stopping processes
+ */
+extern unsigned int freeze_timeout_msecs;
+
 /*
  * Check if a process has been frozen
  */

include/linux/pm_runtime.h

@@ -80,6 +80,12 @@ static inline bool pm_runtime_suspended(struct device *dev)
                && !dev->power.disable_depth;
 }
 
+static inline bool pm_runtime_active(struct device *dev)
+{
+       return dev->power.runtime_status == RPM_ACTIVE
+               || dev->power.disable_depth;
+}
+
 static inline bool pm_runtime_status_suspended(struct device *dev)
 {
        return dev->power.runtime_status == RPM_SUSPENDED;
@@ -132,6 +138,7 @@ static inline void pm_runtime_put_noidle(struct device *dev) {}
 static inline bool device_run_wake(struct device *dev) { return false; }
 static inline void device_set_run_wake(struct device *dev, bool enable) {}
 static inline bool pm_runtime_suspended(struct device *dev) { return false; }
+static inline bool pm_runtime_active(struct device *dev) { return true; }
 static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
 static inline bool pm_runtime_enabled(struct device *dev) { return false; }

include/linux/suspend.h

@@ -34,8 +34,10 @@ static inline void pm_restore_console(void)
 typedef int __bitwise suspend_state_t;
 
 #define PM_SUSPEND_ON          ((__force suspend_state_t) 0)
-#define PM_SUSPEND_STANDBY     ((__force suspend_state_t) 1)
+#define PM_SUSPEND_FREEZE      ((__force suspend_state_t) 1)
+#define PM_SUSPEND_STANDBY     ((__force suspend_state_t) 2)
 #define PM_SUSPEND_MEM         ((__force suspend_state_t) 3)
+#define PM_SUSPEND_MIN         PM_SUSPEND_FREEZE
 #define PM_SUSPEND_MAX         ((__force suspend_state_t) 4)
 
 enum suspend_stat_step {
@@ -192,6 +194,7 @@ struct platform_suspend_ops {
  */
 extern void suspend_set_ops(const struct platform_suspend_ops *ops);
 extern int suspend_valid_only_mem(suspend_state_t state);
+extern void freeze_wake(void);
 
 /**
  * arch_suspend_disable_irqs - disable IRQs for suspend
@@ -217,6 +220,7 @@ extern int pm_suspend(suspend_state_t state);
 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+static inline void freeze_wake(void) {}
 #endif /* !CONFIG_SUSPEND */
 
 /* struct pbe is used for creating lists of pages that should be restored
include/trace/events/power.h

@@ -99,98 +99,6 @@ DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
 
        TP_ARGS(name, state)
 );
 
-#ifdef CONFIG_EVENT_POWER_TRACING_DEPRECATED
-
-/*
- * The power events are used for cpuidle & suspend (power_start, power_end)
- * and for cpufreq (power_frequency)
- */
-DECLARE_EVENT_CLASS(power,
-
-       TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
-
-       TP_ARGS(type, state, cpu_id),
-
-       TP_STRUCT__entry(
-               __field(        u64,            type            )
-               __field(        u64,            state           )
-               __field(        u64,            cpu_id          )
-       ),
-
-       TP_fast_assign(
-               __entry->type = type;
-               __entry->state = state;
-               __entry->cpu_id = cpu_id;
-       ),
-
-       TP_printk("type=%lu state=%lu cpu_id=%lu", (unsigned long)__entry->type,
-               (unsigned long)__entry->state, (unsigned long)__entry->cpu_id)
-);
-
-DEFINE_EVENT(power, power_start,
-
-       TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
-
-       TP_ARGS(type, state, cpu_id)
-);
-
-DEFINE_EVENT(power, power_frequency,
-
-       TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id),
-
-       TP_ARGS(type, state, cpu_id)
-);
-
-TRACE_EVENT(power_end,
-
-       TP_PROTO(unsigned int cpu_id),
-
-       TP_ARGS(cpu_id),
-
-       TP_STRUCT__entry(
-               __field(        u64,            cpu_id          )
-       ),
-
-       TP_fast_assign(
-               __entry->cpu_id = cpu_id;
-       ),
-
-       TP_printk("cpu_id=%lu", (unsigned long)__entry->cpu_id)
-);
-
-/* Deprecated dummy functions must be protected against multi-declartion */
-#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
-#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
-
-enum {
-       POWER_NONE = 0,
-       POWER_CSTATE = 1,
-       POWER_PSTATE = 2,
-};
-#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
-
-#else /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
-
-#ifndef _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
-#define _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED
-enum {
-       POWER_NONE = 0,
-       POWER_CSTATE = 1,
-       POWER_PSTATE = 2,
-};
-
-/* These dummy declaration have to be ripped out when the deprecated
-   events get removed */
-static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
-static inline void trace_power_end(u64 cpuid) {};
-static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
-static inline void trace_power_end_rcuidle(u64 cpuid) {};
-static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
-#endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
-#endif /* CONFIG_EVENT_POWER_TRACING_DEPRECATED */
-
 /*
  * The clock events are used for clock enable/disable and for
  * clock rate change

kernel/power/autosleep.c

@@ -66,7 +66,7 @@ static DECLARE_WORK(suspend_work, try_to_suspend);
 
 void queue_up_suspend_work(void)
 {
-       if (!work_pending(&suspend_work) && autosleep_state > PM_SUSPEND_ON)
+       if (autosleep_state > PM_SUSPEND_ON)
                queue_work(autosleep_wq, &suspend_work);
 }

kernel/power/main.c

@@ -313,7 +313,7 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
 static suspend_state_t decode_state(const char *buf, size_t n)
 {
 #ifdef CONFIG_SUSPEND
-       suspend_state_t state = PM_SUSPEND_STANDBY;
+       suspend_state_t state = PM_SUSPEND_MIN;
        const char * const *s;
 #endif
        char *p;
@@ -553,6 +553,30 @@ power_attr(pm_trace_dev_match);
 
 #endif /* CONFIG_PM_TRACE */
 
+#ifdef CONFIG_FREEZER
+static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
+                                     struct kobj_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%u\n", freeze_timeout_msecs);
+}
+
+static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
+                                      struct kobj_attribute *attr,
+                                      const char *buf, size_t n)
+{
+       unsigned long val;
+
+       if (kstrtoul(buf, 10, &val))
+               return -EINVAL;
+
+       freeze_timeout_msecs = val;
+       return n;
+}
+
+power_attr(pm_freeze_timeout);
+
+#endif /* CONFIG_FREEZER */
+
 static struct attribute * g[] = {
        &state_attr.attr,
 #ifdef CONFIG_PM_TRACE
@@ -575,6 +599,9 @@ static struct attribute * g[] = {
 #ifdef CONFIG_PM_SLEEP_DEBUG
        &pm_print_times_attr.attr,
 #endif
+#endif
+#ifdef CONFIG_FREEZER
+       &pm_freeze_timeout_attr.attr,
 #endif
        NULL,
 };

kernel/power/process.c

@@ -21,7 +21,7 @@
 /*
  * Timeout for stopping processes
  */
-#define TIMEOUT        (20 * HZ)
+unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
 
 static int try_to_freeze_tasks(bool user_only)
 {
@@ -36,7 +36,7 @@ static int try_to_freeze_tasks(bool user_only)
 
        do_gettimeofday(&start);
 
-       end_time = jiffies + TIMEOUT;
+       end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);
 
        if (!user_only)
                freeze_workqueues_begin();

kernel/power/qos.c

@@ -359,8 +359,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
                return;
        }
 
-       if (delayed_work_pending(&req->work))
-               cancel_delayed_work_sync(&req->work);
+       cancel_delayed_work_sync(&req->work);
 
        if (new_value != req->node.prio)
                pm_qos_update_target(
@@ -386,8 +385,7 @@ void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
                 "%s called for unknown object.", __func__))
                return;
 
-       if (delayed_work_pending(&req->work))
-               cancel_delayed_work_sync(&req->work);
+       cancel_delayed_work_sync(&req->work);
 
        if (new_value != req->node.prio)
                pm_qos_update_target(
@@ -416,8 +414,7 @@ void pm_qos_remove_request(struct pm_qos_request *req)
                return;
        }
 
-       if (delayed_work_pending(&req->work))
-               cancel_delayed_work_sync(&req->work);
+       cancel_delayed_work_sync(&req->work);
 
        pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
                             &req->node, PM_QOS_REMOVE_REQ,
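The rationale for the three hunks above, shown as a sketch (struct foo_dev and
foo_teardown() are invented for illustration): cancel_delayed_work_sync() is
already a safe no-op, returning false, when the work item is idle, so the
delayed_work_pending() test added nothing except a check-then-act race.

    #include <linux/workqueue.h>

    struct foo_dev {
            struct delayed_work poll_work;
    };

    /* Hypothetical teardown mirroring the change above: call the
     * cancellation unconditionally; it returns false if the work was
     * never queued and waits for completion if it is running. */
    static void foo_teardown(struct foo_dev *fd)
    {
            cancel_delayed_work_sync(&fd->poll_work);
    }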

kernel/power/suspend.c

@@ -30,12 +30,38 @@
 #include "power.h"
 
 const char *const pm_states[PM_SUSPEND_MAX] = {
+       [PM_SUSPEND_FREEZE]     = "freeze",
        [PM_SUSPEND_STANDBY]    = "standby",
        [PM_SUSPEND_MEM]        = "mem",
 };
 
 static const struct platform_suspend_ops *suspend_ops;
 
+static bool need_suspend_ops(suspend_state_t state)
+{
+       return !!(state > PM_SUSPEND_FREEZE);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
+static bool suspend_freeze_wake;
+
+static void freeze_begin(void)
+{
+       suspend_freeze_wake = false;
+}
+
+static void freeze_enter(void)
+{
+       wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
+}
+
+void freeze_wake(void)
+{
+       suspend_freeze_wake = true;
+       wake_up(&suspend_freeze_wait_head);
+}
+EXPORT_SYMBOL_GPL(freeze_wake);
+
 /**
  * suspend_set_ops - Set the global suspend method table.
  * @ops: Suspend operations to use.
@@ -50,8 +76,11 @@ EXPORT_SYMBOL_GPL(suspend_set_ops);
 
 bool valid_state(suspend_state_t state)
 {
+       if (state == PM_SUSPEND_FREEZE)
+               return true;
        /*
-        * All states need lowlevel support and need to be valid to the lowlevel
+        * PM_SUSPEND_STANDBY and PM_SUSPEND_MEMORY states need lowlevel
+        * support and need to be valid to the lowlevel
         * implementation, no valid callback implies that none are valid.
         */
        return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
@@ -89,11 +118,11 @@ static int suspend_test(int level)
  * hibernation). Run suspend notifiers, allocate the "suspend" console and
  * freeze processes.
  */
-static int suspend_prepare(void)
+static int suspend_prepare(suspend_state_t state)
 {
        int error;
 
-       if (!suspend_ops || !suspend_ops->enter)
+       if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
                return -EPERM;
 
        pm_prepare_console();
@@ -137,7 +166,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
        int error;
 
-       if (suspend_ops->prepare) {
+       if (need_suspend_ops(state) && suspend_ops->prepare) {
                error = suspend_ops->prepare();
                if (error)
                        goto Platform_finish;
@@ -149,12 +178,23 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
                goto Platform_finish;
        }
 
-       if (suspend_ops->prepare_late) {
+       if (need_suspend_ops(state) && suspend_ops->prepare_late) {
                error = suspend_ops->prepare_late();
                if (error)
                        goto Platform_wake;
        }
 
+       /*
+        * PM_SUSPEND_FREEZE equals
+        * frozen processes + suspended devices + idle processors.
+        * Thus we should invoke freeze_enter() soon after
+        * all the devices are suspended.
+        */
+       if (state == PM_SUSPEND_FREEZE) {
+               freeze_enter();
+               goto Platform_wake;
+       }
+
        if (suspend_test(TEST_PLATFORM))
                goto Platform_wake;
 
@@ -182,13 +222,13 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
        enable_nonboot_cpus();
 
  Platform_wake:
-       if (suspend_ops->wake)
+       if (need_suspend_ops(state) && suspend_ops->wake)
                suspend_ops->wake();
 
        dpm_resume_start(PMSG_RESUME);
 
  Platform_finish:
-       if (suspend_ops->finish)
+       if (need_suspend_ops(state) && suspend_ops->finish)
                suspend_ops->finish();
 
        return error;
@@ -203,11 +243,11 @@ int suspend_devices_and_enter(suspend_state_t state)
        int error;
        bool wakeup = false;
 
-       if (!suspend_ops)
+       if (need_suspend_ops(state) && !suspend_ops)
                return -ENOSYS;
 
        trace_machine_suspend(state);
-       if (suspend_ops->begin) {
+       if (need_suspend_ops(state) && suspend_ops->begin) {
                error = suspend_ops->begin(state);
                if (error)
                        goto Close;
@@ -226,7 +266,7 @@ int suspend_devices_and_enter(suspend_state_t state)
        do {
                error = suspend_enter(state, &wakeup);
-       } while (!error && !wakeup
+       } while (!error && !wakeup && need_suspend_ops(state)
                && suspend_ops->suspend_again && suspend_ops->suspend_again());
 
  Resume_devices:
@@ -236,13 +276,13 @@ int suspend_devices_and_enter(suspend_state_t state)
        ftrace_start();
        resume_console();
 
  Close:
-       if (suspend_ops->end)
+       if (need_suspend_ops(state) && suspend_ops->end)
                suspend_ops->end();
        trace_machine_suspend(PWR_EVENT_EXIT);
        return error;
 
  Recover_platform:
-       if (suspend_ops->recover)
+       if (need_suspend_ops(state) && suspend_ops->recover)
                suspend_ops->recover();
        goto Resume_devices;
 }
@@ -278,12 +318,15 @@ static int enter_state(suspend_state_t state)
        if (!mutex_trylock(&pm_mutex))
                return -EBUSY;
 
+       if (state == PM_SUSPEND_FREEZE)
+               freeze_begin();
+
        printk(KERN_INFO "PM: Syncing filesystems ... ");
        sys_sync();
        printk("done.\n");
 
        pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
-       error = suspend_prepare();
+       error = suspend_prepare(state);
        if (error)
                goto Unlock;
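With pm_states[] naming the new state "freeze" and kernel/power/main.c
accepting it via decode_state(), entering it from user space reduces to a
write to /sys/power/state. A minimal sketch, not part of this commit:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/power/state", "w");

            if (!f)
                    return 1;
            fprintf(f, "freeze\n");
            fflush(f);  /* the write blocks in freeze_enter() until freeze_wake() */
            fclose(f);
            return 0;
    }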

kernel/trace/Kconfig

@@ -78,21 +78,6 @@ config EVENT_TRACING
        select CONTEXT_SWITCH_TRACER
        bool
 
-config EVENT_POWER_TRACING_DEPRECATED
-       depends on EVENT_TRACING
-       bool "Deprecated power event trace API, to be removed"
-       default y
-       help
-         Provides old power event types:
-         C-state/idle accounting events:
-         power:power_start
-         power:power_end
-         and old cpufreq accounting event:
-         power:power_frequency
-         This is for userspace compatibility
-         and will vanish after 5 kernel iterations,
-         namely 3.1.
-
 config CONTEXT_SWITCH_TRACER
        bool

kernel/trace/power-traces.c

@@ -13,8 +13,5 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/power.h>
 
-#ifdef EVENT_POWER_TRACING_DEPRECATED
-EXPORT_TRACEPOINT_SYMBOL_GPL(power_start);
-#endif
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);