/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Timeout for stopping processes
 */
extern unsigned int freeze_timeout_msecs;

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}

/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);

/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}

static inline bool try_to_freeze(void)
{
	if (!(current->flags & PF_NOFREEZE))
		debug_check_no_locks_held();
	return try_to_freeze_unsafe();
}

extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);
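
/*
 * Illustrative sketch, not part of this header: the canonical freezable
 * kernel thread marks itself freezable once with set_freezable() and then
 * polls try_to_freeze() in its main loop, so it parks in the refrigerator
 * while tasks are being frozen.  The thread function and work helper below
 * are hypothetical:
 *
 *	static int my_worker_thread(void *unused)
 *	{
 *		set_freezable();
 *		while (!kthread_should_stop()) {
 *			try_to_freeze();
 *			do_one_work_item();
 *		}
 *		return 0;
 *	}
 */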

#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed.  Fortunately, in the ____call_usermodehelper() case the
 * parent won't really block freeze_processes(), since ____call_usermodehelper()
 * (the child) does a little before exec/exit and it can't be frozen before
 * waking up the parent.
 */

/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, the freezer[_do_not]_count()
 * pair wraps a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}

/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	try_to_freeze();
}
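
/*
 * Illustrative sketch, not part of this header: the pair should tightly
 * wrap a single blocking call, e.g. the vfork-parent wait described above
 * (the completion variable is hypothetical):
 *
 *	freezer_do_not_count();
 *	wait_for_completion(&vfork);
 *	freezer_count();
 *
 * Between the two calls the freezer treats this task as already frozen,
 * so the task must not do anything a frozen task couldn't safely do.
 */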

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	smp_mb();
	try_to_freeze_unsafe();
}

/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *			 state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached.  IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
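
/*
 * Illustrative sketch, not part of this header: a freezer-side scan along
 * the lines of try_to_freeze_tasks() treats tasks for which
 * freezer_should_skip() returns %true as already frozen (loop shape is
 * schematic only):
 *
 *	do_each_thread(g, p) {
 *		if (p == current || freezer_should_skip(p) || frozen(p))
 *			continue;
 *		todo++;
 *	} while_each_thread(g, p);
 */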

/*
 * These functions are intended to be used whenever you want to allow a
 * sleeping task to be frozen.  Note that none of them returns any clear
 * indication of whether a freeze event happened while in this function.
 */

/* Like schedule(), but should not block the freezer. */
static inline void freezable_schedule(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count();
}
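
/*
 * Illustrative sketch, not part of this header: a standard wait loop
 * becomes freezer-friendly by calling freezable_schedule() instead of
 * schedule(), so the sleeper doesn't hold up freeze_processes() during
 * suspend (event_pending() is a hypothetical condition check):
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (event_pending())
 *			break;
 *		freezable_schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */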

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezable_schedule_unsafe(void)
{
	freezer_do_not_count();
	schedule();
	freezer_count_unsafe();
}

/*
 * Like schedule_timeout(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline long freezable_schedule_timeout(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout(timeout);
	freezer_count();
	return __retval;
}

/*
 * Like schedule_timeout_interruptible(), but should not block the freezer.
 * Do not call this with locks held.
 */
static inline long freezable_schedule_timeout_interruptible(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout_interruptible(timeout);
	freezer_count();
	return __retval;
}

/* Like schedule_timeout_killable(), but should not block the freezer. */
static inline long freezable_schedule_timeout_killable(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count();
	return __retval;
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
{
	long __retval;

	freezer_do_not_count();
	__retval = schedule_timeout_killable(timeout);
	freezer_count_unsafe();
	return __retval;
}

/*
 * Like schedule_hrtimeout_range(), but should not block the freezer.  Do not
 * call this with locks held.
 */
static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
		u64 delta, const enum hrtimer_mode mode)
{
	int __retval;

	freezer_do_not_count();
	__retval = schedule_hrtimeout_range(expires, delta, mode);
	freezer_count();
	return __retval;
}
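
/*
 * Illustrative sketch, not part of this header: a freezable one-second
 * nap with 1 ms of timer slack.  As with schedule_hrtimeout_range(), the
 * caller sets the task state first:
 *
 *	ktime_t timeout = ktime_set(1, 0);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	freezable_schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
 *					   HRTIMER_MODE_REL);
 */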

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count_unsafe();						\
	__retval;							\
})

#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline bool freezer_should_skip(struct task_struct *p) { return false; }
static inline void set_freezable(void) {}

#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout(timeout)  schedule_timeout(timeout)

#define freezable_schedule_timeout_interruptible(timeout)		\
	schedule_timeout_interruptible(timeout)

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define freezable_schedule_hrtimeout_range(expires, delta, mode)	\
	schedule_hrtimeout_range(expires, delta, mode)

#define wait_event_freezekillable_unsafe(wq, condition)		\
		wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif /* FREEZER_H_INCLUDED */