timer: convert timer_slack_ns from unsigned long to u64
This patchset introduces a /proc/<pid>/timerslack_ns interface which allows controlling processes to set the timer slack value on other processes in order to save power by avoiding wakeups (something Android currently does via out-of-tree patches).

The first patch fixes the internal timer_slack_ns usage, which was defined as a long and thus limits the slack range to ~4 seconds on 32bit systems. It converts it to a u64, which provides the same basically unlimited slack (500 years) on both 32bit and 64bit machines.

The second patch introduces the /proc/<pid>/timerslack_ns interface, which allows the full 64bit slack range for a task to be read or set on both 32bit and 64bit machines.

With these two patches, on a 32bit machine, after setting the slack on bash to 10 seconds:

$ time sleep 1

real    0m10.747s
user    0m0.001s
sys     0m0.005s

The first patch is a little ugly, since I had to chase the slack delta arguments through a number of functions converting them to u64s. Let me know if it makes sense to break that up more or not.

Other than that things are fairly straightforward.

This patch (of 2):

The timer_slack_ns value in the task struct is currently an unsigned long. This means that for 32bit applications, the maximum slack is just over 4 seconds. However, on 64bit machines, it is much, much larger (~500 years).

This disparity could make application development a little confusing, since the behavior would differ between architectures. This patch converts the task's timer_slack_ns and default_timer_slack_ns (as well as the default_slack) to a u64. This means both 32bit and 64bit systems have the same effective internal slack range.

Now, the existing ABI via PR_GET_TIMERSLACK and PR_SET_TIMERSLACK specifies the interface as an unsigned long, so we preserve that limitation on 32bit systems: PR_SET_TIMERSLACK can only set the slack to an unsigned long value, and PR_GET_TIMERSLACK will return ULONG_MAX if the slack is actually larger than what can be stored by an unsigned long.

This patch also modifies the hrtimer functions which specified the slack delta as an unsigned long.

Signed-off-by: John Stultz <john.stultz@linaro.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Oren Laadan <orenl@cellrox.com>
Cc: Ruchi Kandoi <kandoiruchi@google.com>
Cc: Rom Lemarchand <romlem@android.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Android Kernel Team <kernel-team@android.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit da8b44d5a9 (parent 0a687aace3)
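For background on the ABI preserved here: PR_SET_TIMERSLACK and PR_GET_TIMERSLACK are the existing prctl() interface for the per-task slack. Below is a minimal, illustrative userspace sketch (not part of this patch) of how that interface behaves once the internal value is a u64; the 10ms value is just an example.

/* Illustrative only: exercises the existing prctl() timer-slack ABI
 * described in the changelog above. */
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        /* Request 10ms of timer slack for this task (value in nanoseconds).
         * On 32bit the argument is still limited to unsigned long. */
        if (prctl(PR_SET_TIMERSLACK, 10000000UL, 0, 0, 0) != 0)
                perror("PR_SET_TIMERSLACK");

        /* PR_GET_TIMERSLACK reports the slack as the prctl() return value;
         * with this series it returns ULONG_MAX on 32bit if the internal
         * u64 slack no longer fits in an unsigned long. */
        long slack = prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0);
        printf("timer slack: %ld ns\n", slack);
        return 0;
}

The /proc/<pid>/timerslack_ns file added by the second patch exposes the same per-task value over the full u64 range; something along the lines of "echo 10000000000 > /proc/$$/timerslack_ns" is how the 10 second bash slack shown in the changelog could be set (exact write format per the second patch).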
@@ -1616,7 +1616,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 {
 int res = 0, eavail, timed_out = 0;
 unsigned long flags;
-long slack = 0;
+u64 slack = 0;
 wait_queue_t wait;
 ktime_t expires, *to = NULL;
 
@@ -70,9 +70,9 @@ static long __estimate_accuracy(struct timespec *tv)
 return slack;
 }
 
-long select_estimate_accuracy(struct timespec *tv)
+u64 select_estimate_accuracy(struct timespec *tv)
 {
-unsigned long ret;
+u64 ret;
 struct timespec now;
 
 /*
@@ -402,7 +402,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
 struct poll_wqueues table;
 poll_table *wait;
 int retval, i, timed_out = 0;
-unsigned long slack = 0;
+u64 slack = 0;
 unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
 unsigned long busy_end = 0;
 
@@ -784,7 +784,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
 poll_table* pt = &wait->pt;
 ktime_t expire, *to = NULL;
 int timed_out = 0, count = 0;
-unsigned long slack = 0;
+u64 slack = 0;
 unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
 unsigned long busy_end = 0;
 
@@ -231,7 +231,7 @@ static inline long freezable_schedule_timeout_killable_unsafe(long timeout)
 * call this with locks held.
 */
 static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
-unsigned long delta, const enum hrtimer_mode mode)
+u64 delta, const enum hrtimer_mode mode)
 {
 int __retval;
 freezer_do_not_count();
@@ -220,7 +220,7 @@ static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time
 timer->node.expires = ktime_add_safe(time, delta);
 }
 
-static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
+static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, u64 delta)
 {
 timer->_softexpires = time;
 timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
@@ -378,7 +378,7 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
 
 /* Basic timer operations: */
 extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-unsigned long range_ns, const enum hrtimer_mode mode);
+u64 range_ns, const enum hrtimer_mode mode);
 
 /**
 * hrtimer_start - (re)start an hrtimer on the current CPU
@@ -399,7 +399,7 @@ extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 static inline void hrtimer_start_expires(struct hrtimer *timer,
 enum hrtimer_mode mode)
 {
-unsigned long delta;
+u64 delta;
 ktime_t soft, hard;
 soft = hrtimer_get_softexpires(timer);
 hard = hrtimer_get_expires(timer);
@@ -477,10 +477,12 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
 extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
 struct task_struct *tsk);
 
-extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
 const enum hrtimer_mode mode);
 extern int schedule_hrtimeout_range_clock(ktime_t *expires,
-unsigned long delta, const enum hrtimer_mode mode, int clock);
+u64 delta,
+const enum hrtimer_mode mode,
+int clock);
 extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
 
 /* Soft interrupt function to run the hrtimer queues: */
@@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq);
 extern void poll_freewait(struct poll_wqueues *pwq);
 extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 ktime_t *expires, unsigned long slack);
-extern long select_estimate_accuracy(struct timespec *tv);
+extern u64 select_estimate_accuracy(struct timespec *tv);
 
 
 static inline int poll_schedule(struct poll_wqueues *pwq, int state)
@@ -1792,8 +1792,8 @@ struct task_struct {
 * time slack values; these are used to round up poll() and
 * select() etc timeout values. These are in nanoseconds.
 */
-unsigned long timer_slack_ns;
-unsigned long default_timer_slack_ns;
+u64 timer_slack_ns;
+u64 default_timer_slack_ns;
 
 #ifdef CONFIG_KASAN
 unsigned int kasan_depth;
@@ -2169,6 +2169,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 error = perf_event_task_enable();
 break;
 case PR_GET_TIMERSLACK:
+if (current->timer_slack_ns > ULONG_MAX)
+error = ULONG_MAX;
+else
 error = current->timer_slack_ns;
 break;
 case PR_SET_TIMERSLACK:
@@ -979,7 +979,7 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
 * relative (HRTIMER_MODE_REL)
 */
 void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-unsigned long delta_ns, const enum hrtimer_mode mode)
+u64 delta_ns, const enum hrtimer_mode mode)
 {
 struct hrtimer_clock_base *base, *new_base;
 unsigned long flags;
@@ -1548,7 +1548,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 struct restart_block *restart;
 struct hrtimer_sleeper t;
 int ret = 0;
-unsigned long slack;
+u64 slack;
 
 slack = current->timer_slack_ns;
 if (dl_task(current) || rt_task(current))
@@ -1724,7 +1724,7 @@ void __init hrtimers_init(void)
 * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 */
 int __sched
-schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
+schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
 const enum hrtimer_mode mode, int clock)
 {
 struct hrtimer_sleeper t;
@@ -1792,7 +1792,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
 *
 * Returns 0 when the timer has expired otherwise -EINTR
 */
-int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+int __sched schedule_hrtimeout_range(ktime_t *expires, u64 delta,
 const enum hrtimer_mode mode)
 {
 return schedule_hrtimeout_range_clock(expires, delta, mode,
@@ -1698,10 +1698,10 @@ EXPORT_SYMBOL(msleep_interruptible);
 static void __sched do_usleep_range(unsigned long min, unsigned long max)
 {
 ktime_t kmin;
-unsigned long delta;
+u64 delta;
 
 kmin = ktime_set(0, min * NSEC_PER_USEC);
-delta = (max - min) * NSEC_PER_USEC;
+delta = (u64)(max - min) * NSEC_PER_USEC;
 schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
 }
 