tick: Consolidate tick handling for high and low res handlers
Besides unifying the code, this also adds the idle check before processing the idle accounting specifics in the low res handler. This way we also generalize this part of the nohz code for !CONFIG_HIGH_RES_TIMERS, to prepare for the adaptive tickless features.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
parent 5bb962269c
commit 9e8f559b08
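At a glance, the resulting structure in kernel/time/tick-sched.c looks roughly like the sketch below: both the low res handler tick_nohz_handler() and the high res handler tick_sched_timer() now delegate their per-tick work to the shared tick_sched_handle() helper, which carries the is_idle_task() check that previously existed only in the high res path. This is a condensed illustration of the hunks that follow, not a compilable excerpt; locals and unrelated code in both callers are elided.

static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
{
	/* Tick was stopped: keep the watchdog and idle accounting sane. */
	if (ts->tick_stopped) {
		touch_softlockup_watchdog();
		if (is_idle_task(current))	/* idle check, now also taken on the low res path */
			ts->idle_jiffies++;
	}
	update_process_times(user_mode(regs));
	profile_tick(CPU_PROFILING);
}

/* Low res (!CONFIG_HIGH_RES_TIMERS) tick handler, heavily elided. */
static void tick_nohz_handler(struct clock_event_device *dev)
{
	/* ... */
	tick_sched_do_timer(now);
	tick_sched_handle(ts, regs);
	/* ... reprogram the next event ... */
}

/* High res tick handler, heavily elided. */
static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
{
	/* ... */
	if (regs)
		tick_sched_handle(ts, regs);
	hrtimer_forward(timer, now, tick_period);
	return HRTIMER_RESTART;
}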
@@ -120,6 +120,25 @@ static void tick_sched_do_timer(ktime_t now)
 		tick_do_update_jiffies64(now);
 }
 
+static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
+{
+	/*
+	 * When we are idle and the tick is stopped, we have to touch
+	 * the watchdog as we might not schedule for a really long
+	 * time. This happens on complete idle SMP systems while
+	 * waiting on the login prompt. We also increment the "start of
+	 * idle" jiffy stamp so the idle accounting adjustment we do
+	 * when we go busy again does not account too much ticks.
+	 */
+	if (ts->tick_stopped) {
+		touch_softlockup_watchdog();
+		if (is_idle_task(current))
+			ts->idle_jiffies++;
+	}
+	update_process_times(user_mode(regs));
+	profile_tick(CPU_PROFILING);
+}
+
 /*
  * NOHZ - aka dynamic tick functionality
  */
@@ -675,22 +694,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 	dev->next_event.tv64 = KTIME_MAX;
 
 	tick_sched_do_timer(now);
-
-	/*
-	 * When we are idle and the tick is stopped, we have to touch
-	 * the watchdog as we might not schedule for a really long
-	 * time. This happens on complete idle SMP systems while
-	 * waiting on the login prompt. We also increment the "start
-	 * of idle" jiffy stamp so the idle accounting adjustment we
-	 * do when we go busy again does not account too much ticks.
-	 */
-	if (ts->tick_stopped) {
-		touch_softlockup_watchdog();
-		ts->idle_jiffies++;
-	}
-
-	update_process_times(user_mode(regs));
-	profile_tick(CPU_PROFILING);
+	tick_sched_handle(ts, regs);
 
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
@@ -818,23 +822,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs) {
-		/*
-		 * When we are idle and the tick is stopped, we have to touch
-		 * the watchdog as we might not schedule for a really long
-		 * time. This happens on complete idle SMP systems while
-		 * waiting on the login prompt. We also increment the "start of
-		 * idle" jiffy stamp so the idle accounting adjustment we do
-		 * when we go busy again does not account too much ticks.
-		 */
-		if (ts->tick_stopped) {
-			touch_softlockup_watchdog();
-			if (is_idle_task(current))
-				ts->idle_jiffies++;
-		}
-		update_process_times(user_mode(regs));
-		profile_tick(CPU_PROFILING);
-	}
+	if (regs)
+		tick_sched_handle(ts, regs);
 
 	hrtimer_forward(timer, now, tick_period);
 