tick/sched: Optimize tick_do_update_jiffies64() further

Now that it's clear that there is always one tick to account, simplify the
calculations some more.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20201117132006.565663056@linutronix.de
Thomas Gleixner 2020-11-17 14:19:47 +01:00
parent 94ad2e3ced
commit 7a35bf2a6a
1 changed file with 6 additions and 5 deletions

kernel/time/tick-sched.c

@@ -55,7 +55,7 @@ static ktime_t last_jiffies_update;
  */
 static void tick_do_update_jiffies64(ktime_t now)
 {
-	unsigned long ticks = 0;
+	unsigned long ticks = 1;
 	ktime_t delta;
 
 	/*
@@ -93,20 +93,21 @@ static void tick_do_update_jiffies64(ktime_t now)
 
 	write_seqcount_begin(&jiffies_seq);
 
-	last_jiffies_update = ktime_add(last_jiffies_update, tick_period);
-
 	delta = ktime_sub(now, tick_next_period);
 	if (unlikely(delta >= tick_period)) {
 		/* Slow path for long idle sleep times */
 		s64 incr = ktime_to_ns(tick_period);
 
-		ticks = ktime_divns(delta, incr);
+		ticks += ktime_divns(delta, incr);
 
 		last_jiffies_update = ktime_add_ns(last_jiffies_update,
 						   incr * ticks);
+	} else {
+		last_jiffies_update = ktime_add(last_jiffies_update,
+						tick_period);
 	}
 
-	do_timer(++ticks);
+	do_timer(ticks);
 
 	/*
 	 * Keep the tick_next_period variable up to date. WRITE_ONCE()
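
For reference, both variants account for the same number of ticks: starting with ticks = 1 folds the previously unconditional single-tick advance into the slow-path incr * ticks product, while the fast path now advances last_jiffies_update by one tick_period in the new else branch. The following is a minimal user-space C sketch, not kernel code, that models the old and new bookkeeping with plain 64-bit nanosecond arithmetic; the tick length, variable names and test values are illustrative assumptions.

/*
 * Standalone sketch (illustrative only): compares the old and the new
 * jiffies bookkeeping and asserts that they advance the counters
 * identically. Assumes a 1 ms tick; names do not come from the kernel.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TICK_PERIOD_NS 1000000ULL	/* assumed 1 ms tick (HZ=1000) */

struct state {
	uint64_t last_update;	/* models last_jiffies_update */
	uint64_t jiffies;	/* models jiffies_64 */
};

/* Old scheme: unconditional one-tick advance, then do_timer(++ticks). */
static void account_old(struct state *s, uint64_t now, uint64_t next_period)
{
	uint64_t ticks = 0;
	uint64_t delta;

	s->last_update += TICK_PERIOD_NS;
	delta = now - next_period;
	if (delta >= TICK_PERIOD_NS) {
		ticks = delta / TICK_PERIOD_NS;
		s->last_update += TICK_PERIOD_NS * ticks;
	}
	s->jiffies += ++ticks;
}

/* New scheme: ticks starts at 1, one-tick advance only in the else path. */
static void account_new(struct state *s, uint64_t now, uint64_t next_period)
{
	uint64_t ticks = 1;
	uint64_t delta;

	delta = now - next_period;
	if (delta >= TICK_PERIOD_NS) {
		ticks += delta / TICK_PERIOD_NS;
		s->last_update += TICK_PERIOD_NS * ticks;
	} else {
		s->last_update += TICK_PERIOD_NS;
	}
	s->jiffies += ticks;
}

int main(void)
{
	uint64_t next_period = 5 * TICK_PERIOD_NS;
	uint64_t deltas[] = { 0, 1, TICK_PERIOD_NS, 7 * TICK_PERIOD_NS + 42 };

	for (unsigned int i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		struct state o = { 4 * TICK_PERIOD_NS, 100 };
		struct state n = o;
		uint64_t now = next_period + deltas[i];

		account_old(&o, now, next_period);
		account_new(&n, now, next_period);
		assert(o.last_update == n.last_update && o.jiffies == n.jiffies);
		printf("delta of %llu ticks: jiffies advanced by %llu in both\n",
		       (unsigned long long)(deltas[i] / TICK_PERIOD_NS),
		       (unsigned long long)(n.jiffies - 100));
	}
	return 0;
}

In the fast path (delta < tick_period) both schemes add exactly one tick; in the slow path the initial ticks = 1 already covers the tick that the old code added unconditionally before computing delta.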