timer: Minimize nohz off overhead

If nohz is disabled on the kernel command line, the [hr]timer code
still calls wake_up_nohz_cpu() and tick_nohz_full_cpu(), which is a
pointless exercise. Cache nohz_active in the [hr]timer per cpu bases
and avoid the overhead.
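
As a stand-alone illustration of the pattern (simplified plain C, not
the kernel code itself; all sketch_* names are made up): the per cpu
base carries a nohz_active flag which is set once when nohz becomes
active, so the enqueue path only pays for the remote kick when nohz is
actually in use.

  #include <stdbool.h>

  /* Stand-in for the real wake_up_nohz_cpu() */
  static void sketch_wake_up_nohz_cpu(int cpu) { (void)cpu; }

  struct sketch_cpu_base {
          bool nohz_active;   /* cached: set once, never cleared */
          int cpu;
  };

  static void sketch_enqueue_kick(struct sketch_cpu_base *base)
  {
          /* The base cache line is hot already, so this check is cheap */
          if (base->nohz_active)
                  sketch_wake_up_nohz_cpu(base->cpu);
  }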

Before:
  48.10%  hog       [.] main
  15.25%  [kernel]  [k] _raw_spin_lock_irqsave
   9.76%  [kernel]  [k] _raw_spin_unlock_irqrestore
   6.50%  [kernel]  [k] mod_timer
   6.44%  [kernel]  [k] lock_timer_base.isra.38
   3.87%  [kernel]  [k] detach_if_pending
   3.80%  [kernel]  [k] del_timer
   2.67%  [kernel]  [k] internal_add_timer
   1.33%  [kernel]  [k] __internal_add_timer
   0.73%  [kernel]  [k] timerfn
   0.54%  [kernel]  [k] wake_up_nohz_cpu

After:
  48.73%  hog       [.] main
  15.36%  [kernel]  [k] _raw_spin_lock_irqsave
   9.77%  [kernel]  [k] _raw_spin_unlock_irqrestore
   6.61%  [kernel]  [k] lock_timer_base.isra.38
   6.42%  [kernel]  [k] mod_timer
   3.90%  [kernel]  [k] detach_if_pending
   3.76%  [kernel]  [k] del_timer
   2.41%  [kernel]  [k] internal_add_timer
   1.39%  [kernel]  [k] __internal_add_timer
   0.76%  [kernel]  [k] timerfn

We should probably cache the nohz full state in the per cpu bases as
well to avoid the cpumask check in tick_nohz_full_cpu(). The base
cache line is hot already; the cpumask is not necessarily.
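
A possible shape for that follow-up, extending the sketch above (purely
illustrative, not part of this commit; the nohz_full_cpu field is made
up): the cpumask test would be replaced by one more flag on the already
hot base cache line.

  struct sketch_cpu_base {
          bool nohz_active;     /* set once when nohz becomes active */
          bool nohz_full_cpu;   /* hypothetical cache of tick_nohz_full_cpu(cpu) */
          int cpu;
  };

  static void sketch_enqueue_kick(struct sketch_cpu_base *base, bool deferrable)
  {
          if (!base->nohz_active)
                  return;
          /* Non-deferrable timers always kick; deferrable ones only kick a
           * nohz full target, mirroring the condition in the patch below. */
          if (!deferrable || base->nohz_full_cpu)
                  sketch_wake_up_nohz_cpu(base->cpu);
  }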

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Joonwoo Park <joonwoop@codeaurora.org>
Cc: Wenbo Wang <wenbo.wang@memblaze.com>
Link: http://lkml.kernel.org/r/20150526224512.207378134@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 683be13a28
parent bc7a34b8b9
Author: Thomas Gleixner <tglx@linutronix.de>
Date:   2015-05-26 22:50:35 +00:00

5 files changed, 19 insertions(+), 8 deletions(-)

--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h

@@ -164,6 +164,7 @@ enum hrtimer_base_type {
  * @active_bases:	Bitfield to mark bases with active timers
  * @clock_was_set_seq:	Sequence counter of clock was set events
  * @migration_enabled:	The migration of hrtimers to other cpus is enabled
+ * @nohz_active:	The nohz functionality is enabled
  * @expires_next:	absolute time of the next event which was scheduled
  *			via clock_set_next_event()
  * @next_timer:		Pointer to the first expiring timer
@@ -188,6 +189,7 @@ struct hrtimer_cpu_base {
 	unsigned int			active_bases;
 	unsigned int			clock_was_set_seq;
 	bool				migration_enabled;
+	bool				nohz_active;
 #ifdef CONFIG_HIGH_RES_TIMERS
 	unsigned int			in_hrtirq	: 1,
 					hres_active	: 1,

--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c

@@ -994,6 +994,7 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 		 * Kick to reschedule the next tick to handle the new timer
 		 * on dynticks target.
 		 */
-		wake_up_nohz_cpu(new_base->cpu_base->cpu);
+		if (new_base->cpu_base->nohz_active)
+			wake_up_nohz_cpu(new_base->cpu_base->cpu);
 	} else {
 		hrtimer_reprogram(timer, new_base);

--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h

@@ -156,9 +156,9 @@ extern unsigned long tick_nohz_active;
 #endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
-extern void timers_update_migration(void);
+extern void timers_update_migration(bool update_nohz);
 #else
-static inline void timers_update_migration(void) { }
+static inline void timers_update_migration(bool update_nohz) { }
 #endif
 
 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);

--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c

@@ -963,7 +963,7 @@ static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
 	ts->nohz_mode = mode;
 	/* One update is enough */
 	if (!test_and_set_bit(0, &tick_nohz_active))
-		timers_update_migration();
+		timers_update_migration(true);
 }
 
 /**

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c

@@ -86,6 +86,7 @@ struct tvec_base {
 	unsigned long all_timers;
 	int cpu;
 	bool migration_enabled;
+	bool nohz_active;
 	struct tvec_root tv1;
 	struct tvec tv2;
 	struct tvec tv3;
@@ -99,7 +100,7 @@ static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
 
-void timers_update_migration(void)
+void timers_update_migration(bool update_nohz)
 {
 	bool on = sysctl_timer_migration && tick_nohz_active;
 	unsigned int cpu;
@@ -111,6 +112,10 @@ void timers_update_migration(void)
 	for_each_possible_cpu(cpu) {
 		per_cpu(tvec_bases.migration_enabled, cpu) = on;
 		per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
+		if (!update_nohz)
+			continue;
+		per_cpu(tvec_bases.nohz_active, cpu) = true;
+		per_cpu(hrtimer_bases.nohz_active, cpu) = true;
 	}
 }
 
@@ -124,7 +129,7 @@ int timer_migration_handler(struct ctl_table *table, int write,
 	mutex_lock(&mutex);
 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 	if (!ret && write)
-		timers_update_migration();
+		timers_update_migration(false);
 	mutex_unlock(&mutex);
 	return ret;
 }
@@ -436,9 +441,12 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 	 * require special care against races with idle_cpu(), lets deal
 	 * with that later.
 	 */
-	if (!(timer->flags & TIMER_DEFERRABLE) || tick_nohz_full_cpu(base->cpu))
-		wake_up_nohz_cpu(base->cpu);
+	if (base->nohz_active) {
+		if (!(timer->flags & TIMER_DEFERRABLE) ||
+		    tick_nohz_full_cpu(base->cpu))
+			wake_up_nohz_cpu(base->cpu);
+	}
 }
 
 #ifdef CONFIG_TIMER_STATS
 void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)