hrtimer: Keep pointer to first timer and simplify __remove_hrtimer()
__remove_hrtimer() needs to evaluate the expiry time to figure out whether the timer which is removed is possibly the first expiring timer on the cpu. Keep a pointer to it, which is lazily updated, so we can avoid the evaluation dance and retrieve the information from there.

Generates slightly better code.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20150414203501.752838019@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 895bdfa793
parent b97f44c9b6
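Before the hunks, a rough sketch of the scheme may help: the cpu base caches a pointer to the first expiring timer wherever that is recomputed, and timer removal then only needs a pointer comparison instead of re-evaluating expiry times. This is a simplified, self-contained model with made-up names (tmr, cpu_base_model, update_next_timer, remove_timer), not the kernel code; the real changes follow below.

#include <stdio.h>

/* Reduced stand-ins for the kernel structures (hypothetical names). */
struct tmr {
        long long expires;
};

struct cpu_base_model {
        long long expires_next;
        struct tmr *next_timer; /* lazily updated; compared, never dereferenced */
};

/* Called wherever the next expiring timer is (re)computed. */
static void update_next_timer(struct cpu_base_model *cb, struct tmr *t)
{
        cb->next_timer = t;
}

/* Removal: an identity check replaces the expiry-time evaluation. */
static void remove_timer(struct cpu_base_model *cb, struct tmr *t, int reprogram)
{
        if (reprogram && t == cb->next_timer)
                printf("removing first timer: reprogram the clock event device\n");
        else
                printf("not the first timer: nothing to do\n");
}

int main(void)
{
        struct tmr a = { 100 }, b = { 200 };
        struct cpu_base_model cb = { 0, NULL };

        update_next_timer(&cb, &a);     /* a expires first */
        cb.expires_next = a.expires;

        remove_timer(&cb, &b, 1);
        remove_timer(&cb, &a, 1);
        return 0;
}

The kernel variant additionally guards the cached pointer behind CONFIG_HIGH_RES_TIMERS, as the hunks below show.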
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -172,6 +172,7 @@ enum hrtimer_base_type {
  * @clock_was_set_seq:  Sequence counter of clock was set events
  * @expires_next:       absolute time of the next event which was scheduled
  *                      via clock_set_next_event()
+ * @next_timer:         Pointer to the first expiring timer
  * @in_hrtirq:          hrtimer_interrupt() is currently executing
  * @hres_active:        State of high resolution mode
  * @hang_detected:      The last hrtimer interrupt detected a hang
@@ -180,6 +181,10 @@ enum hrtimer_base_type {
  * @nr_hangs:           Total number of hrtimer interrupt hangs
  * @max_hang_time:      Maximum time spent in hrtimer_interrupt
  * @clock_base:         array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ *       Do not dereference the pointer because it is not reliable on
+ *       cross cpu removals.
  */
 struct hrtimer_cpu_base {
         raw_spinlock_t                  lock;
@@ -191,6 +196,7 @@ struct hrtimer_cpu_base {
                                         hres_active     : 1,
                                         hang_detected   : 1;
         ktime_t                         expires_next;
+        struct hrtimer                  *next_timer;
         unsigned int                    nr_events;
         unsigned int                    nr_retries;
         unsigned int                    nr_hangs;
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -415,12 +415,21 @@ static inline void debug_deactivate(struct hrtimer *timer)
 }
 
 #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
+                                             struct hrtimer *timer)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+        cpu_base->next_timer = timer;
+#endif
+}
+
 static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 {
         struct hrtimer_clock_base *base = cpu_base->clock_base;
         ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
         unsigned int active = cpu_base->active_bases;
 
+        hrtimer_update_next_timer(cpu_base, NULL);
         for (; active; base++, active >>= 1) {
                 struct timerqueue_node *next;
                 struct hrtimer *timer;
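The new hrtimer_update_next_timer() helper keeps the CONFIG_HIGH_RES_TIMERS conditional in one place so the call sites above stay unconditional; with the option disabled the inline body is empty and the calls compile away. A minimal sketch of that pattern outside the kernel, assuming a hypothetical HAVE_HIGH_RES build switch:

#include <stdio.h>

struct base_model {
        void *next;
};

/* The #ifdef lives only in the helper; build with -DHAVE_HIGH_RES to enable it. */
static inline void cache_next(struct base_model *b, void *t)
{
#ifdef HAVE_HIGH_RES
        b->next = t;
#else
        (void)b;
        (void)t;        /* does nothing when the feature is off */
#endif
}

int main(void)
{
        struct base_model b = { NULL };

        cache_next(&b, &b);     /* call site needs no conditional compilation */
        printf("next cached: %s\n", b.next ? "yes" : "no");
        return 0;
}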
@@ -431,8 +440,10 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
                 next = timerqueue_getnext(&base->active);
                 timer = container_of(next, struct hrtimer, node);
                 expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-                if (expires.tv64 < expires_next.tv64)
+                if (expires.tv64 < expires_next.tv64) {
                         expires_next = expires;
+                        hrtimer_update_next_timer(cpu_base, timer);
+                }
         }
         /*
          * clock_was_set() might have changed base->offset of any of
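With this hunk the scan in __hrtimer_get_next_event() records not only the earliest expiry but also which timer produced it, i.e. a plain argmin loop over the active timers. A reduced sketch of the same pattern (hypothetical types; INT64_MAX plays the role of KTIME_MAX):

#include <stdint.h>
#include <stdio.h>

struct tmr_model {
        int64_t expires;
};

/* Return the earliest expiry and remember which timer produced it. */
static int64_t next_event(struct tmr_model *t, int n, struct tmr_model **first)
{
        int64_t expires_next = INT64_MAX;       /* sentinel, like KTIME_MAX */
        int i;

        *first = NULL;
        for (i = 0; i < n; i++) {
                if (t[i].expires < expires_next) {
                        expires_next = t[i].expires;
                        *first = &t[i];         /* track the minimum's owner */
                }
        }
        return expires_next;
}

int main(void)
{
        struct tmr_model t[3] = { { 30 }, { 10 }, { 20 } };
        struct tmr_model *first;
        int64_t e = next_event(t, 3, &first);

        printf("first expiry %lld at index %d\n", (long long)e, (int)(first - t));
        return 0;
}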
@@ -597,6 +608,8 @@ static int hrtimer_reprogram(struct hrtimer *timer,
         if (cpu_base->in_hrtirq)
                 return 0;
 
+        cpu_base->next_timer = timer;
+
         /*
          * If a hang was detected in the last timer interrupt then we
          * do not schedule a timer which is earlier than the expiry
@@ -868,30 +881,27 @@ static void __remove_hrtimer(struct hrtimer *timer,
                              unsigned long newstate, int reprogram)
 {
         struct hrtimer_cpu_base *cpu_base = base->cpu_base;
-        struct timerqueue_node *next_timer;
+        unsigned int state = timer->state;
 
-        if (!(timer->state & HRTIMER_STATE_ENQUEUED))
-                goto out;
+        timer->state = newstate;
+        if (!(state & HRTIMER_STATE_ENQUEUED))
+                return;
 
-        next_timer = timerqueue_getnext(&base->active);
         if (!timerqueue_del(&base->active, &timer->node))
                 cpu_base->active_bases &= ~(1 << base->index);
 
-        if (&timer->node == next_timer) {
 #ifdef CONFIG_HIGH_RES_TIMERS
-                /* Reprogram the clock event device. if enabled */
-                if (reprogram && cpu_base->hres_active) {
-                        ktime_t expires;
-
-                        expires = ktime_sub(hrtimer_get_expires(timer),
-                                            base->offset);
-                        if (cpu_base->expires_next.tv64 == expires.tv64)
-                                hrtimer_force_reprogram(cpu_base, 1);
-                }
+        /*
+         * Note: If reprogram is false we do not update
+         * cpu_base->next_timer. This happens when we remove the first
+         * timer on a remote cpu. No harm as we never dereference
+         * cpu_base->next_timer. So the worst thing what can happen is
+         * an superflous call to hrtimer_force_reprogram() on the
+         * remote cpu later on if the same timer gets enqueued again.
+         */
+        if (reprogram && timer == cpu_base->next_timer)
+                hrtimer_force_reprogram(cpu_base, 1);
 #endif
-        }
-out:
-        timer->state = newstate;
 }
 
 /*