sched: core: Use hrtimer_start[_expires]()
hrtimer_start() now enforces a timer interrupt when an already expired timer is enqueued. Get rid of the __hrtimer_start_range_ns() invocations and the loops around them.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/20150414203502.531131739@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 4961b6e118
parent 3497d206c4
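As a rough illustration of the pattern the diff below adopts (a sketch only, not part of the commit; the helper name restart_period_timer() is invented): since hrtimer_start() now guarantees the timer interrupt even when the enqueued timer has already expired, a periodic timer can simply be forwarded past "now" and re-armed, without a retry loop around __hrtimer_start_range_ns().

/* Sketch, modelled on the new start_bandwidth_timer() below. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static void restart_period_timer(struct hrtimer *timer, ktime_t period)
{
	if (hrtimer_active(timer))
		return;

	/* Advance the stored expiry past "now" by whole periods ... */
	hrtimer_forward_now(timer, period);
	/*
	 * ... and arm the timer at that expiry; if the expiry is already
	 * in the past, hrtimer_start_expires()/hrtimer_start() now force
	 * the timer interrupt instead of requiring a retry loop here.
	 */
	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}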
@@ -92,22 +92,11 @@
 
 void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
 {
-	unsigned long delta;
-	ktime_t soft, hard, now;
-
-	for (;;) {
-		if (hrtimer_active(period_timer))
-			break;
-
-		now = hrtimer_cb_get_time(period_timer);
-		hrtimer_forward(period_timer, now, period);
-
-		soft = hrtimer_get_softexpires(period_timer);
-		hard = hrtimer_get_expires(period_timer);
-		delta = ktime_to_ns(ktime_sub(hard, soft));
-		__hrtimer_start_range_ns(period_timer, soft, delta,
-					 HRTIMER_MODE_ABS_PINNED, 0);
-	}
+	if (hrtimer_active(period_timer))
+		return;
+
+	hrtimer_forward_now(period_timer, period);
+	hrtimer_start_expires(period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 DEFINE_MUTEX(sched_domains_mutex);
@@ -355,12 +344,11 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 #ifdef CONFIG_SMP
 
-static int __hrtick_restart(struct rq *rq)
+static void __hrtick_restart(struct rq *rq)
 {
 	struct hrtimer *timer = &rq->hrtick_timer;
-	ktime_t time = hrtimer_get_softexpires(timer);
 
-	return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
+	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*
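For context on the replacement call (a hedged sketch, not code from this commit; the helper name arm_at_stored_expiry() is invented): hrtimer_start_expires() arms the timer at the expiry already stored on it, typically recorded beforehand with hrtimer_set_expires(), which is why __hrtick_restart() can drop the explicit expiry and range arguments.

/* Sketch: the set-then-start pairing that hrtimer_start_expires() relies on. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static void arm_at_stored_expiry(struct hrtimer *timer, ktime_t expires)
{
	/* Record the absolute expiry on the timer itself ... */
	hrtimer_set_expires(timer, expires);
	/* ... then arm the timer at whatever expiry is currently stored. */
	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
}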
@@ -440,8 +428,8 @@ void hrtick_start(struct rq *rq, u64 delay)
 	 * doesn't make sense. Rely on vruntime for fairness.
 	 */
 	delay = max_t(u64, delay, 10000LL);
-	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
-			HRTIMER_MODE_REL_PINNED, 0);
+	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+		      HRTIMER_MODE_REL_PINNED);
 }
 
 static inline void init_hrtick(void)
@@ -3850,7 +3850,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
  * Are we near the end of the current quota period?
  *
  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
- * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
+ * hrtimer base being cleared by hrtimer_start. In the case of
  * migrate_hrtimers, base is never cleared, so we are fine.
  */
 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
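To make the locking note in that comment concrete (a sketch under assumptions, not code from this commit; the helper name quota_refresh_soon() is invented): a check built on hrtimer_expires_remaining() is only meaningful while cfs_b->lock keeps a concurrent hrtimer_start() from re-arming the period timer and moving it to another base underneath the reader.

/*
 * Sketch of a check that depends on the comment above. The caller is
 * assumed to hold cfs_b->lock.
 */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static int quota_refresh_soon(struct cfs_bandwidth *cfs_b, u64 min_expire)
{
	struct hrtimer *refresh_timer = &cfs_b->period_timer;
	u64 remaining;

	/* If the callback is running, a quota refresh is already in flight. */
	if (hrtimer_callback_running(refresh_timer))
		return 1;

	/* Otherwise, is the next refresh closer than min_expire nanoseconds? */
	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
	return remaining < min_expire;
}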