sched/rt: Minimize rq->lock contention in do_sched_rt_period_timer()
With CONFIG_RT_GROUP_SCHED=y, do_sched_rt_period_timer() sequentially takes
each CPU's rq->lock. On a large, busy system, the cumulative time it takes to
acquire each lock can be excessive, even triggering a watchdog timeout.

If rt_rq->rt_time and rt_rq->rt_nr_running are both zero, this function does
nothing while holding the lock, so don't bother taking it at all.

Signed-off-by: Dave Kleikamp <dave.kleikamp@oracle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/a767637b-df85-912f-ba69-c90ee00a3fb6@oracle.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
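For context, here is a small standalone userspace sketch of the pattern the patch applies: peek at the per-rt_rq counters under the cheap rt_runtime_lock, and only take the contended rq->lock when there is actually work to do. This is not kernel code; the fake_rq/fake_rt_rq types, service_one_rq(), and the pthread mutexes are hypothetical stand-ins for the scheduler's structures and raw spinlocks.

/*
 * Illustrative userspace sketch only -- not kernel code. fake_rq and
 * fake_rt_rq stand in for the scheduler's rq/rt_rq structures, and
 * pthread mutexes stand in for raw spinlocks.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_rt_rq {
	pthread_mutex_t rt_runtime_lock;	/* cheap lock guarding the counters */
	unsigned long long rt_time;		/* accumulated RT runtime */
	unsigned int rt_nr_running;		/* queued RT tasks */
};

struct fake_rq {
	pthread_mutex_t lock;			/* the contended per-CPU lock */
	struct fake_rt_rq rt;
};

/* Returns true if the period timer had any work to do on this queue. */
static bool service_one_rq(struct fake_rq *rq)
{
	bool skip;

	/* Peek under the cheap lock; skip the heavy lock if there is no work. */
	pthread_mutex_lock(&rq->rt.rt_runtime_lock);
	skip = !rq->rt.rt_time && !rq->rt.rt_nr_running;
	pthread_mutex_unlock(&rq->rt.rt_runtime_lock);
	if (skip)
		return false;

	pthread_mutex_lock(&rq->lock);
	/* ... replenish bandwidth, unthrottle the rt_rq, etc. ... */
	pthread_mutex_unlock(&rq->lock);
	return true;
}

int main(void)
{
	static struct fake_rq rq = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.rt = {
			.rt_runtime_lock = PTHREAD_MUTEX_INITIALIZER,
			.rt_time = 0,
			.rt_nr_running = 0,
		},
	};

	/* Both counters are zero, so the expensive lock is never taken. */
	printf("did work: %d\n", service_one_rq(&rq));
	return 0;
}

Build with something like "cc -pthread sketch.c". The point mirrored from the patch is that the skip decision is made under rt_runtime_lock, so a queue with pending rt_time or queued RT tasks still takes rq->lock exactly as before.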
parent 896bbb2522
commit c249f255aa
@@ -840,6 +840,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
+		int skip;
+
+		/*
+		 * When span == cpu_online_mask, taking each rq->lock
+		 * can be time-consuming. Try to avoid it when possible.
+		 */
+		raw_spin_lock(&rt_rq->rt_runtime_lock);
+		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
+		raw_spin_unlock(&rt_rq->rt_runtime_lock);
+		if (skip)
+			continue;
 
 		raw_spin_lock(&rq->lock);
 		if (rt_rq->rt_time) {