sched/fair: Call cpufreq update util handlers less frequently on UP
For SMP systems, update_load_avg() calls the cpufreq update util
handlers only for the top level cfs_rq (i.e. rq->cfs).
But that is not the case for UP systems. update_load_avg() calls util
handler for any cfs_rq for which it is called. This would result in way
too many calls from the scheduler to the cpufreq governors when
CONFIG_FAIR_GROUP_SCHED is enabled.
Reduce the frequency of these calls by copying the behavior from the SMP
case, i.e. only call the util handlers for the top-level cfs_rq.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: linaro-kernel@lists.linaro.org
Fixes: 536bd00cdb ("sched/fair: Fix !CONFIG_SMP kernel cpufreq governor breakage")
Link: http://lkml.kernel.org/r/6abf69a2107525885b616a2c1ec03d9c0946171c.1495603536.git.viresh.kumar@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
fc33a8943e
commit
a030d7381d
|
@ -2790,6 +2790,29 @@ static inline void update_cfs_shares(struct sched_entity *se)
|
||||||
}
|
}
|
||||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||||
|
|
||||||
|
/*
 * Notify the cpufreq update_util hook of a utilization change, but only
 * when @cfs_rq is the root (top-level) cfs_rq of the local runqueue, so
 * group cfs_rqs do not flood the governor with callbacks.
 */
static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
{
	if (&this_rq()->cfs == cfs_rq) {
		/*
		 * There are a few boundary cases this might miss but it should
		 * get called often enough that that should (hopefully) not be
		 * a real problem -- added to that it only calls on the local
		 * CPU, so if we enqueue remotely we'll miss an update, but
		 * the next tick/schedule should update.
		 *
		 * It will not get called when we go idle, because the idle
		 * thread is a different class (!fair), nor will the utilization
		 * number include things like RT tasks.
		 *
		 * As is, the util number is not freq-invariant (we'd have to
		 * implement arch_scale_freq_capacity() for that).
		 *
		 * See cpu_util().
		 */
		cpufreq_update_util(rq_of(cfs_rq), 0);
	}
}
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
/*
|
/*
|
||||||
* Approximate:
|
* Approximate:
|
||||||
|
@ -3276,29 +3299,6 @@ static inline void set_tg_cfs_propagate(struct cfs_rq *cfs_rq) {}
|
||||||
|
|
||||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||||
|
|
||||||
/*
 * NOTE(review): this is the second rendering of cfs_rq_util_change() in the
 * diff view — the pre-patch definition at its old location (after the
 * CONFIG_FAIR_GROUP_SCHED block), which the patch removes after moving the
 * function earlier in the file so the !CONFIG_SMP code can use it.
 * The body is identical to the relocated copy.
 */
static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
{
	if (&this_rq()->cfs == cfs_rq) {
		/*
		 * There are a few boundary cases this might miss but it should
		 * get called often enough that that should (hopefully) not be
		 * a real problem -- added to that it only calls on the local
		 * CPU, so if we enqueue remotely we'll miss an update, but
		 * the next tick/schedule should update.
		 *
		 * It will not get called when we go idle, because the idle
		 * thread is a different class (!fair), nor will the utilization
		 * number include things like RT tasks.
		 *
		 * As is, the util number is not freq-invariant (we'd have to
		 * implement arch_scale_freq_capacity() for that).
		 *
		 * See cpu_util().
		 */
		cpufreq_update_util(rq_of(cfs_rq), 0);
	}
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Unsigned subtract and clamp on underflow.
|
* Unsigned subtract and clamp on underflow.
|
||||||
*
|
*
|
||||||
|
@ -3544,7 +3544,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
|
||||||
|
|
||||||
/*
 * !CONFIG_SMP stub of update_load_avg(): there is no PELT load tracking on
 * UP, so this only forwards the event to cfs_rq_util_change(), which
 * rate-limits cpufreq notifications to the top-level cfs_rq (this is the
 * change this patch makes; the old code called cpufreq_update_util()
 * directly for every cfs_rq).
 */
static inline void update_load_avg(struct sched_entity *se, int not_used1)
{
	cfs_rq_util_change(cfs_rq_of(se));
}
|
||||||
|
|
||||||
static inline void
|
static inline void
|
||||||
|
|
Loading…
Reference in New Issue