sched/fair: Call cpufreq hook in additional paths
The cpufreq hook should be called any time the root CFS rq utilization
changes. This can occur when a task is switched to or from the fair
class, or a task moves between groups or CPUs, but these paths
currently do not call the cpufreq hook.

Fix this by adding the hook to attach_entity_load_avg() and
detach_entity_load_avg().

Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Steve Muckle <smuckle@linaro.org>
[ Added the .update_freq argument to update_cfs_rq_load_avg() to avoid a double cpufreq call. ]
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Cc: Juri Lelli <Juri.Lelli@arm.com>
Cc: Michael Turquette <mturquette@baylibre.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Morten Rasmussen <morten.rasmussen@arm.com>
Cc: Patrick Bellasi <patrick.bellasi@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1458858367-2831-1-git-send-email-smuckle@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 41e0d37f7a
commit a2c6c91f98
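As a rough illustration of the control flow this patch establishes, here is a minimal stand-alone C model (stub types, made-up utilization numbers, not kernel code): attaching or detaching an entity's load average now funnels through a single cfs_rq_util_change() helper, the one place that invokes the cpufreq hook.

#include <stdio.h>

/* Stub standing in for the kernel's cfs_rq; only the field the sketch needs. */
struct cfs_rq {
	unsigned long util_avg;	/* root cfs_rq utilization */
};

/* Stand-in for the real cpufreq hook. */
static void cpufreq_update_util(unsigned long util, unsigned long max)
{
	printf("cpufreq hook: util=%lu max=%lu\n", util, max);
}

/* Mirrors the new cfs_rq_util_change(): the single site that fires the hook. */
static void cfs_rq_util_change(struct cfs_rq *cfs_rq)
{
	cpufreq_update_util(cfs_rq->util_avg, 1024);
}

/* Mirrors attach_entity_load_avg(): adding a task's utilization (switch to
 * the fair class, group move, migration in) now notifies cpufreq. */
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, unsigned long util)
{
	cfs_rq->util_avg += util;
	cfs_rq_util_change(cfs_rq);
}

/* Mirrors detach_entity_load_avg(): removing a task's utilization also
 * notifies cpufreq so the frequency can come back down. */
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, unsigned long util)
{
	cfs_rq->util_avg = cfs_rq->util_avg > util ? cfs_rq->util_avg - util : 0;
	cfs_rq_util_change(cfs_rq);
}

int main(void)
{
	struct cfs_rq rq = { .util_avg = 100 };

	attach_entity_load_avg(&rq, 300);	/* e.g. task switched to fair  */
	detach_entity_load_avg(&rq, 300);	/* e.g. task switched away     */
	return 0;
}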
@@ -2874,38 +2874,12 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
 
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
 
-/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
-static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 {
-	struct sched_avg *sa = &cfs_rq->avg;
 	struct rq *rq = rq_of(cfs_rq);
-	int decayed, removed_load = 0, removed_util = 0;
 	int cpu = cpu_of(rq);
 
-	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
-		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
-		sa->load_avg = max_t(long, sa->load_avg - r, 0);
-		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
-		removed_load = 1;
-	}
-
-	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
-		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
-		sa->util_avg = max_t(long, sa->util_avg - r, 0);
-		sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
-		removed_util = 1;
-	}
-
-	decayed = __update_load_avg(now, cpu, sa,
-		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
-
-#ifndef CONFIG_64BIT
-	smp_wmb();
-	cfs_rq->load_last_update_time_copy = sa->last_update_time;
-#endif
-
-	if (cpu == smp_processor_id() && &rq->cfs == cfs_rq &&
-	    (decayed || removed_util)) {
+	if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
 		unsigned long max = rq->cpu_capacity_orig;
 
 		/*
@@ -2925,8 +2899,41 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 		 * See cpu_util().
 		 */
 		cpufreq_update_util(rq_clock(rq),
-				    min(sa->util_avg, max), max);
+				    min(cfs_rq->avg.util_avg, max), max);
 	}
+}
+
+/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+{
+	struct sched_avg *sa = &cfs_rq->avg;
+	int decayed, removed_load = 0, removed_util = 0;
+
+	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
+		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
+		sa->load_avg = max_t(long, sa->load_avg - r, 0);
+		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+		removed_load = 1;
+	}
+
+	if (atomic_long_read(&cfs_rq->removed_util_avg)) {
+		long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
+		sa->util_avg = max_t(long, sa->util_avg - r, 0);
+		sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
+		removed_util = 1;
+	}
+
+	decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
+		scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
+
+#ifndef CONFIG_64BIT
+	smp_wmb();
+	cfs_rq->load_last_update_time_copy = sa->last_update_time;
+#endif
+
+	if (update_freq && (decayed || removed_util))
+		cfs_rq_util_change(cfs_rq);
 
 	return decayed || removed_load;
 }
@@ -2947,7 +2954,7 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
 			  se->on_rq * scale_load_down(se->load.weight),
 			  cfs_rq->curr == se, NULL);
 
-	if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
+	if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg)
 		update_tg_load_avg(cfs_rq, 0);
 }
 
@@ -2976,6 +2983,8 @@ skip_aging:
 	cfs_rq->avg.load_sum += se->avg.load_sum;
 	cfs_rq->avg.util_avg += se->avg.util_avg;
 	cfs_rq->avg.util_sum += se->avg.util_sum;
+
+	cfs_rq_util_change(cfs_rq);
 }
 
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -2988,6 +2997,8 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	cfs_rq->avg.load_sum = max_t(s64,  cfs_rq->avg.load_sum - se->avg.load_sum, 0);
 	cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
 	cfs_rq->avg.util_sum = max_t(s32,  cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+
+	cfs_rq_util_change(cfs_rq);
 }
 
 /* Add the load generated by se into cfs_rq's load average */
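The two hunks above are what the changelog means by the fair-class switch and group-move paths: those paths reach attach_entity_load_avg()/detach_entity_load_avg(), so they now fire the cpufreq hook. A hedged sketch of the call chains involved (the caller names follow the fair-class code of this era, but every body here is a stub, not the kernel code):

struct task_struct { int dummy; };
struct cfs_rq { int dummy; };

static void attach_entity_load_avg(struct cfs_rq *cfs_rq) { /* now calls cfs_rq_util_change() */ }
static void detach_entity_load_avg(struct cfs_rq *cfs_rq) { /* now calls cfs_rq_util_change() */ }

/* switched_to_fair() and task_move_group_fair() end up here ... */
static void attach_task_cfs_rq(struct task_struct *p)
{
	struct cfs_rq cfs_rq = { 0 };

	attach_entity_load_avg(&cfs_rq);	/* cpufreq hook fires */
}

/* ... and switched_from_fair()/task_move_group_fair() end up here. */
static void detach_task_cfs_rq(struct task_struct *p)
{
	struct cfs_rq cfs_rq = { 0 };

	detach_entity_load_avg(&cfs_rq);	/* cpufreq hook fires */
}

int main(void)
{
	struct task_struct p = { 0 };

	detach_task_cfs_rq(&p);	/* e.g. task leaves the fair class */
	attach_task_cfs_rq(&p);	/* e.g. task joins the fair class  */
	return 0;
}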
@@ -3005,7 +3016,7 @@ enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			  cfs_rq->curr == se, NULL);
 	}
 
-	decayed = update_cfs_rq_load_avg(now, cfs_rq);
+	decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated);
 
 	cfs_rq->runnable_load_avg += sa->load_avg;
 	cfs_rq->runnable_load_sum += sa->load_sum;
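This hunk is where the new .update_freq argument earns its keep: on this enqueue path a freshly migrated task is attached via attach_entity_load_avg(), which now calls the hook itself, so passing !migrated keeps the load-average update from invoking cpufreq a second time. A simplified stand-alone model of that ordering (names mirror the patch, bodies are stubs, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

struct sched_entity { unsigned long long last_update_time; };
struct cfs_rq { int dummy; };

static void cfs_rq_util_change(struct cfs_rq *cfs_rq)
{
	printf("cpufreq hook fired\n");
}

static bool update_cfs_rq_load_avg(struct cfs_rq *cfs_rq, bool update_freq)
{
	bool decayed = true;			/* pretend the averages moved */

	if (update_freq)
		cfs_rq_util_change(cfs_rq);	/* hook from the update path */
	return decayed;
}

static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	cfs_rq_util_change(cfs_rq);		/* hook from the attach path */
}

static void enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* A freshly migrated task has last_update_time == 0. */
	bool migrated = !se->last_update_time;

	/*
	 * Pass !migrated: if the task migrated in, attach_entity_load_avg()
	 * below already calls the hook, so do not call it twice here.
	 */
	update_cfs_rq_load_avg(cfs_rq, !migrated);

	if (migrated)
		attach_entity_load_avg(cfs_rq, se);
}

int main(void)
{
	struct cfs_rq rq = { 0 };
	struct sched_entity migrated_task = { .last_update_time = 0 };

	enqueue_entity_load_avg(&rq, &migrated_task);	/* hook fires once */
	return 0;
}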
@@ -6213,7 +6224,7 @@ static void update_blocked_averages(int cpu)
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
 			update_tg_load_avg(cfs_rq, 0);
 	}
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -6274,7 +6285,7 @@ static inline void update_blocked_averages(int cpu)
 
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
 