sched/fair: calculate delta runnable load only when it's needed
Move the calculation of delta_sum/delta_avg to the one place where the result is actually needed.

Signed-off-by: Peng Wang <rocking@linux.alibaba.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20200103114400.17668-1-rocking@linux.alibaba.com
parent 9dec1b6949
commit fe71bbb21e
@@ -3366,16 +3366,17 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
 
 	runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
 	runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);
-	delta_sum = runnable_load_sum - se_weight(se) * se->avg.runnable_load_sum;
-	delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
-
-	se->avg.runnable_load_sum = runnable_sum;
-	se->avg.runnable_load_avg = runnable_load_avg;
 
 	if (se->on_rq) {
+		delta_sum = runnable_load_sum -
+				se_weight(se) * se->avg.runnable_load_sum;
+		delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
 		add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
 		add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
 	}
+
+	se->avg.runnable_load_sum = runnable_sum;
+	se->avg.runnable_load_avg = runnable_load_avg;
 }
 
 static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
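For readability, here is how the tail of update_tg_cfs_runnable() reads after this patch, rendered as plain code rather than a diff. This is a sketch reconstructed from the hunk above; the local declarations are paraphrased rather than copied verbatim from kernel/sched/fair.c. The point of the move: the deltas are only consumed inside the se->on_rq branch, and they compare the new sums against the old se->avg values, which is why the se->avg stores now follow the branch instead of preceding it.

	/*
	 * Sketch only: declarations paraphrased, not verbatim from
	 * kernel/sched/fair.c.
	 */
	long delta_avg;
	s64 delta_sum, runnable_load_sum;
	unsigned long runnable_load_avg;

	runnable_load_sum = (s64)se_runnable(se) * runnable_sum;
	runnable_load_avg = div_s64(runnable_load_sum, LOAD_AVG_MAX);

	if (se->on_rq) {
		/* The deltas are only used here, so only compute them here. */
		delta_sum = runnable_load_sum -
				se_weight(se) * se->avg.runnable_load_sum;
		delta_avg = runnable_load_avg - se->avg.runnable_load_avg;
		add_positive(&cfs_rq->avg.runnable_load_avg, delta_avg);
		add_positive(&cfs_rq->avg.runnable_load_sum, delta_sum);
	}

	/*
	 * The deltas compare the new sums against the old se->avg values,
	 * so se->avg is only overwritten once the branch is done with it.
	 */
	se->avg.runnable_load_sum = runnable_sum;
	se->avg.runnable_load_avg = runnable_load_avg;

When the entity is not on a runqueue, nothing reads delta_sum/delta_avg, so the old placement computed (and, via the earlier layout, preserved old se->avg values for) results that were simply discarded.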