sched/deadline: Make update_curr_dl() more accurate
rq->clock_task may be updated between the two calls of rq_clock_task() in
update_curr_dl(). Calling rq_clock_task() only once makes it more accurate
and efficient, taking update_curr() as reference.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Wen Yang <wen.yang99@zte.com.cn>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Jiang Biao <jiang.biao2@zte.com.cn>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: zhong.weidong@zte.com.cn
Link: http://lkml.kernel.org/r/1517882148-44599-1-git-send-email-wen.yang99@zte.com.cn
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
6a546c7e69
commit
6fe0ce1eb0
|
@ -1153,6 +1153,7 @@ static void update_curr_dl(struct rq *rq)
|
|||
struct sched_dl_entity *dl_se = &curr->dl;
|
||||
u64 delta_exec, scaled_delta_exec;
|
||||
int cpu = cpu_of(rq);
|
||||
u64 now;
|
||||
|
||||
if (!dl_task(curr) || !on_dl_rq(dl_se))
|
||||
return;
|
||||
|
@ -1165,7 +1166,8 @@ static void update_curr_dl(struct rq *rq)
|
|||
* natural solution, but the full ramifications of this
|
||||
* approach need further study.
|
||||
*/
|
||||
delta_exec = rq_clock_task(rq) - curr->se.exec_start;
|
||||
now = rq_clock_task(rq);
|
||||
delta_exec = now - curr->se.exec_start;
|
||||
if (unlikely((s64)delta_exec <= 0)) {
|
||||
if (unlikely(dl_se->dl_yielded))
|
||||
goto throttle;
|
||||
|
@ -1178,7 +1180,7 @@ static void update_curr_dl(struct rq *rq)
|
|||
curr->se.sum_exec_runtime += delta_exec;
|
||||
account_group_exec_runtime(curr, delta_exec);
|
||||
|
||||
curr->se.exec_start = rq_clock_task(rq);
|
||||
curr->se.exec_start = now;
|
||||
cgroup_account_cputime(curr, delta_exec);
|
||||
|
||||
sched_rt_avg_update(rq, delta_exec);
|
||||
|
|
Loading…
Reference in New Issue