sched/fair: Fix PELT integrity for new groups
Vincent reported that when a new task is moved into a new cgroup it
gets attached twice to the load tracking:

  sched_move_task()
    task_move_group_fair()
      detach_task_cfs_rq()
      set_task_rq()
      attach_task_cfs_rq()
        attach_entity_load_avg()
          se->avg.last_load_update = cfs_rq->avg.last_load_update // == 0

  enqueue_entity()
    enqueue_entity_load_avg()
      update_cfs_rq_load_avg()
        now = clock()
        __update_load_avg(&cfs_rq->avg)
          cfs_rq->avg.last_load_update = now
          // ages load/util for: now - 0, load/util -> 0

      if (migrated)
        attach_entity_load_avg()
          se->avg.last_load_update = cfs_rq->avg.last_load_update; // now != 0

The problem is that we don't update cfs_rq load_avg before all
entity attach/detach operations. Only enqueue_task() and migrate_task()
do this.

By fixing this, the above will not happen, because the sched_move_task()
attach will have updated cfs_rq's last_load_update time before attach,
and in turn the attach will have set the entity's last_load_update stamp.

Note that there is a further problem with sched_move_task() calling
detach on a task that hasn't yet been attached; this will be taken
care of in a subsequent patch.

Reported-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yuyang Du <yuyang.du@intel.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
e210bffd39
commit
010114739d
|
@ -3083,6 +3083,12 @@ static int idle_balance(struct rq *this_rq);
|
||||||
|
|
||||||
#else /* CONFIG_SMP */
|
#else /* CONFIG_SMP */
|
||||||
|
|
||||||
|
static inline int
|
||||||
|
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static inline void update_load_avg(struct sched_entity *se, int not_used)
|
static inline void update_load_avg(struct sched_entity *se, int not_used)
|
||||||
{
|
{
|
||||||
struct cfs_rq *cfs_rq = cfs_rq_of(se);
|
struct cfs_rq *cfs_rq = cfs_rq_of(se);
|
||||||
|
@ -8368,6 +8374,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
|
||||||
{
|
{
|
||||||
struct sched_entity *se = &p->se;
|
struct sched_entity *se = &p->se;
|
||||||
struct cfs_rq *cfs_rq = cfs_rq_of(se);
|
struct cfs_rq *cfs_rq = cfs_rq_of(se);
|
||||||
|
u64 now = cfs_rq_clock_task(cfs_rq);
|
||||||
|
|
||||||
if (!vruntime_normalized(p)) {
|
if (!vruntime_normalized(p)) {
|
||||||
/*
|
/*
|
||||||
|
@ -8379,6 +8386,7 @@ static void detach_task_cfs_rq(struct task_struct *p)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Catch up with the cfs_rq and remove our load when we leave */
|
/* Catch up with the cfs_rq and remove our load when we leave */
|
||||||
|
update_cfs_rq_load_avg(now, cfs_rq, false);
|
||||||
detach_entity_load_avg(cfs_rq, se);
|
detach_entity_load_avg(cfs_rq, se);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8386,6 +8394,7 @@ static void attach_task_cfs_rq(struct task_struct *p)
|
||||||
{
|
{
|
||||||
struct sched_entity *se = &p->se;
|
struct sched_entity *se = &p->se;
|
||||||
struct cfs_rq *cfs_rq = cfs_rq_of(se);
|
struct cfs_rq *cfs_rq = cfs_rq_of(se);
|
||||||
|
u64 now = cfs_rq_clock_task(cfs_rq);
|
||||||
|
|
||||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||||
/*
|
/*
|
||||||
|
@ -8396,6 +8405,7 @@ static void attach_task_cfs_rq(struct task_struct *p)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Synchronize task with its cfs_rq */
|
/* Synchronize task with its cfs_rq */
|
||||||
|
update_cfs_rq_load_avg(now, cfs_rq, false);
|
||||||
attach_entity_load_avg(cfs_rq, se);
|
attach_entity_load_avg(cfs_rq, se);
|
||||||
|
|
||||||
if (!vruntime_normalized(p))
|
if (!vruntime_normalized(p))
|
||||||
|
|
Loading…
Reference in New Issue