sched/core: Add missing update_rq_clock() in detach_task_cfs_rq()
Instead of adding the update_rq_clock() call all the way at the bottom of the callstack, add one at the top; this aids the later effort to minimize the number of update_rq_clock() calls.

  WARNING: CPU: 0 PID: 1 at ../kernel/sched/sched.h:797 detach_task_cfs_rq()
  rq->clock_update_flags < RQCF_ACT_SKIP
  Call Trace:
   dump_stack()
   __warn()
   warn_slowpath_fmt()
   detach_task_cfs_rq()
   switched_from_fair()
   __sched_setscheduler()
   _sched_setscheduler()
   sched_set_stop_task()
   cpu_stop_create()
   __smpboot_create_thread.part.2()
   smpboot_register_percpu_thread_cpumask()
   cpu_stop_init()
   do_one_initcall()
   ? print_cpu_info()
   kernel_init_freeable()
   ? rest_init()
   kernel_init()
   ret_from_fork()

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 80f5c1b84b
parent 4126bad671
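The warning above is raised by an assertion in kernel/sched/sched.h (line 797 in this tree) that checks rq->clock_update_flags before the runqueue clock is used. The pattern the patch establishes is: take the runqueue lock, call update_rq_clock() once at the top of the callstack, and let deeper helpers such as detach_task_cfs_rq() merely assert that the clock is fresh. As a rough illustration only, here is a minimal userspace C sketch of that pattern; the flag values and function bodies are simplified assumptions, and only the names mirror the kernel's:

/*
 * Simplified userspace sketch, not kernel code: shows why refreshing the
 * rq clock at the top of the callstack silences the assertion deeper down.
 */
#include <stdio.h>

#define RQCF_ACT_SKIP  0x02
#define RQCF_UPDATED   0x04    /* illustrative "clock is fresh" flag */

struct rq {
	unsigned int clock_update_flags;
	unsigned long long clock;
};

static void update_rq_clock(struct rq *rq)
{
	rq->clock++;                          /* stand-in for reading the real clock */
	rq->clock_update_flags |= RQCF_UPDATED;
}

/* Loosely mirrors the check at kernel/sched/sched.h:797 that fired the WARNING. */
static void assert_clock_updated(struct rq *rq)
{
	if (rq->clock_update_flags < RQCF_ACT_SKIP)
		fprintf(stderr, "WARNING: rq->clock_update_flags < RQCF_ACT_SKIP\n");
}

/* Deep helper: relies on the caller having refreshed the clock. */
static void detach_task_cfs_rq(struct rq *rq)
{
	assert_clock_updated(rq);
	/* ... detach work would happen here ... */
}

/* Top of the callstack: lock, refresh the clock once, then do the work. */
static void sched_setscheduler_like(struct rq *rq)
{
	/* task_rq_lock(p, &rf) would be taken here */
	update_rq_clock(rq);                  /* the call this commit adds */
	detach_task_cfs_rq(rq);
	/* task_rq_unlock(rq, p, &rf) */
}

int main(void)
{
	struct rq rq = { 0 };
	sched_setscheduler_like(&rq);         /* no warning: clock refreshed up front */
	return 0;
}

With the clock update hoisted to the lock site as in the diff below (rt_mutex_setprio(), __sched_setscheduler() and cpu_cgroup_fork() in kernel/sched/core.c), a later change can skip redundant clock updates inside the inner helpers, which appears to be the "later effort" the changelog refers to.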
@@ -3655,6 +3655,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	BUG_ON(prio > MAX_PRIO);
 
 	rq = __task_rq_lock(p, &rf);
+	update_rq_clock(rq);
 
 	/*
 	 * Idle task boosting is a nono in general. There is one
@@ -4183,6 +4184,7 @@ recheck:
 	 * runqueue lock must be held.
 	 */
 	rq = task_rq_lock(p, &rf);
+	update_rq_clock(rq);
 
 	/*
 	 * Changing the policy of the stop threads its a very bad idea
@@ -8435,6 +8437,7 @@ static void cpu_cgroup_fork(struct task_struct *task)
 
 	rq = task_rq_lock(task, &rf);
 
+	update_rq_clock(rq);
 	sched_change_group(task, TASK_SET_GROUP);
 
 	task_rq_unlock(rq, task, &rf);