sched/psi: Use task->psi_flags to clear in CPU migration

Commit d583d360a6 ("psi: Fix psi state corruption when schedule() races
with cgroup move") fixed a race problem by making cgroup_move_task() use
task->psi_flags instead of looking at the scheduler state.

We can extend task->psi_flags usage to CPU migration, which should be a
minor optimization for performance and code simplicity.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: https://lore.kernel.org/r/20220926081931.45420-1-zhouchengming@bytedance.com

commit 52b33d87b9
parent 710ffe671e

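For context only, not part of the patch: a minimal userspace sketch of the bookkeeping this change simplifies. The TSK_* values below mirror the task state bits defined in include/linux/psi_types.h at the time of this series; struct toy_task, clear_mask_old() and clear_mask_new() are hypothetical stand-ins for illustration. The idea is that p->psi_flags already records exactly which PSI state bits a task has registered, so the migration paths can clear p->psi_flags wholesale instead of re-deriving a clear mask from p->in_iowait / p->in_memstall.

/*
 * Standalone model (plain C, not kernel code) of the flag bookkeeping
 * being simplified.  TSK_* values mirror include/linux/psi_types.h;
 * toy_task and the clear_mask_*() helpers are illustrative only.
 */
#include <assert.h>
#include <stdio.h>

#define TSK_IOWAIT	(1 << 0)
#define TSK_MEMSTALL	(1 << 1)
#define TSK_RUNNING	(1 << 2)

struct toy_task {
	unsigned int psi_flags;		/* PSI state bits currently set for this task */
	unsigned int in_iowait:1;
	unsigned int in_memstall:1;
};

/* Old scheme: rebuild the clear mask from individual task fields. */
static unsigned int clear_mask_old(const struct toy_task *p)
{
	unsigned int clear = 0;

	if (p->in_iowait)
		clear |= TSK_IOWAIT;
	if (p->in_memstall)
		clear |= TSK_MEMSTALL;
	return clear;
}

/* New scheme: psi_flags already holds exactly the bits to clear. */
static unsigned int clear_mask_new(const struct toy_task *p)
{
	return p->psi_flags;
}

int main(void)
{
	/* A task that went to sleep in iowait while in memstall. */
	struct toy_task p = {
		.psi_flags = TSK_IOWAIT | TSK_MEMSTALL,
		.in_iowait = 1,
		.in_memstall = 1,
	};

	/* For the sleep-persistent states the two masks agree... */
	assert(clear_mask_old(&p) == clear_mask_new(&p));

	/* ...but only psi_flags needs no per-flag recomputation. */
	printf("clear mask: %#x\n", clear_mask_new(&p));
	return 0;
}

Built as an ordinary C program, the sketch prints a clear mask of 0x3 for a task sleeping in iowait while in memstall, the same mask either way; the difference is purely that psi_flags needs no per-flag bookkeeping such as sched_psi_wake_requeue.
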
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -888,9 +888,6 @@ struct task_struct {
 	unsigned			sched_reset_on_fork:1;
 	unsigned			sched_contributes_to_load:1;
 	unsigned			sched_migrated:1;
-#ifdef CONFIG_PSI
-	unsigned			sched_psi_wake_requeue:1;
-#endif
 
 	/* Force alignment to the next boundary: */
 	unsigned			:0;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2053,7 +2053,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!(flags & ENQUEUE_RESTORE)) {
 		sched_info_enqueue(rq, p);
-		psi_enqueue(p, flags & ENQUEUE_WAKEUP);
+		psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED));
 	}
 
 	uclamp_rq_inc(rq, p);
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -128,11 +128,9 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 	if (p->in_memstall)
 		set |= TSK_MEMSTALL_RUNNING;
 
-	if (!wakeup || p->sched_psi_wake_requeue) {
+	if (!wakeup) {
 		if (p->in_memstall)
 			set |= TSK_MEMSTALL;
-		if (p->sched_psi_wake_requeue)
-			p->sched_psi_wake_requeue = 0;
 	} else {
 		if (p->in_iowait)
 			clear |= TSK_IOWAIT;
@@ -143,8 +141,6 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 
 static inline void psi_dequeue(struct task_struct *p, bool sleep)
 {
-	int clear = TSK_RUNNING;
-
 	if (static_branch_likely(&psi_disabled))
 		return;
 
@@ -157,10 +153,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 	if (sleep)
 		return;
 
-	if (p->in_memstall)
-		clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING);
-
-	psi_task_change(p, clear, 0);
+	psi_task_change(p, p->psi_flags, 0);
 }
 
 static inline void psi_ttwu_dequeue(struct task_struct *p)
@@ -172,19 +165,12 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
 	 * deregister its sleep-persistent psi states from the old
 	 * queue, and let psi_enqueue() know it has to requeue.
 	 */
-	if (unlikely(p->in_iowait || p->in_memstall)) {
+	if (unlikely(p->psi_flags)) {
 		struct rq_flags rf;
 		struct rq *rq;
-		int clear = 0;
-
-		if (p->in_iowait)
-			clear |= TSK_IOWAIT;
-		if (p->in_memstall)
-			clear |= TSK_MEMSTALL;
 
 		rq = __task_rq_lock(p, &rf);
-		psi_task_change(p, clear, 0);
-		p->sched_psi_wake_requeue = 1;
+		psi_task_change(p, p->psi_flags, 0);
 		__task_rq_unlock(rq, &rf);
 	}
 }