sched/core: s/WF_ON_RQ/WF_ON_CPU/

Use a better name for this poorly named flag, to avoid confusion...

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Link: https://lkml.kernel.org/r/20200622100825.785115830@infradead.org
Peter Zijlstra, 2020-06-22 12:01:24 +02:00, committed by Borislav Petkov
parent b6e13e8582
commit 739f70b476
2 changed files with 3 additions and 3 deletions

kernel/sched/core.c

@@ -2376,7 +2376,7 @@ static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 	 * the soon-to-be-idle CPU as the current CPU is likely busy.
 	 * nr_running is checked to avoid unnecessary task stacking.
 	 */
-	if ((wake_flags & WF_ON_RQ) && cpu_rq(cpu)->nr_running <= 1)
+	if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
 		return true;
 
 	return false;
@@ -2636,7 +2636,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	 * scheduling.
 	 */
 	if (smp_load_acquire(&p->on_cpu) &&
-	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_RQ))
+	    ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
 		goto unlock;
 
 	/*

kernel/sched/sched.h

@@ -1682,7 +1682,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 #define WF_SYNC			0x01		/* Waker goes to sleep after wakeup */
 #define WF_FORK			0x02		/* Child wakeup after fork */
 #define WF_MIGRATED		0x04		/* Internal use, task got migrated */
-#define WF_ON_RQ		0x08		/* Wakee is on_rq */
+#define WF_ON_CPU		0x08		/* Wakee is on_cpu */
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution