sched/core: Reorganize ttwu_do_wakeup() and ttwu_do_activate()
ttwu_do_activate() is used for a complete wakeup, in which we
activate_task() the task and then use ttwu_do_wakeup() to mark it
runnable and perform wakeup-preemption, call the class->task_woken()
callback and update rq->idle_stamp.

Since ttwu_runnable() is not a complete wakeup, it doesn't need all of
the work done in ttwu_do_wakeup(). So move that work into
ttwu_do_activate(), leaving ttwu_do_wakeup() to only mark the task
runnable, so it can be reused in ttwu_runnable() and try_to_wake_up().

This patch should not have any functional changes.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20221223103257.4962-2-zhouchengming@bytedance.com
commit 160fb0d83f
parent efe0938586
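In short, the patch leaves the two helpers shaped as in the following
condensed sketch. This is a paraphrase of the hunks below, not part of
the commit; the load-accounting, migration and iowait details elided
here appear in full in the diff:

/* Mark the task runnable: the one step shared by every wakeup path. */
static inline void ttwu_do_wakeup(struct task_struct *p)
{
	WRITE_ONCE(p->__state, TASK_RUNNING);
	trace_sched_wakeup(p);
}

/* A complete wakeup: enqueue, wakeup-preempt, then mark runnable. */
static void
ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
		 struct rq_flags *rf)
{
	/* en_flags also picks up ENQUEUE_MIGRATED on SMP, see the diff. */
	activate_task(rq, p, ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK);
	check_preempt_curr(rq, p, wake_flags);
	ttwu_do_wakeup(p);
	/* ... then the class->task_woken() callback and rq->idle_stamp update. */
}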
@@ -3625,14 +3625,39 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 }
 
 /*
- * Mark the task runnable and perform wakeup-preemption.
+ * Mark the task runnable.
  */
-static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
-			   struct rq_flags *rf)
+static inline void ttwu_do_wakeup(struct task_struct *p)
 {
-	check_preempt_curr(rq, p, wake_flags);
 	WRITE_ONCE(p->__state, TASK_RUNNING);
 	trace_sched_wakeup(p);
+}
+
+static void
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
+		 struct rq_flags *rf)
+{
+	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
+
+	lockdep_assert_rq_held(rq);
+
+	if (p->sched_contributes_to_load)
+		rq->nr_uninterruptible--;
+
+#ifdef CONFIG_SMP
+	if (wake_flags & WF_MIGRATED)
+		en_flags |= ENQUEUE_MIGRATED;
+	else
+#endif
+	if (p->in_iowait) {
+		delayacct_blkio_end(p);
+		atomic_dec(&task_rq(p)->nr_iowait);
+	}
+
+	activate_task(rq, p, en_flags);
+	check_preempt_curr(rq, p, wake_flags);
+
+	ttwu_do_wakeup(p);
 
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_woken) {
@@ -3662,31 +3687,6 @@ static void ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags,
 #endif
 }
 
-static void
-ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
-		 struct rq_flags *rf)
-{
-	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
-
-	lockdep_assert_rq_held(rq);
-
-	if (p->sched_contributes_to_load)
-		rq->nr_uninterruptible--;
-
-#ifdef CONFIG_SMP
-	if (wake_flags & WF_MIGRATED)
-		en_flags |= ENQUEUE_MIGRATED;
-	else
-#endif
-	if (p->in_iowait) {
-		delayacct_blkio_end(p);
-		atomic_dec(&task_rq(p)->nr_iowait);
-	}
-
-	activate_task(rq, p, en_flags);
-	ttwu_do_wakeup(rq, p, wake_flags, rf);
-}
-
 /*
  * Consider @p being inside a wait loop:
  *
@@ -3728,8 +3728,7 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags)
 			update_rq_clock(rq);
 			check_preempt_curr(rq, p, wake_flags);
 		}
-		WRITE_ONCE(p->__state, TASK_RUNNING);
-		trace_sched_wakeup(p);
+		ttwu_do_wakeup(p);
 		ret = 1;
 	}
 	__task_rq_unlock(rq, &rf);
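For context, with this hunk applied ttwu_runnable() reads roughly as
follows. This is a reconstruction around the diff context above, not
text from the commit; the locking helpers (__task_rq_lock(),
task_on_cpu()) are the kernel's existing ones and may differ slightly
in the tree this applies to:

/*
 * The still-queued fast path: if @p never left the runqueue there is
 * nothing to enqueue; at most check for preemption, then mark runnable.
 */
static int ttwu_runnable(struct task_struct *p, int wake_flags)
{
	struct rq_flags rf;
	struct rq *rq;
	int ret = 0;

	rq = __task_rq_lock(p, &rf);
	if (task_on_rq_queued(p)) {
		if (!task_on_cpu(rq, p)) {
			/* @p is preempted; see if it should preempt current. */
			update_rq_clock(rq);
			check_preempt_curr(rq, p, wake_flags);
		}
		ttwu_do_wakeup(p);
		ret = 1;
	}
	__task_rq_unlock(rq, &rf);

	return ret;
}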
@@ -4095,8 +4094,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 			goto out;
 
 		trace_sched_waking(p);
-		WRITE_ONCE(p->__state, TASK_RUNNING);
-		trace_sched_wakeup(p);
+		ttwu_do_wakeup(p);
 		goto out;
 	}
 