sched/deadline: Add dl_task_is_earliest_deadline helper
Wrap the repeated check in a helper function, dl_task_is_earliest_deadline(), which returns true if there is no deadline task on the rq at all, or if the task's deadline is earlier than that of every deadline task on the rq.

Signed-off-by: Shang XiaoJing <shangxiaojing@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Link: https://lore.kernel.org/r/20220826083453.698-1-shangxiaojing@huawei.com
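For reference, a self-contained sketch of what the helper checks. The *_stub types and flattened field names below are illustrative stand-ins, not the kernel's structures; in the patch the real fields are rq->dl.dl_nr_running, rq->dl.earliest_dl.curr and p->dl.deadline.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the fields the check depends on. */
struct dl_rq_stub {
	unsigned int dl_nr_running;	/* number of queued deadline tasks */
	uint64_t earliest_dl_curr;	/* earliest deadline currently on the rq */
};

struct dl_task_stub {
	uint64_t deadline;		/* this task's absolute deadline */
};

/* Wrap-safe "a is before b" comparison, mirroring the kernel's dl_time_before(). */
static bool dl_time_before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

/*
 * True when the task would be the earliest-deadline task on the rq:
 * either the rq has no deadline tasks queued at all, or the task's
 * deadline is earlier than the rq's current earliest deadline.
 */
static bool dl_task_is_earliest_deadline(const struct dl_task_stub *p,
					 const struct dl_rq_stub *rq)
{
	return !rq->dl_nr_running ||
	       dl_time_before(p->deadline, rq->earliest_dl_curr);
}

Callers such as select_task_rq_dl(), find_lock_later_rq() and pull_dl_task() previously open-coded this two-part check; the patch below replaces each open-coded instance with a call to the helper.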
commit 973bee493a
parent 53aa930dc4
kernel/sched/deadline.c

@@ -1810,6 +1810,14 @@ static void yield_task_dl(struct rq *rq)
 
 #ifdef CONFIG_SMP
 
+static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
+						struct rq *rq)
+{
+	return (!rq->dl.dl_nr_running ||
+		dl_time_before(p->dl.deadline,
+			       rq->dl.earliest_dl.curr));
+}
+
 static int find_later_rq(struct task_struct *task);
 
 static int
@@ -1852,9 +1860,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int flags)
 		int target = find_later_rq(p);
 
 		if (target != -1 &&
-		    (dl_time_before(p->dl.deadline,
-				    cpu_rq(target)->dl.earliest_dl.curr) ||
-		     (cpu_rq(target)->dl.dl_nr_running == 0)))
+		    dl_task_is_earliest_deadline(p, cpu_rq(target)))
 			cpu = target;
 	}
 	rcu_read_unlock();
@@ -2221,9 +2227,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 
 		later_rq = cpu_rq(cpu);
 
-		if (later_rq->dl.dl_nr_running &&
-		    !dl_time_before(task->dl.deadline,
-				    later_rq->dl.earliest_dl.curr)) {
+		if (!dl_task_is_earliest_deadline(task, later_rq)) {
 			/*
 			 * Target rq has tasks of equal or earlier deadline,
 			 * retrying does not release any lock and is unlikely
@@ -2251,9 +2255,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 			 * its earliest one has a later deadline than our
 			 * task, the rq is a good one.
 			 */
-			if (!later_rq->dl.dl_nr_running ||
-			    dl_time_before(task->dl.deadline,
-					   later_rq->dl.earliest_dl.curr))
+			if (dl_task_is_earliest_deadline(task, later_rq))
 				break;
 
 			/* Otherwise we try again. */
@@ -2424,9 +2426,7 @@ static void pull_dl_task(struct rq *this_rq)
 		 *  - it will preempt the last one we pulled (if any).
 		 */
 		if (p && dl_time_before(p->dl.deadline, dmin) &&
-		    (!this_rq->dl.dl_nr_running ||
-		     dl_time_before(p->dl.deadline,
-				    this_rq->dl.earliest_dl.curr))) {
+		    dl_task_is_earliest_deadline(p, this_rq)) {
 			WARN_ON(p == src_rq->curr);
 			WARN_ON(!task_on_rq_queued(p));
 