sched/fair: Rearrange select_task_rq_fair() to optimize it
Rearrange select_task_rq_fair() a bit to avoid executing some conditional statements in a few specific code paths. That also gets rid of the goto. This shouldn't result in any functional changes.

Tested-by: Rohit Jain <rohit.k.jain@oracle.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Link: http://lkml.kernel.org/r/20831b8d237bf3a20e4e328286f678b425ff04c9.1524738578.git.viresh.kumar@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit f1d88b4468 (parent b5bf9a90bb)
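For orientation before the diff, here is a minimal standalone C sketch (not kernel code) of the control flow this change produces: the wake-affine decision is taken inside the sched-domain loop, and the tail of the function collapses into a single slow-path/fast-path if/else-if with no goto. The flag values, the pick_cpu_sketch() wrapper and the stub_* helpers below are illustrative stand-ins for the real wake_affine(), find_idlest_cpu() and select_idle_sibling().

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in flag values; the real ones live in the kernel headers. */
#define SD_BALANCE_WAKE  0x01

/* Stubs standing in for wake_affine(), find_idlest_cpu(), select_idle_sibling(). */
static int stub_wake_affine(int cpu, int prev_cpu)            { return cpu; }
static int stub_find_idlest_cpu(int cpu)                      { return cpu; }
static int stub_select_idle_sibling(int prev_cpu, int target) { return target; }

static int pick_cpu_sketch(int cpu, int prev_cpu, int sd_flag,
			   bool want_affine, bool wake_affine_domain,
			   bool balance_domain)
{
	bool sd = balance_domain;	/* models the 'sd' pointer left by the domain loop */
	int new_cpu = prev_cpu;

	/* Was "affine_sd = tmp;" plus post-loop handling via a goto; now done in place. */
	if (want_affine && wake_affine_domain) {
		if (cpu != prev_cpu)
			new_cpu = stub_wake_affine(cpu, prev_cpu);
		sd = false;		/* prefer wake_affine over balance flags */
	}

	if (sd) {
		/*
		 * Slow path: the real code syncs the task's load average here
		 * (unless this is a fork balance) and calls find_idlest_cpu().
		 */
		new_cpu = stub_find_idlest_cpu(cpu);
	} else if (sd_flag & SD_BALANCE_WAKE) {
		/* Fast path: select_idle_sibling(), no goto needed any more. */
		new_cpu = stub_select_idle_sibling(prev_cpu, new_cpu);
	}

	return new_cpu;
}

int main(void)
{
	/* Waking on CPU 0 a task that last ran on CPU 2, with a wake-affine domain. */
	printf("picked CPU %d\n", pick_cpu_sketch(0, 2, SD_BALANCE_WAKE, true, true, true));
	return 0;
}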
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6613,7 +6613,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 static int
 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
 {
-	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
+	struct sched_domain *tmp, *sd = NULL;
 	int cpu = smp_processor_id();
 	int new_cpu = prev_cpu;
 	int want_affine = 0;
@@ -6636,7 +6636,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		 */
 		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
 		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
-			affine_sd = tmp;
+			if (cpu != prev_cpu)
+				new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync);
+
+			sd = NULL; /* Prefer wake_affine over balance flags */
 			break;
 		}
 
@@ -6646,33 +6649,25 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 			break;
 	}
 
-	if (affine_sd) {
-		sd = NULL; /* Prefer wake_affine over balance flags */
-		if (cpu == prev_cpu)
-			goto pick_cpu;
-
-		new_cpu = wake_affine(affine_sd, p, cpu, prev_cpu, sync);
-	}
-
-	if (sd && !(sd_flag & SD_BALANCE_FORK)) {
+	if (unlikely(sd)) {
+		/* Slow path */
+
 		/*
 		 * We're going to need the task's util for capacity_spare_wake
 		 * in find_idlest_group. Sync it up to prev_cpu's
 		 * last_update_time.
 		 */
-		sync_entity_load_avg(&p->se);
-	}
+		if (!(sd_flag & SD_BALANCE_FORK))
+			sync_entity_load_avg(&p->se);
 
-	if (!sd) {
-pick_cpu:
-		if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
-			new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
-
-			if (want_affine)
-				current->recent_used_cpu = cpu;
-		}
-	} else {
 		new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
+	} else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */
+		/* Fast path */
+
+		new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
+
+		if (want_affine)
+			current->recent_used_cpu = cpu;
 	}
 	rcu_read_unlock();
 