sched/fair: Restructure wake_affine*() to return a CPU id
This is a preparation patch that has wake_affine*() return a CPU ID
instead of a boolean. The intent is to allow the wake_affine() helpers
to be avoided if a decision is already made. This patch has no
functional change.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180130104555.4125-3-mgorman@techsingularity.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 89a55f56fd
commit 3b76c4a339
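Before the diff itself, a minimal user-space sketch of the convention
the patch introduces. This is an illustration under assumed names
(NO_CPU, cpu_is_idle, pick_affine_cpu and choose_wake_cpu are
stand-ins, not kernel APIs), not the kernel code: a helper returns
either a concrete CPU id or a sentinel one past the last valid id (the
kernel uses nr_cpumask_bits for this), and the top-level routine
resolves the sentinel, so the call site reduces to a single
assignment.

/*
 * Toy model of the "CPU id or sentinel" return convention.
 * All names are illustrative, not kernel APIs.
 */
#include <stdio.h>

#define NR_CPUS	8
#define NO_CPU	NR_CPUS		/* sentinel: one past the last valid CPU id */

/* Stand-in for idle_cpu(): pretend CPU 2 is the only idle CPU. */
static int cpu_is_idle(int cpu)
{
	return cpu == 2;
}

/*
 * Analogue of wake_affine_idle(): return a CPU id when a decision
 * is made, NO_CPU when it is not.
 */
static int pick_affine_cpu(int this_cpu, int prev_cpu)
{
	(void)prev_cpu;		/* unused in this toy */

	if (cpu_is_idle(this_cpu))
		return this_cpu;
	return NO_CPU;
}

/*
 * Analogue of wake_affine(): the sentinel is resolved to prev_cpu
 * here, so the caller can assign the result unconditionally.
 */
static int choose_wake_cpu(int this_cpu, int prev_cpu)
{
	int target = pick_affine_cpu(this_cpu, prev_cpu);

	if (target == NO_CPU)
		return prev_cpu;
	return target;
}

int main(void)
{
	/*
	 * Old style needed two steps: if (affine) new_cpu = cpu;
	 * New style is one direct assignment.
	 */
	int new_cpu = choose_wake_cpu(2, 5);

	printf("wake task on CPU %d\n", new_cpu);	/* prints 2 */
	return 0;
}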
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5692,7 +5692,7 @@ static int wake_wide(struct task_struct *p)
  * scheduling latency of the CPUs. This seems to work
  * for the overloaded case.
  */
-static bool
+static int
 wake_affine_idle(int this_cpu, int prev_cpu, int sync)
 {
 	/*
@@ -5702,15 +5702,15 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync)
 	 * node depending on the IO topology or IRQ affinity settings.
 	 */
 	if (idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu))
-		return true;
+		return this_cpu;
 
 	if (sync && cpu_rq(this_cpu)->nr_running == 1)
-		return true;
+		return this_cpu;
 
-	return false;
+	return nr_cpumask_bits;
 }
 
-static bool
+static int
 wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 		   int this_cpu, int prev_cpu, int sync)
 {
@@ -5724,7 +5724,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 		unsigned long current_load = task_h_load(current);
 
 		if (current_load > this_eff_load)
-			return true;
+			return this_cpu;
 
 		this_eff_load -= current_load;
 	}
@@ -5741,28 +5741,28 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
 	prev_eff_load *= capacity_of(this_cpu);
 
-	return this_eff_load <= prev_eff_load;
+	return this_eff_load <= prev_eff_load ? this_cpu : nr_cpumask_bits;
 }
 
 static int wake_affine(struct sched_domain *sd, struct task_struct *p,
 		       int prev_cpu, int sync)
 {
 	int this_cpu = smp_processor_id();
-	bool affine = false;
+	int target = nr_cpumask_bits;
 
 	if (sched_feat(WA_IDLE))
-		affine = wake_affine_idle(this_cpu, prev_cpu, sync);
+		target = wake_affine_idle(this_cpu, prev_cpu, sync);
 
-	if (sched_feat(WA_WEIGHT) && !affine)
-		affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
+	if (sched_feat(WA_WEIGHT) && target == nr_cpumask_bits)
+		target = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync);
 
 	schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts);
-	if (affine) {
-		schedstat_inc(sd->ttwu_move_affine);
-		schedstat_inc(p->se.statistics.nr_wakeups_affine);
-	}
+	if (target == nr_cpumask_bits)
+		return prev_cpu;
 
-	return affine;
+	schedstat_inc(sd->ttwu_move_affine);
+	schedstat_inc(p->se.statistics.nr_wakeups_affine);
+	return target;
 }
 
 static inline unsigned long task_util(struct task_struct *p);
@@ -6355,8 +6355,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		if (cpu == prev_cpu)
 			goto pick_cpu;
 
-		if (wake_affine(affine_sd, p, prev_cpu, sync))
-			new_cpu = cpu;
+		new_cpu = wake_affine(affine_sd, p, prev_cpu, sync);
 	}
 
 	if (sd && !(sd_flag & SD_BALANCE_FORK)) {
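A note on the sentinel choice: nr_cpumask_bits is one past the largest
valid CPU index (NR_CPUS, or nr_cpu_ids when CONFIG_CPUMASK_OFFSTACK
is set), so it can never collide with a real CPU id and no separate
"valid" flag is needed. The net effect at the call site, restated from
the final hunk:

	/* old: boolean result, the caller re-derives the target CPU */
	if (wake_affine(affine_sd, p, prev_cpu, sync))
		new_cpu = cpu;

	/* new: the helper returns the CPU id, falling back to prev_cpu */
	new_cpu = wake_affine(affine_sd, p, prev_cpu, sync);

Folding the prev_cpu fallback into wake_affine() is what allows the
helpers to be skipped later when a placement decision has already been
made, as the commit message states.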