sched/fair: Fix unnecessary increase of balance interval

In the case of active balancing, we increase the balance interval to cover
pinned task cases not covered by the all_pinned logic. Nevertheless, an
active migration triggered by asym packing should be treated like the normal
unbalanced case and reset the interval to its default value; otherwise,
active migration for asym_packing can easily be delayed for hundreds of ms
because of this pinned task detection mechanism.

The same applies to the other conditions tested in need_active_balance(),
such as a misfit task or the capacity of src_cpu being reduced compared to
dst_cpu (see the comments in need_active_balance() for details).

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: valentin.schneider@arm.com
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Vincent Guittot 2018-12-14 17:01:57 +01:00 committed by Ingo Molnar
parent 4ad4e481bd
commit 46a745d905
1 changed file with 27 additions and 13 deletions

kernel/sched/fair.c

@@ -8826,22 +8826,26 @@ static struct rq *find_busiest_queue(struct lb_env *env,
  */
 #define MAX_PINNED_INTERVAL	512
 
-static int need_active_balance(struct lb_env *env)
+static inline bool
+asym_active_balance(struct lb_env *env)
 {
-	struct sched_domain *sd = env->sd;
-
-	if (env->idle != CPU_NOT_IDLE) {
-
-		/*
-		 * ASYM_PACKING needs to force migrate tasks from busy but
-		 * lower priority CPUs in order to pack all tasks in the
-		 * highest priority CPUs.
-		 */
-		if ((sd->flags & SD_ASYM_PACKING) &&
-		    sched_asym_prefer(env->dst_cpu, env->src_cpu))
-			return 1;
-	}
+	/*
+	 * ASYM_PACKING needs to force migrate tasks from busy but
+	 * lower priority CPUs in order to pack all tasks in the
+	 * highest priority CPUs.
+	 */
+	return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
+	       sched_asym_prefer(env->dst_cpu, env->src_cpu);
+}
+
+static inline bool
+voluntary_active_balance(struct lb_env *env)
+{
+	struct sched_domain *sd = env->sd;
+
+	if (asym_active_balance(env))
+		return 1;
 
 	/*
 	 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
 	 * It's worth migrating the task if the src_cpu's capacity is reduced
@@ -8858,6 +8862,16 @@ static int need_active_balance(struct lb_env *env)
 	if (env->src_grp_type == group_misfit_task)
 		return 1;
 
+	return 0;
+}
+
+static int need_active_balance(struct lb_env *env)
+{
+	struct sched_domain *sd = env->sd;
+
+	if (voluntary_active_balance(env))
+		return 1;
+
 	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
 }
 
@@ -9119,7 +9133,7 @@ more_balance:
 	} else
 		sd->nr_balance_failed = 0;
 
-	if (likely(!active_balance)) {
+	if (likely(!active_balance) || voluntary_active_balance(&env)) {
 		/* We were unbalanced, so reset the balancing interval */
 		sd->balance_interval = sd->min_interval;
 	} else {
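
To see concretely how the pinned-task back-off delays active migration, here is a standalone toy model of the pre-patch interval update at the end of load_balance(). This is not kernel code: struct toy_sched_domain, update_interval_prepatch() and the 8 ms min_interval are illustrative assumptions; only the 512 ms cap mirrors MAX_PINNED_INTERVAL from the hunk above.

#include <stdio.h>

/*
 * Toy stand-in for the sched_domain fields involved here (the real
 * struct lives in include/linux/sched/topology.h); all values in ms.
 */
struct toy_sched_domain {
	unsigned long min_interval;
	unsigned long max_interval;
	unsigned long balance_interval;
};

/*
 * Sketch of the pre-patch logic shown in the last hunk: any active
 * balance is treated as a suspected pinned-task case and doubles the
 * interval; only a plain (non-active) balance resets it.
 */
static void update_interval_prepatch(struct toy_sched_domain *sd,
				     int active_balance)
{
	if (!active_balance)
		sd->balance_interval = sd->min_interval;
	else if (sd->balance_interval < sd->max_interval)
		sd->balance_interval *= 2;
}

int main(void)
{
	struct toy_sched_domain sd = { 8, 512, 8 };

	/*
	 * Seven consecutive active-balance rounds (e.g. repeated
	 * asym_packing migrations): the interval climbs from 8 ms to
	 * the 512 ms cap, so the next balance attempt can be delayed
	 * by hundreds of ms - the symptom the patch fixes.
	 */
	for (int round = 1; round <= 7; round++) {
		update_interval_prepatch(&sd, 1);
		printf("round %d: balance_interval = %lu ms\n",
		       round, sd.balance_interval);
	}
	return 0;
}

With the patch applied, the voluntary cases (asym packing, misfit task, reduced src_cpu capacity) take the reset branch via voluntary_active_balance(&env), so only a genuine pinned-task suspicion (nr_balance_failed exceeding cache_nice_tries+2) still grows the interval.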