sched/numa: Rename nr_running and break out the magic number

This is simply a preparation patch to make the following patches easier
to read. No functional change.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20201120090630.3286-2-mgorman@techsingularity.net
Author:    Mel Gorman, 2020-11-20 09:06:27 +00:00
Committer: Peter Zijlstra
Parent:    74d862b682
Commit:    abeae76a47
1 changed file with 6 additions and 4 deletions

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c

@@ -1559,7 +1559,7 @@ struct task_numa_env {
 static unsigned long cpu_load(struct rq *rq);
 static unsigned long cpu_runnable(struct rq *rq);
 static unsigned long cpu_util(int cpu);
-static inline long adjust_numa_imbalance(int imbalance, int nr_running);
+static inline long adjust_numa_imbalance(int imbalance, int dst_running);
 
 static inline enum
 numa_type numa_classify(unsigned int imbalance_pct,
@@ -8991,7 +8991,9 @@ next_group:
 	}
 }
 
-static inline long adjust_numa_imbalance(int imbalance, int nr_running)
+#define NUMA_IMBALANCE_MIN 2
+
+static inline long adjust_numa_imbalance(int imbalance, int dst_running)
 {
 	unsigned int imbalance_min;
 
@@ -8999,8 +9001,8 @@ static inline long adjust_numa_imbalance(int imbalance, int nr_running)
 	 * Allow a small imbalance based on a simple pair of communicating
 	 * tasks that remain local when the source domain is almost idle.
 	 */
-	imbalance_min = 2;
-	if (nr_running <= imbalance_min)
+	imbalance_min = NUMA_IMBALANCE_MIN;
+	if (dst_running <= imbalance_min)
 		return 0;
 
 	return imbalance;
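
For reference, below is a sketch of the helper as it reads after this patch, wrapped in a small standalone userspace harness so its behaviour can be checked outside the kernel. The function body and NUMA_IMBALANCE_MIN are taken from the hunks above; the #include, main() and printf() calls are illustrative additions only and are not part of the change.

#include <stdio.h>

#define NUMA_IMBALANCE_MIN 2

/* Copy of the renamed helper: ignore the reported imbalance while the
 * destination domain is running no more than NUMA_IMBALANCE_MIN tasks. */
static inline long adjust_numa_imbalance(int imbalance, int dst_running)
{
	unsigned int imbalance_min;

	/*
	 * Allow a small imbalance based on a simple pair of communicating
	 * tasks that remain local when the source domain is almost idle.
	 */
	imbalance_min = NUMA_IMBALANCE_MIN;
	if (dst_running <= imbalance_min)
		return 0;

	return imbalance;
}

int main(void)
{
	/* dst_running == 2: the small imbalance is tolerated, prints 0. */
	printf("%ld\n", adjust_numa_imbalance(3, 2));
	/* dst_running == 4: the imbalance is returned unchanged, prints 3. */
	printf("%ld\n", adjust_numa_imbalance(3, 4));
	return 0;
}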