Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Two load-balancing fixes for cgroups-intense workloads"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix calc_cfs_shares() fixed point arithmetics width confusion
  sched/fair: Fix effective_load() to consistently use smoothed load
commit 369da7fc6d
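The calc_cfs_shares() fix is about mixed fixed-point widths: on 64-bit kernels with increased load resolution, cfs_rq->load.weight carries SCHED_FIXEDPOINT_SHIFT extra bits, while tg->load_avg and tg_load_avg_contrib are tracked at the lower resolution, so the old code divided quantities measured in different units. The following is a minimal standalone sketch of that mismatch, not kernel code; the constants are redefined locally and the numbers are hypothetical, chosen only to make the skew visible.

#include <stdio.h>

/* Local stand-ins for the kernel's 64-bit fixed-point scaling:
 * weights get SCHED_FIXEDPOINT_SHIFT extra bits, load averages do not. */
#define SCHED_FIXEDPOINT_SHIFT	10
#define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
#define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)

int main(void)
{
	long tg_shares = 1024;			/* group's configured shares */
	long nice0 = 1024;			/* nice-0 weight at load_avg resolution */
	long load_weight = scale_load(nice0);	/* local rq: one nice-0 task, high-res weight */
	long remote_avg = 3 * nice0;		/* summed load_avg of three remote rqs, low-res */

	/* Old arithmetic: high-res load.weight folded into a low-res sum. */
	long tg_weight_old = remote_avg + load_weight;
	long shares_old = tg_shares * load_weight / tg_weight_old;

	/* Fixed arithmetic: scale the local weight down first, so both
	 * operands of the divide use the same resolution. */
	long load = scale_load_down(load_weight);
	long tg_weight_new = remote_avg + load;
	long shares_new = tg_shares * load / tg_weight_new;

	printf("old shares: %ld, fixed shares: %ld\n", shares_old, shares_new);
	return 0;
}

With four nice-0 tasks spread over four runqueues, the old arithmetic hands the local runqueue nearly the whole group weight, while the scaled-down version yields the expected quarter.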
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
 	}
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2499,28 +2497,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
-{
-	long tg_weight;
-
-	/*
-	 * Use this CPU's real-time load instead of the last load contribution
-	 * as the updating of the contribution is delayed, and we will use the
-	 * the real-time load to calc the share. See update_tg_load_avg().
-	 */
-	tg_weight = atomic_long_read(&tg->load_avg);
-	tg_weight -= cfs_rq->tg_load_avg_contrib;
-	tg_weight += cfs_rq->load.weight;
-
-	return tg_weight;
-}
-
 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
 	long tg_weight, load, shares;
 
-	tg_weight = calc_tg_weight(tg, cfs_rq);
-	load = cfs_rq->load.weight;
+	/*
+	 * This really should be: cfs_rq->avg.load_avg, but instead we use
+	 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+	 * the shares for small weight interactive tasks.
+	 */
+	load = scale_load_down(cfs_rq->load.weight);
+
+	tg_weight = atomic_long_read(&tg->load_avg);
+
+	/* Ensure tg_weight >= load */
+	tg_weight -= cfs_rq->tg_load_avg_contrib;
+	tg_weight += load;
 
 	shares = (tg->shares * load);
 	if (tg_weight)
@@ -2539,6 +2531,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 	return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
 			    unsigned long weight)
 {
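Taken together, the calc_cfs_shares() hunks reduce the computation to shares_i = tg->shares * load_i / (sum_j load_avg_j - contrib_i + load_i), with load_i = scale_load_down(cfs_rq->load.weight), so every operand now sits at load_avg resolution. Below is a standalone paraphrase in plain longs, a sketch rather than a drop-in; the final clipping is not visible in the truncated hunks above and mirrors what the full function does after the lines shown, with MIN_SHARES_STANDIN a local stand-in for the kernel's MIN_SHARES.

/* Standalone paraphrase of the fixed shares computation; plain longs stand
 * in for the kernel types. my_load is the already scaled-down local weight,
 * i.e. scale_load_down(cfs_rq->load.weight). */
#define MIN_SHARES_STANDIN	2

static long group_shares(long tg_shares, long tg_load_avg,
			 long my_contrib, long my_load)
{
	long tg_weight, shares;

	tg_weight = tg_load_avg;
	tg_weight -= my_contrib;	/* drop this rq's stale contribution */
	tg_weight += my_load;		/* ...so tg_weight >= my_load */

	shares = tg_shares * my_load;
	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES_STANDIN)
		shares = MIN_SHARES_STANDIN;
	if (shares > tg_shares)
		shares = tg_shares;

	return shares;
}

With my_load already at the same resolution as the load averages, the ratio can no longer be skewed by the extra fixed-point bits of load.weight.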
@@ -4946,19 +4939,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 		return wl;
 
 	for_each_sched_entity(se) {
-		long w, W;
+		struct cfs_rq *cfs_rq = se->my_q;
+		long W, w = cfs_rq_load_avg(cfs_rq);
 
-		tg = se->my_q->tg;
+		tg = cfs_rq->tg;
 
 		/*
 		 * W = @wg + \Sum rw_j
 		 */
-		W = wg + calc_tg_weight(tg, se->my_q);
+		W = wg + atomic_long_read(&tg->load_avg);
+
+		/* Ensure \Sum rw_j >= rw_i */
+		W -= cfs_rq->tg_load_avg_contrib;
+		W += w;
 
 		/*
 		 * w = rw_i + @wl
 		 */
-		w = cfs_rq_load_avg(se->my_q) + wl;
+		w += wl;
 
 		/*
 		 * wl = S * s'_i; see (2)
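The effective_load() change applies the same pattern one level at a time while walking up the group hierarchy: W is rebuilt from tg->load_avg with this runqueue's stale contribution replaced by the same smoothed w that seeds the numerator, so both sides of s'_i = w / W are finally measured in the same smoothed units. A standalone sketch of one level follows, in plain longs; the divide guard at the end is cut off by the truncated hunk and follows the kernel's existing "wl = S * s'_i" step, so treat it as a paraphrase under that assumption.

/* One level of the post-fix effective_load() estimate, paraphrased with
 * plain longs. tg_shares, tg_load_avg, my_contrib and my_load_avg stand in
 * for tg->shares, tg->load_avg, cfs_rq->tg_load_avg_contrib and
 * cfs_rq_load_avg(cfs_rq); wl and wg are the weights being added, as in the
 * kernel function. */
static long one_level(long tg_shares, long tg_load_avg, long my_contrib,
		      long my_load_avg, long wl, long wg)
{
	long W, w;

	/* W = @wg + \Sum rw_j, with this rq's contribution refreshed */
	W = wg + tg_load_avg;
	W -= my_contrib;
	W += my_load_avg;		/* ensure \Sum rw_j >= rw_i */

	/* w = rw_i + @wl */
	w = my_load_avg + wl;

	/* wl = S * s'_i = S * w / W, guarded against W == 0 and w >= W */
	if (W > 0 && w < W)
		return (w * tg_shares) / W;

	return tg_shares;
}

The remainder of the loop, cut off above, clips this value and converts it into a per-level delta before moving up the hierarchy.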