sched/fair: Remove rq->load
The CFS class is the only one maintaining and using the CPU wide load
(rq->load(.weight)). The last use case of the CPU wide load in CFS's
set_next_entity() can be replaced by using the load of the CFS class
(rq->cfs.load(.weight)) instead.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190424084556.604-1-dietmar.eggemann@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit f2bedc4705
parent 3bd3706251
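For orientation while reading the diff below: rq->load, cfs_rq->load and se->load are all struct load_weight instances, and the accounting goes through the update_load_add()/update_load_sub() helpers in kernel/sched/fair.c. A minimal sketch of those definitions, roughly as in mainline at the time of this patch (shown only as context, not part of the change):

struct load_weight {
	unsigned long		weight;
	u32			inv_weight;
};

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;	/* force the cached inverse to be recomputed */
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

With the rq-level update_load_add()/update_load_sub() calls removed below, rq->load.weight is no longer maintained, which is why its last reader in set_next_entity() switches to rq->cfs.load.weight.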
@@ -656,8 +656,6 @@ do { \
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
 
 	P(nr_running);
-	SEQ_printf(m, "  .%-30s: %lu\n", "load",
-		   rq->load.weight);
 	P(nr_switches);
 	P(nr_load_updates);
 	P(nr_uninterruptible);
@@ -2686,8 +2686,6 @@ static void
 account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	update_load_add(&cfs_rq->load, se->load.weight);
-	if (!parent_entity(se))
-		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se)) {
 		struct rq *rq = rq_of(cfs_rq);
@@ -2703,8 +2701,6 @@ static void
 account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	update_load_sub(&cfs_rq->load, se->load.weight);
-	if (!parent_entity(se))
-		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se)) {
 		account_numa_dequeue(rq_of(cfs_rq), task_of(se));
@@ -4100,7 +4096,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	 * least twice that of our own weight (i.e. dont track it
 	 * when there are only lesser-weight tasks around):
 	 */
-	if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+	if (schedstat_enabled() &&
+	    rq_of(cfs_rq)->cfs.load.weight >= 2*se->load.weight) {
 		schedstat_set(se->statistics.slice_max,
 			max((u64)schedstat_val(se->statistics.slice_max),
 			se->sum_exec_runtime - se->prev_sum_exec_runtime));
@@ -830,8 +830,6 @@ struct rq {
 	atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 
-	/* capture load from *all* tasks on this CPU: */
-	struct load_weight	load;
 	unsigned long		nr_load_updates;
 	u64			nr_switches;
 