diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0a6eff8a278b..2dde735635ec 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -803,8 +803,36 @@ static inline void uclamp_se_set(struct uclamp_se *uc_se, unsigned int value)
 	uc_se->bucket_id = uclamp_bucket_id(value);
 }
 
+static inline unsigned int
+uclamp_idle_value(struct rq *rq, unsigned int clamp_id,
+		  unsigned int clamp_value)
+{
+	/*
+	 * Avoid blocked utilization pushing up the frequency when we go
+	 * idle (which drops the max-clamp) by retaining the last known
+	 * max-clamp.
+	 */
+	if (clamp_id == UCLAMP_MAX) {
+		rq->uclamp_flags |= UCLAMP_FLAG_IDLE;
+		return clamp_value;
+	}
+
+	return uclamp_none(UCLAMP_MIN);
+}
+
+static inline void uclamp_idle_reset(struct rq *rq, unsigned int clamp_id,
+				     unsigned int clamp_value)
+{
+	/* Reset max-clamp retention only on idle exit */
+	if (!(rq->uclamp_flags & UCLAMP_FLAG_IDLE))
+		return;
+
+	WRITE_ONCE(rq->uclamp[clamp_id].value, clamp_value);
+}
+
 static inline
-unsigned int uclamp_rq_max_value(struct rq *rq, unsigned int clamp_id)
+unsigned int uclamp_rq_max_value(struct rq *rq, unsigned int clamp_id,
+				 unsigned int clamp_value)
 {
 	struct uclamp_bucket *bucket = rq->uclamp[clamp_id].bucket;
 	int bucket_id = UCLAMP_BUCKETS - 1;
@@ -820,7 +848,7 @@ unsigned int uclamp_rq_max_value(struct rq *rq, unsigned int clamp_id)
 	}
 
 	/* No tasks -- default clamp values */
-	return uclamp_none(clamp_id);
+	return uclamp_idle_value(rq, clamp_id, clamp_value);
 }
 
 /*
@@ -845,6 +873,8 @@ static inline void uclamp_rq_inc_id(struct rq *rq, struct task_struct *p,
 	bucket = &uc_rq->bucket[uc_se->bucket_id];
 	bucket->tasks++;
 
+	uclamp_idle_reset(rq, clamp_id, uc_se->value);
+
 	/*
 	 * Local max aggregation: rq buckets always track the max
 	 * "requested" clamp value of its RUNNABLE tasks.
@@ -871,6 +901,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 	struct uclamp_rq *uc_rq = &rq->uclamp[clamp_id];
 	struct uclamp_se *uc_se = &p->uclamp[clamp_id];
 	struct uclamp_bucket *bucket;
+	unsigned int bkt_clamp;
 	unsigned int rq_clamp;
 
 	lockdep_assert_held(&rq->lock);
@@ -895,8 +926,10 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 	 * e.g. due to future modification, warn and fixup the expected value.
 	 */
 	SCHED_WARN_ON(bucket->value > rq_clamp);
-	if (bucket->value >= rq_clamp)
-		WRITE_ONCE(uc_rq->value, uclamp_rq_max_value(rq, clamp_id));
+	if (bucket->value >= rq_clamp) {
+		bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
+		WRITE_ONCE(uc_rq->value, bkt_clamp);
+	}
 }
 
 static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
@@ -908,6 +941,10 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
 
 	for_each_clamp_id(clamp_id)
 		uclamp_rq_inc_id(rq, p, clamp_id);
+
+	/* Reset clamp idle holding when there is one RUNNABLE task */
+	if (rq->uclamp_flags & UCLAMP_FLAG_IDLE)
+		rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
 }
 
 static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
@@ -926,8 +963,10 @@ static void __init init_uclamp(void)
 	unsigned int clamp_id;
 	int cpu;
 
-	for_each_possible_cpu(cpu)
+	for_each_possible_cpu(cpu) {
 		memset(&cpu_rq(cpu)->uclamp, 0, sizeof(struct uclamp_rq));
+		cpu_rq(cpu)->uclamp_flags = 0;
+	}
 
 	for_each_clamp_id(clamp_id) {
 		uclamp_se_set(&init_task.uclamp[clamp_id],
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index cecc6baaba93..0d2ba8bb2cb3 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -870,6 +870,8 @@ struct rq {
 #ifdef CONFIG_UCLAMP_TASK
 	/* Utilization clamp values based on CPU's RUNNABLE tasks */
 	struct uclamp_rq	uclamp[UCLAMP_CNT] ____cacheline_aligned;
+	unsigned int		uclamp_flags;
+#define UCLAMP_FLAG_IDLE 0x01
 #endif
 
 	struct cfs_rq		cfs;
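For intuition, here is a minimal user-space sketch of the retention scheme the hunks above implement: when the last RUNNABLE task leaves, UCLAMP_MAX is held at its last value (so blocked utilization cannot push the frequency back up), and the held value is reset on idle exit. The toy_rq structure and the enqueue()/dequeue() helpers are simplified stand-ins, collapsing the kernel's per-bucket aggregation, rq locking and WRITE_ONCE() accessors into plain assignments on a single aggregated field.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE	1024
#define UCLAMP_FLAG_IDLE	0x01

struct toy_rq {
	unsigned int max_clamp;	/* aggregated UCLAMP_MAX */
	unsigned int tasks;	/* RUNNABLE task count */
	unsigned int flags;
};

/* Dequeue: when the last task leaves, retain its max-clamp (idle hold) */
static void dequeue(struct toy_rq *rq, unsigned int task_max)
{
	if (--rq->tasks)
		return;
	/* rq is now idle: hold task_max instead of the 1024 default */
	rq->flags |= UCLAMP_FLAG_IDLE;
	rq->max_clamp = task_max;
}

/* Enqueue: idle exit resets the held value to the new task's clamp */
static void enqueue(struct toy_rq *rq, unsigned int task_max)
{
	rq->tasks++;
	if (rq->flags & UCLAMP_FLAG_IDLE) {
		rq->max_clamp = task_max;	/* mirrors uclamp_idle_reset() */
		rq->flags &= ~UCLAMP_FLAG_IDLE;
	} else if (task_max > rq->max_clamp) {
		rq->max_clamp = task_max;	/* normal max aggregation */
	}
}

int main(void)
{
	struct toy_rq rq = { .max_clamp = SCHED_CAPACITY_SCALE };

	enqueue(&rq, 512);	/* capped task runs: max_clamp = 512 */
	dequeue(&rq, 512);	/* rq idles: 512 is held, not 1024 */
	printf("idle max_clamp = %u (held)\n", rq.max_clamp);

	enqueue(&rq, 800);	/* idle exit: reset to the new task's 800 */
	printf("runnable max_clamp = %u\n", rq.max_clamp);
	return 0;
}

Without the idle hold, the "no tasks" path would return uclamp_none(UCLAMP_MAX) = 1024 and the blocked utilization of the just-dequeued capped task could drive the frequency up on an idle CPU; the reset on enqueue ensures the held value never outlives the idle period.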