Merge tag 'sched_urgent_for_v6.2_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Borislav Petkov:

 - Make sure the scheduler doesn't use stale frequency scaling values
   when the latter get disabled due to a value error

 - Fix a NULL pointer access on UP configs

 - Use the proper locking when updating CPU capacity

* tag 'sched_urgent_for_v6.2_rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/aperfmperf: Erase stale arch_freq_scale values when disabling frequency invariance readings
  sched/core: Fix NULL pointer access fault in sched_setaffinity() with non-SMP configs
  sched/fair: Fixes for capacity inversion detection
  sched/uclamp: Fix a uninitialized variable warnings
commit 2475bf0250
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void)
 
 static void disable_freq_invariance_workfn(struct work_struct *work)
 {
+	int cpu;
+
 	static_branch_disable(&arch_scale_freq_key);
+
+	/*
+	 * Set arch_freq_scale to a default value on all cpus
+	 * This negates the effect of scaling
+	 */
+	for_each_possible_cpu(cpu)
+		per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE;
 }
 
 static DECLARE_WORK(disable_freq_invariance_work,
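The hunk above resets every CPU's arch_freq_scale back to SCHED_CAPACITY_SCALE once frequency invariance is disabled, so later readers never pick up a stale ratio. For illustration only, a minimal user-space sketch of that reset-to-neutral-default pattern (not kernel code): freq_scale[], NR_CPUS and disable_freq_invariance() are made-up stand-ins for the per-CPU variable, for_each_possible_cpu() and the workfn.

/* Illustrative user-space sketch of the reset pattern above; not kernel code. */
#include <stdio.h>

#define NR_CPUS              8
#define SCHED_CAPACITY_SCALE 1024	/* neutral scale factor, as in the kernel */

/* Stand-in for the per-CPU arch_freq_scale variables. */
static unsigned long freq_scale[NR_CPUS];

static int freq_invariance_enabled = 1;

/*
 * Mirrors disable_freq_invariance_workfn(): besides flipping the "enabled"
 * switch, walk every CPU and put the scale factor back to its neutral
 * default so no reader keeps using a stale value.
 */
static void disable_freq_invariance(void)
{
	int cpu;

	freq_invariance_enabled = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		freq_scale[cpu] = SCHED_CAPACITY_SCALE;
}

int main(void)
{
	int cpu;

	/* Pretend the CPUs reported different (now stale) scale values. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		freq_scale[cpu] = 700 + 10 * cpu;

	disable_freq_invariance();

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d scale=%lu\n", cpu, freq_scale[cpu]);

	return 0;
}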
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8290,12 +8290,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	if (retval)
 		goto out_put_task;
 
+	/*
+	 * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
+	 * alloc_user_cpus_ptr() returns NULL.
+	 */
 	user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
-	if (IS_ENABLED(CONFIG_SMP) && !user_mask) {
+	if (user_mask) {
+		cpumask_copy(user_mask, in_mask);
+	} else if (IS_ENABLED(CONFIG_SMP)) {
 		retval = -ENOMEM;
 		goto out_put_task;
 	}
-	cpumask_copy(user_mask, in_mask);
+
 	ac = (struct affinity_context){
 		.new_mask  = in_mask,
 		.user_mask = user_mask,
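The fix above makes a NULL return from alloc_user_cpus_ptr() a legitimate outcome on !CONFIG_SMP builds: the mask is copied only when a buffer actually exists, and NULL is treated as -ENOMEM only when SMP is enabled. A minimal user-space sketch of that control flow, with made-up names (CONFIG_SMP_SKETCH, alloc_user_mask(), set_affinity()) standing in for the kernel symbols:

/* Illustrative user-space sketch of the fixed control flow; not kernel code. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define CONFIG_SMP_SKETCH 0	/* flip to 1 to model an SMP build */

/* On a "non-SMP" build the mask simply isn't used, so NULL is legitimate. */
static unsigned long *alloc_user_mask(void)
{
	if (!CONFIG_SMP_SKETCH)
		return NULL;
	return malloc(sizeof(unsigned long));
}

static int set_affinity(unsigned long in_mask)
{
	unsigned long *user_mask = alloc_user_mask();

	if (user_mask) {
		*user_mask = in_mask;		/* only touch it when we got one */
	} else if (CONFIG_SMP_SKETCH) {
		return -ENOMEM;			/* NULL is only an error on SMP */
	}

	/* ... hand user_mask (possibly NULL) to the rest of the path ... */
	printf("affinity set, user_mask=%p\n", (void *)user_mask);
	free(user_mask);
	return 0;
}

int main(void)
{
	return set_affinity(0x3) ? 1 : 0;
}

The broken version dereferenced the pointer unconditionally after the error check; testing the pointer itself, and only escalating NULL to an error when the feature needs the buffer, is what removes the UP-config fault.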
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7229,10 +7229,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 	eenv_task_busy_time(&eenv, p, prev_cpu);
 
 	for (; pd; pd = pd->next) {
+		unsigned long util_min = p_util_min, util_max = p_util_max;
 		unsigned long cpu_cap, cpu_thermal_cap, util;
 		unsigned long cur_delta, max_spare_cap = 0;
 		unsigned long rq_util_min, rq_util_max;
-		unsigned long util_min, util_max;
 		unsigned long prev_spare_cap = 0;
 		int max_spare_cap_cpu = -1;
 		unsigned long base_energy;
@@ -7251,6 +7251,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 		eenv.pd_cap = 0;
 
 		for_each_cpu(cpu, cpus) {
+			struct rq *rq = cpu_rq(cpu);
+
 			eenv.pd_cap += cpu_thermal_cap;
 
 			if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
@@ -7269,24 +7271,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 			 * much capacity we can get out of the CPU; this is
 			 * aligned with sched_cpu_util().
 			 */
-			if (uclamp_is_used()) {
-				if (uclamp_rq_is_idle(cpu_rq(cpu))) {
-					util_min = p_util_min;
-					util_max = p_util_max;
-				} else {
-					/*
-					 * Open code uclamp_rq_util_with() except for
-					 * the clamp() part. Ie: apply max aggregation
-					 * only. util_fits_cpu() logic requires to
-					 * operate on non clamped util but must use the
-					 * max-aggregated uclamp_{min, max}.
-					 */
-					rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
-					rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
-
-					util_min = max(rq_util_min, p_util_min);
-					util_max = max(rq_util_max, p_util_max);
-				}
+			if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) {
+				/*
+				 * Open code uclamp_rq_util_with() except for
+				 * the clamp() part. Ie: apply max aggregation
+				 * only. util_fits_cpu() logic requires to
+				 * operate on non clamped util but must use the
+				 * max-aggregated uclamp_{min, max}.
+				 */
+				rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN);
+				rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX);
+
+				util_min = max(rq_util_min, p_util_min);
+				util_max = max(rq_util_max, p_util_max);
 			}
 
 			if (!util_fits_cpu(util, util_min, util_max, cpu))
 				continue;
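The warning fix above initializes util_min/util_max from the task's own clamps at declaration, so every path through the loop sees defined values, and only max-aggregates with the runqueue clamps when uclamp is in use and the rq isn't idle. A small user-space sketch of that pattern (not the kernel code; struct rq_clamps, uclamp_active and effective_clamps() are invented for the example):

/* Illustrative user-space sketch of the initialize-then-override pattern. */
#include <stdio.h>

struct rq_clamps {
	int idle;			/* no uclamp aggregation on an idle rq */
	unsigned long min, max;
};

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

/*
 * Start from the task's own clamps so util_min/util_max are always
 * initialized, and only max-aggregate with the runqueue clamps when
 * uclamp is active and the rq isn't idle -- mirroring the fixed hunk.
 */
static void effective_clamps(int uclamp_active, const struct rq_clamps *rq,
			     unsigned long p_util_min, unsigned long p_util_max,
			     unsigned long *util_min, unsigned long *util_max)
{
	*util_min = p_util_min;
	*util_max = p_util_max;

	if (uclamp_active && !rq->idle) {
		*util_min = max_ul(rq->min, p_util_min);
		*util_max = max_ul(rq->max, p_util_max);
	}
}

int main(void)
{
	struct rq_clamps rq = { .idle = 0, .min = 200, .max = 800 };
	unsigned long lo, hi;

	effective_clamps(1, &rq, 100, 1024, &lo, &hi);
	printf("util_min=%lu util_max=%lu\n", lo, hi);	/* prints 200 1024 */
	return 0;
}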
@@ -8871,16 +8868,23 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	 * * Thermal pressure will impact all cpus in this perf domain
 	 *   equally.
 	 */
-	if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+	if (sched_energy_enabled()) {
 		unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
-		struct perf_domain *pd = rcu_dereference(rq->rd->pd);
+		struct perf_domain *pd;
+
+		rcu_read_lock();
 
+		pd = rcu_dereference(rq->rd->pd);
 		rq->cpu_capacity_inverted = 0;
 
 		for (; pd; pd = pd->next) {
 			struct cpumask *pd_span = perf_domain_span(pd);
 			unsigned long pd_cap_orig, pd_cap;
 
+			/* We can't be inverted against our own pd */
+			if (cpumask_test_cpu(cpu_of(rq), pd_span))
+				continue;
+
 			cpu = cpumask_any(pd_span);
 			pd_cap_orig = arch_scale_cpu_capacity(cpu);
 
@@ -8905,6 +8909,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 				break;
 			}
 		}
+
+		rcu_read_unlock();
 	}
 
 	trace_sched_cpu_capacity_tp(rq);
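The locking fix above gates the perf-domain walk on sched_energy_enabled() and brackets the rcu_dereference() plus the list traversal in an explicit rcu_read_lock()/rcu_read_unlock() pair. A user-space sketch of that bracketing shape, with trivial stub functions standing in for the kernel RCU primitives (the stubs do nothing here; only the structure is the point):

/* Illustrative user-space sketch of the read-side bracketing; not kernel code. */
#include <stdio.h>

struct perf_domain {
	int id;
	struct perf_domain *next;
};

static struct perf_domain *pd_list;	/* stand-in for rq->rd->pd */

/* Stub read-side markers: the point is the bracketing, not the mechanism. */
static void rcu_read_lock(void)   { }
static void rcu_read_unlock(void) { }
static struct perf_domain *rcu_dereference(struct perf_domain *p) { return p; }

static void walk_perf_domains(void)
{
	struct perf_domain *pd;

	rcu_read_lock();

	/* Dereference and traverse only inside the read-side critical section. */
	pd = rcu_dereference(pd_list);
	for (; pd; pd = pd->next)
		printf("visiting pd %d\n", pd->id);

	rcu_read_unlock();
}

int main(void)
{
	struct perf_domain b = { .id = 1, .next = NULL };
	struct perf_domain a = { .id = 0, .next = &b };

	pd_list = &a;
	walk_perf_domains();
	return 0;
}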