sched/deadline: Ensure that updates to exclusive cpusets don't break AC
How we deal with updates to exclusive cpusets is currently broken.
As an example, suppose we have an exclusive cpuset composed of two
cpus: A[cpu0,cpu1]. We can assign SCHED_DEADLINE tasks to it up to
the allowed bandwidth. If we now want to modify cpusetA's cpumask,
we have to check that removing a cpu's worth of bandwidth doesn't
break AC guarantees. This check is missing in the current code.

This patch fixes the problem above, denying an update if the new
cpumask won't have enough bandwidth for the SCHED_DEADLINE tasks
that are currently active.

Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Li Zefan <lizefan@huawei.com>
Cc: cgroups@vger.kernel.org
Link: http://lkml.kernel.org/r/5433E6AF.5080105@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
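For concreteness, here is a standalone sketch of the arithmetic behind
the AC (admission control) check this patch enforces. The 1 << 20
fixed-point scale mirrors the kernel's to_ratio() convention; the 95%
per-cpu cap and the task bandwidths below are illustrative assumptions,
not values taken from this commit.

#include <stdio.h>

/* to_ratio()-style fixed point: 1.0 == 1 << 20 (assumed scale;
 * the concrete numbers below are made up for illustration). */
#define BW_UNIT		(1LL << 20)

int main(void)
{
	long long bw = BW_UNIT * 95 / 100;		/* per-cpu cap: 0.95 */
	long long total_bw = 2 * (BW_UNIT * 60 / 100);	/* two 0.60 tasks */

	/* A[cpu0,cpu1]: 2 * 0.95 = 1.90 >= 1.20, both tasks were admitted */
	printf("2 cpus: %s\n", bw * 2 >= total_bw ? "fits" : "breaks AC");

	/* shrinking A to one cpu: 1 * 0.95 < 1.20, must be refused */
	printf("1 cpu:  %s\n", bw * 1 >= total_bw ? "fits" : "breaks AC");

	return 0;
}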
parent 7f51412a41
commit f82f80426f
include/linux/sched.h
@@ -2052,6 +2052,8 @@ static inline void tsk_restore_flags(struct task_struct *task,
 	task->flags |= orig_flags & flags;
 }
 
+extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+				     const struct cpumask *trial);
 extern int task_can_attach(struct task_struct *p,
 			   const struct cpumask *cs_cpus_allowed);
 #ifdef CONFIG_SMP
kernel/cpuset.c
@@ -506,6 +506,16 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 			goto out;
 	}
 
+	/*
+	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
+	 * tasks.
+	 */
+	ret = -EBUSY;
+	if (is_cpu_exclusive(cur) &&
+	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
+				       trial->cpus_allowed))
+		goto out;
+
 	ret = 0;
 out:
 	rcu_read_unlock();
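With this hunk in place, an attempt to shrink an exclusive cpuset below
the bandwidth demand of its deadline tasks makes validate_change() fail
with -EBUSY, which propagates back to the write on cpuset.cpus. A
hypothetical userspace probe, where the legacy-hierarchy mountpoint and
the cpuset name "A" are assumptions:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* assumed cgroup-v1 cpuset mountpoint and a cpuset named "A" */
	int fd = open("/sys/fs/cgroup/cpuset/A/cpuset.cpus", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* try to shrink A[cpu0,cpu1] down to cpu0 */
	if (write(fd, "0", 1) < 0)
		printf("shrink refused: %s\n", strerror(errno)); /* EBUSY */
	else
		printf("shrink allowed\n");

	close(fd);
	return 0;
}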
kernel/sched/core.c
@@ -4650,6 +4650,25 @@ void init_idle(struct task_struct *idle, int cpu)
 #endif
 }
 
+int cpuset_cpumask_can_shrink(const struct cpumask *cur,
+			      const struct cpumask *trial)
+{
+	int ret = 1, trial_cpus;
+	struct dl_bw *cur_dl_b;
+	unsigned long flags;
+
+	cur_dl_b = dl_bw_of(cpumask_any(cur));
+	trial_cpus = cpumask_weight(trial);
+
+	raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
+	if (cur_dl_b->bw != -1 &&
+	    cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
+		ret = 0;
+	raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
+
+	return ret;
+}
+
 int task_can_attach(struct task_struct *p,
 		    const struct cpumask *cs_cpus_allowed)
 {
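Read as a pure function, cpuset_cpumask_can_shrink() treats bw == -1 as
"admission control disabled" (the sysctl sched_rt_runtime_us set to -1)
and otherwise checks that the capacity of the trial mask still covers
the bandwidth already admitted. A minimal userspace restatement under
those assumptions, with dl_bw_of() and the irqsave locking elided as
kernel-side details:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the relevant fields of the kernel's struct dl_bw */
struct dl_bw_sketch {
	long long bw;		/* per-cpu cap; -1 means no limit */
	long long total_bw;	/* sum of admitted tasks' bandwidth */
};

static bool can_shrink(const struct dl_bw_sketch *cur, int trial_cpus)
{
	/* bw == -1: admission control is off, any mask is acceptable */
	if (cur->bw == -1)
		return true;

	/* capacity of the trial mask must still cover current demand */
	return cur->bw * trial_cpus >= cur->total_bw;
}

int main(void)
{
	/* same illustrative numbers as above: 0.95 cap, 1.20 demand */
	struct dl_bw_sketch b = { .bw = 996147, .total_bw = 1258290 };

	printf("trial=2 -> %s\n", can_shrink(&b, 2) ? "ok" : "-EBUSY");
	printf("trial=1 -> %s\n", can_shrink(&b, 1) ? "ok" : "-EBUSY");
	return 0;
}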