sched: Introduce dl_task_check_affinity() to check proposed affinity
In preparation for restricting the affinity of a task during execve() on arm64, introduce a new dl_task_check_affinity() helper function to give an indication as to whether the restricted mask is admissible for a deadline task.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lore.kernel.org/r/20210730112443.23245-10-will.@kernel.org
This commit is contained in:
parent
07ec77a1d4
commit
234b8ab647
|
@ -1709,6 +1709,7 @@ extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new
|
|||
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
|
||||
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
|
||||
extern void release_user_cpus_ptr(struct task_struct *p);
|
||||
extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
|
||||
extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
|
||||
extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
|
||||
#else
|
||||
|
@ -1731,6 +1732,11 @@ static inline void release_user_cpus_ptr(struct task_struct *p)
|
|||
{
|
||||
WARN_ON(p->user_cpus_ptr);
|
||||
}
|
||||
|
||||
/*
 * !CONFIG_SMP stub: with a single CPU there is no root_domain-based
 * deadline admission concern, so any proposed mask is admissible.
 */
static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
{
	return 0;
}
|
||||
#endif
|
||||
|
||||
extern int yield_to(struct task_struct *p, bool preempt);
|
||||
|
|
|
@ -7756,6 +7756,32 @@ out_unlock:
|
|||
return retval;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
* If the task isn't a deadline task or admission control is
|
||||
* disabled then we don't care about affinity changes.
|
||||
*/
|
||||
if (!task_has_dl_policy(p) || !dl_bandwidth_enabled())
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Since bandwidth control happens on root_domain basis,
|
||||
* if admission test is enabled, we only admit -deadline
|
||||
* tasks allowed to run on all the CPUs in the task's
|
||||
* root_domain.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
if (!cpumask_subset(task_rq(p)->rd->span, mask))
|
||||
ret = -EBUSY;
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
static int
|
||||
__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
|
||||
{
|
||||
|
@ -7773,23 +7799,9 @@ __sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
|
|||
cpuset_cpus_allowed(p, cpus_allowed);
|
||||
cpumask_and(new_mask, mask, cpus_allowed);
|
||||
|
||||
/*
|
||||
* Since bandwidth control happens on root_domain basis,
|
||||
* if admission test is enabled, we only admit -deadline
|
||||
* tasks allowed to run on all the CPUs in the task's
|
||||
* root_domain.
|
||||
*/
|
||||
#ifdef CONFIG_SMP
|
||||
if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
|
||||
rcu_read_lock();
|
||||
if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
|
||||
retval = -EBUSY;
|
||||
rcu_read_unlock();
|
||||
goto out_free_new_mask;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
#endif
|
||||
retval = dl_task_check_affinity(p, new_mask);
|
||||
if (retval)
|
||||
goto out_free_new_mask;
|
||||
again:
|
||||
retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
|
||||
if (retval)
|
||||
|
|
Loading…
Reference in New Issue