sched: convert check_preempt_equal_prio to cpumask_var_t.
Impact: stack reduction for large NR_CPUS

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
stack space. We simply return if the allocation fails: since we don't
use it we could just pass NULL to cpupri_find and have it handle that.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 68e74568fb
commit 24600ce89a
@@ -805,17 +805,20 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	cpumask_t mask;
+	cpumask_var_t mask;
 
 	if (rq->curr->rt.nr_cpus_allowed == 1)
 		return;
 
-	if (p->rt.nr_cpus_allowed != 1
-	    && cpupri_find(&rq->rd->cpupri, p, &mask))
+	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
 		return;
 
-	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-		return;
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, mask))
+		goto free;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
+		goto free;
 
 	/*
 	 * There appears to be other cpus that can accept
@@ -824,6 +827,8 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 */
 	requeue_task_rt(rq, p, 1);
 	resched_task(rq->curr);
+free:
+	free_cpumask_var(mask);
 }
 
 #endif /* CONFIG_SMP */
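
For readers unfamiliar with the cpumask_var_t API, here is a minimal sketch (not part of the patch; the function name and body are hypothetical) of the allocate/use/free pattern the conversion relies on. With CONFIG_CPUMASK_OFFSTACK the mask is heap-allocated and the allocation can fail; without it, cpumask_var_t is an ordinary on-stack mask and alloc/free compile away to no-ops.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical helper illustrating the cpumask_var_t pattern. */
static int example_count_online_cpus(void)
{
	cpumask_var_t mask;
	int cpu, count = 0;

	/* May fail when the mask is heap-allocated; bail out cleanly. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Use the mask like any other cpumask. */
	cpumask_copy(mask, cpu_online_mask);
	for_each_cpu(cpu, mask)
		count++;

	/* Always pair the allocation with free_cpumask_var(). */
	free_cpumask_var(mask);
	return count;
}

The patch follows the same shape: allocate with GFP_ATOMIC (this path can run with the runqueue lock held), return early on allocation failure, and route the existing early-return paths through a "free" label so the mask is released on every exit.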