sched: cleanup for alloc_rt/fair_sched_group()
Impact: cleanup

Remove the parent == NULL check. parent can never be NULL, because we
only create sub task_groups dynamically, and a sub task_group always
has a parent (the root task_group is statically defined).

Also replace kmalloc_node(GFP_KERNEL|__GFP_ZERO) with kzalloc_node().

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
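For context, kzalloc_node() is a thin wrapper that ORs __GFP_ZERO into the
allocation flags, so the conversion below is purely a readability cleanup.
A sketch of the helper as defined in include/linux/slab.h around this time:

	/*
	 * Existing helper, shown for reference (not part of this patch):
	 * kzalloc_node() just adds __GFP_ZERO, so
	 * kmalloc_node(size, GFP_KERNEL|__GFP_ZERO, node) and
	 * kzalloc_node(size, GFP_KERNEL, node) are equivalent.
	 */
	static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
	{
		return kmalloc_node(size, flags | __GFP_ZERO, node);
	}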
commit eab172294d
parent e946217e4f
@@ -8472,7 +8472,7 @@ static
 int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
-	struct sched_entity *se, *parent_se;
+	struct sched_entity *se;
 	struct rq *rq;
 	int i;
 
@@ -8488,18 +8488,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
-				GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
+				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
 			goto err;
 
-		se = kmalloc_node(sizeof(struct sched_entity),
-				GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		se = kzalloc_node(sizeof(struct sched_entity),
+				  GFP_KERNEL, cpu_to_node(i));
 		if (!se)
 			goto err;
 
-		parent_se = parent ? parent->se[i] : NULL;
-		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se);
+		init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
 	}
 
 	return 1;
@@ -8560,7 +8559,7 @@ static
 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct rt_rq *rt_rq;
-	struct sched_rt_entity *rt_se, *parent_se;
+	struct sched_rt_entity *rt_se;
 	struct rq *rq;
 	int i;
 
@@ -8577,18 +8576,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 	for_each_possible_cpu(i) {
 		rq = cpu_rq(i);
 
-		rt_rq = kmalloc_node(sizeof(struct rt_rq),
-				GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_rq = kzalloc_node(sizeof(struct rt_rq),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_rq)
 			goto err;
 
-		rt_se = kmalloc_node(sizeof(struct sched_rt_entity),
-				GFP_KERNEL|__GFP_ZERO, cpu_to_node(i));
+		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
+				     GFP_KERNEL, cpu_to_node(i));
 		if (!rt_se)
 			goto err;
 
-		parent_se = parent ? parent->rt_se[i] : NULL;
-		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se);
+		init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
 	}
 
 	return 1;
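The removed NULL check relied on a guarantee made one level up. A rough
sketch of the cgroup glue in kernel/sched.c of that era (simplified, with
error handling elided; shown only to illustrate why parent cannot be NULL
on this path):

	/*
	 * Simplified sketch of the caller (not part of this patch): the
	 * root group short-circuits to the statically defined
	 * init_task_group, so alloc_fair/rt_sched_group() only ever run
	 * with a valid parent group.
	 */
	static struct cgroup_subsys_state *
	cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
	{
		struct task_group *tg;

		if (!cgrp->parent) {
			/* root cgroup: static group, no allocation needed */
			return &init_task_group.css;
		}

		/* any other cgroup has a parent, so this is never NULL */
		tg = sched_create_group(cgroup_tg(cgrp->parent));
		if (IS_ERR(tg))
			return ERR_PTR(-ENOMEM);

		return &tg->css;
	}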