sched: split out css_online/css_offline from tg creation/destruction
This is a preparation for later patches.

- What do we gain from cpu_cgroup_css_online():
  After ss->css_alloc() and before ss->css_online(), there's a small
  window where tg->css.cgroup is NULL. With this change, tg won't be
  seen before ss->css_online(), where it's added to the global list,
  so we're guaranteed we'll never see a NULL tg->css.cgroup.

- What do we gain from cpu_cgroup_css_offline():
  tg is freed via RCU, and so is cgroup. Without this change, the
  synchronization works like this:

    cgroup_rmdir()
      no ss->css_offline()
    diput()
      synchronize_rcu()
      ss->css_free()       <-- unregister tg, and free it via call_rcu()
    kfree_rcu(cgroup)      <-- wait for possible refs to cgroup, and free cgroup

  We can't just kfree(cgroup), because tg might still access tg->css.cgroup.

  With this change:

    cgroup_rmdir()
      ss->css_offline()    <-- unregister tg
    diput()
      synchronize_rcu()    <-- wait for possible refs to tg and cgroup
      ss->css_free()       <-- free tg
    kfree_rcu(cgroup)      <-- free cgroup

  As you can see, kfree_rcu() is now redundant.

Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
parent fe1c06ca75
commit ace783b9bb
include/linux/sched.h

@@ -2750,7 +2750,10 @@ extern void normalize_rt_tasks(void);
 extern struct task_group root_task_group;
 
 extern struct task_group *sched_create_group(struct task_group *parent);
+extern void sched_online_group(struct task_group *tg,
+			       struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
+extern void sched_offline_group(struct task_group *tg);
 extern void sched_move_task(struct task_struct *tsk);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
kernel/sched/auto_group.c

@@ -35,6 +35,7 @@ static inline void autogroup_destroy(struct kref *kref)
 	ag->tg->rt_se = NULL;
 	ag->tg->rt_rq = NULL;
 #endif
+	sched_offline_group(ag->tg);
 	sched_destroy_group(ag->tg);
 }
 
@@ -76,6 +77,8 @@ static inline struct autogroup *autogroup_create(void)
 	if (IS_ERR(tg))
 		goto out_free;
 
+	sched_online_group(tg, &root_task_group);
+
 	kref_init(&ag->kref);
 	init_rwsem(&ag->lock);
 	ag->id = atomic_inc_return(&autogroup_seq_nr);
kernel/sched/core.c

@@ -7159,7 +7159,6 @@ static void free_sched_group(struct task_group *tg)
 struct task_group *sched_create_group(struct task_group *parent)
 {
 	struct task_group *tg;
-	unsigned long flags;
 
 	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
 	if (!tg)
@@ -7171,6 +7170,17 @@ struct task_group *sched_create_group(struct task_group *parent)
 	if (!alloc_rt_sched_group(tg, parent))
 		goto err;
 
+	return tg;
+
+err:
+	free_sched_group(tg);
+	return ERR_PTR(-ENOMEM);
+}
+
+void sched_online_group(struct task_group *tg, struct task_group *parent)
+{
+	unsigned long flags;
+
 	spin_lock_irqsave(&task_group_lock, flags);
 	list_add_rcu(&tg->list, &task_groups);
 
@@ -7180,12 +7190,6 @@ struct task_group *sched_create_group(struct task_group *parent)
 	INIT_LIST_HEAD(&tg->children);
 	list_add_rcu(&tg->siblings, &parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
-
-	return tg;
-
-err:
-	free_sched_group(tg);
-	return ERR_PTR(-ENOMEM);
 }
 
 /* rcu callback to free various structures associated with a task group */
@@ -7197,6 +7201,12 @@ static void free_sched_group_rcu(struct rcu_head *rhp)
 
 /* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
+{
+	/* wait for possible concurrent references to cfs_rqs complete */
+	call_rcu(&tg->rcu, free_sched_group_rcu);
+}
+
+void sched_offline_group(struct task_group *tg)
 {
 	unsigned long flags;
 	int i;
@@ -7209,9 +7219,6 @@ void sched_destroy_group(struct task_group *tg)
 	list_del_rcu(&tg->list);
 	list_del_rcu(&tg->siblings);
 	spin_unlock_irqrestore(&task_group_lock, flags);
-
-	/* wait for possible concurrent references to cfs_rqs complete */
-	call_rcu(&tg->rcu, free_sched_group_rcu);
 }
 
 /* change task's runqueue when it moves between groups.
@@ -7563,6 +7570,19 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
 	return &tg->css;
 }
 
+static int cpu_cgroup_css_online(struct cgroup *cgrp)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *parent;
+
+	if (!cgrp->parent)
+		return 0;
+
+	parent = cgroup_tg(cgrp->parent);
+	sched_online_group(tg, parent);
+	return 0;
+}
+
 static void cpu_cgroup_css_free(struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
@@ -7570,6 +7590,13 @@ static void cpu_cgroup_css_free(struct cgroup *cgrp)
 	sched_destroy_group(tg);
 }
 
+static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+{
+	struct task_group *tg = cgroup_tg(cgrp);
+
+	sched_offline_group(tg);
+}
+
 static int cpu_cgroup_can_attach(struct cgroup *cgrp,
 				 struct cgroup_taskset *tset)
 {
@@ -7925,6 +7952,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.name		= "cpu",
 	.css_alloc	= cpu_cgroup_css_alloc,
 	.css_free	= cpu_cgroup_css_free,
+	.css_online	= cpu_cgroup_css_online,
+	.css_offline	= cpu_cgroup_css_offline,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
 	.exit		= cpu_cgroup_exit,