cpuset: use rcu_read_lock() to protect task_cs()
We no longer use task_lock() to protect tsk->cgroups.

Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Li Zefan <lizefan@huawei.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit b8dadcb58d
parent a60bed296a
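Every hunk below applies the same conversion: the task_cs(tsk) dereference, which follows the RCU-managed tsk->cgroups pointer, is now bracketed by rcu_read_lock()/rcu_read_unlock() instead of task_lock()/task_unlock(). A minimal sketch of the resulting read-side pattern, for orientation only (the wrapper function is hypothetical, and task_cs() is a helper local to kernel/cpuset.c, so this is not standalone-buildable):

    /*
     * Sketch of the read-side pattern this commit converts to.
     * task_cs() follows tsk->cgroups, so the returned cpuset is only
     * guaranteed to stay valid inside the RCU read-side section.
     */
    static void example_peek_cpuset(struct task_struct *tsk)
    {
            struct cpuset *cs;

            rcu_read_lock();
            cs = task_cs(tsk);
            /* read fields of *cs here; sleeping is not allowed */
            rcu_read_unlock();
    }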
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2239,10 +2239,10 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	struct cpuset *cpus_cs;
 
 	mutex_lock(&callback_mutex);
-	task_lock(tsk);
+	rcu_read_lock();
 	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
 	guarantee_online_cpus(cpus_cs, pmask);
-	task_unlock(tsk);
+	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);
 }
 
@@ -2295,10 +2295,10 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 	nodemask_t mask;
 
 	mutex_lock(&callback_mutex);
-	task_lock(tsk);
+	rcu_read_lock();
 	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
 	guarantee_online_mems(mems_cs, &mask);
-	task_unlock(tsk);
+	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);
 
 	return mask;
@@ -2414,9 +2414,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
 	mutex_lock(&callback_mutex);
 
-	task_lock(current);
+	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
-	task_unlock(current);
+	rcu_read_unlock();
 
 	allowed = node_isset(node, cs->mems_allowed);
 	mutex_unlock(&callback_mutex);
@@ -2543,24 +2543,26 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
  * @task: pointer to task_struct of some task.
  *
  * Description: Prints @task's name, cpuset name, and cached copy of its
- * mems_allowed to the kernel log.  Must hold task_lock(task) to allow
- * dereferencing task_cs(task).
+ * mems_allowed to the kernel log.
  */
 void cpuset_print_task_mems_allowed(struct task_struct *tsk)
 {
 	/* Statically allocated to prevent using excess stack. */
 	static char cpuset_nodelist[CPUSET_NODELIST_LEN];
 	static DEFINE_SPINLOCK(cpuset_buffer_lock);
-	struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
+	struct cgroup *cgrp;
 
 	spin_lock(&cpuset_buffer_lock);
+	rcu_read_lock();
 
+	cgrp = task_cs(tsk)->css.cgroup;
 	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
 			   tsk->mems_allowed);
 	printk(KERN_INFO "%s cpuset=", tsk->comm);
 	pr_cont_cgroup_name(cgrp);
 	pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
 
+	rcu_read_unlock();
 	spin_unlock(&cpuset_buffer_lock);
 }
 
@@ -2592,9 +2594,9 @@ int cpuset_memory_pressure_enabled __read_mostly;
 
 void __cpuset_memory_pressure_bump(void)
 {
-	task_lock(current);
+	rcu_read_lock();
 	fmeter_markevent(&task_cs(current)->fmeter);
-	task_unlock(current);
+	rcu_read_unlock();
 }
 
 #ifdef CONFIG_PROC_PID_CPUSET
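These readers are only safe because the write side of tsk->cgroups follows the usual RCU publish-then-retire discipline. A generic sketch of that discipline, purely illustrative: the function and the new_cset/old_cset names are hypothetical, and the cgroup core's real update path is reference-counted and retires css_sets via RCU callbacks rather than a bare kfree().

    /*
     * Generic RCU update-side pattern paired with the readers above:
     * publish the new pointer, wait for pre-existing rcu_read_lock()
     * sections to drain, then free the old object.
     */
    static void example_retire_cset(struct task_struct *tsk,
                                    struct css_set *new_cset)
    {
            struct css_set *old_cset =
                    rcu_dereference_protected(tsk->cgroups, 1);

            rcu_assign_pointer(tsk->cgroups, new_cset); /* publish */
            synchronize_rcu();      /* wait for rcu_read_lock() readers */
            kfree(old_cset);        /* no reader can still reach it */
    }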