memcg: let css_get_next() rely upon rcu_read_lock()
Remove the lock and unlock around css_get_next()'s call to idr_get_next(). The memcg iterators (the only users of css_get_next) already take rcu_read_lock(), and css_get_next()'s comment demands that; but add a WARN_ON_ONCE to make sure of it. Signed-off-by: Hugh Dickins <hughd@google.com> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Li Zefan <lizf@cn.fujitsu.com> Cc: Eric Dumazet <eric.dumazet@gmail.com> Acked-by: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
42aee6c495
commit
ca464d69b1
|
@ -5033,6 +5033,8 @@ css_get_next(struct cgroup_subsys *ss, int id,
|
|||
return NULL;
|
||||
|
||||
BUG_ON(!ss->use_id);
|
||||
WARN_ON_ONCE(!rcu_read_lock_held());
|
||||
|
||||
/* fill start point for scan */
|
||||
tmpid = id;
|
||||
while (1) {
|
||||
|
@ -5040,10 +5042,7 @@ css_get_next(struct cgroup_subsys *ss, int id,
|
|||
* scan next entry from bitmap(tree), tmpid is updated after
|
||||
* idr_get_next().
|
||||
*/
|
||||
spin_lock(&ss->id_lock);
|
||||
tmp = idr_get_next(&ss->idr, &tmpid);
|
||||
spin_unlock(&ss->id_lock);
|
||||
|
||||
if (!tmp)
|
||||
break;
|
||||
if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
|
||||
|
|
Loading…
Reference in New Issue