From 79d719749d23234e9b725098aa49133f3ef7299d Mon Sep 17 00:00:00 2001
From: Aristeu Rozanski
Date: Mon, 21 Apr 2014 12:13:03 -0400
Subject: [PATCH 1/7] device_cgroup: rework device access check and exception
 checking

Whenever a device file is opened and checked against current device
cgroup rules, it uses the same function (may_access()) as when a new
exception rule is added by writing devices.{allow,deny}. In both cases
the algorithm is the same, regardless of the default behavior.

The first problem is that a device access check is treated the same
way as a new-rule check. Consider the following structure:

  A (default behavior: allow, exceptions disallow access)
   \
    B (default behavior: allow, exceptions disallow access)

A new exception is added to B by writing devices.deny:

  c 12:34 rw

When checking if that exception is allowed, may_access() does:

  if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
      if (behavior == DEVCG_DEFAULT_ALLOW) {
          /* the exception will deny access to certain devices */
          return true;

That is fine when adding a rule: B is not getting more privileges than
A, so the rule is accepted.

Now consider a device file open check for a process that belongs to
cgroup B. The access will be generated as:

  behavior: allow
  exception: c 12:34 rw

The very same chunk of code will allow it, even though there's an
explicit exception telling it to do otherwise. A simple test case:

  # mkdir new_group
  # cd new_group
  # echo $$ >tasks
  # echo "c 1:3 w" >devices.deny
  # echo >/dev/null
  # echo $?
  0

This is a serious bug and was introduced by commit c39a2a3018f8
("devcg: prepare may_access() for hierarchy support"). To solve it,
the device file open check was split from the new exception check.

The second problem is how exceptions are processed by may_access().
The first part of that function tries to match the new exception
fully against an existing one:

  list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
      if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
          continue;
      if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
          continue;
      if (ex->major != ~0 && ex->major != refex->major)
          continue;
      if (ex->minor != ~0 && ex->minor != refex->minor)
          continue;
      if (refex->access & (~ex->access))
          continue;
      match = true;
      break;
  }

That means the new exception must be contained in an existing one to
be considered a match:

  New exception    Existing       match?   notes
  b 12:34 rwm      b 12:34 rwm    yes
  b 12:34 r        b *:34 rw      yes
  b 12:34 rw       b 12:34 w      no       extra "r"
  b *:34 rw        b 12:34 rw     no       too broad "*"
  b *:34 rw        b *:34 rwm     yes

Which is fine in some cases. Consider:

  A (default behavior: deny, exceptions allow access)
   \
    B (default behavior: deny, exceptions allow access)

In this case the full match makes sense: the new exception cannot add
more access than the parent allows. But this doesn't always work.
Consider:

  A (default behavior: allow, exceptions disallow access)
   \
    B (default behavior: deny, exceptions allow access)

In this case, a new exception in B shouldn't match any of the
exceptions in A; after all, you can't allow something that A forbids.
But consider this scenario:

  New exception    Existing in A    match?   outcome
  b 12:34 rw       b 12:34 r        no       exception is accepted

Because the new exception has an extra "w", it doesn't fully match, so
it will be added to B's exception list even though A denies part of it.

The same problem can happen during a file access check. Consider a
cgroup with allow as the default behavior:

  Access       Exception    match?
  b 12:34 rw   b 12:34 r    no

The access doesn't fully match the exception, so no exception is
considered to apply and the whole access is granted, even though the
exception explicitly denies part of it ("r").
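It helps to see the two matching semantics side by side. The following
standalone sketch is only illustrative and is not part of the patch;
the names full_match()/partial_match(), the WILDCARD macro and the
plain C types are invented for the example:

  #include <stdbool.h>

  typedef unsigned int u32;
  #define WILDCARD ((u32)~0)

  /* full match: the request is entirely covered by the exception */
  static bool full_match(u32 ex_major, u32 ex_minor, short ex_access,
                         u32 major, u32 minor, short access)
  {
      if (ex_major != WILDCARD && ex_major != major)
          return false;
      if (ex_minor != WILDCARD && ex_minor != minor)
          return false;
      /* the request must not carry an access bit the exception lacks */
      return !(access & ~ex_access);
  }

  /* partial match: the request and the exception merely intersect */
  static bool partial_match(u32 ex_major, u32 ex_minor, short ex_access,
                            u32 major, u32 minor, short access)
  {
      if (ex_major != WILDCARD && major != WILDCARD && ex_major != major)
          return false;
      if (ex_minor != WILDCARD && minor != WILDCARD && ex_minor != minor)
          return false;
      /* any shared access bit is enough */
      return access & ex_access;
  }

With the full-only check, access "b 12:34 rw" tested against exception
"b 12:34 r" yields no match and is therefore granted; partial_match()
returns true for the same pair, which is what a lookup against
deny-exceptions needs.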
To solve this problem, two new functions were created to match an
exception either fully or partially. In the example above, a partial
check is performed and produces a match, since at least the "b 12:34 r"
part of the "b 12:34 rw" access matches an exception.

Cc: cgroups@vger.kernel.org
Cc: Tejun Heo
Cc: Serge Hallyn
Cc: Li Zefan
Cc: stable@vger.kernel.org
Signed-off-by: Aristeu Rozanski
Signed-off-by: Tejun Heo
---
 security/device_cgroup.c | 170 +++++++++++++++++++++++++++++----------
 1 file changed, 126 insertions(+), 44 deletions(-)

diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 8365909f5f8c..b9048dc46b1a 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -306,57 +306,139 @@ static int devcgroup_seq_show(struct seq_file *m, void *v)
 }
 
 /**
- * may_access - verifies if a new exception is part of what is allowed
- *		by a dev cgroup based on the default policy +
- *		exceptions. This is used to make sure a child cgroup
- *		won't have more privileges than its parent or to
- *		verify if a certain access is allowed.
- * @dev_cgroup: dev cgroup to be tested against
- * @refex: new exception
- * @behavior: behavior of the exception
+ * match_exception	- iterates the exception list trying to match a rule
+ *			  based on type, major, minor and access type. It is
+ *			  considered a match if an exception is found that
+ *			  will contain the entire range of provided parameters.
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * returns: true in case it matches an exception completely
  */
-static bool may_access(struct dev_cgroup *dev_cgroup,
-		       struct dev_exception_item *refex,
-		       enum devcg_behavior behavior)
+static bool match_exception(struct list_head *exceptions, short type,
+			    u32 major, u32 minor, short access)
 {
 	struct dev_exception_item *ex;
+
+	list_for_each_entry_rcu(ex, exceptions, list) {
+		if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+			continue;
+		if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+			continue;
+		if (ex->major != ~0 && ex->major != major)
+			continue;
+		if (ex->minor != ~0 && ex->minor != minor)
+			continue;
+		/* provided access cannot have more than the exception rule */
+		if (access & (~ex->access))
+			continue;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * match_exception_partial - iterates the exception list trying to match a rule
+ *			     based on type, major, minor and access type. It is
+ *			     considered a match if an exception's range is
+ *			     found to contain *any* of the devices specified by
+ *			     provided parameters. This is used to make sure no
+ *			     extra access is being granted that is forbidden by
+ *			     any of the exception list.
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * returns: true in case the provided range mat matches an exception completely
+ */
+static bool match_exception_partial(struct list_head *exceptions, short type,
+				    u32 major, u32 minor, short access)
+{
+	struct dev_exception_item *ex;
+
+	list_for_each_entry_rcu(ex, exceptions, list) {
+		if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+			continue;
+		if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+			continue;
+		/*
+		 * We must be sure that both the exception and the provided
+		 * range aren't masking all devices
+		 */
+		if (ex->major != ~0 && major != ~0 && ex->major != major)
+			continue;
+		if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
+			continue;
+		/*
+		 * In order to make sure the provided range isn't matching
+		 * an exception, all its access bits shouldn't match the
+		 * exception's access bits
+		 */
+		if (!(access & ex->access))
+			continue;
+		return true;
+	}
+	return false;
+}
+
+/**
+ * verify_new_ex - verifies if a new exception is part of what is allowed
+ *		   by a dev cgroup based on the default policy +
+ *		   exceptions. This is used to make sure a child cgroup
+ *		   won't have more privileges than its parent
+ * @dev_cgroup: dev cgroup to be tested against
+ * @refex: new exception
+ * @behavior: behavior of the exception's dev_cgroup
+ */
+static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
+			  struct dev_exception_item *refex,
+			  enum devcg_behavior behavior)
+{
 	bool match = false;
 
 	rcu_lockdep_assert(rcu_read_lock_held() ||
 			   lockdep_is_held(&devcgroup_mutex),
-			   "device_cgroup::may_access() called without proper synchronization");
-
-	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
-		if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
-			continue;
-		if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
-			continue;
-		if (ex->major != ~0 && ex->major != refex->major)
-			continue;
-		if (ex->minor != ~0 && ex->minor != refex->minor)
-			continue;
-		if (refex->access & (~ex->access))
-			continue;
-		match = true;
-		break;
-	}
+			   "device_cgroup:verify_new_ex called without proper synchronization");
 
 	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
 		if (behavior == DEVCG_DEFAULT_ALLOW) {
-			/* the exception will deny access to certain devices */
+			/*
+			 * new exception in the child doesn't matter, only
+			 * adding extra restrictions
+			 */
 			return true;
 		} else {
-			/* the exception will allow access to certain devices */
+			/*
+			 * new exception in the child will add more devices
+			 * that can be acessed, so it can't match any of
+			 * parent's exceptions, even slightly
+			 */
+			match = match_exception_partial(&dev_cgroup->exceptions,
+							refex->type,
+							refex->major,
+							refex->minor,
+							refex->access);
+
 			if (match)
-				/*
-				 * a new exception allowing access shouldn't
-				 * match an parent's exception
-				 */
 				return false;
 			return true;
 		}
 	} else {
-		/* only behavior == DEVCG_DEFAULT_DENY allowed here */
+		/*
+		 * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
+		 * the new exception will add access to more devices and must
+		 * be contained completely in an parent's exception to be
+		 * allowed
+		 */
+		match = match_exception(&dev_cgroup->exceptions, refex->type,
+					refex->major, refex->minor,
+					refex->access);
+
 		if (match)
 			/* parent has an exception that matches the proposed */
 			return true;
@@ -378,7 +460,7 @@ static int parent_has_perm(struct dev_cgroup *childcg,
 	if (!parent)
 		return 1;
 
-	return may_access(parent, ex, childcg->behavior);
+	return verify_new_ex(parent, ex, childcg->behavior);
 }
 
 /**
@@ -704,18 +786,18 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
 					short access)
 {
 	struct dev_cgroup *dev_cgroup;
-	struct dev_exception_item ex;
-	int rc;
-
-	memset(&ex, 0, sizeof(ex));
-	ex.type = type;
-	ex.major = major;
-	ex.minor = minor;
-	ex.access = access;
+	bool rc;
 
 	rcu_read_lock();
 	dev_cgroup = task_devcgroup(current);
-	rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
+	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
+		/* Can't match any of the exceptions, even partially */
+		rc = !match_exception_partial(&dev_cgroup->exceptions,
+					      type, major, minor, access);
+	else
+		/* Need to match completely one exception to be allowed */
+		rc = match_exception(&dev_cgroup->exceptions, type, major,
+				     minor, access);
 	rcu_read_unlock();
 
 	if (!rc)

From f5f3cf6f7e49b9529fc00a2c4629fa92cf2755fe Mon Sep 17 00:00:00 2001
From: Aristeu Rozanski
Date: Thu, 24 Apr 2014 15:33:21 -0400
Subject: [PATCH 2/7] device_cgroup: fix the comment format for recently added
 functions

Move the more extensive explanations to the end of the comments.

Cc: Li Zefan
Signed-off-by: Aristeu Rozanski
Acked-by: Serge Hallyn
Signed-off-by: Tejun Heo
---
 security/device_cgroup.c | 33 ++++++++++++++++-----------------
 1 file changed, 16 insertions(+), 17 deletions(-)

diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index b9048dc46b1a..6b1266dd92bb 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -306,17 +306,17 @@ static int devcgroup_seq_show(struct seq_file *m, void *v)
 }
 
 /**
- * match_exception	- iterates the exception list trying to match a rule
- *			  based on type, major, minor and access type. It is
- *			  considered a match if an exception is found that
- *			  will contain the entire range of provided parameters.
+ * match_exception	- iterates the exception list trying to find a complete match
  * @exceptions: list of exceptions
  * @type: device type (DEV_BLOCK or DEV_CHAR)
  * @major: device file major number, ~0 to match all
  * @minor: device file minor number, ~0 to match all
  * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
  *
- * returns: true in case it matches an exception completely
+ * It is considered a complete match if an exception is found that will
+ * contain the entire range of provided parameters.
+ *
+ * Return: true in case it matches an exception completely
  */
 static bool match_exception(struct list_head *exceptions, short type,
 			    u32 major, u32 minor, short access)
@@ -341,20 +341,19 @@ static bool match_exception(struct list_head *exceptions, short type,
 }
 
 /**
- * match_exception_partial - iterates the exception list trying to match a rule
- *			     based on type, major, minor and access type. It is
- *			     considered a match if an exception's range is
- *			     found to contain *any* of the devices specified by
- *			     provided parameters. This is used to make sure no
- *			     extra access is being granted that is forbidden by
- *			     any of the exception list.
+ * match_exception_partial - iterates the exception list trying to find a partial match
  * @exceptions: list of exceptions
  * @type: device type (DEV_BLOCK or DEV_CHAR)
  * @major: device file major number, ~0 to match all
  * @minor: device file minor number, ~0 to match all
  * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
  *
- * returns: true in case the provided range mat matches an exception completely
+ * It is considered a partial match if an exception's range is found to
+ * contain *any* of the devices specified by provided parameters. This is
+ * used to make sure no extra access is being granted that is forbidden by
+ * any of the exception list.
+ *
+ * Return: true in case the provided range mat matches an exception completely
  */
 static bool match_exception_partial(struct list_head *exceptions, short type,
 				    u32 major, u32 minor, short access)
@@ -387,13 +386,13 @@ static bool match_exception_partial(struct list_head *exceptions, short type,
 }
 
 /**
- * verify_new_ex - verifies if a new exception is part of what is allowed
- *		   by a dev cgroup based on the default policy +
- *		   exceptions. This is used to make sure a child cgroup
- *		   won't have more privileges than its parent
+ * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
  * @dev_cgroup: dev cgroup to be tested against
  * @refex: new exception
  * @behavior: behavior of the exception's dev_cgroup
+ *
+ * This is used to make sure a child cgroup won't have more privileges
+ * than its parent
  */
 static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
 			  struct dev_exception_item *refex,

From d2c2b11cfa134f4fbdcc34088824da26a084d8de Mon Sep 17 00:00:00 2001
From: Aristeu Rozanski
Date: Mon, 5 May 2014 11:18:59 -0400
Subject: [PATCH 3/7] device_cgroup: check if exception removal is allowed

When the device cgroup hierarchy was introduced by commit bd2953ebbb53
("devcg: propagate local changes down the hierarchy"), a specific case
was overlooked. Consider the hierarchy below:

  A  default policy: ALLOW, exceptions will deny access
   \
    B  default policy: ALLOW, exceptions will deny access

There's no need to verify when a new exception is added to B, because
in this case exceptions only deny access to further devices, which is
always fine: the hierarchy in device cgroup only has to make sure B
won't have more access than A.

But when an exception is removed (by writing devices.allow), it isn't
checked whether the user is in fact removing an exception inherited
from A, thus giving B more access. Example:

  # echo 'a' >A/devices.allow
  # echo 'c 1:3 rw' >A/devices.deny
  # echo $$ >A/B/tasks
  # echo >/dev/null
  -bash: /dev/null: Operation not permitted
  # echo 'c 1:3 w' >A/B/devices.allow
  # echo >/dev/null
  #

This shouldn't be allowed. This patch fixes it by never allowing an
exception to be removed in this case if the exception is partially or
fully present in the parent.
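With the removal check in place, the same sequence should now fail at
the devices.allow write instead. Illustrative transcript only (the
exact shell error text may vary):

  # echo 'a' >A/devices.allow
  # echo 'c 1:3 rw' >A/devices.deny
  # echo $$ >A/B/tasks
  # echo 'c 1:3 w' >A/B/devices.allow
  -bash: echo: write error: Operation not permitted
  # echo >/dev/null
  -bash: /dev/null: Operation not permitted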
v3: missing '*' in function description
v2: improved log message and formatting fixes

Cc: cgroups@vger.kernel.org
Cc: Li Zefan
Cc: stable@vger.kernel.org
Signed-off-by: Aristeu Rozanski
Acked-by: Serge Hallyn
Signed-off-by: Tejun Heo
---
 security/device_cgroup.c | 41 +++++++++++++++++++++++++++++++++++++---
 1 file changed, 38 insertions(+), 3 deletions(-)

diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 6b1266dd92bb..9134dbf70d3e 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -462,6 +462,37 @@ static int parent_has_perm(struct dev_cgroup *childcg,
 	return verify_new_ex(parent, ex, childcg->behavior);
 }
 
+/**
+ * parent_allows_removal - verify if it's ok to remove an exception
+ * @childcg: child cgroup from where the exception will be removed
+ * @ex: exception being removed
+ *
+ * When removing an exception in cgroups with default ALLOW policy, it must
+ * be checked if removing it will give the child cgroup more access than the
+ * parent.
+ *
+ * Return: true if it's ok to remove exception, false otherwise
+ */
+static bool parent_allows_removal(struct dev_cgroup *childcg,
+				  struct dev_exception_item *ex)
+{
+	struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
+
+	if (!parent)
+		return true;
+
+	/* It's always allowed to remove access to devices */
+	if (childcg->behavior == DEVCG_DEFAULT_DENY)
+		return true;
+
+	/*
+	 * Make sure you're not removing part or a whole exception existing in
+	 * the parent cgroup
+	 */
+	return !match_exception_partial(&parent->exceptions, ex->type,
+					ex->major, ex->minor, ex->access);
+}
+
 /**
  * may_allow_all - checks if it's possible to change the behavior to
  *		   allow based on parent's rules.
@@ -697,17 +728,21 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 
 	switch (filetype) {
 	case DEVCG_ALLOW:
-		if (!parent_has_perm(devcgroup, &ex))
-			return -EPERM;
 		/*
 		 * If the default policy is to allow by default, try to remove
 		 * an matching exception instead. And be silent about it: we
 		 * don't want to break compatibility
 		 */
 		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+			/* Check if the parent allows removing it first */
+			if (!parent_allows_removal(devcgroup, &ex))
+				return -EPERM;
 			dev_exception_rm(devcgroup, &ex);
-			return 0;
+			break;
 		}
+
+		if (!parent_has_perm(devcgroup, &ex))
+			return -EPERM;
 		rc = dev_exception_add(devcgroup, &ex);
 		break;
 	case DEVCG_DENY:

From 36c38fb7144aa941dc072ba8f58b2dbe509c0345 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Mon, 5 May 2014 12:37:30 -0400
Subject: [PATCH 4/7] blkcg: use trylock on blkcg_pol_mutex in
 blkcg_reset_stats()

During the recent conversion of cgroup to kernfs, cgroup_tree_mutex,
which nests above both the kernfs s_active protection and cgroup_mutex,
was added to synchronize cgroup file type operations, as cgroup_mutex
needed to be grabbed from some file operations and thus couldn't be put
above the s_active protection. While this arrangement mostly worked for
cgroup, it triggered the following lockdep warning.

  ======================================================
  [ INFO: possible circular locking dependency detected ]
  3.15.0-rc3-next-20140430-sasha-00016-g4e281fa-dirty #429 Tainted: G W
  -------------------------------------------------------
  trinity-c173/9024 is trying to acquire lock:
   (blkcg_pol_mutex){+.+.+.}, at: blkcg_reset_stats (include/linux/spinlock.h:328 block/blk-cgroup.c:455)

  but task is already holding lock:
   (s_active#89){++++.+}, at: kernfs_fop_write (fs/kernfs/file.c:283)

  which lock already depends on the new lock.
  the existing dependency chain (in reverse order) is:

  -> #2 (s_active#89){++++.+}:
         lock_acquire (arch/x86/include/asm/current.h:14 kernel/locking/lockdep.c:3602)
         __kernfs_remove (arch/x86/include/asm/atomic.h:27 fs/kernfs/dir.c:352 fs/kernfs/dir.c:1024)
         kernfs_remove_by_name_ns (fs/kernfs/dir.c:1219)
         cgroup_addrm_files (include/linux/kernfs.h:427 kernel/cgroup.c:1074 kernel/cgroup.c:2899)
         cgroup_clear_dir (kernel/cgroup.c:1092 (discriminator 2))
         rebind_subsystems (kernel/cgroup.c:1144)
         cgroup_setup_root (kernel/cgroup.c:1568)
         cgroup_mount (kernel/cgroup.c:1716)
         mount_fs (fs/super.c:1094)
         vfs_kern_mount (fs/namespace.c:899)
         do_mount (fs/namespace.c:2238 fs/namespace.c:2561)
         SyS_mount (fs/namespace.c:2758 fs/namespace.c:2729)
         tracesys (arch/x86/kernel/entry_64.S:746)

  -> #1 (cgroup_tree_mutex){+.+.+.}:
         lock_acquire (arch/x86/include/asm/current.h:14 kernel/locking/lockdep.c:3602)
         mutex_lock_nested (kernel/locking/mutex.c:486 kernel/locking/mutex.c:587)
         cgroup_add_cftypes (include/linux/list.h:76 kernel/cgroup.c:3040)
         blkcg_policy_register (block/blk-cgroup.c:1106)
         throtl_init (block/blk-throttle.c:1694)
         do_one_initcall (init/main.c:789)
         kernel_init_freeable (init/main.c:854 init/main.c:863 init/main.c:882 init/main.c:1003)
         kernel_init (init/main.c:935)
         ret_from_fork (arch/x86/kernel/entry_64.S:552)

  -> #0 (blkcg_pol_mutex){+.+.+.}:
         __lock_acquire (kernel/locking/lockdep.c:1840 kernel/locking/lockdep.c:1945 kernel/locking/lockdep.c:2131 kernel/locking/lockdep.c:3182)
         lock_acquire (arch/x86/include/asm/current.h:14 kernel/locking/lockdep.c:3602)
         mutex_lock_nested (kernel/locking/mutex.c:486 kernel/locking/mutex.c:587)
         blkcg_reset_stats (include/linux/spinlock.h:328 block/blk-cgroup.c:455)
         cgroup_file_write (kernel/cgroup.c:2714)
         kernfs_fop_write (fs/kernfs/file.c:295)
         vfs_write (fs/read_write.c:532)
         SyS_write (fs/read_write.c:584 fs/read_write.c:576)
         tracesys (arch/x86/kernel/entry_64.S:746)

  other info that might help us debug this:

  Chain exists of:
    blkcg_pol_mutex --> cgroup_tree_mutex --> s_active#89

   Possible unsafe locking scenario:

         CPU0                    CPU1
         ----                    ----
    lock(s_active#89);
                                 lock(cgroup_tree_mutex);
                                 lock(s_active#89);
    lock(blkcg_pol_mutex);

   *** DEADLOCK ***

  4 locks held by trinity-c173/9024:
   #0: (&f->f_pos_lock){+.+.+.}, at: __fdget_pos (fs/file.c:714)
   #1: (sb_writers#18){.+.+.+}, at: vfs_write (include/linux/fs.h:2255 fs/read_write.c:530)
   #2: (&of->mutex){+.+.+.}, at: kernfs_fop_write (fs/kernfs/file.c:283)
   #3: (s_active#89){++++.+}, at: kernfs_fop_write (fs/kernfs/file.c:283)

  stack backtrace:
  CPU: 3 PID: 9024 Comm: trinity-c173 Tainted: G W 3.15.0-rc3-next-20140430-sasha-00016-g4e281fa-dirty #429
   ffffffff919687b0 ffff8805f6373bb8 ffffffff8e52cdbb 0000000000000002
   ffffffff919d8400 ffff8805f6373c08 ffffffff8e51fb88 0000000000000004
   ffff8805f6373c98 ffff8805f6373c08 ffff88061be70d98 ffff88061be70dd0
  Call Trace:
   dump_stack (lib/dump_stack.c:52)
   print_circular_bug (kernel/locking/lockdep.c:1216)
   __lock_acquire (kernel/locking/lockdep.c:1840 kernel/locking/lockdep.c:1945 kernel/locking/lockdep.c:2131 kernel/locking/lockdep.c:3182)
   lock_acquire (arch/x86/include/asm/current.h:14 kernel/locking/lockdep.c:3602)
   mutex_lock_nested (kernel/locking/mutex.c:486 kernel/locking/mutex.c:587)
   blkcg_reset_stats (include/linux/spinlock.h:328 block/blk-cgroup.c:455)
   cgroup_file_write (kernel/cgroup.c:2714)
   kernfs_fop_write (fs/kernfs/file.c:295)
   vfs_write (fs/read_write.c:532)
   SyS_write (fs/read_write.c:584 fs/read_write.c:576)

This is a highly unlikely but valid circular dependency between
"echo 1 > blkcg.reset_stats" and cfq module [un]loading. cgroup is
going through further locking updates which will remove this
complication, but for now let's use trylock on blkcg_pol_mutex and
retry the file operation if the trylock fails.

Signed-off-by: Tejun Heo
Reported-by: Sasha Levin
References: http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
---
 block/blk-cgroup.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e4a4145926f6..1039fb9ff5f5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -451,7 +451,20 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 	struct blkcg_gq *blkg;
 	int i;
 
-	mutex_lock(&blkcg_pol_mutex);
+	/*
+	 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
+	 * which ends up putting cgroup's internal cgroup_tree_mutex under
+	 * it; however, cgroup_tree_mutex is nested above cgroup file
+	 * active protection and grabbing blkcg_pol_mutex from a cgroup
+	 * file operation creates a possible circular dependency. cgroup
+	 * internal locking is planned to go through further simplification
+	 * and this issue should go away soon. For now, let's trylock
+	 * blkcg_pol_mutex and restart the write on failure.
+	 *
+	 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
+	 */
+	if (!mutex_trylock(&blkcg_pol_mutex))
+		return restart_syscall();
 	spin_lock_irq(&blkcg->lock);
 
 	/*

From 5024ae29cd285ce9e736776414da645d3a91828c Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 7 May 2014 21:31:17 -0400
Subject: [PATCH 5/7] cgroup: introduce task_css_is_root()

Determining the css of a task usually requires the RCU read lock, as
that's the only thing which keeps the returned css accessible until
its reference is acquired; however, testing whether a task belongs to
the root can be performed without dereferencing the returned css by
comparing the returned pointer against the root one in init_css_set[],
which never changes.

Implement task_css_is_root() which can be invoked in any context. This
will be used by the scheduled cgroup_freezer change.

v2: cgroup no longer supports modular controllers. No need to export
    init_css_set. Pointed out by Li.

Signed-off-by: Tejun Heo
Acked-by: Li Zefan
---
 include/linux/cgroup.h | 15 +++++++++++++++
 kernel/cgroup.c        |  2 +-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c2515851c1aa..d60904b9e505 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -473,6 +473,7 @@ struct cftype {
 };
 
 extern struct cgroup_root cgrp_dfl_root;
+extern struct css_set init_css_set;
 
 static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
 {
@@ -700,6 +701,20 @@ static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
 	return task_css_check(task, subsys_id, false);
 }
 
+/**
+ * task_css_is_root - test whether a task belongs to the root css
+ * @task: the target task
+ * @subsys_id: the target subsystem ID
+ *
+ * Test whether @task belongs to the root css on the specified subsystem.
+ * May be invoked in any context.
+ */
+static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
+{
+	return task_css_check(task, subsys_id, true) ==
+		init_css_set.subsys[subsys_id];
+}
+
 static inline struct cgroup *task_cgroup(struct task_struct *task,
 					 int subsys_id)
 {
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 11a03d67635a..3f1ca934a237 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -348,7 +348,7 @@ struct cgrp_cset_link {
  * reference-counted, to improve performance when child cgroups
  * haven't been created.
  */
-static struct css_set init_css_set = {
+struct css_set init_css_set = {
 	.refcount		= ATOMIC_INIT(1),
 	.cgrp_links		= LIST_HEAD_INIT(init_css_set.cgrp_links),
 	.tasks			= LIST_HEAD_INIT(init_css_set.tasks),

From e5ced8ebb10c20a3b349bd798b69ccabd3b25d21 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 7 May 2014 21:31:17 -0400
Subject: [PATCH 6/7] cgroup_freezer: replace freezer->lock with freezer_mutex

After 96d365e0b86e ("cgroup: make css_set_lock a rwsem and rename it
to css_set_rwsem"), css task iterators require a sleepable context, as
they may block on css_set_rwsem. I missed that cgroup_freezer was
iterating tasks under the IRQ-safe spinlock freezer->lock. This leads
to errors like the following on freezer state reads and transitions.

  BUG: sleeping function called from invalid context at /work/os/work/kernel/locking/rwsem.c:20
  in_atomic(): 0, irqs_disabled(): 0, pid: 462, name: bash
  5 locks held by bash/462:
   #0: (sb_writers#7){.+.+.+}, at: [] vfs_write+0x1a3/0x1c0
   #1: (&of->mutex){+.+.+.}, at: [] kernfs_fop_write+0xbb/0x170
   #2: (s_active#70){.+.+.+}, at: [] kernfs_fop_write+0xc3/0x170
   #3: (freezer_mutex){+.+...}, at: [] freezer_write+0x61/0x1e0
   #4: (rcu_read_lock){......}, at: [] freezer_write+0x53/0x1e0
  Preemption disabled at:[] console_unlock+0x1e4/0x460
  CPU: 3 PID: 462 Comm: bash Not tainted 3.15.0-rc1-work+ #10
  Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
   ffff88000916a6d0 ffff88000e0a3da0 ffffffff81cf8c96 0000000000000000
   ffff88000e0a3dc8 ffffffff810cf4f2 ffffffff82388040 ffff880013aaf740
   0000000000000002 ffff88000e0a3de8 ffffffff81d05974 0000000000000246
  Call Trace:
   [] dump_stack+0x4e/0x7a
   [] __might_sleep+0x162/0x260
   [] down_read+0x24/0x60
   [] css_task_iter_start+0x27/0x70
   [] freezer_apply_state+0x5d/0x130
   [] freezer_write+0xf6/0x1e0
   [] cgroup_file_write+0xd8/0x230
   [] kernfs_fop_write+0xe7/0x170
   [] vfs_write+0xb6/0x1c0
   [] SyS_write+0x4d/0xc0
   [] system_call_fastpath+0x16/0x1b

freezer->lock used to be used in hot paths but that time is long gone
and there's no reason for the lock to be an IRQ-safe spinlock or even
per-cgroup. In fact, given that a cgroup may contain a large number of
tasks, it's not a good idea to iterate over them while holding an
IRQ-safe spinlock.

Let's simplify locking by replacing the per-cgroup freezer->lock with
the global freezer_mutex. This also makes the comments explaining the
intricacies of policy inheritance and the locking around it
unnecessary, as the states are now protected by a common mutex.

The conversion is mostly straightforward. The following points are
worth mentioning.

* freezer_css_online() no longer needs double locking.

* freezer_attach() now performs propagation simply while holding
  freezer_mutex. The race with update_if_frozen() no longer exists and
  the comment is removed.

* freezer_fork() now tests whether the task is in the root cgroup
  using the new task_css_is_root() without doing rcu_read_lock/unlock().
  If not, it grabs freezer_mutex and performs the operation.
* freezer_read() and freezer_change_state() grab freezer_mutex across
  the whole operation and pin the css while iterating, so that each
  descendant is processed in a sleepable context.

Fixes: 96d365e0b86e ("cgroup: make css_set_lock a rwsem and rename it to css_set_rwsem")
Signed-off-by: Tejun Heo
Acked-by: Li Zefan
---
 kernel/cgroup_freezer.c | 112 +++++++++++++++++-----------------------
 1 file changed, 46 insertions(+), 66 deletions(-)

diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 2bc4a2256444..12ead0b766ee 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
+#include <linux/mutex.h>
 
 /*
  * A cgroup is freezing if any FREEZING flags are set.  FREEZING_SELF is
@@ -42,9 +43,10 @@ enum freezer_state_flags {
 struct freezer {
 	struct cgroup_subsys_state	css;
 	unsigned int			state;
-	spinlock_t			lock;
 };
 
+static DEFINE_MUTEX(freezer_mutex);
+
 static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
 {
 	return css ? container_of(css, struct freezer, css) : NULL;
@@ -93,7 +95,6 @@ freezer_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (!freezer)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock_init(&freezer->lock);
 	return &freezer->css;
 }
 
@@ -110,14 +111,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
 	struct freezer *freezer = css_freezer(css);
 	struct freezer *parent = parent_freezer(freezer);
 
-	/*
-	 * The following double locking and freezing state inheritance
-	 * guarantee that @cgroup can never escape ancestors' freezing
-	 * states.  See css_for_each_descendant_pre() for details.
-	 */
-	if (parent)
-		spin_lock_irq(&parent->lock);
-	spin_lock_nested(&freezer->lock, SINGLE_DEPTH_NESTING);
+	mutex_lock(&freezer_mutex);
 
 	freezer->state |= CGROUP_FREEZER_ONLINE;
 
@@ -126,10 +120,7 @@ static int freezer_css_online(struct cgroup_subsys_state *css)
 		atomic_inc(&system_freezing_cnt);
 	}
 
-	spin_unlock(&freezer->lock);
-	if (parent)
-		spin_unlock_irq(&parent->lock);
-
+	mutex_unlock(&freezer_mutex);
 	return 0;
 }
 
@@ -144,14 +135,14 @@ static void freezer_css_offline(struct cgroup_subsys_state *css)
 {
 	struct freezer *freezer = css_freezer(css);
 
-	spin_lock_irq(&freezer->lock);
+	mutex_lock(&freezer_mutex);
 
 	if (freezer->state & CGROUP_FREEZING)
 		atomic_dec(&system_freezing_cnt);
 
 	freezer->state = 0;
 
-	spin_unlock_irq(&freezer->lock);
+	mutex_unlock(&freezer_mutex);
 }
 
 static void freezer_css_free(struct cgroup_subsys_state *css)
@@ -175,7 +166,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	struct task_struct *task;
 	bool clear_frozen = false;
 
-	spin_lock_irq(&freezer->lock);
+	mutex_lock(&freezer_mutex);
 
 	/*
	 * Make the new tasks conform to the current state of @new_css.
@@ -197,21 +188,13 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 		}
 	}
 
-	spin_unlock_irq(&freezer->lock);
-
-	/*
-	 * Propagate FROZEN clearing upwards.  We may race with
-	 * update_if_frozen(), but as long as both work bottom-up, either
-	 * update_if_frozen() sees child's FROZEN cleared or we clear the
-	 * parent's FROZEN later.  No parent w/ !FROZEN children can be
-	 * left FROZEN.
-	 */
+	/* propagate FROZEN clearing upwards */
 	while (clear_frozen && (freezer = parent_freezer(freezer))) {
-		spin_lock_irq(&freezer->lock);
 		freezer->state &= ~CGROUP_FROZEN;
 		clear_frozen = freezer->state & CGROUP_FREEZING;
-		spin_unlock_irq(&freezer->lock);
 	}
+
+	mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -228,9 +211,6 @@ static void freezer_fork(struct task_struct *task)
 {
 	struct freezer *freezer;
 
-	rcu_read_lock();
-	freezer = task_freezer(task);
-
 	/*
 	 * The root cgroup is non-freezable, so we can skip locking the
 	 * freezer.  This is safe regardless of race with task migration.
@@ -238,24 +218,18 @@ static void freezer_fork(struct task_struct *task)
 	 * to do.  If we lost and root is the new cgroup, noop is still the
 	 * right thing to do.
 	 */
-	if (!parent_freezer(freezer))
-		goto out;
+	if (task_css_is_root(task, freezer_cgrp_id))
+		return;
 
-	/*
-	 * Grab @freezer->lock and freeze @task after verifying @task still
-	 * belongs to @freezer and it's freezing.  The former is for the
-	 * case where we have raced against task migration and lost and
-	 * @task is already in a different cgroup which may not be frozen.
-	 * This isn't strictly necessary as freeze_task() is allowed to be
-	 * called spuriously but let's do it anyway for, if nothing else,
-	 * documentation.
-	 */
-	spin_lock_irq(&freezer->lock);
-	if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING))
+	mutex_lock(&freezer_mutex);
+	rcu_read_lock();
+
+	freezer = task_freezer(task);
+	if (freezer->state & CGROUP_FREEZING)
 		freeze_task(task);
-	spin_unlock_irq(&freezer->lock);
-out:
+
 	rcu_read_unlock();
+	mutex_unlock(&freezer_mutex);
 }
 
 /**
@@ -281,22 +255,22 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
 	struct css_task_iter it;
 	struct task_struct *task;
 
-	WARN_ON_ONCE(!rcu_read_lock_held());
-
-	spin_lock_irq(&freezer->lock);
+	lockdep_assert_held(&freezer_mutex);
 
 	if (!(freezer->state & CGROUP_FREEZING) ||
 	    (freezer->state & CGROUP_FROZEN))
-		goto out_unlock;
+		return;
 
 	/* are all (live) children frozen? */
+	rcu_read_lock();
 	css_for_each_child(pos, css) {
 		struct freezer *child = css_freezer(pos);
 
 		if ((child->state & CGROUP_FREEZER_ONLINE) &&
 		    !(child->state & CGROUP_FROZEN))
-			goto out_unlock;
+			return;
 	}
+	rcu_read_unlock();
 
 	/* are all tasks frozen? */
 	css_task_iter_start(css, &it);
@@ -317,21 +291,29 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
 	freezer->state |= CGROUP_FROZEN;
 out_iter_end:
 	css_task_iter_end(&it);
-out_unlock:
-	spin_unlock_irq(&freezer->lock);
 }
 
 static int freezer_read(struct seq_file *m, void *v)
 {
 	struct cgroup_subsys_state *css = seq_css(m), *pos;
 
+	mutex_lock(&freezer_mutex);
 	rcu_read_lock();
 
 	/* update states bottom-up */
-	css_for_each_descendant_post(pos, css)
+	css_for_each_descendant_post(pos, css) {
+		if (!css_tryget(pos))
+			continue;
+		rcu_read_unlock();
+
 		update_if_frozen(pos);
+
+		rcu_read_lock();
+		css_put(pos);
+	}
+
 	rcu_read_unlock();
+	mutex_unlock(&freezer_mutex);
 
 	seq_puts(m, freezer_state_strs(css_freezer(css)->state));
 	seq_putc(m, '\n');
@@ -373,7 +355,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
 				unsigned int state)
 {
 	/* also synchronizes against task migration, see freezer_attach() */
-	lockdep_assert_held(&freezer->lock);
+	lockdep_assert_held(&freezer_mutex);
 
 	if (!(freezer->state & CGROUP_FREEZER_ONLINE))
 		return;
@@ -414,31 +396,29 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
 	 * descendant will try to inherit its parent's FREEZING state as
 	 * CGROUP_FREEZING_PARENT.
 	 */
+	mutex_lock(&freezer_mutex);
 	rcu_read_lock();
 	css_for_each_descendant_pre(pos, &freezer->css) {
 		struct freezer *pos_f = css_freezer(pos);
 		struct freezer *parent = parent_freezer(pos_f);
 
-		spin_lock_irq(&pos_f->lock);
+		if (!css_tryget(pos))
+			continue;
+		rcu_read_unlock();
 
-		if (pos_f == freezer) {
+		if (pos_f == freezer)
 			freezer_apply_state(pos_f, freeze,
 					    CGROUP_FREEZING_SELF);
-		} else {
-			/*
-			 * Our update to @parent->state is already visible
-			 * which is all we need.  No need to lock @parent.
-			 * For more info on synchronization, see
-			 * freezer_post_create().
-			 */
+		else
 			freezer_apply_state(pos_f,
 					    parent->state & CGROUP_FREEZING,
 					    CGROUP_FREEZING_PARENT);
-		}
 
-		spin_unlock_irq(&pos_f->lock);
+		rcu_read_lock();
+		css_put(pos);
 	}
 	rcu_read_unlock();
+	mutex_unlock(&freezer_mutex);
 }
 
 static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,

From 36e9d2ebcc15d029b33f42a36146ab5a5bcfcfe7 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 13 May 2014 11:28:30 -0400
Subject: [PATCH 7/7] cgroup: fix rcu_read_lock() leak in update_if_frozen()

While updating cgroup_freezer locking, 68fafb77d827 ("cgroup_freezer:
replace freezer->lock with freezer_mutex") introduced a bug in
update_if_frozen() where it returns with rcu_read_lock() held. Fix it
by adding rcu_read_unlock() before returning.

Signed-off-by: Tejun Heo
Reported-by: kbuild test robot
---
 kernel/cgroup_freezer.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 12ead0b766ee..345628c78b5b 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -267,8 +267,10 @@ static void update_if_frozen(struct cgroup_subsys_state *css)
 		struct freezer *child = css_freezer(pos);
 
 		if ((child->state & CGROUP_FREEZER_ONLINE) &&
-		    !(child->state & CGROUP_FROZEN))
+		    !(child->state & CGROUP_FROZEN)) {
+			rcu_read_unlock();
 			return;
+		}
 	}
 	rcu_read_unlock();
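The pattern behind this fix generalizes: every early return added
inside a read-side critical section must drop the lock on that path as
well. A minimal standalone analogy, using a pthread mutex in place of
the RCU read lock (all names below are hypothetical, not kernel code):

  #include <pthread.h>
  #include <stdbool.h>

  static pthread_mutex_t iter_lock = PTHREAD_MUTEX_INITIALIZER;

  /* returns true iff every entry is frozen; must never leak iter_lock */
  static bool all_frozen(const bool *frozen, int n)
  {
      pthread_mutex_lock(&iter_lock);
      for (int i = 0; i < n; i++) {
          if (!frozen[i]) {
              /* the counterpart of the added rcu_read_unlock() */
              pthread_mutex_unlock(&iter_lock);
              return false;
          }
      }
      pthread_mutex_unlock(&iter_lock);
      return true;
  }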