mnt: Don't propagate unmounts to locked mounts

If the first mount in a shared subtree is locked, don't unmount the
shared subtree.

This is ensured by walking through the mounts, parents before children,
and marking a mount as unmountable if it is not locked, or if it is
locked but its parent is marked.

This allows a recursive mount detach to propagate through a set of
mounts when unmounting them would not reveal what is under any locked
mount.

Cc: stable@vger.kernel.org
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Author: Eric W. Biederman <ebiederm@xmission.com>
Date:   2015-01-05 13:38:04 -06:00
Commit: 0c56fe3142 (parent 5d88457eb5)
2 changed files with 30 additions and 3 deletions
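
The marking rule described in the message can be illustrated with a small
userspace sketch. This is only a toy model of the two passes (mark parents
before children, then unmount only what was marked), assuming a simplified
mount tree; the names below (toy_mount, mark_candidates, umount_marked) are
invented for the example and are not kernel interfaces.

/*
 * Toy model of the mark-then-unmount idea; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_mount {
        const char *name;
        struct toy_mount *parent;       /* what this mount is stacked on */
        bool locked;                    /* stand-in for MNT_LOCKED */
        bool marked;                    /* stand-in for MNT_MARKED */
        bool mounted;
};

/*
 * Pass 1, parents before children: a mount may be unmounted if it is not
 * locked, or if it is locked but its parent is already marked (removing it
 * then reveals nothing that the locked flag was meant to hide).
 */
static void mark_candidates(struct toy_mount **subtree, int n)
{
        for (int i = 0; i < n; i++) {
                struct toy_mount *m = subtree[i];
                bool parent_going = m->parent && m->parent->marked;

                if (!m->locked || parent_going)
                        m->marked = true;
        }
}

/* Pass 2: unmount only what pass 1 marked. */
static void umount_marked(struct toy_mount **subtree, int n)
{
        for (int i = n - 1; i >= 0; i--) {      /* children before parents */
                if (subtree[i]->marked) {
                        subtree[i]->marked = false;
                        subtree[i]->mounted = false;
                }
        }
}

int main(void)
{
        /* "host" is outside the detached set and is never marked. */
        struct toy_mount host = { "host",    NULL,  false, false, true };
        struct toy_mount top  = { "mnt",     &host, true,  false, true };
        struct toy_mount sub  = { "mnt/sub", &top,  true,  false, true };

        /* Propagated copies to consider, ordered parents first. */
        struct toy_mount *subtree[] = { &top, &sub };

        mark_candidates(subtree, 2);
        umount_marked(subtree, 2);

        for (int i = 0; i < 2; i++)
                printf("%-8s %s\n", subtree[i]->name,
                       subtree[i]->mounted ? "still mounted" : "unmounted");
        return 0;
}

In this scenario the topmost propagated copy is locked and its parent is
staying, so nothing in the propagated subtree gets marked and the locked
mount keeps hiding whatever sits beneath it. If top.locked were false, the
top copy would be marked first and its locked child would then be marked as
well, letting the whole detach propagate.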

fs/pnode.c

@@ -381,6 +381,26 @@ void propagate_mount_unlock(struct mount *mnt)
 	}
 }
 
+/*
+ * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
+ */
+static void mark_umount_candidates(struct mount *mnt)
+{
+	struct mount *parent = mnt->mnt_parent;
+	struct mount *m;
+
+	BUG_ON(parent == mnt);
+
+	for (m = propagation_next(parent, parent); m;
+			m = propagation_next(m, parent)) {
+		struct mount *child = __lookup_mnt_last(&m->mnt,
+						mnt->mnt_mountpoint);
+		if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
+			SET_MNT_MARK(child);
+		}
+	}
+}
+
 /*
  * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
  * parent propagates to.
@@ -398,10 +418,13 @@ static void __propagate_umount(struct mount *mnt)
 		struct mount *child = __lookup_mnt_last(&m->mnt,
 						mnt->mnt_mountpoint);
 		/*
-		 * umount the child only if the child has no
-		 * other children
+		 * umount the child only if the child has no children
+		 * and the child is marked safe to unmount.
 		 */
-		if (child && list_empty(&child->mnt_mounts)) {
+		if (!child || !IS_MNT_MARKED(child))
+			continue;
+		CLEAR_MNT_MARK(child);
+		if (list_empty(&child->mnt_mounts)) {
 			list_del_init(&child->mnt_child);
 			child->mnt.mnt_flags |= MNT_UMOUNT;
 			list_move_tail(&child->mnt_list, &mnt->mnt_list);
@@ -420,6 +443,9 @@ int propagate_umount(struct list_head *list)
 {
 	struct mount *mnt;
 
+	list_for_each_entry_reverse(mnt, list, mnt_list)
+		mark_umount_candidates(mnt);
+
 	list_for_each_entry(mnt, list, mnt_list)
 		__propagate_umount(mnt);
 	return 0;

fs/pnode.h

@@ -19,6 +19,7 @@
 #define IS_MNT_MARKED(m)	((m)->mnt.mnt_flags & MNT_MARKED)
 #define SET_MNT_MARK(m)		((m)->mnt.mnt_flags |= MNT_MARKED)
 #define CLEAR_MNT_MARK(m)	((m)->mnt.mnt_flags &= ~MNT_MARKED)
+#define IS_MNT_LOCKED(m)	((m)->mnt.mnt_flags & MNT_LOCKED)
 
 #define CL_EXPIRE		0x01
 #define CL_SLAVE		0x02