/* SPDX-License-Identifier: GPL-2.0-only */
/*
* linux/fs/pnode.h
*
* (C) Copyright IBM Corporation 2005.
*/
#ifndef _LINUX_PNODE_H
#define _LINUX_PNODE_H
#include <linux/list.h>
#include "mount.h"
#define IS_MNT_SHARED(m) ((m)->mnt.mnt_flags & MNT_SHARED)
#define IS_MNT_SLAVE(m) ((m)->mnt_master)
#define IS_MNT_NEW(m) (!(m)->mnt_ns || is_anon_ns((m)->mnt_ns))
#define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
#define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
/*
 * Rationale for the MNT_MARKED helpers below, from Al Viro's
 * "smarter propagate_mnt()" change:
 *
 * The old code propagated copies to *all* nodes, then tore down the
 * copies made for nodes that do not contain counterparts of the desired
 * mountpoint.  That sets up the right propagation graph for the copies
 * (at teardown time we move the slaves of a removed node to a surviving
 * peer or directly to the master), but we end up paying a fairly steep
 * price in useless allocations.  It is fairly easy to create a situation
 * where N calls of mount(2) create exactly N bindings, with O(N^2)
 * vfsmounts allocated and freed in the process.
 *
 * Fortunately, it is possible to avoid those allocations/freeings.  The
 * trick is to create the copies in the right order and find which one
 * would eventually have become a master under the old algorithm.  That
 * turns out to be possible in O(nodes getting propagation) time and with
 * no extra allocations at all.
 *
 * One part is making sure that the eventual master is created before its
 * slaves, so the propagation tree has to be walked in a different order -
 * by peer groups, iterating through the peers before dealing with the
 * next group.
 *
 * The other part is finding the (earlier) copy that will be the master of
 * the one we are about to create; to do that we temporarily mark the
 * masters of the mountpoints we are attaching the copies to.  Either we
 * are in a peer of the last mountpoint we dealt with, or we have the
 * following situation: we are attaching to mountpoint M, the last copy
 * S_0 had been attached to M_0, and there are sequences S_0...S_n,
 * M_0...M_n such that S_{i+1} is a master of S_{i}, S_{i} is mounted on
 * M_{i}, and we need to create a slave of the first S_{k} such that M is
 * getting propagation from M_{k}.  That means the master of M_{k} will be
 * among the sequence of masters of M.  On the other hand, the nearest
 * marked node in that sequence will either be the master of M_{k} or the
 * master of M_{k-1} (the latter when M_{k-1} is a slave of something M
 * gets propagation from, but in the wrong peer group).
 *
 * So we go through the sequence of masters of M until we find a marked
 * one (P).  Let N be the one before it.  Then we go through the sequence
 * of masters of S_0 until we find one (say, S) mounted on a node D that
 * has P as its master, and check whether D is a peer of N.  If it is, S
 * will be the master of the new copy; if not, the master of S will be.
 *
 * That is it for the hard part; the rest is fairly simple.  The iterator
 * is next_group(), and handling of one prospective mountpoint is done by
 * propagate_one().  This survives all tests and gives noticeably better
 * performance than the old code for setups that seriously use shared
 * subtrees.
 */
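/*
 * Minimal sketch of the peer-group ordering described above, for
 * illustration only.  struct peer_group and visit_group() are hypothetical
 * stand-ins, not the fs/pnode.c implementation: here a node represents a
 * whole peer group, with the groups that receive propagation from it kept
 * in a slave array.  The property of interest is that every member of a
 * peer group is visited before any group that is a slave of it, so each
 * new copy can find its prospective master among copies that already exist.
 */
struct peer_group {
	void **members;			/* mounts sharing this peer group */
	unsigned int nr_members;
	struct peer_group **slaves;	/* groups propagating from this one */
	unsigned int nr_slaves;
};

static inline void visit_group(struct peer_group *g, void (*visit)(void *mnt))
{
	unsigned int i;

	/* all peers of this group first ... */
	for (i = 0; i < g->nr_members; i++)
		visit(g->members[i]);
	/* ... and only then the groups enslaved to it, recursively */
	for (i = 0; i < g->nr_slaves; i++)
		visit_group(g->slaves[i], visit);
}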
#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
#define CL_EXPIRE 0x01
#define CL_SLAVE 0x02
#define CL_COPY_UNBINDABLE 0x04
#define CL_MAKE_SHARED 0x08
#define CL_PRIVATE 0x10
#define CL_SHARED_TO_SLAVE 0x20
#define CL_COPY_MNT_NS_FILE 0x40
#define CL_COPY_ALL (CL_COPY_UNBINDABLE | CL_COPY_MNT_NS_FILE)
/* flag @mnt as shared for propagation, clearing any conflicting setting */
static inline void set_mnt_shared(struct mount *mnt)
{
	mnt->mnt.mnt_flags &= ~MNT_SHARED_MASK;
	mnt->mnt.mnt_flags |= MNT_SHARED;
}
void change_mnt_propagation(struct mount *, int);
int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
		struct hlist_head *);
int propagate_umount(struct list_head *);
int propagate_mount_busy(struct mount *, int);
void propagate_mount_unlock(struct mount *);
void mnt_release_group_id(struct mount *);
int get_dominating_id(struct mount *mnt, const struct path *root);
int mnt_get_count(struct mount *mnt);
void mnt_set_mountpoint(struct mount *, struct mountpoint *,
			struct mount *);
void mnt_change_mountpoint(struct mount *parent, struct mountpoint *mp,
			   struct mount *mnt);
struct mount *copy_tree(struct mount *, struct dentry *, int);
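/*
 * Illustration only: a hypothetical caller (not part of the kernel API)
 * combining the CL_* flags above with copy_tree() to duplicate a subtree
 * without skipping the mounts that plain copies leave out - the cases
 * CL_COPY_ALL covers.
 */
static inline struct mount *copy_whole_tree_example(struct mount *old)
{
	return copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL);
}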
bool is_path_reachable(struct mount *, struct dentry *,
			 const struct path *root);
int count_mounts(struct mnt_namespace *ns, struct mount *mnt);
#endif /* _LINUX_PNODE_H */