Commit f2ebb3a9 authored by Al Viro

smarter propagate_mnt()

The current mainline has copies propagated to *all* nodes, then
tears down the copies we made for nodes that do not contain
counterparts of the desired mountpoint.  That sets up the right
propagation graph for the copies (at teardown time we move
the slaves of a removed node to a surviving peer or directly
to the master), but we end up paying a fairly steep price in
useless allocations.  It's fairly easy to create a situation
where N calls of mount(2) create exactly N bindings, with
O(N^2) vfsmounts allocated and freed in the process.
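
As an illustration (mine, not from the commit): populate a shared
mount's peer group with binds of a subdirectory; every later mount
elsewhere in the original then propagates to each such peer, and the
pre-patch code allocated a copy per peer only to tear it down, because
the mountpoint has no counterpart under the peer's root.  All paths,
the loop bound and the tmpfs mounts below are invented for the sketch;
error handling is omitted and it has to run as root.

/* hypothetical reproducer sketch, not from the commit */
#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>

int main(void)
{
        char peer[64];
        int i;

        mount("/mnt", "/mnt", NULL, MS_BIND, NULL);     /* make /mnt a mount... */
        mount(NULL, "/mnt", NULL, MS_SHARED, NULL);     /* ...and shared */
        mkdir("/mnt/a", 0755);
        mkdir("/mnt/b", 0755);

        for (i = 0; i < 100; i++) {
                /* each bind of the subdirectory /mnt/a joins /mnt's peer
                 * group, but its root does not contain /mnt/b */
                snprintf(peer, sizeof(peer), "/tmp/peer%d", i);
                mkdir(peer, 0755);
                mount("/mnt/a", peer, NULL, MS_BIND, NULL);
                /* propagates to all i peers; before this patch the kernel
                 * allocated a vfsmount tree per peer and immediately freed
                 * it, since b is not under any peer's root */
                mount("none", "/mnt/b", "tmpfs", 0, NULL);
        }
        return 0;
}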

Fortunately, it is possible to avoid those allocations/freeings.
The trick is to create copies in the right order and find which
one would've eventually become a master with the current algorithm.
It turns out to be possible in O(nodes getting propagation) time
and with no extra allocations at all.

One part is that we need to make sure that the eventual master is
created before its slaves, so we need to walk the propagation
tree in a different order: by peer groups, iterating through the
peers of each group before dealing with the next group (a toy model
of the ordering follows).
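
A toy model (mine, not kernel code; all names invented) of that
ordering: treat each peer group as a node in a tree whose edges are
the master -> slave relation.  The walk below emits every peer of a
group before descending into the groups slaved to it, which is the
invariant next_group() maintains iteratively over the mnt_slave lists.

#include <stdio.h>

struct group {
        const char **peers;             /* NULL-terminated member names */
        struct group **slaves;          /* NULL-terminated slave groups */
};

static void walk(const struct group *g)
{
        int i;
        for (i = 0; g->peers[i]; i++)   /* whole peer group first */
                printf("%s ", g->peers[i]);
        for (i = 0; g->slaves[i]; i++)  /* then each group slaved to it */
                walk(g->slaves[i]);
}

int main(void)
{
        /* dest's peer A; {B, C} slaved to that group; {D} slaved to {B, C} */
        const char *p0[] = { "A", NULL };       /* dest itself isn't copied */
        const char *p1[] = { "B", "C", NULL };
        const char *p2[] = { "D", NULL };
        struct group *none[] = { NULL };
        struct group g2 = { p2, none };
        struct group *s1[] = { &g2, NULL };
        struct group g1 = { p1, s1 };
        struct group *s0[] = { &g1, NULL };
        struct group g0 = { p0, s0 };

        walk(&g0);      /* prints: A B C D -- masters always before slaves */
        printf("\n");
        return 0;
}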

Another thing is finding the (earlier) copy that will be the master
of the one we are about to create; to do that we temporarily mark
the masters of the mountpoints we are attaching the copies to.

Either we are in a peer of the last mountpoint we'd dealt with,
or we have the following situation: we are attaching to mountpoint M,
the last copy S_0 had been attached to M_0, and there are sequences
S_0...S_n, M_0...M_n such that S_{i+1} is a master of S_{i},
S_{i} is mounted on M_{i}, and we need to create a slave of the first S_{k}
such that M is getting propagation from M_{k}.  That means the master
of M_{k} will be among the sequence of masters of M.  On the
other hand, the nearest marked node in that sequence will either
be the master of M_{k} or the master of M_{k-1} (the latter in
case M_{k-1} is a slave of something M gets propagation from,
but in the wrong peer group).

So we go through the sequence of masters of M until we find
a marked one (P).  Let N be the one before it.  Then we go through
the sequence of masters of S_0 until we find one (say, S) mounted
on a node D that has P as its master, and check whether D is a peer of N.
If it is, S will be the master of the new copy; if not, the master of S
will be (see the annotated excerpt below).
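
For orientation, here is that search as it appears in propagate_one()
in the diff below; the comments mapping the code to M, P, N, S and D
are mine.  On entry m is M, last_source is S_0 and last_dest is M_0.

        struct mount *n, *p;
        for (n = m; ; n = p) {          /* walk the masters of M */
                p = n->mnt_master;
                if (p == dest_master || IS_MNT_MARKED(p)) {
                        /* p is the nearest marked master (dest's own
                         * master counts as marked): that's P; n is N */
                        while (last_dest->mnt_master != p) {
                                /* walk the masters of S_0 until one sits
                                 * on a node whose master is P: that's S,
                                 * mounted on D (== last_dest) */
                                last_source = last_source->mnt_master;
                                last_dest = last_source->mnt_parent;
                        }
                        if (n->mnt_group_id != last_dest->mnt_group_id) {
                                /* D is not a peer of N: take the master of S */
                                last_source = last_source->mnt_master;
                                last_dest = last_source->mnt_parent;
                        }
                        /* last_source is now what the new copy will be
                         * cloned from as CL_SLAVE, i.e. its master */
                        break;
                }
        }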

That's it for the hard part; the rest is fairly simple.  The iterator
is next_group(), and the handling of one prospective mountpoint is
done in propagate_one().

It seems to survive all tests and gives noticeably better performance
than the current mainline for setups that make serious use of shared
subtrees.

Cc: stable@vger.kernel.org
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Parent 38129a13
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -885,7 +885,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
 			goto out_free;
 	}
 
-	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
 	/* Don't allow unprivileged users to change mount flags */
 	if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
 		mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
@@ -1661,9 +1661,9 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 		if (err)
 			goto out;
 		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
+		lock_mount_hash();
 		if (err)
 			goto out_cleanup_ids;
-		lock_mount_hash();
 		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
 			set_mnt_shared(p);
 	} else {
@@ -1690,6 +1690,11 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 	return 0;
 
  out_cleanup_ids:
+	while (!hlist_empty(&tree_list)) {
+		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+		umount_tree(child, 0);
+	}
+	unlock_mount_hash();
 	cleanup_group_ids(source_mnt, NULL);
  out:
 	return err;
@@ -2044,7 +2049,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
 	struct mount *parent;
 	int err;
 
-	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);
+	mnt_flags &= ~MNT_INTERNAL_FLAGS;
 
 	mp = lock_mount(path);
 	if (IS_ERR(mp))
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -164,46 +164,94 @@ static struct mount *propagation_next(struct mount *m,
 	}
 }
 
-/*
- * return the source mount to be used for cloning
- *
- * @dest	the current destination mount
- * @last_dest	the last seen destination mount
- * @last_src	the last seen source mount
- * @type	return CL_SLAVE if the new mount has to be
- *		cloned as a slave.
- */
-static struct mount *get_source(struct mount *dest,
-				struct mount *last_dest,
-				struct mount *last_src,
-				int *type)
+static struct mount *next_group(struct mount *m, struct mount *origin)
 {
-	struct mount *p_last_src = NULL;
-	struct mount *p_last_dest = NULL;
-
-	while (last_dest != dest->mnt_master) {
-		p_last_dest = last_dest;
-		p_last_src = last_src;
-		last_dest = last_dest->mnt_master;
-		last_src = last_src->mnt_master;
+	while (1) {
+		while (1) {
+			struct mount *next;
+			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+				return first_slave(m);
+			next = next_peer(m);
+			if (m->mnt_group_id == origin->mnt_group_id) {
+				if (next == origin)
+					return NULL;
+			} else if (m->mnt_slave.next != &next->mnt_slave)
+				break;
+			m = next;
+		}
+		/* m is the last peer */
+		while (1) {
+			struct mount *master = m->mnt_master;
+			if (m->mnt_slave.next != &master->mnt_slave_list)
+				return next_slave(m);
+			m = next_peer(master);
+			if (master->mnt_group_id == origin->mnt_group_id)
+				break;
+			if (master->mnt_slave.next == &m->mnt_slave)
+				break;
+			m = master;
+		}
+		if (m == origin)
+			return NULL;
 	}
+}
 
-	if (p_last_dest) {
-		do {
-			p_last_dest = next_peer(p_last_dest);
-		} while (IS_MNT_NEW(p_last_dest));
-		/* is that a peer of the earlier? */
-		if (dest == p_last_dest) {
-			*type = CL_MAKE_SHARED;
-			return p_last_src;
-		}
-	}
+/* all accesses are serialized by namespace_sem */
+static struct user_namespace *user_ns;
+static struct mount *last_dest, *last_source, *dest_master;
+static struct mountpoint *mp;
+static struct hlist_head *list;
 
-	/* slave of the earlier, then */
-	*type = CL_SLAVE;
-	/* beginning of peer group among the slaves? */
-	if (IS_MNT_SHARED(dest))
-		*type |= CL_MAKE_SHARED;
-	return last_src;
+static int propagate_one(struct mount *m)
+{
+	struct mount *child;
+	int type;
+	/* skip ones added by this propagate_mnt() */
+	if (IS_MNT_NEW(m))
+		return 0;
+	/* skip if mountpoint isn't covered by it */
+	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
+		return 0;
+	if (m->mnt_group_id == last_dest->mnt_group_id) {
+		type = CL_MAKE_SHARED;
+	} else {
+		struct mount *n, *p;
+		for (n = m; ; n = p) {
+			p = n->mnt_master;
+			if (p == dest_master || IS_MNT_MARKED(p)) {
+				while (last_dest->mnt_master != p) {
+					last_source = last_source->mnt_master;
+					last_dest = last_source->mnt_parent;
+				}
+				if (n->mnt_group_id != last_dest->mnt_group_id) {
+					last_source = last_source->mnt_master;
+					last_dest = last_source->mnt_parent;
+				}
+				break;
+			}
+		}
+		type = CL_SLAVE;
+		/* beginning of peer group among the slaves? */
+		if (IS_MNT_SHARED(m))
+			type |= CL_MAKE_SHARED;
+	}
+
+	/* Notice when we are propagating across user namespaces */
+	if (m->mnt_ns->user_ns != user_ns)
+		type |= CL_UNPRIVILEGED;
+	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
+	if (IS_ERR(child))
+		return PTR_ERR(child);
+	mnt_set_mountpoint(m, mp, child);
+	last_dest = m;
+	last_source = child;
+	if (m->mnt_master != dest_master) {
+		read_seqlock_excl(&mount_lock);
+		SET_MNT_MARK(m->mnt_master);
+		read_sequnlock_excl(&mount_lock);
+	}
+	hlist_add_head(&child->mnt_hash, list);
+	return 0;
 }
 
 /*
@@ -222,56 +270,48 @@ static struct mount *get_source(struct mount *dest,
 int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
 		    struct mount *source_mnt, struct hlist_head *tree_list)
 {
-	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
-	struct mount *m, *child;
+	struct mount *m, *n;
 	int ret = 0;
-	struct mount *prev_dest_mnt = dest_mnt;
-	struct mount *prev_src_mnt = source_mnt;
-	HLIST_HEAD(tmp_list);
-
-	for (m = propagation_next(dest_mnt, dest_mnt); m;
-			m = propagation_next(m, dest_mnt)) {
-		int type;
-		struct mount *source;
-
-		if (IS_MNT_NEW(m))
-			continue;
-
-		source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
-
-		/* Notice when we are propagating across user namespaces */
-		if (m->mnt_ns->user_ns != user_ns)
-			type |= CL_UNPRIVILEGED;
-
-		child = copy_tree(source, source->mnt.mnt_root, type);
-		if (IS_ERR(child)) {
-			ret = PTR_ERR(child);
-			tmp_list = *tree_list;
-			tmp_list.first->pprev = &tmp_list.first;
-			INIT_HLIST_HEAD(tree_list);
-			goto out;
-		}
 
-		if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
-			mnt_set_mountpoint(m, dest_mp, child);
-			hlist_add_head(&child->mnt_hash, tree_list);
-		} else {
-			/*
-			 * This can happen if the parent mount was bind mounted
-			 * on some subdirectory of a shared/slave mount.
-			 */
-			hlist_add_head(&child->mnt_hash, &tmp_list);
-		}
-		prev_dest_mnt = m;
-		prev_src_mnt = child;
+	/*
+	 * we don't want to bother passing tons of arguments to
+	 * propagate_one(); everything is serialized by namespace_sem,
+	 * so globals will do just fine.
+	 */
+	user_ns = current->nsproxy->mnt_ns->user_ns;
+	last_dest = dest_mnt;
+	last_source = source_mnt;
+	mp = dest_mp;
+	list = tree_list;
+	dest_master = dest_mnt->mnt_master;
+
+	/* all peers of dest_mnt, except dest_mnt itself */
+	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
+		ret = propagate_one(n);
+		if (ret)
+			goto out;
+	}
+
+	/* all slave groups */
+	for (m = next_group(dest_mnt, dest_mnt); m;
+			m = next_group(m, dest_mnt)) {
+		/* everything in that slave group */
+		n = m;
+		do {
+			ret = propagate_one(n);
+			if (ret)
+				goto out;
+			n = next_peer(n);
+		} while (n != m);
 	}
 out:
-	lock_mount_hash();
-	while (!hlist_empty(&tmp_list)) {
-		child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
-		umount_tree(child, 0);
+	read_seqlock_excl(&mount_lock);
+	hlist_for_each_entry(n, tree_list, mnt_hash) {
+		m = n->mnt_parent;
+		if (m->mnt_master != dest_mnt->mnt_master)
+			CLEAR_MNT_MARK(m->mnt_master);
 	}
-	unlock_mount_hash();
+	read_sequnlock_excl(&mount_lock);
 	return ret;
 }
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -16,6 +16,9 @@
 #define IS_MNT_NEW(m)  (!(m)->mnt_ns)
 #define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
 #define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
+#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
+#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
+#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
 
 #define CL_EXPIRE		0x01
 #define CL_SLAVE		0x02
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -44,6 +44,8 @@ struct mnt_namespace;
 #define MNT_SHARED_MASK	(MNT_UNBINDABLE)
 #define MNT_PROPAGATION_MASK	(MNT_SHARED | MNT_UNBINDABLE)
 
+#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
+			    MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
 
 #define MNT_INTERNAL	0x4000
 
@@ -51,6 +53,7 @@ struct mnt_namespace;
 #define MNT_LOCKED		0x800000
 #define MNT_DOOMED		0x1000000
 #define MNT_SYNC_UMOUNT		0x2000000
+#define MNT_MARKED		0x4000000
 
 struct vfsmount {
 	struct dentry *mnt_root;	/* root of the mounted tree */