openanolis / cloud-kernel
Commit 909b0a88
Authored Nov 25, 2011 by Al Viro
vfs: spread struct mount - remaining argument of next_mnt()
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Parent: c63181e6
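For context, a minimal standalone sketch (not the kernel code itself) of the calling-convention change this patch makes: next_mnt() previously took a struct vfsmount * as its root argument, so callers holding a struct mount * had to pass &mnt->mnt and the loop compared &p->mnt == root; after the patch the root argument is a struct mount * and callers pass the mount directly. The struct layouts and the is_root_old/is_root_new helpers below are illustrative only and are not part of fs/namespace.c.

/*
 * Toy illustration: struct mount embeds a struct vfsmount, so the old
 * check had to take the address of the embedded member, while the new
 * check compares struct mount pointers directly. Both agree here.
 */
#include <stdio.h>

struct vfsmount { int mnt_flags; };
struct mount    { struct vfsmount mnt; int mnt_id; };

/* old-style check: compare the embedded vfsmount against the root */
static int is_root_old(struct mount *p, struct vfsmount *root)
{
	return &p->mnt == root;
}

/* new-style check: compare struct mount pointers directly */
static int is_root_new(struct mount *p, struct mount *root)
{
	return p == root;
}

int main(void)
{
	struct mount m = { .mnt = { .mnt_flags = 0 }, .mnt_id = 1 };

	/* both forms agree; the new one avoids the &...->mnt hop */
	printf("old: %d new: %d\n",
	       is_root_old(&m, &m.mnt), is_root_new(&m, &m));
	return 0;
}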
1 changed file: fs/namespace.c (+18, -17)

fs/namespace.c
@@ -631,12 +631,12 @@ static void commit_tree(struct mount *mnt)
 	touch_mnt_namespace(n);
 }
 
-static struct mount *next_mnt(struct mount *p, struct vfsmount *root)
+static struct mount *next_mnt(struct mount *p, struct mount *root)
 {
 	struct list_head *next = p->mnt_mounts.next;
 	if (next == &p->mnt_mounts) {
 		while (1) {
-			if (&p->mnt == root)
+			if (p == root)
 				return NULL;
 			next = p->mnt_child.next;
 			if (next != &p->mnt_parent->mnt_mounts)
@@ -1145,16 +1145,17 @@ const struct seq_operations mountstats_op = {
  * open files, pwds, chroots or sub mounts that are
  * busy.
  */
-int may_umount_tree(struct vfsmount *mnt)
+int may_umount_tree(struct vfsmount *m)
 {
+	struct mount *mnt = real_mount(m);
 	int actual_refs = 0;
 	int minimum_refs = 0;
 	struct mount *p;
-	BUG_ON(!mnt);
+	BUG_ON(!m);
 
 	/* write lock needed for mnt_get_count */
 	br_write_lock(vfsmount_lock);
-	for (p = real_mount(mnt); p; p = next_mnt(p, mnt)) {
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		actual_refs += mnt_get_count(p);
 		minimum_refs += 2;
 	}
@@ -1228,7 +1229,7 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
 	LIST_HEAD(tmp_list);
 	struct mount *p;
 
-	for (p = mnt; p; p = next_mnt(p, &mnt->mnt))
+	for (p = mnt; p; p = next_mnt(p, mnt))
 		list_move(&p->mnt_hash, &tmp_list);
 
 	if (propagate)
@@ -1436,7 +1437,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
 		if (!is_subdir(r->mnt_mountpoint, dentry))
 			continue;
 
-		for (s = r; s; s = next_mnt(s, &r->mnt)) {
+		for (s = r; s; s = next_mnt(s, r)) {
 			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
 				s = skip_mnt_tree(s);
 				continue;
@@ -1509,7 +1510,7 @@ static void cleanup_group_ids(struct mount *mnt, struct mount *end)
 {
 	struct mount *p;
 
-	for (p = mnt; p != end; p = next_mnt(p, &mnt->mnt)) {
+	for (p = mnt; p != end; p = next_mnt(p, mnt)) {
 		if (p->mnt_group_id && !IS_MNT_SHARED(p))
 			mnt_release_group_id(p);
 	}
@@ -1519,7 +1520,7 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
 {
 	struct mount *p;
 
-	for (p = mnt; p; p = recurse ? next_mnt(p, &mnt->mnt) : NULL) {
+	for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) {
 		if (!p->mnt_group_id && !IS_MNT_SHARED(p)) {
 			int err = mnt_alloc_group_id(p);
 			if (err) {
@@ -1616,7 +1617,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
 	br_write_lock(vfsmount_lock);
 	if (IS_MNT_SHARED(dest_mnt)) {
-		for (p = source_mnt; p; p = next_mnt(p, &source_mnt->mnt))
+		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
 			set_mnt_shared(p);
 	}
 	if (parent_path) {
@@ -1731,7 +1732,7 @@ static int do_change_type(struct path *path, int flag)
 	}
 
 	br_write_lock(vfsmount_lock);
-	for (m = mnt; m; m = (recurse ? next_mnt(m, &mnt->mnt) : NULL))
+	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
 		change_mnt_propagation(m, type);
 	br_write_unlock(vfsmount_lock);
@@ -1859,7 +1860,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
 static inline int tree_contains_unbindable(struct mount *mnt)
 {
 	struct mount *p;
-	for (p = mnt; p; p = next_mnt(p, &mnt->mnt)) {
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
 		if (IS_MNT_UNBINDABLE(p))
 			return 1;
 	}
@@ -2399,6 +2400,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	struct mnt_namespace *new_ns;
 	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
 	struct mount *p, *q;
+	struct mount *old = real_mount(mnt_ns->root);
 	struct mount *new;
 
 	new_ns = alloc_mnt_ns();
@@ -2407,8 +2409,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	down_write(&namespace_sem);
 	/* First pass: copy the tree topology */
-	new = copy_tree(real_mount(mnt_ns->root), mnt_ns->root->mnt_root,
-			CL_COPY_ALL | CL_EXPIRE);
+	new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
 	if (!new) {
 		up_write(&namespace_sem);
 		kfree(new_ns);
@@ -2424,7 +2425,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 	 * as belonging to new namespace. We have already acquired a private
 	 * fs_struct, so tsk->fs->lock is not needed.
 	 */
-	p = real_mount(mnt_ns->root);
+	p = old;
 	q = new;
 	while (p) {
 		q->mnt_ns = new_ns;
@@ -2443,8 +2444,8 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
 				pwdmnt = &p->mnt;
 			}
 		}
-		p = next_mnt(p, mnt_ns->root);
-		q = next_mnt(q, new_ns->root);
+		p = next_mnt(p, old);
+		q = next_mnt(q, new);
 	}
 	up_write(&namespace_sem);