Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openeuler
raspberrypi-kernel
提交
7eb5e882
R
raspberrypi-kernel
项目概览
openeuler
/
raspberrypi-kernel
通知
13
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
R
raspberrypi-kernel
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
7eb5e882
编写于
10月 01, 2013
作者:
A
Al Viro
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
uninline destroy_super(), consolidate alloc_super()
Signed-off-by:
N
Al Viro
<
viro@zeniv.linux.org.uk
>
上级
966c1f75
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
86 additions
and
120 deletions
+86
-120
fs/super.c
fs/super.c
+86
-120
未找到文件。
fs/super.c
浏览文件 @
7eb5e882
...
...
@@ -129,33 +129,27 @@ static unsigned long super_cache_count(struct shrinker *shrink,
return
total_objects
;
}
static
int
init_sb_writers
(
struct
super_block
*
s
,
struct
file_system_type
*
type
)
{
int
err
;
int
i
;
for
(
i
=
0
;
i
<
SB_FREEZE_LEVELS
;
i
++
)
{
err
=
percpu_counter_init
(
&
s
->
s_writers
.
counter
[
i
],
0
);
if
(
err
<
0
)
goto
err_out
;
lockdep_init_map
(
&
s
->
s_writers
.
lock_map
[
i
],
sb_writers_name
[
i
],
&
type
->
s_writers_key
[
i
],
0
);
}
init_waitqueue_head
(
&
s
->
s_writers
.
wait
);
init_waitqueue_head
(
&
s
->
s_writers
.
wait_unfrozen
);
return
0
;
err_out:
while
(
--
i
>=
0
)
percpu_counter_destroy
(
&
s
->
s_writers
.
counter
[
i
]);
return
err
;
}
static
void
destroy_sb_writers
(
struct
super_block
*
s
)
/**
* destroy_super - frees a superblock
* @s: superblock to free
*
* Frees a superblock.
*/
static
void
destroy_super
(
struct
super_block
*
s
)
{
int
i
;
list_lru_destroy
(
&
s
->
s_dentry_lru
);
list_lru_destroy
(
&
s
->
s_inode_lru
);
#ifdef CONFIG_SMP
free_percpu
(
s
->
s_files
);
#endif
for
(
i
=
0
;
i
<
SB_FREEZE_LEVELS
;
i
++
)
percpu_counter_destroy
(
&
s
->
s_writers
.
counter
[
i
]);
security_sb_free
(
s
);
WARN_ON
(
!
list_empty
(
&
s
->
s_mounts
));
kfree
(
s
->
s_subtype
);
kfree
(
s
->
s_options
);
kfree
(
s
);
}
/**
...
...
@@ -170,111 +164,83 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
struct
super_block
*
s
=
kzalloc
(
sizeof
(
struct
super_block
),
GFP_USER
);
static
const
struct
super_operations
default_op
;
int
i
;
if
(
!
s
)
return
NULL
;
if
(
s
)
{
if
(
security_sb_alloc
(
s
))
goto
out_free_sb
;
if
(
security_sb_alloc
(
s
))
goto
fail
;
#ifdef CONFIG_SMP
s
->
s_files
=
alloc_percpu
(
struct
list_head
);
if
(
!
s
->
s_files
)
goto
err_out
;
else
{
int
i
;
for_each_possible_cpu
(
i
)
INIT_LIST_HEAD
(
per_cpu_ptr
(
s
->
s_files
,
i
));
}
s
->
s_files
=
alloc_percpu
(
struct
list_head
);
if
(
!
s
->
s_files
)
goto
fail
;
for_each_possible_cpu
(
i
)
INIT_LIST_HEAD
(
per_cpu_ptr
(
s
->
s_files
,
i
));
#else
INIT_LIST_HEAD
(
&
s
->
s_files
);
INIT_LIST_HEAD
(
&
s
->
s_files
);
#endif
if
(
init_sb_writers
(
s
,
type
))
goto
err_out
;
s
->
s_flags
=
flags
;
s
->
s_bdi
=
&
default_backing_dev_info
;
INIT_HLIST_NODE
(
&
s
->
s_instances
);
INIT_HLIST_BL_HEAD
(
&
s
->
s_anon
);
INIT_LIST_HEAD
(
&
s
->
s_inodes
);
if
(
list_lru_init
(
&
s
->
s_dentry_lru
))
goto
err_out
;
if
(
list_lru_init
(
&
s
->
s_inode_lru
))
goto
err_out_dentry_lru
;
INIT_LIST_HEAD
(
&
s
->
s_mounts
);
init_rwsem
(
&
s
->
s_umount
);
lockdep_set_class
(
&
s
->
s_umount
,
&
type
->
s_umount_key
);
/*
* sget() can have s_umount recursion.
*
* When it cannot find a suitable sb, it allocates a new
* one (this one), and tries again to find a suitable old
* one.
*
* In case that succeeds, it will acquire the s_umount
* lock of the old one. Since these are clearly distrinct
* locks, and this object isn't exposed yet, there's no
* risk of deadlocks.
*
* Annotate this by putting this lock in a different
* subclass.
*/
down_write_nested
(
&
s
->
s_umount
,
SINGLE_DEPTH_NESTING
);
s
->
s_count
=
1
;
atomic_set
(
&
s
->
s_active
,
1
);
mutex_init
(
&
s
->
s_vfs_rename_mutex
);
lockdep_set_class
(
&
s
->
s_vfs_rename_mutex
,
&
type
->
s_vfs_rename_key
);
mutex_init
(
&
s
->
s_dquot
.
dqio_mutex
);
mutex_init
(
&
s
->
s_dquot
.
dqonoff_mutex
);
init_rwsem
(
&
s
->
s_dquot
.
dqptr_sem
);
s
->
s_maxbytes
=
MAX_NON_LFS
;
s
->
s_op
=
&
default_op
;
s
->
s_time_gran
=
1000000000
;
s
->
cleancache_poolid
=
-
1
;
s
->
s_shrink
.
seeks
=
DEFAULT_SEEKS
;
s
->
s_shrink
.
scan_objects
=
super_cache_scan
;
s
->
s_shrink
.
count_objects
=
super_cache_count
;
s
->
s_shrink
.
batch
=
1024
;
s
->
s_shrink
.
flags
=
SHRINKER_NUMA_AWARE
;
for
(
i
=
0
;
i
<
SB_FREEZE_LEVELS
;
i
++
)
{
if
(
percpu_counter_init
(
&
s
->
s_writers
.
counter
[
i
],
0
)
<
0
)
goto
fail
;
lockdep_init_map
(
&
s
->
s_writers
.
lock_map
[
i
],
sb_writers_name
[
i
],
&
type
->
s_writers_key
[
i
],
0
);
}
out:
init_waitqueue_head
(
&
s
->
s_writers
.
wait
);
init_waitqueue_head
(
&
s
->
s_writers
.
wait_unfrozen
);
s
->
s_flags
=
flags
;
s
->
s_bdi
=
&
default_backing_dev_info
;
INIT_HLIST_NODE
(
&
s
->
s_instances
);
INIT_HLIST_BL_HEAD
(
&
s
->
s_anon
);
INIT_LIST_HEAD
(
&
s
->
s_inodes
);
if
(
list_lru_init
(
&
s
->
s_dentry_lru
))
goto
fail
;
if
(
list_lru_init
(
&
s
->
s_inode_lru
))
goto
fail
;
INIT_LIST_HEAD
(
&
s
->
s_mounts
);
init_rwsem
(
&
s
->
s_umount
);
lockdep_set_class
(
&
s
->
s_umount
,
&
type
->
s_umount_key
);
/*
* sget() can have s_umount recursion.
*
* When it cannot find a suitable sb, it allocates a new
* one (this one), and tries again to find a suitable old
* one.
*
* In case that succeeds, it will acquire the s_umount
* lock of the old one. Since these are clearly distrinct
* locks, and this object isn't exposed yet, there's no
* risk of deadlocks.
*
* Annotate this by putting this lock in a different
* subclass.
*/
down_write_nested
(
&
s
->
s_umount
,
SINGLE_DEPTH_NESTING
);
s
->
s_count
=
1
;
atomic_set
(
&
s
->
s_active
,
1
);
mutex_init
(
&
s
->
s_vfs_rename_mutex
);
lockdep_set_class
(
&
s
->
s_vfs_rename_mutex
,
&
type
->
s_vfs_rename_key
);
mutex_init
(
&
s
->
s_dquot
.
dqio_mutex
);
mutex_init
(
&
s
->
s_dquot
.
dqonoff_mutex
);
init_rwsem
(
&
s
->
s_dquot
.
dqptr_sem
);
s
->
s_maxbytes
=
MAX_NON_LFS
;
s
->
s_op
=
&
default_op
;
s
->
s_time_gran
=
1000000000
;
s
->
cleancache_poolid
=
-
1
;
s
->
s_shrink
.
seeks
=
DEFAULT_SEEKS
;
s
->
s_shrink
.
scan_objects
=
super_cache_scan
;
s
->
s_shrink
.
count_objects
=
super_cache_count
;
s
->
s_shrink
.
batch
=
1024
;
s
->
s_shrink
.
flags
=
SHRINKER_NUMA_AWARE
;
return
s
;
err_out_dentry_lru:
list_lru_destroy
(
&
s
->
s_dentry_lru
);
err_out:
security_sb_free
(
s
);
#ifdef CONFIG_SMP
if
(
s
->
s_files
)
free_percpu
(
s
->
s_files
);
#endif
destroy_sb_writers
(
s
);
out_free_sb:
kfree
(
s
);
s
=
NULL
;
goto
out
;
}
/**
* destroy_super - frees a superblock
* @s: superblock to free
*
* Frees a superblock.
*/
static
inline
void
destroy_super
(
struct
super_block
*
s
)
{
list_lru_destroy
(
&
s
->
s_dentry_lru
);
list_lru_destroy
(
&
s
->
s_inode_lru
);
#ifdef CONFIG_SMP
free_percpu
(
s
->
s_files
);
#endif
destroy_sb_writers
(
s
);
security_sb_free
(
s
);
WARN_ON
(
!
list_empty
(
&
s
->
s_mounts
));
kfree
(
s
->
s_subtype
);
kfree
(
s
->
s_options
);
kfree
(
s
);
fail:
destroy_super
(
s
);
return
NULL
;
}
/* Superblock refcounting */
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录