Commit a1b32a59
Authored Sep 05, 2008 by Chris Mason
Btrfs: Add debugging checks to track down corrupted metadata
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Parent: 95819c05
Showing 4 changed files with 49 additions and 34 deletions (+49 -34)

fs/btrfs/disk-io.c     +6  -1
fs/btrfs/extent_io.c   +14 -7
fs/btrfs/file.c        +8  -7
fs/btrfs/volumes.c     +21 -19
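The checks added in the hunks below share one pattern: when a metadata block comes off disk, compare what the block says about itself (its recorded byte number) against the location it was actually read from, and if they disagree, log both values and fail the read instead of using the block silently. Here is a minimal standalone sketch of that pattern; the names (struct tree_block, check_tree_block) are illustrative stand-ins, not the real btrfs definitions.

#include <stdio.h>

/* Illustrative stand-in for a metadata block header and its expected
 * on-disk location; not the real btrfs structures. */
struct tree_block {
        unsigned long long header_bytenr;  /* location recorded inside the block */
};

/* Return 0 if the block looks sane, an -EIO-style error otherwise. */
static int check_tree_block(const struct tree_block *blk,
                            unsigned long long expected_start)
{
        if (blk->header_bytenr != expected_start) {
                /* Same shape as the "bad tree block start" printk added in
                 * btree_readpage_end_io_hook(): log both values so the
                 * corruption can be tracked down, then fail the read. */
                fprintf(stderr, "bad tree block start %llu %llu\n",
                        blk->header_bytenr, expected_start);
                return -5; /* stands in for -EIO */
        }
        return 0;
}

int main(void)
{
        struct tree_block good = { .header_bytenr = 4096 };
        struct tree_block bad  = { .header_bytenr = 8192 };

        printf("good: %d\n", check_tree_block(&good, 4096)); /* prints 0  */
        printf("bad:  %d\n", check_tree_block(&bad, 4096));  /* prints -5 */
        return 0;
}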
fs/btrfs/disk-io.c

@@ -250,7 +250,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
 		if (!ret &&
 		    !verify_parent_transid(io_tree, eb, parent_transid))
 			return ret;
-
+printk("read extent buffer pages failed with ret %d mirror no %d\n", ret, mirror_num);
 		num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
 					      eb->start, eb->len);
 		if (num_copies == 1)

@@ -348,6 +348,9 @@ int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
+		printk("bad tree block start %llu %llu\n",
+		       (unsigned long long)found_start,
+		       (unsigned long long)eb->start);
 		ret = -EIO;
 		goto err;
 	}

@@ -709,6 +712,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
 	if (ret == 0) {
 		buf->flags |= EXTENT_UPTODATE;
+	} else {
+		WARN_ON(1);
 	}
 	return buf;
fs/btrfs/extent_io.c

@@ -1811,6 +1811,7 @@ printk("2bad mapping end %Lu cur %Lu\n", end, cur);
 		}
 		/* the get_extent function already copied into the page */
 		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+			check_page_uptodate(tree, page);
 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
 			cur = cur + iosize;
 			page_offset += iosize;

@@ -2785,21 +2786,20 @@ int set_extent_buffer_dirty(struct extent_io_tree *tree,
 		 * properly set. releasepage may drop page->private
 		 * on us if the page isn't already dirty.
 		 */
+		lock_page(page);
 		if (i == 0) {
-			lock_page(page);
 			set_page_extent_head(page, eb->len);
 		} else if (PagePrivate(page) &&
 			   page->private != EXTENT_PAGE_PRIVATE) {
-			lock_page(page);
 			set_page_extent_mapped(page);
-			unlock_page(page);
 		}
 		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
-		if (i == 0)
-			unlock_page(page);
+		set_extent_dirty(tree, page_offset(page),
+				 page_offset(page) + PAGE_CACHE_SIZE - 1,
+				 GFP_NOFS);
+		unlock_page(page);
 	}
-	return set_extent_dirty(tree, eb->start,
-				eb->start + eb->len - 1, GFP_NOFS);
+	return 0;
 }
 EXPORT_SYMBOL(set_extent_buffer_dirty);

@@ -2952,6 +2952,9 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (all_uptodate) {
 		if (start_i == 0)
 			eb->flags |= EXTENT_UPTODATE;
+		if (ret) {
+			printk("all up to date but ret is %d\n", ret);
+		}
 		goto unlock_exit;
 	}

@@ -2968,6 +2971,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 					      mirror_num);
 		if (err) {
 			ret = err;
+			printk("err %d from __extent_read_full_page\n", ret);
 		}
 	} else {
 		unlock_page(page);

@@ -2978,12 +2982,15 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 		submit_one_bio(READ, bio, mirror_num);

 	if (ret || !wait) {
+		if (ret)
+			printk("ret %d wait %d returning\n", ret, wait);
 		return ret;
 	}
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
 		wait_on_page_locked(page);
 		if (!PageUptodate(page)) {
+			printk("page not uptodate after wait_on_page_locked\n");
 			ret = -EIO;
 		}
 	}
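The read_extent_buffer_pages() hunks above are all about making failures visible: keep the first error code seen while looping over the buffer's pages, and print a message at each point where that error could otherwise be swallowed. A standalone sketch of that shape, using made-up read_one_page()/NUM_PAGES names rather than the real extent_io API:

#include <stdio.h>

#define NUM_PAGES 4
#define EIO 5

/* Hypothetical per-page read; fails for page 2 to exercise the error path. */
static int read_one_page(int i)
{
        return (i == 2) ? -EIO : 0;
}

/* Read every page, remember the first failure, and log each one instead of
 * silently dropping it -- the same shape as the debugging printks added to
 * read_extent_buffer_pages(). */
static int read_all_pages(void)
{
        int ret = 0;
        int err;
        int i;

        for (i = 0; i < NUM_PAGES; i++) {
                err = read_one_page(i);
                if (err) {
                        if (!ret)
                                ret = err;
                        fprintf(stderr, "err %d reading page %d\n", err, i);
                }
        }
        if (ret)
                fprintf(stderr, "returning first error %d\n", ret);
        return ret;
}

int main(void)
{
        printf("read_all_pages() = %d\n", read_all_pages());
        return 0;
}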
fs/btrfs/file.c

@@ -39,7 +39,8 @@
 #include "compat.h"

-static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
+static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
+					 int write_bytes,
 			 struct page **prepared_pages,
 			 const char __user * buf)
 {

@@ -69,7 +70,7 @@ static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
 	return page_fault ? -EFAULT : 0;
 }

-static void btrfs_drop_pages(struct page **pages, size_t num_pages)
+static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
 {
 	size_t i;
 	for (i = 0; i < num_pages; i++) {

@@ -359,7 +360,7 @@ static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	return err;
 }

-int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
+int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
 {
 	struct extent_map *em;
 	struct extent_map *split = NULL;

@@ -515,7 +516,7 @@ int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
  * it is either truncated or split. Anything entirely inside the range
  * is deleted from the tree.
  */
-int btrfs_drop_extents(struct btrfs_trans_handle *trans,
+int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode,
 		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
 {

@@ -785,7 +786,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 /*
  * this gets pages into the page cache and locks them down
  */
-static int prepare_pages(struct btrfs_root *root, struct file *file,
+static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
 			 struct page **pages, size_t num_pages,
 			 loff_t pos, unsigned long first_index,
 			 unsigned long last_index, size_t write_bytes)
fs/btrfs/volumes.c

@@ -94,8 +94,8 @@ int btrfs_cleanup_fs_uuids(void)
 	return 0;
 }

-static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
-					  u8 *uuid)
+static noinline struct btrfs_device *__find_device(struct list_head *head,
+						   u64 devid, u8 *uuid)
 {
 	struct btrfs_device *dev;
 	struct list_head *cur;

@@ -110,7 +110,7 @@ static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
 	return NULL;
 }

-static struct btrfs_fs_devices *find_fsid(u8 *fsid)
+static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 {
 	struct list_head *cur;
 	struct btrfs_fs_devices *fs_devices;

@@ -134,7 +134,7 @@ static struct btrfs_fs_devices *find_fsid(u8 *fsid)
 * the list if the block device is congested. This way, multiple devices
 * can make progress from a single worker thread.
 */
-int run_scheduled_bios(struct btrfs_device *device)
+static int noinline run_scheduled_bios(struct btrfs_device *device)
 {
 	struct bio *pending;
 	struct backing_dev_info *bdi;

@@ -233,7 +233,7 @@ void pending_bios_fn(struct btrfs_work *work)
 	run_scheduled_bios(device);
 }

-static int device_list_add(const char *path,
+static noinline int device_list_add(const char *path,
 			   struct btrfs_super_block *disk_super,
 			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
 {

@@ -480,7 +480,7 @@ int btrfs_scan_one_device(const char *path, int flags, void *holder,
 * called very infrequently and that a given device has a small number
 * of extents
 */
-static int find_free_dev_extent(struct btrfs_trans_handle *trans,
+static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
 				struct btrfs_device *device,
 				struct btrfs_path *path,
 				u64 num_bytes, u64 *start)

@@ -645,7 +645,7 @@ int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 	return ret;
 }

-int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
+int noinline btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 			   struct btrfs_device *device,
 			   u64 chunk_tree, u64 chunk_objectid,
 			   u64 chunk_offset,

@@ -693,7 +693,8 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 	return ret;
 }

-static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
+static noinline int find_next_chunk(struct btrfs_root *root,
+				    u64 objectid, u64 *offset)
 {
 	struct btrfs_path *path;
 	int ret;

@@ -735,8 +736,8 @@ static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
 	return ret;
 }

-static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
-			   u64 *objectid)
+static noinline int find_next_devid(struct btrfs_root *root,
+				    struct btrfs_path *path, u64 *objectid)
 {
 	int ret;
 	struct btrfs_key key;

@@ -1103,7 +1104,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 	goto out;
 }

-int btrfs_update_device(struct btrfs_trans_handle *trans,
+int noinline btrfs_update_device(struct btrfs_trans_handle *trans,
 			struct btrfs_device *device)
 {
 	int ret;

@@ -1544,8 +1545,8 @@ int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
 	return 0;
 }

-static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
-			       int sub_stripes)
+static u64 noinline chunk_bytes_by_type(u64 type, u64 calc_size,
+					int num_stripes, int sub_stripes)
 {
 	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
 		return calc_size;

@@ -2141,7 +2142,8 @@ struct async_sched {
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
-int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
+static int noinline schedule_bio(struct btrfs_root *root,
+				 struct btrfs_device *device,
 		 int rw, struct bio *bio)
 {
 	int should_queue = 1;
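Most of the fs/btrfs/file.c and fs/btrfs/volumes.c hunks only add the kernel's noinline annotation to helpers. Given the commit title, the likely point is debuggability: an inlined function vanishes from backtraces, while a noinline one keeps its own stack frame and symbol, so reports of corrupted metadata point at the right helper. A small userspace sketch, assuming GCC's __attribute__((noinline)) (which is what the kernel's noinline macro expands to); find_free_slot() is a hypothetical helper, not btrfs code:

#include <stdio.h>

/* GCC's noinline attribute; the kernel's "noinline" macro expands to the
 * same thing, which is why annotated helpers keep their own stack frames
 * and show up by name in oops backtraces. */
#define noinline __attribute__((noinline))

static noinline int find_free_slot(const int *slots, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (slots[i] == 0)
                        return i;
        }
        return -1;
}

int main(void)
{
        int slots[4] = { 1, 1, 0, 1 };

        /* Because find_free_slot() is noinline, a debugger or a crash
         * backtrace taken inside it reports the helper itself rather than
         * attributing the work to main(). */
        printf("first free slot: %d\n", find_free_slot(slots, 4));
        return 0;
}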