openanolis / cloud-kernel, commit 07157aac

Btrfs: Add file data csums back in via hooks in the extent map code

Authored by Chris Mason on Aug 30, 2007
Committed by David Woodhouse on Aug 30, 2007
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Parent: 95e05289
Showing 3 changed files with 193 additions and 14 deletions (+193 -14):

fs/btrfs/extent_map.c    +84  -10
fs/btrfs/extent_map.h    +14  -1
fs/btrfs/inode.c         +95  -3
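For orientation before the diffs: the commit routes file-data checksumming through three per-tree hooks. writepage_io_hook csums a dirty range as it is written, readpage_io_hook stashes the on-disk csum in the extent state's new private field when a read is submitted, and readpage_end_io_hook recomputes and compares once the read completes. The toy userspace model below illustrates only that flow; the hook names mirror the diff, but the byte-sum checksum and the array-backed stores are stand-ins, not btrfs code.

/* Toy model of the three data-csum hooks added by this commit (illustrative only). */
#include <stdint.h>
#include <stdio.h>

struct toy_extent_state {
	uint64_t start;
	uint64_t private;	/* stashed checksum, as in extent_state->private */
};

static struct toy_extent_state state_store[16];	/* stand-in for the state rb-tree */
static uint64_t csum_store[16];			/* stand-in for the on-disk csum items */

/* stand-in for btrfs_csum_data(): NOT the CRC32 used by btrfs */
static uint32_t toy_csum(const char *data, size_t len)
{
	uint32_t sum = 0;
	while (len--)
		sum = sum * 31 + (unsigned char)*data++;
	return sum;
}

/* writepage_io_hook: csum the dirty range before it is written */
static void toy_writepage_io_hook(int slot, const char *data, size_t len)
{
	csum_store[slot] = toy_csum(data, len);
}

/* readpage_io_hook: load the stored csum into the extent state's private slot */
static void toy_readpage_io_hook(int slot, uint64_t start)
{
	state_store[slot].start = start;
	state_store[slot].private = csum_store[slot];
}

/* readpage_end_io_hook: recompute and compare once the read completes */
static int toy_readpage_end_io_hook(int slot, const char *data, size_t len)
{
	return toy_csum(data, len) == (uint32_t)state_store[slot].private ? 0 : -1;
}

int main(void)
{
	char page[] = "file data";

	toy_writepage_io_hook(0, page, sizeof(page));
	toy_readpage_io_hook(0, 0);
	printf("clean read: %d\n", toy_readpage_end_io_hook(0, page, sizeof(page)));

	page[0] ^= 0xff;	/* simulate corruption between write and read */
	printf("corrupt read: %d\n", toy_readpage_end_io_hook(0, page, sizeof(page)));
	return 0;
}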
fs/btrfs/extent_map.c
@@ -57,7 +57,7 @@ void extent_map_tree_init(struct extent_map_tree *tree,
 {
 	tree->map.rb_node = NULL;
 	tree->state.rb_node = NULL;
-	tree->fill_delalloc = NULL;
+	tree->ops = NULL;
 	rwlock_init(&tree->lock);
 	tree->mapping = mapping;
 }
@@ -93,6 +93,7 @@ struct extent_state *alloc_extent_state(gfp_t mask)
 		return state;
 	state->state = 0;
 	state->in_tree = 0;
+	state->private = 0;
 	atomic_set(&state->refs, 1);
 	init_waitqueue_head(&state->wq);
 	return state;
@@ -1034,6 +1035,61 @@ int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
 }
 EXPORT_SYMBOL(unlock_range);
 
+int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	int ret = 0;
+
+	write_lock_irq(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(&tree->state, start);
+	if (!node || IS_ERR(node)) {
+		ret = -ENOENT;
+		goto out;
+	}
+	state = rb_entry(node, struct extent_state, rb_node);
+	if (state->start != start) {
+		ret = -ENOENT;
+		goto out;
+	}
+	state->private = private;
+out:
+	write_unlock_irq(&tree->lock);
+	return ret;
+}
+
+int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
+{
+	struct rb_node *node;
+	struct extent_state *state;
+	int ret = 0;
+
+	read_lock_irq(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(&tree->state, start);
+	if (!node || IS_ERR(node)) {
+		ret = -ENOENT;
+		goto out;
+	}
+	state = rb_entry(node, struct extent_state, rb_node);
+	if (state->start != start) {
+		ret = -ENOENT;
+		goto out;
+	}
+	*private = state->private;
+out:
+	read_unlock_irq(&tree->lock);
+	return ret;
+}
+
 /*
  * searches a range in the state tree for a given mask.
  * If 'filled' == 1, this returns 1 only if ever extent in the tree
@@ -1185,12 +1241,13 @@ static int end_bio_extent_writepage(struct bio *bio,
 static int end_bio_extent_readpage(struct bio *bio,
 				   unsigned int bytes_done, int err)
 {
-	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 	struct extent_map_tree *tree = bio->bi_private;
 	u64 start;
 	u64 end;
 	int whole_page;
+	int ret;
 
 	if (bio->bi_size)
 		return 1;
@@ -1208,6 +1265,11 @@ static int end_bio_extent_readpage(struct bio *bio,
 		if (--bvec >= bio->bi_io_vec)
 			prefetchw(&bvec->bv_page->flags);
 
+		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
+			ret = tree->ops->readpage_end_io_hook(page, start, end);
+			if (ret)
+				uptodate = 0;
+		}
 		if (uptodate) {
 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
 			if (whole_page)
@@ -1388,9 +1450,16 @@ int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
 			continue;
 		}
 
-		ret = submit_extent_page(READ, tree, page,
-					 sector, iosize, page_offset, bdev,
-					 end_bio_extent_readpage);
+		ret = 0;
+		if (tree->ops && tree->ops->readpage_io_hook) {
+			ret = tree->ops->readpage_io_hook(page, cur,
+							  cur + iosize - 1);
+		}
+		if (!ret) {
+			ret = submit_extent_page(READ, tree, page,
+					 sector, iosize, page_offset, bdev,
+					 end_bio_extent_readpage);
+		}
 		if (ret)
 			SetPageError(page);
 		cur = cur + iosize;
@@ -1462,7 +1531,7 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 					       &delalloc_end,
 					       128 * 1024 * 1024);
 	if (nr_delalloc) {
-		tree->fill_delalloc(inode, start, delalloc_end);
+		tree->ops->fill_delalloc(inode, start, delalloc_end);
 		if (delalloc_end >= page_end + 1) {
 			clear_extent_bit(tree, page_end + 1, delalloc_end,
 					 EXTENT_LOCKED | EXTENT_DELALLOC,
@@ -1528,12 +1597,17 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 			continue;
 		}
 		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
-		set_range_writeback(tree, cur, cur + iosize - 1);
-		ret = submit_extent_page(WRITE, tree, page, sector,
-					 iosize, page_offset, bdev,
-					 end_bio_extent_writepage);
+		ret = tree->ops->writepage_io_hook(page, cur, cur + iosize - 1);
 		if (ret)
 			SetPageError(page);
+		else {
+			set_range_writeback(tree, cur, cur + iosize - 1);
+			ret = submit_extent_page(WRITE, tree, page, sector,
+						 iosize, page_offset, bdev,
+						 end_bio_extent_writepage);
+			if (ret)
+				SetPageError(page);
+		}
 		cur = cur + iosize;
 		page_offset += iosize;
 		nr++;
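Usage note on the new set_state_private()/get_state_private() helpers above: both return -ENOENT unless the state tree already holds an extent_state record starting exactly at the requested offset, so callers rely on the range having been locked into the state tree first. A hypothetical caller (not part of this commit) might look like this:

/* Hypothetical caller, for illustration only: stash and read back a value
 * for a range already represented by an extent_state that starts exactly
 * at `start`. */
static int stash_and_check(struct extent_map_tree *tree, u64 start)
{
	u64 val;
	int ret;

	ret = set_state_private(tree, start, 0x12345678ULL);
	if (ret)		/* -ENOENT: no state record starts at `start` */
		return ret;
	ret = get_state_private(tree, start, &val);
	if (ret)
		return ret;
	return val == 0x12345678ULL ? 0 : -EIO;
}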
fs/btrfs/extent_map.h
@@ -6,12 +6,19 @@
 #define EXTENT_MAP_INLINE (u64)-2
 #define EXTENT_MAP_DELALLOC (u64)-1
 
+struct extent_map_ops {
+	int (*fill_delalloc)(struct inode *inode, u64 start, u64 end);
+	int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
+	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
+	int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end);
+};
+
 struct extent_map_tree {
 	struct rb_root map;
 	struct rb_root state;
 	struct address_space *mapping;
 	rwlock_t lock;
-	int (*fill_delalloc)(struct inode *inode, u64 start, u64 end);
+	struct extent_map_ops *ops;
 };
 
 /* note, this must start with the same fields as fs/extent_map.c:tree_entry */
@@ -36,6 +43,10 @@ struct extent_state {
 	wait_queue_head_t wq;
 	atomic_t refs;
 	unsigned long state;
+
+	/* for use by the FS */
+	u64 private;
+
 	struct list_head list;
 };
@@ -89,4 +100,6 @@ int extent_commit_write(struct extent_map_tree *tree,
 			struct inode *inode, struct page *page,
 			unsigned from, unsigned to);
 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end);
+int set_state_private(struct extent_map_tree *tree, u64 start, u64 private);
+int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private);
 #endif
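A filesystem opts in by pointing extent_map_tree.ops at a static extent_map_ops table; btrfs does this in the fs/btrfs/inode.c hunks below. A hedged sketch of the pattern follows, with hypothetical foo_* names and stub hook bodies. Note that as of this commit the read-side hooks are NULL-checked by their callers, while fill_delalloc and writepage_io_hook are invoked unconditionally from extent_write_full_page.

/* Illustrative wiring for a hypothetical filesystem "foo"; the real btrfs
 * wiring is in fs/btrfs/inode.c below. The foo_* hooks are stubs. */
static int foo_fill_delalloc(struct inode *inode, u64 start, u64 end)
{
	return 0;	/* would allocate space for the delalloc range */
}

static int foo_writepage_io_hook(struct page *page, u64 start, u64 end)
{
	return 0;	/* would csum [start, end] before it hits the disk */
}

static int foo_readpage_io_hook(struct page *page, u64 start, u64 end)
{
	return 0;	/* would stash the expected csum via set_state_private() */
}

static int foo_readpage_end_io_hook(struct page *page, u64 start, u64 end)
{
	return 0;	/* would recompute and compare via get_state_private() */
}

static struct extent_map_ops foo_extent_map_ops = {
	.fill_delalloc		= foo_fill_delalloc,
	.writepage_io_hook	= foo_writepage_io_hook,
	.readpage_io_hook	= foo_readpage_io_hook,
	.readpage_end_io_hook	= foo_readpage_end_io_hook,
};

static void foo_attach_extent_ops(struct inode *inode,
				  struct extent_map_tree *tree)
{
	extent_map_tree_init(tree, inode->i_mapping, GFP_NOFS);
	tree->ops = &foo_extent_map_ops;
}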
fs/btrfs/inode.c
@@ -52,6 +52,7 @@ static struct inode_operations btrfs_file_inode_operations;
 static struct address_space_operations btrfs_aops;
 static struct address_space_operations btrfs_symlink_aops;
 static struct file_operations btrfs_dir_file_operations;
+static struct extent_map_ops btrfs_extent_map_ops;
 
 static struct kmem_cache *btrfs_inode_cachep;
 struct kmem_cache *btrfs_trans_handle_cachep;
@@ -103,6 +104,90 @@ static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
 	return ret;
 }
 
+int btrfs_writepage_io_hook(struct page *page, u64 start, u64 end)
+{
+	struct inode *inode = page->mapping->host;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_trans_handle *trans;
+	char *kaddr;
+	int ret;
+	u64 page_start = page->index << PAGE_CACHE_SHIFT;
+	size_t offset = start - page_start;
+
+	mutex_lock(&root->fs_info->fs_mutex);
+	trans = btrfs_start_transaction(root, 1);
+	btrfs_set_trans_block_group(trans, inode);
+	kaddr = kmap(page);
+	btrfs_csum_file_block(trans, root, inode->i_ino,
+			      start, kaddr + offset, end - start + 1);
+	kunmap(page);
+	ret = btrfs_end_transaction(trans, root);
+	BUG_ON(ret);
+	mutex_unlock(&root->fs_info->fs_mutex);
+	return ret;
+}
+
+int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
+{
+	int ret = 0;
+	struct inode *inode = page->mapping->host;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct btrfs_csum_item *item;
+	struct btrfs_path *path = NULL;
+	u64 private;
+
+	mutex_lock(&root->fs_info->fs_mutex);
+	path = btrfs_alloc_path();
+	item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
+	if (IS_ERR(item)) {
+		ret = PTR_ERR(item);
+		/* a csum that isn't present is a preallocated region. */
+		if (ret == -ENOENT || ret == -EFBIG)
+			ret = 0;
+		private = 0;
+		goto out;
+	}
+	memcpy((char *)&private, &item->csum, BTRFS_CRC32_SIZE);
+	set_state_private(em_tree, start, private);
+out:
+	if (path)
+		btrfs_free_path(path);
+	mutex_unlock(&root->fs_info->fs_mutex);
+	return ret;
+}
+
+int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end)
+{
+	char csum[BTRFS_CRC32_SIZE];
+	size_t offset = start - (page->index << PAGE_CACHE_SHIFT);
+	struct inode *inode = page->mapping->host;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	char *kaddr;
+	u64 private;
+	int ret;
+
+	ret = get_state_private(em_tree, start, &private);
+	kaddr = kmap_atomic(page, KM_IRQ0);
+	if (ret) {
+		goto zeroit;
+	}
+	ret = btrfs_csum_data(root, kaddr + offset, end - start + 1, csum);
+	BUG_ON(ret);
+	if (memcmp(csum, &private, BTRFS_CRC32_SIZE)) {
+		goto zeroit;
+	}
+	kunmap_atomic(kaddr, KM_IRQ0);
+	return 0;
+
+zeroit:
+	printk("btrfs csum failed ino %lu off %llu\n",
+	       page->mapping->host->i_ino, (unsigned long long)start);
+	memset(kaddr + offset, 1, end - start + 1);
+	flush_dcache_page(page);
+	kunmap_atomic(kaddr, KM_IRQ0);
+	return 0;
+}
+
 void btrfs_read_locked_inode(struct inode *inode)
 {
@@ -155,7 +240,7 @@ void btrfs_read_locked_inode(struct inode *inode)
 	switch (inode->i_mode & S_IFMT) {
 	case S_IFREG:
 		inode->i_mapping->a_ops = &btrfs_aops;
-		BTRFS_I(inode)->extent_tree.fill_delalloc = run_delalloc_range;
+		BTRFS_I(inode)->extent_tree.ops = &btrfs_extent_map_ops;
 		inode->i_fop = &btrfs_file_operations;
 		inode->i_op = &btrfs_file_inode_operations;
 		break;
@@ -1148,7 +1233,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 		inode->i_op = &btrfs_file_inode_operations;
 		extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
 				     inode->i_mapping, GFP_NOFS);
-		BTRFS_I(inode)->extent_tree.fill_delalloc = run_delalloc_range;
+		BTRFS_I(inode)->extent_tree.ops = &btrfs_extent_map_ops;
 	}
 	dir->i_sb->s_dirt = 1;
 	btrfs_update_inode_block_group(trans, inode);
@@ -2286,7 +2371,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 		inode->i_op = &btrfs_file_inode_operations;
 		extent_map_tree_init(&BTRFS_I(inode)->extent_tree,
 				     inode->i_mapping, GFP_NOFS);
-		BTRFS_I(inode)->extent_tree.fill_delalloc = run_delalloc_range;
+		BTRFS_I(inode)->extent_tree.ops = &btrfs_extent_map_ops;
 	}
 	dir->i_sb->s_dirt = 1;
 	btrfs_update_inode_block_group(trans, inode);
@@ -2362,6 +2447,13 @@ static struct file_operations btrfs_dir_file_operations = {
 #endif
 };
 
+static struct extent_map_ops btrfs_extent_map_ops = {
+	.fill_delalloc = run_delalloc_range,
+	.writepage_io_hook = btrfs_writepage_io_hook,
+	.readpage_io_hook = btrfs_readpage_io_hook,
+	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
+};
+
 static struct address_space_operations btrfs_aops = {
 	.readpage = btrfs_readpage,
 	.writepage = btrfs_writepage,
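Both btrfs_writepage_io_hook() and btrfs_readpage_end_io_hook() above convert the file offset start into an offset within the page as start - (page->index << PAGE_CACHE_SHIFT). A quick standalone illustration of that arithmetic, assuming 4 KiB pages; the constant and values here are illustrative, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT 12	/* 4 KiB pages, as on most architectures */

int main(void)
{
	uint64_t start = 0x3240;			/* file offset of the range */
	uint64_t page_index = start >> TOY_PAGE_SHIFT;	/* which page: 3 */
	uint64_t page_start = page_index << TOY_PAGE_SHIFT;
	size_t offset = start - page_start;		/* offset inside the page: 0x240 */

	printf("page %llu, in-page offset 0x%zx\n",
	       (unsigned long long)page_index, offset);
	return 0;
}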