Commit 42f31734
Authored May 25, 2016 by David Sterba

Merge branch 'cleanups-4.7' into for-chris-4.7-20160525

Parents: e7344086 01327610

Showing 36 changed files with 193 additions and 196 deletions (+193 -196)
fs/btrfs/backref.c                  +1  -1
fs/btrfs/btrfs_inode.h              +1  -1
fs/btrfs/check-integrity.c          +1  -1
fs/btrfs/ctree.c                    +7  -7
fs/btrfs/ctree.h                    +3  -3
fs/btrfs/delayed-ref.h              +1  -1
fs/btrfs/dev-replace.c              +1  -1
fs/btrfs/disk-io.c                  +7  -7
fs/btrfs/extent-tree.c              +24 -24
fs/btrfs/extent_io.c                +44 -44
fs/btrfs/extent_io.h                +17 -17
fs/btrfs/extent_map.c               +1  -1
fs/btrfs/file-item.c                +1  -1
fs/btrfs/file.c                     +2  -2
fs/btrfs/free-space-cache.c         +1  -1
fs/btrfs/free-space-cache.h         +1  -1
fs/btrfs/inode.c                    +13 -14
fs/btrfs/ioctl.c                    +6  -6
fs/btrfs/ordered-data.h             +1  -1
fs/btrfs/qgroup.c                   +11 -13
fs/btrfs/raid56.c                   +3  -3
fs/btrfs/relocation.c               +9  -10
fs/btrfs/root-tree.c                +2  -2
fs/btrfs/scrub.c                    +4  -4
fs/btrfs/send.c                     +3  -3
fs/btrfs/struct-funcs.c             +1  -1
fs/btrfs/super.c                    +4  -4
fs/btrfs/tests/extent-io-tests.c    +5  -5
fs/btrfs/tests/free-space-tests.c   +4  -3
fs/btrfs/tests/inode-tests.c        +1  -1
fs/btrfs/tests/qgroup-tests.c       +1  -1
fs/btrfs/transaction.c              +1  -1
fs/btrfs/transaction.h              +1  -1
fs/btrfs/tree-log.c                 +5  -5
fs/btrfs/ulist.c                    +1  -1
fs/btrfs/volumes.c                  +4  -4
fs/btrfs/backref.c
@@ -1939,7 +1939,7 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
  * from ipath->fspath->val[i].
  * when it returns, there are ipath->fspath->elem_cnt number of paths available
  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
- * number of missed paths in recored in ipath->fspath->elem_missed, otherwise,
+ * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
  * have been needed to return all paths.
  */
fs/btrfs/btrfs_inode.h
@@ -313,7 +313,7 @@ struct btrfs_dio_private {
 	struct bio *dio_bio;

 	/*
-	 * The original bio may be splited to several sub-bios, this is
+	 * The original bio may be split to several sub-bios, this is
 	 * done during endio of sub-bios
 	 */
 	int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
fs/btrfs/check-integrity.c
@@ -1939,7 +1939,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
 		/*
 		 * Clear all references of this block. Do not free
 		 * the block itself even if is not referenced anymore
-		 * because it still carries valueable information
+		 * because it still carries valuable information
 		 * like whether it was ever written and IO completed.
 		 */
 		list_for_each_entry_safe(l, tmp, &block->ref_to_list,
fs/btrfs/ctree.c
@@ -156,7 +156,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
 		/*
 		 * RCU really hurts here, we could free up the root node because
-		 * it was cow'ed but we may not get the new root node yet so do
+		 * it was COWed but we may not get the new root node yet so do
 		 * the inc_not_zero dance and if it doesn't work then
 		 * synchronize_rcu and try again.
 		 */
@@ -955,7 +955,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
 			      struct extent_buffer *buf)
 {
 	/*
-	 * Tree blocks not in refernece counted trees and tree roots
+	 * Tree blocks not in reference counted trees and tree roots
 	 * are never shared. If a block was allocated after the last
 	 * snapshot and the block was not allocated by tree relocation,
 	 * we know the block is not shared.
@@ -1270,7 +1270,7 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
 /*
  * tm is a pointer to the first operation to rewind within eb. then, all
- * previous operations will be rewinded (until we reach something older than
+ * previous operations will be rewound (until we reach something older than
  * time_seq).
  */
 static void
@@ -1345,7 +1345,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
 }

 /*
- * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
+ * Called with eb read locked. If the buffer cannot be rewound, the same buffer
  * is returned. If rewind operations happen, a fresh buffer is returned. The
  * returned buffer is always read-locked. If the returned buffer is not the
  * input buffer, the lock on the input buffer is released and the input buffer
@@ -1516,7 +1516,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
  * 3) the root is not forced COW.
  *
  * What is forced COW:
- *    when we create snapshot during commiting the transaction,
+ *    when we create snapshot during committing the transaction,
  *    after we've finished coping src root, we must COW the shared
  *    block to ensure the metadata consistency.
  */
@@ -1531,7 +1531,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
 /*
  * cows a single block, see __btrfs_cow_block for the real work.
- * This version of it has extra checks so that a block isn't cow'd more than
+ * This version of it has extra checks so that a block isn't COWed more than
  * once per transaction, as long as it hasn't been written yet
  */
 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
@@ -2986,7 +2986,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
 		btrfs_unlock_up_safe(p, level + 1);

 		/*
-		 * Since we can unwind eb's we want to do a real search every
+		 * Since we can unwind ebs we want to do a real search every
 		 * time.
 		 */
 		prev_cmp = -1;
fs/btrfs/ctree.h
@@ -89,7 +89,7 @@ static const int btrfs_csum_sizes[] = { 4 };
 /* four bytes for CRC32 */
 #define BTRFS_EMPTY_DIR_SIZE 0

-/* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */
+/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
 #define REQ_GET_READ_MIRRORS	(1 << 30)

 /* ioprio of readahead is set to idle */
@@ -431,7 +431,7 @@ struct btrfs_space_info {
 	 * bytes_pinned does not reflect the bytes that will be pinned once the
 	 * delayed refs are flushed, so this counter is inc'ed every time we
 	 * call btrfs_free_extent so it is a realtime count of what will be
-	 * freed once the transaction is committed. It will be zero'ed every
+	 * freed once the transaction is committed. It will be zeroed every
 	 * time the transaction commits.
 	 */
 	struct percpu_counter total_bytes_pinned;
@@ -1401,7 +1401,7 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
 	token->kaddr = NULL;
 }

-/* some macros to generate set/get funcs for the struct fields.  This
+/* some macros to generate set/get functions for the struct fields.  This
  * assumes there is a lefoo_to_cpu for every type, so lets make a simple
  * one for u8:
  */
fs/btrfs/delayed-ref.h
@@ -188,7 +188,7 @@ struct btrfs_delayed_ref_root {
 	/*
 	 * To make qgroup to skip given root.
-	 * This is for snapshot, as btrfs_qgroup_inherit() will manully
+	 * This is for snapshot, as btrfs_qgroup_inherit() will manually
 	 * modify counters for snapshot and its source, so we should skip
 	 * the snapshot in new_root/old_roots or it will get calculated twice
 	 */
fs/btrfs/dev-replace.c
@@ -450,7 +450,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
 }

 /*
- * blocked until all flighting bios are finished.
+ * blocked until all in-flight bios operations are finished.
  */
 static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
 {
fs/btrfs/disk-io.c
@@ -384,7 +384,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	/*
 	 * Things reading via commit roots that don't have normal protection,
 	 * like send, can have a really old block in cache that may point at a
-	 * block that has been free'd and re-allocated.  So don't clear uptodate
+	 * block that has been freed and re-allocated.  So don't clear uptodate
 	 * if we find an eb that is under IO (dirty/writeback) because we could
 	 * end up reading in the stale data and then writing it back out and
 	 * making everybody very sad.
@@ -418,7 +418,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
 		/*
 		 * The super_block structure does not span the whole
 		 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
-		 * is filled with zeros and is included in the checkum.
+		 * is filled with zeros and is included in the checksum.
 		 */
 		crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
 				crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
@@ -600,7 +600,7 @@ static noinline int check_leaf(struct btrfs_root *root,
 		/*
 		 * Check to make sure that we don't point outside of the leaf,
-		 * just in case all the items are consistent to each other, but
+		 * just in case all the items are consistent to each other, but
 		 * all point outside of the leaf.
 		 */
 		if (btrfs_item_end_nr(leaf, slot) >
@@ -3022,7 +3022,7 @@ int open_ctree(struct super_block *sb,
 	}

 	/*
-	 * Mount does not set all options immediatelly, we can do it now and do
+	 * Mount does not set all options immediately, we can do it now and do
 	 * not have to wait for transaction commit
 	 */
 	btrfs_apply_pending_changes(fs_info);
@@ -3255,7 +3255,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 		btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
 				"lost page write due to IO error on %s",
 					  rcu_str_deref(device->name));
-		/* note, we dont' set_buffer_write_io_error because we have
+		/* note, we don't set_buffer_write_io_error because we have
 		 * our own ways of dealing with the IO errors
 		 */
 		clear_buffer_uptodate(bh);
@@ -4367,7 +4367,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
 		if (ret)
 			break;

-		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+		clear_extent_bits(dirty_pages, start, end, mark);
 		while (start <= end) {
 			eb = btrfs_find_tree_block(root->fs_info, start);
 			start += root->nodesize;
@@ -4402,7 +4402,7 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
 		if (ret)
 			break;

-		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+		clear_extent_dirty(unpin, start, end);
 		btrfs_error_unpin_extent_range(root, start, end);
 		cond_resched();
 	}
fs/btrfs/extent-tree.c
@@ -231,9 +231,9 @@ static int add_excluded_extent(struct btrfs_root *root,
 {
 	u64 end = start + num_bytes - 1;
 	set_extent_bits(&root->fs_info->freed_extents[0],
-			start, end, EXTENT_UPTODATE, GFP_NOFS);
+			start, end, EXTENT_UPTODATE);
 	set_extent_bits(&root->fs_info->freed_extents[1],
-			start, end, EXTENT_UPTODATE, GFP_NOFS);
+			start, end, EXTENT_UPTODATE);
 	return 0;
 }
@@ -246,9 +246,9 @@ static void free_excluded_extents(struct btrfs_root *root,
 	end = start + cache->key.offset - 1;

 	clear_extent_bits(&root->fs_info->freed_extents[0],
-			  start, end, EXTENT_UPTODATE, GFP_NOFS);
+			  start, end, EXTENT_UPTODATE);
 	clear_extent_bits(&root->fs_info->freed_extents[1],
-			  start, end, EXTENT_UPTODATE, GFP_NOFS);
+			  start, end, EXTENT_UPTODATE);
 }

 static int exclude_super_stripes(struct btrfs_root *root,
@@ -980,7 +980,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
  * event that tree block loses its owner tree's reference and do the
  * back refs conversion.
  *
- * When a tree block is COW'd through a tree, there are four cases:
+ * When a tree block is COWed through a tree, there are four cases:
  *
  * The reference count of the block is one and the tree is the block's
  * owner tree. Nothing to do in this case.
@@ -2595,7 +2595,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			}

 			/*
-			 * Need to drop our head ref lock and re-aqcuire the
+			 * Need to drop our head ref lock and re-acquire the
 			 * delayed ref lock and then re-check to make sure
 			 * nobody got added.
 			 */
@@ -2747,7 +2747,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
 	/*
 	 * We don't ever fill up leaves all the way so multiply by 2 just to be
-	 * closer to what we're really going to want too use.
+	 * closer to what we're really going to want to use.
 	 */
 	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
 }
@@ -2851,7 +2851,7 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 	}

 	/*
-	 * trans->sync means that when we call end_transaciton, we won't
+	 * trans->sync means that when we call end_transaction, we won't
 	 * wait on delayed refs
 	 */
 	trans->sync = true;
@@ -4296,7 +4296,7 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
  * Called if we need to clear a data reservation for this inode
  * Normally in a error case.
  *
- * This one will handle the per-indoe data rsv map for accurate reserved
+ * This one will handle the per-inode data rsv map for accurate reserved
  * space framework.
  */
 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
@@ -4967,7 +4967,7 @@ void btrfs_init_async_reclaim_work(struct work_struct *work)
  * @orig_bytes - the number of bytes we want
  * @flush - whether or not we can flush to make our reservation
  *
- * This will reserve orgi_bytes number of bytes from the space info associated
+ * This will reserve orig_bytes number of bytes from the space info associated
  * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
@@ -5572,7 +5572,7 @@ void btrfs_orphan_release_metadata(struct inode *inode)
  * common file/directory operations, they change two fs/file trees
  * and root tree, the number of items that the qgroup reserves is
  * different with the free space reservation. So we can not use
- * the space reseravtion mechanism in start_transaction().
+ * the space reservation mechanism in start_transaction().
  */
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 				     struct btrfs_block_rsv *rsv,
@@ -5621,7 +5621,7 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
 /**
  * drop_outstanding_extent - drop an outstanding extent
  * @inode: the inode we're dropping the extent for
- * @num_bytes: the number of bytes we're relaseing.
+ * @num_bytes: the number of bytes we're releasing.
  *
  * This is called when we are freeing up an outstanding extent, either called
  * after an error or after an extent is written.  This will return the number of
@@ -5647,7 +5647,7 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 		drop_inode_space = 1;

 	/*
-	 * If we have more or the same amount of outsanding extents than we have
+	 * If we have more or the same amount of outstanding extents than we have
 	 * reserved then we need to leave the reserved extents count alone.
 	 */
 	if (BTRFS_I(inode)->outstanding_extents >=
@@ -5661,8 +5661,8 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 }

 /**
- * calc_csum_metadata_size - return the amount of metada space that must be
- *	reserved/free'd for the given bytes.
+ * calc_csum_metadata_size - return the amount of metadata space that must be
+ *	reserved/freed for the given bytes.
  * @inode: the inode we're manipulating
  * @num_bytes: the number of bytes in question
  * @reserve: 1 if we are reserving space, 0 if we are freeing space
@@ -5814,7 +5814,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 		/*
 		 * This is tricky, but first we need to figure out how much we
-		 * free'd from any free-ers that occurred during this
+		 * freed from any free-ers that occurred during this
 		 * reservation, so we reset ->csum_bytes to the csum_bytes
 		 * before we dropped our lock, and then call the free for the
 		 * number of bytes that were freed while we were trying our
@@ -5836,7 +5836,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 		/*
 		 * Now reset ->csum_bytes to what it should be.  If bytes is
-		 * more than to_free then we would have free'd more space had we
+		 * more than to_free then we would have freed more space had we
 		 * not had an artificially high ->csum_bytes, so we need to free
 		 * the remainder.  If bytes is the same or less then we don't
 		 * need to do anything, the other free-ers did the correct
@@ -6515,7 +6515,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 		ret = btrfs_discard_extent(root, start,
 					   end + 1 - start, NULL);

-		clear_extent_dirty(unpin, start, end, GFP_NOFS);
+		clear_extent_dirty(unpin, start, end);
 		unpin_extent_range(root, start, end, true);
 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 		cond_resched();
@@ -7578,7 +7578,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
 		if (loop == LOOP_CACHING_NOWAIT) {
 			/*
 			 * We want to skip the LOOP_CACHING_WAIT step if we
-			 * don't have any unached bgs and we've alrelady done a
+			 * don't have any uncached bgs and we've already done a
 			 * full search through.
 			 */
 			if (orig_have_caching_bg || !full_search)
@@ -7982,7 +7982,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 	/*
 	 * Mixed block groups will exclude before processing the log so we only
-	 * need to do the exlude dance if this fs isn't mixed.
+	 * need to do the exclude dance if this fs isn't mixed.
 	 */
 	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
 		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
@@ -8032,7 +8032,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 				buf->start + buf->len - 1, GFP_NOFS);
 		else
 			set_extent_new(&root->dirty_log_pages, buf->start,
-					buf->start + buf->len - 1, GFP_NOFS);
+					buf->start + buf->len - 1);
 	} else {
 		buf->log_index = -1;
 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
@@ -9426,7 +9426,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
 	u64 free_bytes = 0;
 	int factor;

-	/* It's df, we don't care if it's racey */
+	/* It's df, we don't care if it's racy */
 	if (list_empty(&sinfo->ro_bgs))
 		return 0;
@@ -10635,14 +10635,14 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		 */
 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
 		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
-				  EXTENT_DIRTY, GFP_NOFS);
+				  EXTENT_DIRTY);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 			btrfs_dec_block_group_ro(root, block_group);
 			goto end_trans;
 		}
 		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
-				  EXTENT_DIRTY, GFP_NOFS);
+				  EXTENT_DIRTY);
 		if (ret) {
 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 			btrfs_dec_block_group_ro(root, block_group);
fs/btrfs/extent_io.c
@@ -726,14 +726,6 @@ static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	start = last_end + 1;
 	if (start <= end && state && !need_resched())
 		goto hit_next;
 	goto search_again;

-out:
-	spin_unlock(&tree->lock);
-	if (prealloc)
-		free_extent_state(prealloc);
-
-	return 0;
-
 search_again:
 	if (start > end)
@@ -742,6 +734,14 @@ static int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	if (gfpflags_allow_blocking(mask))
 		cond_resched();
 	goto again;
+
+out:
+	spin_unlock(&tree->lock);
+	if (prealloc)
+		free_extent_state(prealloc);
+
+	return 0;
+
 }

 static void wait_on_state(struct extent_io_tree *tree,
@@ -873,8 +873,14 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	bits |= EXTENT_FIRST_DELALLOC;
 again:
 	if (!prealloc && gfpflags_allow_blocking(mask)) {
+		/*
+		 * Don't care for allocation failure here because we might end
+		 * up not needing the pre-allocated extent state at all, which
+		 * is the case if we only have in the tree extent states that
+		 * cover our input range and don't cover too any other range.
+		 * If we end up needing a new extent state we allocate it later.
+		 */
 		prealloc = alloc_extent_state(mask);
 		BUG_ON(!prealloc);
 	}

 	spin_lock(&tree->lock);
@@ -1037,7 +1043,13 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 			goto out;
 		}

 	goto search_again;

+search_again:
+	if (start > end)
+		goto out;
+	spin_unlock(&tree->lock);
+	if (gfpflags_allow_blocking(mask))
+		cond_resched();
+	goto again;
+
 out:
 	spin_unlock(&tree->lock);
@@ -1046,13 +1058,6 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	return err;

-search_again:
-	if (start > end)
-		goto out;
-	spin_unlock(&tree->lock);
-	if (gfpflags_allow_blocking(mask))
-		cond_resched();
-	goto again;
 }

 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1073,17 +1078,18 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
  * @bits:	the bits to set in this range
  * @clear_bits:	the bits to clear in this range
  * @cached_state:	state that we're going to cache
- * @mask:	the allocation mask
  *
  * This will go through and set bits for the given range.  If any states exist
  * already in this range they are set with the given bit and cleared of the
  * clear_bits.  This is only meant to be used by things that are mergeable, ie
  * converting from say DELALLOC to DIRTY.  This is not meant to be used with
  * boundary bits like LOCK.
+ *
+ * All allocations are done with GFP_NOFS.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		       unsigned bits, unsigned clear_bits,
-		       struct extent_state **cached_state, gfp_t mask)
+		       struct extent_state **cached_state)
 {
 	struct extent_state *state;
 	struct extent_state *prealloc = NULL;
@@ -1098,7 +1104,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	btrfs_debug_check_extent_io_range(tree, start, end);

 again:
-	if (!prealloc && gfpflags_allow_blocking(mask)) {
+	if (!prealloc) {
 		/*
 		 * Best effort, don't worry if extent state allocation fails
 		 * here for the first iteration. We might have a cached state
@@ -1106,7 +1112,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		 * extent state allocations are needed. We'll only know this
 		 * after locking the tree.
 		 */
-		prealloc = alloc_extent_state(mask);
+		prealloc = alloc_extent_state(GFP_NOFS);
 		if (!prealloc && !first_iteration)
 			return -ENOMEM;
 	}
@@ -1263,7 +1269,13 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 			goto out;
 		}

 	goto search_again;

+search_again:
+	if (start > end)
+		goto out;
+	spin_unlock(&tree->lock);
+	cond_resched();
+	first_iteration = false;
+	goto again;
+
 out:
 	spin_unlock(&tree->lock);
@@ -1271,21 +1283,11 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	free_extent_state(prealloc);

 	return err;

-search_again:
-	if (start > end)
-		goto out;
-	spin_unlock(&tree->lock);
-	if (gfpflags_allow_blocking(mask))
-		cond_resched();
-	first_iteration = false;
-	goto again;
 }

 /* wrappers around set/clear extent bit */
 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits, gfp_t mask,
-			   struct extent_changeset *changeset)
+			   unsigned bits, struct extent_changeset *changeset)
 {
 	/*
 	 * We don't support EXTENT_LOCKED yet, as current changeset will
@@ -1295,7 +1297,7 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 	 */
 	BUG_ON(bits & EXTENT_LOCKED);

-	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
+	return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
 				changeset);
 }
@@ -1308,8 +1310,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 }

 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			     unsigned bits, gfp_t mask,
-			     struct extent_changeset *changeset)
+			     unsigned bits, struct extent_changeset *changeset)
 {
 	/*
 	 * Don't support EXTENT_LOCKED case, same reason as
@@ -1317,7 +1318,7 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 	 */
 	BUG_ON(bits & EXTENT_LOCKED);

-	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
+	return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
 				  changeset);
 }
@@ -1975,13 +1976,13 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec)
 	set_state_failrec(failure_tree, rec->start, NULL);
 	ret = clear_extent_bits(failure_tree, rec->start,
 				rec->start + rec->len - 1,
-				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+				EXTENT_LOCKED | EXTENT_DIRTY);
 	if (ret)
 		err = ret;

 	ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
 				rec->start + rec->len - 1,
-				EXTENT_DAMAGED, GFP_NOFS);
+				EXTENT_DAMAGED);
 	if (ret && !err)
 		err = ret;
@@ -2232,13 +2233,12 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
 		/* set the bits in the private failure tree */
 		ret = set_extent_bits(failure_tree, start, end,
-					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+					EXTENT_LOCKED | EXTENT_DIRTY);
 		if (ret >= 0)
 			ret = set_state_failrec(failure_tree, start, failrec);
 		/* set the bits in the inode's tree */
 		if (ret >= 0)
-			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
-						GFP_NOFS);
+			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
 		if (ret < 0) {
 			kfree(failrec);
 			return ret;
@@ -4605,7 +4605,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
 		if (mapped)
 			spin_unlock(&page->mapping->private_lock);

-		/* One for when we alloced the page */
+		/* One for when we allocated the page */
 		put_page(page);
 	} while (index != 0);
 }
@@ -5765,7 +5765,7 @@ int try_release_extent_buffer(struct page *page)
 	struct extent_buffer *eb;

 	/*
-	 * We need to make sure noboody is attaching this page to an eb right
+	 * We need to make sure nobody is attaching this page to an eb right
 	 * now.
 	 */
 	spin_lock(&page->mapping->private_lock);
fs/btrfs/extent_io.h
@@ -220,8 +220,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		   unsigned bits, int filled,
 		   struct extent_state *cached_state);
 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			     unsigned bits, gfp_t mask,
-			     struct extent_changeset *changeset);
+			     unsigned bits, struct extent_changeset *changeset);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		     unsigned bits, int wake, int delete,
 		     struct extent_state **cached, gfp_t mask);
@@ -240,27 +239,27 @@ static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
 }

 static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
-				    u64 end, unsigned bits, gfp_t mask)
+				    u64 end, unsigned bits)
 {
 	int wake = 0;

 	if (bits & EXTENT_LOCKED)
 		wake = 1;

-	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
+	return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, GFP_NOFS);
 }

 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-			   unsigned bits, gfp_t mask,
-			   struct extent_changeset *changeset);
+			   unsigned bits, struct extent_changeset *changeset);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		   unsigned bits, u64 *failed_start,
 		   struct extent_state **cached_state, gfp_t mask);

 static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
-				  u64 end, unsigned bits, gfp_t mask)
+				  u64 end, unsigned bits)
 {
-	return set_extent_bit(tree, start, end, bits, NULL, NULL, mask);
+	return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
 }

 static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -278,37 +277,38 @@ static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
 }

 static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
-				     u64 end, gfp_t mask)
+				     u64 end)
 {
 	return clear_extent_bit(tree, start, end,
 				EXTENT_DIRTY | EXTENT_DELALLOC |
-				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
+				EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
 }

 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 		       unsigned bits, unsigned clear_bits,
-		       struct extent_state **cached_state, gfp_t mask);
+		       struct extent_state **cached_state);

 static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
-				      u64 end, struct extent_state **cached_state, gfp_t mask)
+				      u64 end, struct extent_state **cached_state)
 {
 	return set_extent_bit(tree, start, end,
 			      EXTENT_DELALLOC | EXTENT_UPTODATE,
-			      NULL, cached_state, mask);
+			      NULL, cached_state, GFP_NOFS);
 }

 static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
-				    u64 end, struct extent_state **cached_state, gfp_t mask)
+				    u64 end, struct extent_state **cached_state)
 {
 	return set_extent_bit(tree, start, end,
 			      EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
-			      NULL, cached_state, mask);
+			      NULL, cached_state, GFP_NOFS);
 }

 static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
-				 u64 end, gfp_t mask)
+				 u64 end)
 {
-	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, mask);
+	return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, GFP_NOFS);
 }

 static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
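The extent_io.h hunks above carry the one API change in this merge besides the comment fixes: the set/clear/convert extent-bit wrappers drop their gfp_t mask argument and always pass GFP_NOFS internally. A minimal caller-side sketch of what that means, assuming a hypothetical helper mark_range_uptodate() that is not part of this commit:

/*
 * Illustrative sketch only: mark_range_uptodate() and its arguments are
 * hypothetical and not taken from this diff; set_extent_bits() and
 * EXTENT_UPTODATE are the real symbols touched by the change.
 */
static int mark_range_uptodate(struct extent_io_tree *tree, u64 start, u64 end)
{
	/* before this merge: set_extent_bits(tree, start, end, EXTENT_UPTODATE, GFP_NOFS); */
	/* after it, the allocation mask is implied (GFP_NOFS) by the helper itself: */
	return set_extent_bits(tree, start, end, EXTENT_UPTODATE);
}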
fs/btrfs/extent_map.c
@@ -62,7 +62,7 @@ struct extent_map *alloc_extent_map(void)
 /**
  * free_extent_map - drop reference count of an extent_map
- * @em:		extent map being releasead
+ * @em:		extent map being released
  *
  * Drops the reference out on @em by one and free the structure
  * if the reference count hits zero.
fs/btrfs/file-item.c
@@ -248,7 +248,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
 					set_extent_bits(io_tree, offset,
 						offset + root->sectorsize - 1,
-						EXTENT_NODATASUM, GFP_NOFS);
+						EXTENT_NODATASUM);
 				} else {
 					btrfs_info(BTRFS_I(inode)->root->fs_info,
 						   "no csum found for inode %llu start %llu",
fs/btrfs/file.c
@@ -2026,7 +2026,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	    BTRFS_I(inode)->last_trans
 	    <= root->fs_info->last_trans_committed)) {
 		/*
-		 * We'v had everything committed since the last time we were
+		 * We've had everything committed since the last time we were
 		 * modified so clear this flag in case it was set for whatever
 		 * reason, it's no longer relevant.
 		 */
@@ -2374,7 +2374,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	/* Check the aligned pages after the first unaligned page,
 	 * if offset != orig_start, which means the first unaligned page
-	 * including serveral following pages are already in holes,
+	 * including several following pages are already in holes,
 	 * the extra check can be skipped */
 	if (offset == orig_start) {
 		/* after truncate page, check hole again */
fs/btrfs/free-space-cache.c
@@ -1983,7 +1983,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	/*
 	 * If this block group has some small extents we don't want to
 	 * use up all of our free slots in the cache with them, we want
-	 * to reserve them to larger extents, however if we have plent
+	 * to reserve them to larger extents, however if we have plenty
 	 * of cache left then go ahead an dadd them, no sense in adding
 	 * the overhead of a bitmap if we don't have to.
 	 */
fs/btrfs/free-space-cache.h
@@ -123,7 +123,7 @@ int btrfs_return_cluster_to_free_space(
 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 			   u64 *trimmed, u64 start, u64 end, u64 minlen);

-/* Support functions for runnint our sanity tests */
+/* Support functions for running our sanity tests */
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
 			      u64 offset, u64 bytes, bool bitmap);
fs/btrfs/inode.c
@@ -455,7 +455,7 @@ static noinline void compress_file_range(struct inode *inode,
 	/*
 	 * skip compression for a small file range(<=blocksize) that
-	 * isn't an inline extent, since it dosen't save disk space at all.
+	 * isn't an inline extent, since it doesn't save disk space at all.
 	 */
 	if (total_compressed <= blocksize &&
 	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
@@ -1978,7 +1978,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 {
 	WARN_ON((end & (PAGE_SIZE - 1)) == 0);
 	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
-				   cached_state, GFP_NOFS);
+				   cached_state);
 }

 /* see btrfs_writepage_start_hook for details on why this is required */
@@ -3119,8 +3119,7 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
-		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
-				  GFP_NOFS);
+		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
 		return 0;
 	}
@@ -3722,7 +3721,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
 	 * and doesn't have an inode ref with the name "bar" anymore.
 	 *
 	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
-	 * but it guarantees correctness at the expense of ocassional full
+	 * but it guarantees correctness at the expense of occasional full
 	 * transaction commits on fsync if our inode is a directory, or if our
 	 * inode is not a directory, logging its parent unnecessarily.
 	 */
@@ -4978,7 +4977,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 		 * be instantly completed which will give us extents that need
 		 * to be truncated.  If we fail to get an orphan inode down we
 		 * could have left over extents that were never meant to live,
-		 * so we need to garuntee from this point on that everything
+		 * so we need to guarantee from this point on that everything
 		 * will be consistent.
 		 */
 		ret = btrfs_orphan_add(trans, inode);
@@ -5248,7 +5247,7 @@ void btrfs_evict_inode(struct inode *inode)
 		}

 		/*
-		 * We can't just steal from the global reserve, we need tomake
+		 * We can't just steal from the global reserve, we need to make
 		 * sure there is room to do it, if not we need to commit and try
 		 * again.
 		 */
@@ -7433,7 +7432,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 				 cached_state);
 		/*
 		 * We're concerned with the entire range that we're going to be
-		 * doing DIO to, so we need to make sure theres no ordered
+		 * doing DIO to, so we need to make sure there's no ordered
 		 * extents in this range.
 		 */
 		ordered = btrfs_lookup_ordered_range(inode, lockstart,
@@ -7595,7 +7594,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 	if (current->journal_info) {
 		/*
 		 * Need to pull our outstanding extents and set journal_info to NULL so
-		 * that anything that needs to check if there's a transction doesn't get
+		 * that anything that needs to check if there's a transaction doesn't get
 		 * confused.
 		 */
 		dio_data = current->journal_info;
@@ -7628,7 +7627,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 	 * decompress it, so there will be buffering required no matter what we
 	 * do, so go ahead and fallback to buffered.
 	 *
-	 * We return -ENOTBLK because thats what makes DIO go ahead and go back
+	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
 	 * to buffered IO.  Don't blame me, this is the price we pay for using
 	 * the generic code.
 	 */
@@ -9041,7 +9040,7 @@ static int btrfs_truncate(struct inode *inode)
 		return ret;

 	/*
-	 * Yes ladies and gentelment, this is indeed ugly.  The fact is we have
+	 * Yes ladies and gentlemen, this is indeed ugly.  The fact is we have
 	 * 3 things going on here
 	 *
 	 * 1) We need to reserve space for our orphan item and the space to
@@ -9055,15 +9054,15 @@ static int btrfs_truncate(struct inode *inode)
 	 * space reserved in case it uses space during the truncate (thank you
 	 * very much snapshotting).
 	 *
-	 * And we need these to all be seperate.  The fact is we can use a lot of
+	 * And we need these to all be separate.  The fact is we can use a lot of
 	 * space doing the truncate, and we have no earthly idea how much space
-	 * we will use, so we need the truncate reservation to be seperate so it
+	 * we will use, so we need the truncate reservation to be separate so it
 	 * doesn't end up using space reserved for updating the inode or
 	 * removing the orphan item.  We also need to be able to stop the
 	 * transaction and start a new one, which means we need to be able to
 	 * update the inode several times, and we have no idea of knowing how
 	 * many times that will be, so we can't just reserve 1 item for the
-	 * entirety of the opration, so that has to be done seperately as well.
+	 * entirety of the operation, so that has to be done separately as well.
 	 * Then there is the orphan item, which does indeed need to be held on
 	 * to for the whole operation, and we need nobody to touch this reserved
 	 * space except the orphan code.
fs/btrfs/ioctl.c
@@ -296,7 +296,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 		}
 	} else {
 		/*
-		 * Revert back under same assuptions as above
+		 * Revert back under same assumptions as above
 		 */
 		if (S_ISREG(mode)) {
 			if (inode->i_size == 0)
@@ -465,7 +465,7 @@ static noinline int create_subvol(struct inode *dir,
 	/*
 	 * Don't create subvolume whose level is not zero. Or qgroup will be
-	 * screwed up since it assume subvolme qgroup's level to be 0.
+	 * screwed up since it assumes subvolume qgroup's level to be 0.
 	 */
 	if (btrfs_qgroup_level(objectid)) {
 		ret = -ENOSPC;
@@ -780,7 +780,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
  *  a. be owner of dir, or
  *  b. be owner of victim, or
  *  c. have CAP_FOWNER capability
- *  6. If the victim is append-only or immutable we can't do antyhing with
+ *  6. If the victim is append-only or immutable we can't do anything with
  *     links pointing to it.
  *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
  *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
@@ -1237,7 +1237,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
 	set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
-			  &cached_state, GFP_NOFS);
+			  &cached_state);

 	unlock_extent_cached(&BTRFS_I(inode)->io_tree,
 			     page_start, page_end - 1, &cached_state,
@@ -4650,7 +4650,7 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 	}

 	/*
-	 * mut. excl. ops lock is locked.  Three possibilites:
+	 * mut. excl. ops lock is locked.  Three possibilities:
 	 * (1) some other op is running
 	 * (2) balance is running
 	 * (3) balance is paused -- special case (think resume)
@@ -5567,7 +5567,7 @@ long btrfs_ioctl(struct file *file, unsigned int
 		ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
 		/*
 		 * The transaction thread may want to do more work,
-		 * namely it pokes the cleaner ktread that will start
+		 * namely it pokes the cleaner kthread that will start
 		 * processing uncleaned subvols.
 		 */
 		wake_up_process(root->fs_info->transaction_kthread);
fs/btrfs/ordered-data.h
@@ -58,7 +58,7 @@ struct btrfs_ordered_sum {
 #define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */

-#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */
+#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to preallocated extent */

 #define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
fs/btrfs/qgroup.c
@@ -85,7 +85,7 @@ struct btrfs_qgroup {
 	/*
 	 * temp variables for accounting operations
-	 * Refer to qgroup_shared_accouting() for details.
+	 * Refer to qgroup_shared_accounting() for details.
 	 */
 	u64 old_refcnt;
 	u64 new_refcnt;
@@ -499,7 +499,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 	}
 	/*
 	 * we call btrfs_free_qgroup_config() when umounting
-	 * filesystem and disabling quota, so we set qgroup_ulit
+	 * filesystem and disabling quota, so we set qgroup_ulist
 	 * to be null here to avoid double free.
 	 */
 	ulist_free(fs_info->qgroup_ulist);
@@ -1036,7 +1036,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
 /*
  * The easy accounting, if we are adding/removing the only ref for an extent
- * then this qgroup and all of the parent qgroups get their refrence and
+ * then this qgroup and all of the parent qgroups get their reference and
  * exclusive counts adjusted.
  *
  * Caller should hold fs_info->qgroup_lock.
@@ -1436,7 +1436,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
 	/*
 	 * No need to do lock, since this function will only be called in
-	 * btrfs_commmit_transaction().
+	 * btrfs_commit_transaction().
 	 */
 	node = rb_first(&delayed_refs->dirty_extent_root);
 	while (node) {
@@ -1557,7 +1557,7 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
  *	A:	cur_old_roots < nr_old_roots	(not exclusive before)
  *	!A:	cur_old_roots == nr_old_roots	(possible exclusive before)
  *	B:	cur_new_roots < nr_new_roots	(not exclusive now)
- *	!B:	cur_new_roots == nr_new_roots	(possible exclsuive now)
+ *	!B:	cur_new_roots == nr_new_roots	(possible exclusive now)
  *
  * Results:
  *	+: Possible sharing -> exclusive	-: Possible exclusive -> sharing
@@ -1851,7 +1851,7 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
 	}

 	/*
-	 * Copy the acounting information between qgroups. This is necessary
+	 * Copy the accounting information between qgroups. This is necessary
 	 * when a snapshot or a subvolume is created. Throwing an error will
 	 * cause a transaction abort so we take extra care here to only error
 	 * when a readonly fs is a reasonable outcome.
@@ -2340,7 +2340,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 	mutex_unlock(&fs_info->qgroup_rescan_lock);

 	/*
-	 * only update status, since the previous part has alreay updated the
+	 * only update status, since the previous part has already updated the
 	 * qgroup info.
 	 */
 	trans = btrfs_start_transaction(fs_info->quota_root, 1);
@@ -2542,8 +2542,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
 	changeset.bytes_changed = 0;
 	changeset.range_changed = ulist_alloc(GFP_NOFS);
 	ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
-			start + len - 1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
-			&changeset);
+			start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
 	trace_btrfs_qgroup_reserve_data(inode, start, len,
 					changeset.bytes_changed,
 					QGROUP_RESERVE);
@@ -2580,8 +2579,7 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
 		return -ENOMEM;

 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
-			start + len - 1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
-			&changeset);
+			start + len - 1, EXTENT_QGROUP_RESERVED, &changeset);
 	if (ret < 0)
 		goto out;
@@ -2672,7 +2670,7 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
 }

 /*
- * Check qgroup reserved space leaking, normally at destory inode
+ * Check qgroup reserved space leaking, normally at destroy inode
  * time
  */
 void btrfs_qgroup_check_reserved_leak(struct inode *inode)
@@ -2688,7 +2686,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
 		return;

 	ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
-			EXTENT_QGROUP_RESERVED, GFP_NOFS, &changeset);
+			EXTENT_QGROUP_RESERVED, &changeset);

 	WARN_ON(ret < 0);
 	if (WARN_ON(changeset.bytes_changed)) {
fs/btrfs/raid56.c
@@ -576,7 +576,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
 	 * we can't merge with cached rbios, since the
 	 * idea is that when we merge the destination
 	 * rbio is going to run our IO for us.  We can
-	 * steal from cached rbio's though, other functions
+	 * steal from cached rbios though, other functions
 	 * handle that.
 	 */
 	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
@@ -2368,7 +2368,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
 			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 		}

-		/* Check scrubbing pairty and repair it */
+		/* Check scrubbing parity and repair it */
 		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
 		parity = kmap(p);
 		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
@@ -2493,7 +2493,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
 		/*
 		 * Here means we got one corrupted data stripe and one
 		 * corrupted parity on RAID6, if the corrupted parity
-		 * is scrubbing parity, luckly, use the other one to repair
+		 * is scrubbing parity, luckily, use the other one to repair
 		 * the data, or we can not repair the data stripe.
 		 */
 		if (failp != rbio->scrubp)
fs/btrfs/relocation.c
@@ -668,8 +668,8 @@ int find_inline_backref(struct extent_buffer *leaf, int slot,
  * roots of b-trees that reference the tree block.
  *
  * the basic idea of this function is check backrefs of a given block
- * to find upper level blocks that refernece the block, and then check
- * bakcrefs of these upper level blocks recursively. the recursion stop
+ * to find upper level blocks that reference the block, and then check
+ * backrefs of these upper level blocks recursively. the recursion stop
  * when tree root is reached or backrefs for the block is cached.
  *
  * NOTE: if we find backrefs for a block are cached, we know backrefs
@@ -1160,7 +1160,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
 			if (!RB_EMPTY_NODE(&upper->rb_node))
 				continue;

-			/* Add this guy's upper edges to the list to proces */
+			/* Add this guy's upper edges to the list to process */
 			list_for_each_entry(edge, &upper->upper, list[LOWER])
 				list_add_tail(&edge->list[UPPER], &list);
 			if (list_empty(&upper->upper))
@@ -2396,7 +2396,7 @@ void merge_reloc_roots(struct reloc_control *rc)
 		}

 		/*
-		 * we keep the old last snapshod transid in rtranid when we
+		 * we keep the old last snapshot transid in rtranid when we
 		 * created the relocation tree.
 		 */
 		last_snap = btrfs_root_rtransid(&reloc_root->root_item);
@@ -2616,7 +2616,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
 	 * only one thread can access block_rsv at this point,
 	 * so we don't need hold lock to protect block_rsv.
 	 * we expand more reservation size here to allow enough
-	 * space for relocation and we will return eailer in
+	 * space for relocation and we will return earlier in
 	 * enospc case.
 	 */
 	rc->block_rsv->size = tmp + rc->extent_root->nodesize *
@@ -2814,7 +2814,7 @@ static void mark_block_processed(struct reloc_control *rc,
 				 u64 bytenr, u32 blocksize)
 {
 	set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
-			EXTENT_DIRTY, GFP_NOFS);
+			EXTENT_DIRTY);
 }

 static void __mark_block_processed(struct reloc_control *rc,
@@ -3182,7 +3182,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
 		    page_start + offset == cluster->boundary[nr]) {
 			set_extent_bits(&BTRFS_I(inode)->io_tree,
 					page_start, page_end,
-					EXTENT_BOUNDARY, GFP_NOFS);
+					EXTENT_BOUNDARY);
 			nr++;
 		}
@@ -4059,8 +4059,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 	}

 	btrfs_release_path(path);
-	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
-			  GFP_NOFS);
+	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);

 	if (trans) {
 		btrfs_end_transaction_throttle(trans, rc->extent_root);
@@ -4591,7 +4590,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
 /*
  * called before creating snapshot. it calculates metadata reservation
- * requried for relocating tree blocks in the snapshot
+ * required for relocating tree blocks in the snapshot
  */
 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
 			      u64 *bytes_to_reserve)
fs/btrfs/root-tree.c
@@ -71,9 +71,9 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
  * search_key: the key to search
  * path: the path we search
  * root_item: the root item of the tree we look for
- * root_key: the reak key of the tree we look for
+ * root_key: the root key of the tree we look for
  *
- * If ->offset of 'seach_key' is -1ULL, it means we are not sure the offset
+ * If ->offset of 'search_key' is -1ULL, it means we are not sure the offset
  * of the search key, just lookup the root with the highest offset for a
  * given objectid.
  *
fs/btrfs/scrub.c
@@ -745,7 +745,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
 		 * sure we read the bad mirror.
 		 */
 		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
-					EXTENT_DAMAGED, GFP_NOFS);
+					EXTENT_DAMAGED);
 		if (ret) {
 			/* set_extent_bits should give proper error */
 			WARN_ON(ret > 0);
@@ -763,7 +763,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
 						end, EXTENT_DAMAGED, 0, NULL);
 		if (!corrected)
 			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
-						EXTENT_DAMAGED, GFP_NOFS);
+						EXTENT_DAMAGED);
 	}

 out:
@@ -1044,7 +1044,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 		/*
 		 * !is_metadata and !have_csum, this means that the data
-		 * might not be COW'ed, that it might be modified
+		 * might not be COWed, that it might be modified
 		 * concurrently. The general strategy to work on the
 		 * commit root does not help in the case when COW is not
 		 * used.
@@ -1125,7 +1125,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
 	 * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
 	 * of mirror #2 is readable but the final checksum test fails,
 	 * then the 2nd page of mirror #3 could be tried, whether now
-	 * the final checksum succeedes. But this would be a rare
+	 * the final checksum succeeds. But this would be a rare
 	 * exception and is therefore not implemented. At least it is
 	 * avoided that the good copy is overwritten.
 	 * A more useful improvement would be to pick the sectors
fs/btrfs/send.c
@@ -1831,7 +1831,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
 	/*
 	 * If we have a parent root we need to verify that the parent dir was
-	 * not delted and then re-created, if it was then we have no overwrite
+	 * not deleted and then re-created, if it was then we have no overwrite
 	 * and we can just unlink this entry.
 	 */
 	if (sctx->parent_root) {
@@ -4192,9 +4192,9 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
 		return -ENOMEM;

 	/*
-	 * This hack is needed because empty acl's are stored as zero byte
+	 * This hack is needed because empty acls are stored as zero byte
 	 * data in xattrs. Problem with that is, that receiving these zero byte
-	 * acl's will fail later. To fix this, we send a dummy acl list that
+	 * acls will fail later. To fix this, we send a dummy acl list that
 	 * only contains the version number and no entries.
 	 */
 	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
fs/btrfs/struct-funcs.c
@@ -36,7 +36,7 @@ static inline void put_unaligned_le8(u8 val, void *p)
  *
  * The end result is that anyone who #includes ctree.h gets a
  * declaration for the btrfs_set_foo functions and btrfs_foo functions,
- * which are wappers of btrfs_set_token_#bits functions and
+ * which are wrappers of btrfs_set_token_#bits functions and
  * btrfs_get_token_#bits functions, which are defined in this file.
  *
  * These setget functions do all the extent_buffer related mapping
fs/btrfs/super.c
@@ -112,7 +112,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
 		 * Note that a running device replace operation is not
 		 * canceled here although there is no way to update
 		 * the progress. It would add the risk of a deadlock,
-		 * therefore the canceling is ommited. The only penalty
+		 * therefore the canceling is omitted. The only penalty
 		 * is that some I/O remains active until the procedure
 		 * completes. The next time when the filesystem is
 		 * mounted writeable again, the device replace
@@ -1877,7 +1877,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 	int ret;

 	/*
-	 * We aren't under the device list lock, so this is racey-ish, but good
+	 * We aren't under the device list lock, so this is racy-ish, but good
 	 * enough for our purposes.
 	 */
 	nr_devices = fs_info->fs_devices->open_devices;
@@ -1896,7 +1896,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 	if (!devices_info)
 		return -ENOMEM;

-	/* calc min stripe number for data space alloction */
+	/* calc min stripe number for data space allocation */
 	type = btrfs_get_alloc_profile(root, 1);
 	if (type & BTRFS_BLOCK_GROUP_RAID0) {
 		min_stripes = 2;
@@ -1932,7 +1932,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 		avail_space *= BTRFS_STRIPE_LEN;

 		/*
-		 * In order to avoid overwritting the superblock on the drive,
+		 * In order to avoid overwriting the superblock on the drive,
 		 * btrfs starts at an offset of at least 1MB when doing chunk
 		 * allocation.
 		 */
fs/btrfs/tests/extent-io-tests.c
@@ -113,7 +113,7 @@ static int test_find_delalloc(void)
 	 * |--- delalloc ---|
 	 * |---  search  ---|
 	 */
-	set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL);
+	set_extent_delalloc(&tmp, 0, 4095, NULL);
 	start = 0;
 	end = 0;
 	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -144,7 +144,7 @@ static int test_find_delalloc(void)
 		test_msg("Couldn't find the locked page\n");
 		goto out_bits;
 	}
-	set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL);
+	set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
 	start = test_start;
 	end = 0;
 	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -176,7 +176,7 @@ static int test_find_delalloc(void)
 	locked_page = find_lock_page(inode->i_mapping, test_start >>
 				     PAGE_SHIFT);
 	if (!locked_page) {
-		test_msg("Could'nt find the locked page\n");
+		test_msg("Couldn't find the locked page\n");
 		goto out_bits;
 	}
 	start = test_start;
@@ -199,7 +199,7 @@ static int test_find_delalloc(void)
 	 *
 	 * We are re-using our test_start from above since it works out well.
 	 */
-	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL);
+	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
 	start = test_start;
 	end = 0;
 	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -262,7 +262,7 @@ static int test_find_delalloc(void)
 	}
 	ret = 0;
 out_bits:
-	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
+	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
 out:
 	if (locked_page)
 		put_page(locked_page);
fs/btrfs/tests/free-space-tests.c
@@ -25,7 +25,7 @@
 #define BITS_PER_BITMAP		(PAGE_SIZE * 8)

 /*
- * This test just does basic sanity checking, making sure we can add an exten
+ * This test just does basic sanity checking, making sure we can add an extent
  * entry and remove space from either end and the middle, and make sure we can
 * remove space that covers adjacent extent entries.
 */
@@ -396,8 +396,9 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
 * wasn't optimal as they could be spread all over the block group while under
 * concurrency (extra overhead and fragmentation).
 *
- * This stealing approach is benefical, since we always prefer to allocate from
- * extent entries, both for clustered and non-clustered allocation requests.
+ * This stealing approach is beneficial, since we always prefer to allocate
+ * from extent entries, both for clustered and non-clustered allocation
+ * requests.
 */
 static int test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
fs/btrfs/tests/inode-tests.c
@@ -264,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)
 	/*
 	 * We will just free a dummy node if it's ref count is 2 so we need an
-	 * extra ref so our searches don't accidently release our page.
+	 * extra ref so our searches don't accidentally release our page.
 	 */
 	extent_buffer_get(root->node);
 	btrfs_set_header_nritems(root->node, 0);
fs/btrfs/tests/qgroup-tests.c
@@ -234,7 +234,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
 	}

 	/*
-	 * Since the test trans doesn't havee the complicated delayed refs,
+	 * Since the test trans doesn't have the complicated delayed refs,
 	 * we can only call btrfs_qgroup_account_extent() directly to test
 	 * quota.
 	 */
fs/btrfs/transaction.c
@@ -944,7 +944,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
 		err = convert_extent_bit(dirty_pages, start, end,
 					 EXTENT_NEED_WAIT,
-					 mark, &cached_state, GFP_NOFS);
+					 mark, &cached_state);
 		/*
 		 * convert_extent_bit can return -ENOMEM, which is most of the
 		 * time a temporary error. So when it happens, ignore the error
fs/btrfs/transaction.h
@@ -144,7 +144,7 @@ struct btrfs_pending_snapshot {
 	/* block reservation for the operation */
 	struct btrfs_block_rsv block_rsv;
 	u64 qgroup_reserved;
-	/* extra metadata reseration for relocation */
+	/* extra metadata reservation for relocation */
 	int error;
 	bool readonly;
 	struct list_head list;
fs/btrfs/tree-log.c
@@ -2330,7 +2330,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
 				break;

 			/* for regular files, make sure corresponding
-			 * orhpan item exist. extents past the new EOF
+			 * orphan item exist. extents past the new EOF
 			 * will be truncated later by orphan cleanup.
 			 */
 			if (S_ISREG(mode)) {
@@ -3001,7 +3001,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
 			break;

 		clear_extent_bits(&log->dirty_log_pages, start, end,
-				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
+				  EXTENT_DIRTY | EXTENT_NEW);
 	}

 	/*
@@ -4914,7 +4914,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 * the actual unlink operation, so if we do this check before a concurrent task
 * sets last_unlink_trans it means we've logged a consistent version/state of
 * all the inode items, otherwise we are not sure and must do a transaction
- * commit (the concurrent task migth have only updated last_unlink_trans before
+ * commit (the concurrent task might have only updated last_unlink_trans before
 * we logged the inode or it might have also done the unlink).
 */
 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
@@ -4973,7 +4973,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
 	while (1) {
 		/*
 		 * If we are logging a directory then we start with our inode,
-		 * not our parents inode, so we need to skipp setting the
+		 * not our parent's inode, so we need to skip setting the
 		 * logged_trans so that further down in the log code we don't
 		 * think this inode has already been logged.
 		 */
@@ -5357,7 +5357,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 		log_dentries = true;

 	/*
-	 * On unlink we must make sure all our current and old parent directores
+	 * On unlink we must make sure all our current and old parent directory
 	 * inodes are fully logged. This is to prevent leaving dangling
 	 * directory index entries in directories that were our parents but are
 	 * not anymore. Not doing this results in old parent directory being
fs/btrfs/ulist.c
@@ -28,7 +28,7 @@
 * }
 * ulist_free(ulist);
 *
- * This assumes the graph nodes are adressable by u64. This stems from the
+ * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
fs/btrfs/volumes.c
@@ -2190,7 +2190,7 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
 }

 /*
- * strore the expected generation for seed devices in device items.
+ * Store the expected generation for seed devices in device items.
 */
 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root)
@@ -3387,7 +3387,7 @@ static int should_balance_chunk(struct btrfs_root *root,
 	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
 		/*
 		 * Same logic as the 'limit' filter; the minimum cannot be
-		 * determined here because we do not have the global informatoin
+		 * determined here because we do not have the global information
 		 * about the count of all chunks that satisfy the filters.
 		 */
 		if (bargs->limit_max == 0)
@@ -6076,7 +6076,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 {
 	atomic_inc(&bbio->error);
 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
-		/* Shoud be the original bio. */
+		/* Should be the original bio. */
 		WARN_ON(bio != bbio->orig_bio);

 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -6560,7 +6560,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
 	set_extent_buffer_uptodate(sb);
 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
 	/*
-	 * The sb extent buffer is artifical and just used to read the system array.
+	 * The sb extent buffer is artificial and just used to read the system array.
 	 * set_extent_buffer_uptodate() call does not properly mark all it's
 	 * pages up-to-date when the page is larger: extent does not cover the
 	 * whole page and consequently check_page_uptodate does not find all