Commit e11c0406 authored by Josef Bacik, committed by David Sterba

btrfs: unexport the temporary exported functions

These were renamed and exported to facilitate logical migration of
different code chunks into block-group.c.  Now that all the users are in
one file, go ahead and rename them back, move the code around, and make
them static.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent 3e43c279
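For readers unfamiliar with the pattern being undone, here is a minimal userspace C sketch of it; the file name, helper name, and logic below are invented for illustration and are not btrfs code. While the block group code was being migrated into block-group.c, helpers such as get_restripe_target() were temporarily given a btrfs_ prefix and declared in block-group.h so that callers which had not yet moved could still reach them; once every caller lives in block-group.c, the declarations are dropped and the helpers become static again under their original names.

/*
 * pattern_sketch.c - a toy, self-contained illustration (not btrfs code) of
 * temporarily exporting a helper during a code move and making it static
 * again once all callers live in one file.  All names here are invented.
 */
#include <stdio.h>
#include <stdint.h>

/*
 * During the migration this helper would have been non-static and declared
 * in a shared header under a prefixed name, e.g.:
 *
 *     uint64_t demo_get_target(uint64_t flags);
 *
 * Now that its only callers are in this translation unit, it is static and
 * carries its original, unprefixed name.
 */
static uint64_t get_target(uint64_t flags)
{
	/* toy stand-in for the real profile-selection logic */
	return flags & 0xffULL;
}

int main(void)
{
	/* the sole caller sits in the same file, so no export is needed */
	printf("target = %llu\n", (unsigned long long)get_target(0x1ffULL));
	return 0;
}

Making the helpers static again keeps block-group.h small and makes it obvious, to both readers and the compiler, that these functions have no users outside block-group.c.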
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -21,7 +21,7 @@
  *
  * Should be called with balance_lock held
  */
-u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
+static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
 {
 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
 	u64 target = 0;
@@ -62,7 +62,7 @@ static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
 	 * reduce to the target profile
 	 */
 	spin_lock(&fs_info->balance_lock);
-	target = btrfs_get_restripe_target(fs_info, flags);
+	target = get_restripe_target(fs_info, flags);
 	if (target) {
 		/* Pick target profile only if it's already available */
 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
@@ -424,7 +424,7 @@ int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
 }
 
 #ifdef CONFIG_BTRFS_DEBUG
-void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group)
+static void fragment_free_space(struct btrfs_block_group_cache *block_group)
 {
 	struct btrfs_fs_info *fs_info = block_group->fs_info;
 	u64 start = block_group->key.objectid;
@@ -661,7 +661,7 @@ static noinline void caching_thread(struct btrfs_work *work)
 		block_group->space_info->bytes_used += bytes_used >> 1;
 		spin_unlock(&block_group->lock);
 		spin_unlock(&block_group->space_info->lock);
-		btrfs_fragment_free_space(block_group);
+		fragment_free_space(block_group);
 	}
 #endif
 
@@ -768,7 +768,7 @@ int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
 			cache->space_info->bytes_used += bytes_used >> 1;
 			spin_unlock(&cache->lock);
 			spin_unlock(&cache->space_info->lock);
-			btrfs_fragment_free_space(cache);
+			fragment_free_space(cache);
 		}
 #endif
 	mutex_unlock(&caching_ctl->mutex);
@@ -1180,7 +1180,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
  * data in this block group. That check should be done by relocation routine,
  * not this function.
  */
-int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
+static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
 {
 	struct btrfs_space_info *sinfo = cache->space_info;
 	u64 num_bytes;
@@ -1296,7 +1296,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 		spin_unlock(&block_group->lock);
 
 		/* We don't want to force the issue, only flip if it's ok. */
-		ret = __btrfs_inc_block_group_ro(block_group, 0);
+		ret = inc_block_group_ro(block_group, 0);
 		up_write(&space_info->groups_sem);
 		if (ret < 0) {
 			ret = 0;
@@ -1822,7 +1822,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 
 		set_avail_alloc_bits(info, cache->flags);
 		if (btrfs_chunk_readonly(info, cache->key.objectid)) {
-			__btrfs_inc_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 		} else if (btrfs_block_group_used(&cache->item) == 0) {
 			ASSERT(list_empty(&cache->bg_list));
 			btrfs_mark_bg_unused(cache);
@@ -1843,11 +1843,11 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		list_for_each_entry(cache,
 				    &space_info->block_groups[BTRFS_RAID_RAID0],
 				    list)
-			__btrfs_inc_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 		list_for_each_entry(cache,
 				    &space_info->block_groups[BTRFS_RAID_SINGLE],
 				    list)
-			__btrfs_inc_block_group_ro(cache, 1);
+			inc_block_group_ro(cache, 1);
 	}
 
 	btrfs_init_global_block_rsv(info);
@@ -1936,7 +1936,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
 		u64 new_bytes_used = size - bytes_used;
 
 		bytes_used += new_bytes_used >> 1;
-		btrfs_fragment_free_space(cache);
+		fragment_free_space(cache);
 	}
 #endif
 	/*
@@ -1982,7 +1982,7 @@ static u64 update_block_group_flags(struct btrfs_fs_info *fs_info, u64 flags)
 	 * if restripe for this chunk_type is on pick target profile and
 	 * return, otherwise do the usual balance
 	 */
-	stripped = btrfs_get_restripe_target(fs_info, flags);
+	stripped = get_restripe_target(fs_info, flags);
 	if (stripped)
 		return extended_to_chunk(stripped);
 
@@ -2070,14 +2070,14 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
 			goto out;
 	}
 
-	ret = __btrfs_inc_block_group_ro(cache, 0);
+	ret = inc_block_group_ro(cache, 0);
 	if (!ret)
 		goto out;
 	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 	if (ret < 0)
 		goto out;
-	ret = __btrfs_inc_block_group_ro(cache, 0);
+	ret = inc_block_group_ro(cache, 0);
 out:
 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
 		alloc_flags = update_block_group_flags(fs_info, cache->flags);

--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -166,7 +166,6 @@ static inline int btrfs_should_fragment_free_space(
 		(btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
 		 block_group->flags & BTRFS_BLOCK_GROUP_DATA);
 }
-void btrfs_fragment_free_space(struct btrfs_block_group_cache *block_group);
 #endif
 
 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
@@ -246,7 +245,4 @@ static inline int btrfs_block_group_cache_done(
 		cache->cached == BTRFS_CACHE_ERROR;
 }
 
-int __btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache, int force);
-u64 btrfs_get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags);
-
 #endif /* BTRFS_BLOCK_GROUP_H */