Commit 73fa48b6 authored by Omar Sandoval, committed by Chris Mason

Btrfs: refactor caching_thread()

We're also going to load the free space tree from caching_thread(), so
we should refactor some of the common code.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Chris Mason <clm@fb.com>
Parent 1abfbcdf
@@ -1262,6 +1262,9 @@ struct btrfs_caching_control {
     atomic_t count;
 };
 
+/* Once caching_thread() finds this much free space, it will wake up waiters. */
+#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)
+
 struct btrfs_io_ctl {
     void *cur, *orig;
     struct page *page;
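For context, CACHING_CTL_WAKE_UP just gives a name to the existing 2 MiB batch size: once caching_thread() has added that much free space since the last wake-up, it wakes tasks sleeping on caching_ctl->wait so they can try to allocate before the whole block group is cached (see the total_found check further down in the diff). The snippet that follows is only an illustration of the waiter side of that contract; the helper name and exact condition are assumptions modeled on btrfs's block-group caching code, not part of this commit.

/*
 * Illustration only (not from this commit): a task that needs space in a
 * block group can sleep on caching_ctl->wait until caching has finished,
 * failed, or found enough free space.  CACHING_CTL_WAKE_UP batches how often
 * caching_thread() issues the wake_up() that re-evaluates this condition.
 */
static void example_wait_for_cached_space(struct btrfs_block_group_cache *cache,
                                          struct btrfs_caching_control *ctl,
                                          u64 num_bytes)
{
    wait_event(ctl->wait,
               cache->cached == BTRFS_CACHE_FINISHED ||
               cache->cached == BTRFS_CACHE_ERROR ||
               cache->free_space_ctl->free_space >= num_bytes);
}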
@@ -375,11 +375,10 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
     return total_added;
 }
 
-static noinline void caching_thread(struct btrfs_work *work)
+static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 {
     struct btrfs_block_group_cache *block_group;
     struct btrfs_fs_info *fs_info;
-    struct btrfs_caching_control *caching_ctl;
     struct btrfs_root *extent_root;
     struct btrfs_path *path;
     struct extent_buffer *leaf;
@@ -387,16 +386,15 @@ static noinline void caching_thread(struct btrfs_work *work)
     u64 total_found = 0;
     u64 last = 0;
     u32 nritems;
-    int ret = -ENOMEM;
+    int ret;
 
-    caching_ctl = container_of(work, struct btrfs_caching_control, work);
     block_group = caching_ctl->block_group;
     fs_info = block_group->fs_info;
     extent_root = fs_info->extent_root;
 
     path = btrfs_alloc_path();
     if (!path)
-        goto out;
+        return -ENOMEM;
 
     last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
@@ -413,15 +411,11 @@ static noinline void caching_thread(struct btrfs_work *work)
     key.objectid = last;
     key.offset = 0;
     key.type = BTRFS_EXTENT_ITEM_KEY;
-again:
-    mutex_lock(&caching_ctl->mutex);
-    /* need to make sure the commit_root doesn't disappear */
-    down_read(&fs_info->commit_root_sem);
 
 next:
     ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
     if (ret < 0)
-        goto err;
+        goto out;
 
     leaf = path->nodes[0];
     nritems = btrfs_header_nritems(leaf);
@@ -446,12 +440,14 @@ static noinline void caching_thread(struct btrfs_work *work)
                 up_read(&fs_info->commit_root_sem);
                 mutex_unlock(&caching_ctl->mutex);
                 cond_resched();
-                goto again;
+                mutex_lock(&caching_ctl->mutex);
+                down_read(&fs_info->commit_root_sem);
+                goto next;
             }
 
             ret = btrfs_next_leaf(extent_root, path);
             if (ret < 0)
-                goto err;
+                goto out;
             if (ret)
                 break;
             leaf = path->nodes[0];
@@ -489,7 +485,7 @@ static noinline void caching_thread(struct btrfs_work *work)
         else
             last = key.objectid + key.offset;
 
-        if (total_found > (1024 * 1024 * 2)) {
+        if (total_found > CACHING_CTL_WAKE_UP) {
             total_found = 0;
             wake_up(&caching_ctl->wait);
         }
@@ -503,25 +499,36 @@ static noinline void caching_thread(struct btrfs_work *work)
                       block_group->key.offset);
     caching_ctl->progress = (u64)-1;
 
+out:
+    btrfs_free_path(path);
+    return ret;
+}
+
+static noinline void caching_thread(struct btrfs_work *work)
+{
+    struct btrfs_block_group_cache *block_group;
+    struct btrfs_fs_info *fs_info;
+    struct btrfs_caching_control *caching_ctl;
+    int ret;
+
+    caching_ctl = container_of(work, struct btrfs_caching_control, work);
+    block_group = caching_ctl->block_group;
+    fs_info = block_group->fs_info;
+
+    mutex_lock(&caching_ctl->mutex);
+    down_read(&fs_info->commit_root_sem);
+
+    ret = load_extent_tree_free(caching_ctl);
+
     spin_lock(&block_group->lock);
     block_group->caching_ctl = NULL;
-    block_group->cached = BTRFS_CACHE_FINISHED;
+    block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
     spin_unlock(&block_group->lock);
 
-err:
-    btrfs_free_path(path);
     up_read(&fs_info->commit_root_sem);
-
-    free_excluded_extents(extent_root, block_group);
-
+    free_excluded_extents(fs_info->extent_root, block_group);
     mutex_unlock(&caching_ctl->mutex);
-out:
-    if (ret) {
-        spin_lock(&block_group->lock);
-        block_group->caching_ctl = NULL;
-        block_group->cached = BTRFS_CACHE_ERROR;
-        spin_unlock(&block_group->lock);
-    }
+
     wake_up(&caching_ctl->wait);
 
     put_caching_control(caching_ctl);
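After this refactor, caching_thread() only takes caching_ctl->mutex and commit_root_sem, runs a loader, and records success or failure, while the extent tree walk lives in load_extent_tree_free(). As the commit message notes, the point is to let the same thread also load the free space tree. A rough sketch of how that could look is below; load_free_space_tree() and the FREE_SPACE_TREE read-only compat bit are assumptions about the follow-up work, not code from this commit.

/*
 * Sketch of an assumed follow-up, not part of this commit: with the loader
 * split out, caching_thread() can dispatch to either tree.
 */
static noinline void caching_thread(struct btrfs_work *work)
{
    struct btrfs_block_group_cache *block_group;
    struct btrfs_fs_info *fs_info;
    struct btrfs_caching_control *caching_ctl;
    int ret;

    caching_ctl = container_of(work, struct btrfs_caching_control, work);
    block_group = caching_ctl->block_group;
    fs_info = block_group->fs_info;

    mutex_lock(&caching_ctl->mutex);
    down_read(&fs_info->commit_root_sem);

    if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))  /* assumed compat bit */
        ret = load_free_space_tree(caching_ctl);       /* assumed helper */
    else
        ret = load_extent_tree_free(caching_ctl);

    spin_lock(&block_group->lock);
    block_group->caching_ctl = NULL;
    block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
    spin_unlock(&block_group->lock);

    up_read(&fs_info->commit_root_sem);
    free_excluded_extents(fs_info->extent_root, block_group);
    mutex_unlock(&caching_ctl->mutex);

    wake_up(&caching_ctl->wait);

    put_caching_control(caching_ctl);
    btrfs_put_block_group(block_group);
}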