提交 b8399dee 编写于 作者: Josef Bacik

Btrfs: do not do fast caching if we are allocating blocks for tree_root

Since the fast caching uses normal tree locking, we can possibly deadlock if we
get to the caching via a btrfs_search_slot() on the tree_root.  So just check to
see if the root we are on is the tree root, and just don't do the fast caching.
Reported-by: Sage Weil <sage@newdream.net>
Signed-off-by: Josef Bacik <josef@redhat.com>
上级 2b20982e
@@ -429,6 +429,7 @@ static int caching_kthread(void *data)
 static int cache_block_group(struct btrfs_block_group_cache *cache,
 			     struct btrfs_trans_handle *trans,
+			     struct btrfs_root *root,
 			     int load_cache_only)
 {
 	struct btrfs_fs_info *fs_info = cache->fs_info;
@@ -442,9 +443,12 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	/*
 	 * We can't do the read from on-disk cache during a commit since we need
-	 * to have the normal tree locking.
+	 * to have the normal tree locking.  Also if we are currently trying to
+	 * allocate blocks for the tree root we can't do the fast caching since
+	 * we likely hold important locks.
 	 */
-	if (!trans->transaction->in_commit) {
+	if (!trans->transaction->in_commit &&
+	    (root && root != root->fs_info->tree_root)) {
 		spin_lock(&cache->lock);
 		if (cache->cached != BTRFS_CACHE_NO) {
 			spin_unlock(&cache->lock);
@@ -4083,7 +4087,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
 	 * space back to the block group, otherwise we will leak space.
 	 */
 	if (!alloc && cache->cached == BTRFS_CACHE_NO)
-		cache_block_group(cache, trans, 1);
+		cache_block_group(cache, trans, NULL, 1);

 	byte_in_group = bytenr - cache->key.objectid;
 	WARN_ON(byte_in_group > cache->key.offset);
@@ -4937,7 +4941,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 		if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
 			u64 free_percent;

-			ret = cache_block_group(block_group, trans, 1);
+			ret = cache_block_group(block_group, trans,
+						orig_root, 1);
 			if (block_group->cached == BTRFS_CACHE_FINISHED)
 				goto have_block_group;
@@ -4961,7 +4966,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 			if (loop > LOOP_CACHING_NOWAIT ||
 			    (loop > LOOP_FIND_IDEAL &&
 			     atomic_read(&space_info->caching_threads) < 2)) {
-				ret = cache_block_group(block_group, trans, 0);
+				ret = cache_block_group(block_group, trans,
+							orig_root, 0);
 				BUG_ON(ret);
 			}
 			found_uncached_bg = true;
@@ -5518,7 +5524,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 	u64 num_bytes = ins->offset;

 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-	cache_block_group(block_group, trans, 0);
+	cache_block_group(block_group, trans, NULL, 0);
 	caching_ctl = get_caching_control(block_group);
 	if (!caching_ctl) {
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册