diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 551177c0011ae90d3e2c0460340071ea97158b50..35443cc4b9a976607f41866d68c202855c1bf16b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1530,8 +1530,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 		 * for higher level blocks, try not to allocate blocks
 		 * with the block and the parent locks held.
 		 */
-		if (level > 0 && !prealloc_block.objectid &&
-		    btrfs_path_lock_waiting(p, level)) {
+		if (level > 0 && !prealloc_block.objectid) {
 			u32 size = b->len;
 			u64 hint = b->start;
 
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 68fd9ccf1805e63adf0973612f9822b9585034f8..9ebe9385129be7a32c3394a9d20ab13ff119fcb9 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -236,25 +236,3 @@ int btrfs_tree_locked(struct extent_buffer *eb)
 	return test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags) ||
 		spin_is_locked(&eb->lock);
 }
-
-/*
- * btrfs_search_slot uses this to decide if it should drop its locks
- * before doing something expensive like allocating free blocks for cow.
- */
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level)
-{
-	int i;
-	struct extent_buffer *eb;
-
-	for (i = level; i <= level + 1 && i < BTRFS_MAX_LEVEL; i++) {
-		eb = path->nodes[i];
-		if (!eb)
-			break;
-		smp_mb();
-		if (spin_is_contended(&eb->lock) ||
-		    waitqueue_active(&eb->lock_wq))
-			return 1;
-	}
-	return 0;
-}
-
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index d92e707f5870c1dfb13462298047b58c8827f622..6bb0afbff9287d4e841748f02d60eae488d7de7c 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -26,8 +26,6 @@ int btrfs_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_lock(struct extent_buffer *eb);
 int btrfs_try_spin_lock(struct extent_buffer *eb);
 
-int btrfs_path_lock_waiting(struct btrfs_path *path, int level);
-
 void btrfs_set_lock_blocking(struct extent_buffer *eb);
 void btrfs_clear_lock_blocking(struct extent_buffer *eb);
 #endif
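
Note on the pattern being removed: btrfs_path_lock_waiting() let btrfs_search_slot() peek at spin_is_contended()/waitqueue_active() on the locks along the path, and only drop its locks for the expensive prealloc when someone was actually waiting. After this patch, the preallocation path at level > 0 runs unconditionally whenever no prealloc block is reserved. For readers unfamiliar with the idiom, below is a minimal userspace sketch of the same idea — "check whether anyone is queued behind my lock before doing expensive work while holding it" — built on pthreads with an explicit waiter count, since userspace has no spin_is_contended(). Every name here (contended_lock, cl_lock, cl_unlock, cl_is_waited_on, do_work) is a hypothetical illustration, not a kernel or btrfs API.

/*
 * Minimal userspace sketch of the pattern the removed helper implemented.
 * The kernel version instead looked at spin_is_contended() and
 * waitqueue_active() on the extent_buffer locks along the path.
 * All identifiers below are hypothetical.  Build with: cc -pthread
 */
#include <pthread.h>
#include <stdatomic.h>

struct contended_lock {
	pthread_mutex_t mutex;
	atomic_int waiters;	/* threads currently blocked in cl_lock() */
};

#define CONTENDED_LOCK_INIT { PTHREAD_MUTEX_INITIALIZER, 0 }

static void cl_lock(struct contended_lock *l)
{
	/* Fast path: uncontended, take it without touching the counter. */
	if (pthread_mutex_trylock(&l->mutex) == 0)
		return;
	atomic_fetch_add(&l->waiters, 1);
	pthread_mutex_lock(&l->mutex);
	atomic_fetch_sub(&l->waiters, 1);
}

static void cl_unlock(struct contended_lock *l)
{
	pthread_mutex_unlock(&l->mutex);
}

/* Rough analogue of btrfs_path_lock_waiting(): is anyone queued behind us? */
static int cl_is_waited_on(struct contended_lock *l)
{
	return atomic_load(&l->waiters) > 0;
}

/*
 * Usage mirroring the old btrfs_search_slot() logic: only pay for dropping
 * and retaking the lock when a waiter would otherwise stall behind us.
 */
static void do_work(struct contended_lock *l)
{
	cl_lock(l);
	if (cl_is_waited_on(l)) {
		cl_unlock(l);
		/* ... expensive allocation without the lock held ... */
		cl_lock(l);
		/* ... revalidate: the protected state may have changed ... */
	}
	/* ... short critical section ... */
	cl_unlock(l);
}

The design tradeoff the sketch makes visible is the same one the patch resolves: the contention check saves an unlock/relock round trip when nobody is waiting, but it depends on being able to observe waiters reliably, and whatever the check guards must be revalidated after the lock is retaken. Dropping the check, as this patch does for the kernel helper, trades that micro-optimization for simpler, always-correct behavior.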