diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 874565a1f634ae197fab853cb78a041df519e2db..3e7eeaf86408c9d66a125a71e3b7317613978fd0 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -510,6 +510,24 @@ int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
 	return 0;
 }
 
+/*
+ * this does a simple search for the head node for a given extent.
+ * It must be called with the delayed ref spinlock held, and it returns
+ * the head node if any where found, or NULL if not.
+ */
+struct btrfs_delayed_ref_head *
+btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
+{
+	struct btrfs_delayed_ref_node *ref;
+	struct btrfs_delayed_ref_root *delayed_refs;
+
+	delayed_refs = &trans->transaction->delayed_refs;
+	ref = tree_search(&delayed_refs->root, bytenr, (u64)-1);
+	if (ref)
+		return btrfs_delayed_node_to_head(ref);
+	return NULL;
+}
+
 /*
  * add a delayed ref to the tree. This does all of the accounting required
  * to make sure the delayed ref is eventually processed before this
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 37919e5c007f655006bac968b0f4a77f3f00b6d0..c345fee9f96be45401098a0e5b5ef6349d86e0e6 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -137,9 +137,8 @@ int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
 			  u64 ref_generation, u64 owner_objectid, int action,
 			  int pin);
 
-struct btrfs_delayed_ref *
-btrfs_find_delayed_ref(struct btrfs_trans_handle *trans, u64 bytenr,
-		       u64 parent);
+struct btrfs_delayed_ref_head *
+btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
 int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
 int btrfs_lock_delayed_ref(struct btrfs_trans_handle *trans,
 			   struct btrfs_delayed_ref_node *ref,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9b5da2b013e41598f56f849701c78bde34255057..8471c79b0877316d17deae2c6366aa76cc775809 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1021,6 +1021,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		if (!locked_ref && count == 0)
 			break;
 
+		cond_resched();
 		spin_lock(&delayed_refs->lock);
 	}
 	if (run_all) {
@@ -1045,6 +1046,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
 			mutex_unlock(&head->mutex);
 			btrfs_put_delayed_ref(ref);
+			cond_resched();
 			goto again;
 		}
 		node = rb_next(node);
@@ -2361,6 +2363,68 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 				owner_objectid, pin, pin == 0, refs_to_drop);
 }
 
+/*
+ * when we free an extent, it is possible (and likely) that we free the last
+ * delayed ref for that extent as well. This searches the delayed ref tree for
+ * a given extent, and if there are no other delayed refs to be processed, it
+ * removes it from the tree.
+ */
+static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
+				      struct btrfs_root *root, u64 bytenr)
+{
+	struct btrfs_delayed_ref_head *head;
+	struct btrfs_delayed_ref_root *delayed_refs;
+	struct btrfs_delayed_ref_node *ref;
+	struct rb_node *node;
+	int ret;
+
+	delayed_refs = &trans->transaction->delayed_refs;
+	spin_lock(&delayed_refs->lock);
+	head = btrfs_find_delayed_ref_head(trans, bytenr);
+	if (!head)
+		goto out;
+
+	node = rb_prev(&head->node.rb_node);
+	if (!node)
+		goto out;
+
+	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+
+	/* there are still entries for this ref, we can't drop it */
+	if (ref->bytenr == bytenr)
+		goto out;
+
+	/*
+	 * waiting for the lock here would deadlock. If someone else has it
+	 * locked they are already in the process of dropping it anyway
+	 */
+	if (!mutex_trylock(&head->mutex))
+		goto out;
+
+	/*
+	 * at this point we have a head with no other entries. Go
+	 * ahead and process it.
+	 */
+	head->node.in_tree = 0;
+	rb_erase(&head->node.rb_node, &delayed_refs->root);
+	delayed_refs->num_entries--;
+
+	/*
+	 * we don't take a ref on the node because we're removing it from the
+	 * tree, so we just steal the ref the tree was holding.
+	 */
+	spin_unlock(&delayed_refs->lock);
+
+	ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
+				  &head->node, head->must_insert_reserved);
+	BUG_ON(ret);
+	btrfs_put_delayed_ref(&head->node);
+	return 0;
+out:
+	spin_unlock(&delayed_refs->lock);
+	return 0;
+}
+
 int btrfs_free_extent(struct btrfs_trans_handle *trans,
 		      struct btrfs_root *root,
 		      u64 bytenr, u64 num_bytes, u64 parent,
@@ -2388,6 +2452,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
 					root_objectid, ref_generation,
 					owner_objectid,
 					BTRFS_DROP_DELAYED_REF, 1);
+		BUG_ON(ret);
+		ret = check_ref_cleanup(trans, root, bytenr);
+		BUG_ON(ret);
 	}
 	return ret;
 }
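
Aside (not part of the patch): a minimal, self-contained sketch of the ordering assumption check_ref_cleanup() leans on. btrfs_find_delayed_ref_head() looks the head up with parent == (u64)-1, which suggests delayed refs are ordered by (bytenr, parent) with the head sorting last among the entries for its extent; rb_prev() from the head then lands on another ref for the same bytenr exactly when work is still pending. The struct, comparator, and sample values below are hypothetical stand-ins, not btrfs code.

/*
 * Illustrative sketch only, not btrfs code: models why rb_prev() from the
 * head node is enough to tell whether other delayed refs remain for an
 * extent, assuming entries sort by (bytenr, parent) and the head uses
 * parent == (u64)-1 so it sorts last for its bytenr.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct fake_ref {
	uint64_t bytenr;
	uint64_t parent;		/* UINT64_MAX marks the head node */
};

static int cmp_ref(const void *a, const void *b)
{
	const struct fake_ref *r1 = a, *r2 = b;

	if (r1->bytenr != r2->bytenr)
		return r1->bytenr < r2->bytenr ? -1 : 1;
	if (r1->parent != r2->parent)
		return r1->parent < r2->parent ? -1 : 1;
	return 0;
}

int main(void)
{
	/* hypothetical sample: extent 4096 still has a ref, 8192 does not */
	struct fake_ref refs[] = {
		{ 4096, UINT64_MAX },	/* head for extent at 4096 */
		{ 4096, 200 },		/* ordinary delayed ref for 4096 */
		{ 8192, UINT64_MAX },	/* head for extent at 8192, alone */
	};
	size_t n = sizeof(refs) / sizeof(refs[0]);
	size_t i;

	qsort(refs, n, sizeof(refs[0]), cmp_ref);

	for (i = 0; i < n; i++) {
		if (refs[i].parent != UINT64_MAX)
			continue;	/* only inspect head nodes */
		/* the predecessor is the rb_prev() analogue */
		if (i > 0 && refs[i - 1].bytenr == refs[i].bytenr)
			printf("extent %llu: other refs remain, keep the head\n",
			       (unsigned long long)refs[i].bytenr);
		else
			printf("extent %llu: head is alone, safe to clean up\n",
			       (unsigned long long)refs[i].bytenr);
	}
	return 0;
}

With these sample values the program reports that extent 4096 keeps its head while extent 8192 can be cleaned up, mirroring the "ref->bytenr == bytenr" test that follows rb_prev() in the patch.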