/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.   This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      struct btrfs_delayed_ref_head **last, int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
		if (last)
			*last = entry;

		if (bytenr < entry->node.bytenr)
			cmp = -1;
		else if (bytenr > entry->node.bytenr)
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			bytenr = entry->node.bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}

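/*
 * try to take the mutex on a delayed ref head.  Must be called with
 * delayed_refs->lock held; if the trylock fails, the spinlock is dropped
 * while we sleep on the mutex and reacquired afterwards.  Returns 0 with
 * the head locked, or -EAGAIN if the head was removed from the tree in
 * the meantime and the caller needs to retry.
 */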
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

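/*
 * unlink a ref from the rbtree and drop the reference the tree was
 * holding on it.  Head refs are also removed from the href rbtree.
 * The delayed_refs spinlock must be held.
 */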
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_node *ref)
{
	rb_erase(&ref->rb_node, &delayed_refs->root);
	if (btrfs_delayed_ref_is_head(ref)) {
		struct btrfs_delayed_ref_head *head;

		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	delayed_refs->num_entries--;
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

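/*
 * walk backwards from @ref, folding in refs on the same bytenr that are
 * identical apart from seq and action.  Same actions add up in ref_mod,
 * opposite actions cancel out; refs with a seq at or beyond @seq are left
 * alone.  Returns the number of refs folded into @ref.
 */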
static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int merged = 0;
	int mod = 0;
	int done = 0;

	node = rb_prev(&ref->rb_node);
	while (node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_prev(node);
		if (next->bytenr != ref->bytenr)
			break;
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		merged++;
		drop_delayed_ref(trans, delayed_refs, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, ref);
			break;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (done)
			break;
		node = rb_prev(&ref->rb_node);
	}

	return merged;
}

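/*
 * merge all mergeable refs queued behind @head.  Nothing with a seq at or
 * beyond the lowest seq still tracked in the tree mod log may be touched,
 * so that seq is looked up first and passed down to merge_ref().
 */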
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	/*
	 * We don't have too many refs to merge in the case of delayed data
	 * refs.
	 */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_prev(&head->node.rb_node);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;

		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, ref, seq))
			node = rb_prev(&head->node.rb_node);
		else
			node = rb_prev(node);
	}
}

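/*
 * returns 1 if processing the delayed ref at @seq must wait because the
 * tree mod log still has users tracking a seq at or below it, 0 if it is
 * safe to process.
 */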
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

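/*
 * collect a cluster of up to 32 ref heads that aren't already being
 * processed, starting with the first head at or after @start, and queue
 * them on @cluster.  Wraps to the start of the rbtree if the end is hit.
 * Returns 0 if something was added to the cluster, 1 if there was nothing
 * left to run.
 */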
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_head *head = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	node = rb_first(&delayed_refs->href_root);

	if (start) {
		find_ref_head(&delayed_refs->href_root, start + 1, &head, 1);
		if (head)
			node = &head->href_node;
	}
again:
	while (node && count < 32) {
		head = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
		if (list_empty(&head->cluster)) {
			list_add_tail(&head->cluster, cluster);
			delayed_refs->run_delayed_start =
				head->node.bytenr;
			count++;

			WARN_ON(delayed_refs->num_heads_ready == 0);
			delayed_refs->num_heads_ready--;
		} else if (count) {
			/* the goal of the clustering is to find extents
			 * that are likely to end up in the same extent
			 * leaf on disk.  So, we don't want them spread
			 * all over the tree.  Stop now if we've hit
			 * a head that was already in use
			 */
			break;
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->href_root);
		goto again;
	}
	return 1;
}

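/* unhook every head from the cluster list so it can be clustered again */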
void btrfs_release_ref_cluster(struct list_head *cluster)
{
	struct list_head *pos, *q;

	list_for_each_safe(pos, q, cluster)
		list_del_init(pos);
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					struct btrfs_trans_handle *trans,
					struct btrfs_delayed_ref_node *ref,
					u64 bytenr, u64 num_bytes,
					int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type  = 0;
	ref->action  = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
	} else {
		htree_insert(&delayed_refs->href_root, &head_ref->href_node);
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action,
					 int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root,  int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, level, action,
				   for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
				   num_bytes, parent, ref_root, owner, offset,
				   action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

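/*
 * queue an extent op (key and/or flags update) against an extent.  This
 * only adds or updates the head ref; the op is applied when the head is
 * run.
 */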
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
				   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
				   extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, NULL, 0);
}

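/* tear down the kmem caches created by btrfs_delayed_ref_init() */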
void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

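/* create the kmem caches used by the delayed ref machinery */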
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}