/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.   This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

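/*
 * compare two delayed ref nodes with the same bytenr: order by type first,
 * then by the type specific fields, and optionally by sequence number.
 */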
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

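/*
 * Insert a delayed ref node into a head's sorted rbtree of refs.  Returns
 * the existing node if one with the same key is already present, or NULL
 * once the new node has been linked in.
 */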
static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0)
			p = &(*p)->rb_left;
		else if (comp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

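/*
 * Try to take the mutex of a delayed ref head.  The uncontended case is
 * handled without dropping the delayed_refs spinlock; otherwise the
 * spinlock is dropped while we block on the mutex, and -EAGAIN is returned
 * if the head was removed from the rbtree in the meantime.
 */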
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

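/*
 * Remove a single delayed ref node from its head: unlink it from the ref
 * rbtree and the add_list, drop our reference and update the counters.
 */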
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	assert_spin_locked(&head->lock);
	rb_erase(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

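/*
 * Try to merge the delayed ref with the refs that follow it in the head's
 * rbtree.  Opposite actions cancel each other out and a ref whose ref_mod
 * reaches zero is dropped.  Returns true if the caller has to restart its
 * scan because the ref it passed in was dropped or swapped away.
 */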
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}

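/*
 * Merge as many delayed refs under this head as possible.  Data refs are
 * skipped, as are refs pinned by an active tree mod log sequence number.
 */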
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

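/*
 * Check whether a delayed ref with the given sequence number has to be
 * held back because an older tree mod log sequence is still in use.
 * Returns 1 if processing must wait, 0 otherwise.
 */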
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

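/*
 * Pick the next delayed ref head to run, starting at run_delayed_start and
 * wrapping around once.  Heads that are already being processed are
 * skipped; the selected head is marked as processing.  Returns NULL if no
 * unprocessed head is left.
 */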
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node into the head's ref rbtree, or to merge it
 * with an existing ref that has the same key.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case because we could be processing it
	 * currently; for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;
	int qrecord_inserted = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->ref_tree = RB_ROOT;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if(btrfs_qgroup_trace_extent_nolock(fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated, but
	 * the same pattern is used elsewhere... follow it for now. Needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, ref);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root,  int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, 0, 0, action, 0,
					&qrecord_inserted, old_ref_mod,
					new_ref_mod);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
				   num_bytes, parent, ref_root, owner, offset,
				   action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}

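/*
 * Record an extent op (key and/or flags update) for an extent by adding or
 * updating its delayed ref head, without changing the reference count.
 */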
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data, NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}