/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.   This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

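/*
 * Rough life cycle (all helpers below): callers such as the extent
 * allocation code queue changes with btrfs_add_delayed_tree_ref(),
 * btrfs_add_delayed_data_ref() or btrfs_add_delayed_extent_op().  Each
 * extent gets one btrfs_delayed_ref_head, keyed by bytenr in an rbtree,
 * and the individual ref modifications are kept on a list hanging off
 * that head.  When the refs are run, btrfs_select_ref_head() picks a
 * head and btrfs_delayed_ref_lock() serializes processing of it.
 */
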
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

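/*
 * Try to acquire head->mutex while holding delayed_refs->lock.  If the
 * mutex is contended, take a reference on the head, drop the spinlock,
 * block on the mutex and then retake the spinlock.  Returns -EAGAIN if
 * the head was run and removed from the rbtree while we slept, in which
 * case the caller must look the head up again; otherwise returns 0 with
 * the mutex held.
 */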
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

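/*
 * Unlink one ref from its head and drop it.  The caller must hold
 * head->lock.  The ref is removed from head->ref_list (and from
 * ref_add_list if it is queued there) and the entry/update counters
 * are adjusted.
 */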
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	assert_spin_locked(&head->lock);
	list_del(&ref->list);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

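/*
 * Try to merge 'ref' with the refs that follow it on the same head.
 * Refs of the same type pointing at the same root/parent (and, for data
 * refs, the same objectid/offset) have their ref_mod values combined,
 * and entries that cancel out are dropped.  Refs whose seq is still
 * protected by the tree mod log (next->seq >= seq) are left alone.
 * Returns true when 'ref' itself was consumed, telling the caller to
 * restart its scan of the list.
 */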
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next)))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

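/*
 * Merge the delayed refs queued on one head before they are run.  Data
 * heads are skipped since they rarely collect many refs, and the lowest
 * seq of any active tree mod log user is taken as a cutoff so refs that
 * user still needs to see are not merged away.
 */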
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

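/*
 * Return 1 if a delayed ref with the given seq must be held back because
 * an active tree mod log user still needs to see it, 0 otherwise.
 */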
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

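/*
 * Pick the next delayed ref head to run.  The scan starts at
 * run_delayed_start and wraps around to the beginning of the rbtree
 * once, skipping heads that another thread is already processing.  The
 * chosen head is marked as processing and run_delayed_start is advanced
 * past it.
 */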
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref)))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;
	int qrecord_inserted = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated.
	 * The same bad practice exists elsewhere; follow it for now, but
	 * this needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

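/*
 * Typical usage (a sketch, not copied from any particular call site):
 * when a tree block is allocated or freed, the extent tree code records
 * the change here instead of updating the extent tree in place, roughly:
 *
 *	btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *				   parent, ref_root, level,
 *				   BTRFS_ADD_DELAYED_EXTENT, extent_op,
 *				   &old_ref_mod, &new_ref_mod);
 *
 * Only the delayed ref structures are touched at this point; the extent
 * tree itself is updated later when the transaction's delayed refs are
 * run.
 */
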
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, 0, 0, action, 0,
					&qrecord_inserted, old_ref_mod,
					new_ref_mod);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
				   num_bytes, parent, ref_root, owner, offset,
				   action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}

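/*
 * Queue only a head carrying an extent_op (a flags and/or key update)
 * for an existing extent.  BTRFS_UPDATE_DELAYED_HEAD leaves the head's
 * ref count unchanged (count_mod is 0) and no individual ref node is
 * added.
 */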
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data, NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if any where found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

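/* Destroy the slab caches created by btrfs_delayed_ref_init(). */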
void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}