// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

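/*
 * Check whether @extent_item_pos falls inside the on-disk extent referenced
 * by the file extent item @fi and, if it does (or if @ignore_offset is set),
 * prepend a new extent_inode_elem recording the (inum, file offset) pair to
 * the *@eie list.
 *
 * Returns 0 on success, 1 if the position is outside the referenced range,
 * and -ENOMEM on allocation failure.
 */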
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

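/* Free a whole chain of extent_inode_elem, as built by check_extent_in_eb(). */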
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

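/*
 * Scan all file extent items in leaf @eb and record an inode element for
 * every item whose disk bytenr matches @wanted_disk_byte.
 */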
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need
	 * the key. Thus, we must look into all items and see whether we
	 * find one (or some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

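/*
 * An rbtree of prelim_refs, ordered by prelim_ref_compare(), with the
 * leftmost node cached and a count of the entries kept next to it.
 */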
struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

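/*
 * Update the per-search share count when a ref's count changes, counting
 * only transitions of ref->count across zero.
 */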
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

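/*
 * Return 1 if the direct preftree contains a shared data backref whose
 * parent block is @bytenr, 0 otherwise.
 */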
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

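/*
 * For a tree block ref (level != 0), simply add the node at @level to
 * @parents. For a data ref, walk forward from the position @path points at
 * and add the leaf of every file extent item referencing
 * ref->wanted_disk_byte, optionally collecting inode elements when
 * @extent_item_pos is given.
 */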
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for a normal backref but the bytenr of this
	 *    leaf matches a shared data backref.
	 * 3. The leaf owner is not equal to the root we are searching for.
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for a normal backref but the bytenr of
		 * this leaf matches a shared data backref, OR
		 * the leaf owner is not equal to the root we are searching
		 * for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	root = btrfs_get_fs_root(fs_info, &root_key, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

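/* The inode element list is stashed in the aux field of a ulist node. */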
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * We can only tolerate ENOENT; otherwise, we should catch
		 * the error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the preftrees
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, so
			 * we know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the preftrees
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the preftrees
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and
 * behaves much like the trans == NULL case; the only difference is that it
 * will not use the commit root.
 * This special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

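/*
 * Free a ulist of leaves together with the inode element lists hanging off
 * the aux field of each node.
 */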
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leaves with a reference to the specified combination of bytenr
 * and offset. The leaves will be stored in the leafs ulist, which must be
 * freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

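/*
 * Like btrfs_find_all_roots_safe(), but holds fs_info->commit_root_sem for
 * the duration of the walk when no transaction handle is given.
 */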
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}

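/*
 * Find the first INODE_EXTREF item for @inode_objectid with an offset of at
 * least @start_off. On success, *ret_extref points into the leaf held by
 * @path, and *found_off (when non-NULL) receives the item's key offset.
 */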
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_EXTREF item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_read(eb);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG();
		return 0;
	}

	return -EIO;
}
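
/*
 * Illustrative sketch, not part of the original file: one way a caller could
 * use extent_from_logical() to tell whether a logical address belongs to a
 * data extent or a tree block. The function name demo_classify_logical and
 * its return convention are made up for this example.
 */
static int __maybe_unused demo_classify_logical(struct btrfs_fs_info *fs_info,
						u64 logical)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_free_path(path);
	if (ret < 0)
		return ret;

	/* 1 for a data extent, 0 for a tree block */
	return (flags & BTRFS_EXTENT_FLAG_DATA) ? 1 : 0;
}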

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
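
/*
 * Illustrative sketch, not part of the original file: walking all tree block
 * backrefs of one extent item with tree_backref_for_extent(). It assumes eb,
 * key, ei and item_size were set up by the caller (e.g. via
 * extent_from_logical() and btrfs_item_ptr()); demo_walk_tree_refs is a
 * hypothetical name.
 */
static void __maybe_unused demo_walk_tree_refs(struct btrfs_fs_info *fs_info,
					       struct extent_buffer *eb,
					       struct btrfs_key *key,
					       struct btrfs_extent_item *ei,
					       u32 item_size)
{
	unsigned long ptr = 0;	/* must be 0 for the first call */
	u64 root;
	u8 level;

	while (tree_backref_for_extent(&ptr, eb, key, ei, item_size,
				       &root, &level) == 0)
		btrfs_debug(fs_info, "extent referenced from root %llu level %u",
			    root, level);
}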

static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
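
/*
 * Illustrative sketch, not part of the original file: a minimal
 * iterate_extent_inodes_t callback (the typedef is assumed from backref.h)
 * and a call that resolves all inodes referencing an extent against the
 * commit root. demo_count_ref and demo_count_extent_refs are hypothetical
 * names.
 */
static int __maybe_unused demo_count_ref(u64 inum, u64 offset, u64 root,
					 void *ctx)
{
	u64 *count = ctx;

	/* called once per resolved (inode, offset, root) tuple */
	(*count)++;
	return 0;	/* returning non-zero would stop the iteration */
}

static int __maybe_unused demo_count_extent_refs(struct btrfs_fs_info *fs_info,
						 u64 extent_bytenr, u64 pos)
{
	u64 count = 0;
	int ret;

	/* search_commit_root=1 avoids attaching to a transaction */
	ret = iterate_extent_inodes(fs_info, extent_bytenr, pos, 1,
				    demo_count_ref, &count, false);
	return ret < 0 ? ret : (int)count;
}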

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx, ignore_offset);

	return ret;
}
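
/*
 * Illustrative sketch, not part of the original file: resolving a raw logical
 * address (e.g. one reported in a csum error) straight to the inodes that use
 * it, reusing the hypothetical demo_count_ref callback from above.
 */
static int __maybe_unused demo_count_refs_at_logical(struct btrfs_fs_info *fs_info,
						     u64 logical)
{
	struct btrfs_path *path;
	u64 count = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = 1;

	ret = iterate_inodes_from_logical(logical, fs_info, path,
					  demo_count_ref, &count, false);
	btrfs_free_path(path);
	return ret < 0 ? ret : (int)count;
}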

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid,
				fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += name_len;
			cur_offset += sizeof(*extref);
		}
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (it may have been truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt paths available in
 * ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed; otherwise
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}
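
/*
 * Illustrative sketch, not part of the original file: a full
 * init_ipath()/paths_from_inode()/free_ipath() round trip. The 4096-byte
 * container size and the name demo_log_paths are arbitrary; note that the
 * btrfs_path is owned by the caller, not freed by free_ipath().
 */
static int __maybe_unused demo_log_paths(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	int i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret)
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			btrfs_debug(fs_root->fs_info, "path %d: %s", i,
				    (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}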

struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}

int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only iteration of tree backrefs is supported yet.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks and we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Go to the next backref item of the current bytenr, which can be either
 * inlined or keyed.
 *
 * The caller needs to check whether it's an inline ref or not via
 * iter->cur_key.
 *
 * Return 0 if we get the next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
	return 0;
}
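
/*
 * Illustrative sketch, not part of the original file: a typical
 * start/next loop over all tree backrefs of a bytenr using the iterator
 * above. demo_dump_tree_backrefs is a hypothetical name and
 * btrfs_backref_iter_free() is assumed from backref.h.
 */
static int __maybe_unused demo_dump_tree_backrefs(struct btrfs_fs_info *fs_info,
						  u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	/* start positions at the first backref, next advances until ret > 0 */
	for (ret = btrfs_backref_iter_start(iter, bytenr);
	     ret == 0; ret = btrfs_backref_iter_next(iter))
		btrfs_debug(fs_info, "backref key (%llu %u %llu)",
			    iter->cur_key.objectid, iter->cur_key.type,
			    iter->cur_key.offset);

	btrfs_backref_iter_free(iter);
	return ret < 0 ? ret : 0;
}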

void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}