// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

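/*
 * Check whether the file extent item @fi in leaf @eb covers @extent_item_pos
 * and, unless the offset check is skipped (compressed/encrypted extents or
 * @ignore_offset), prepend a new inode/offset pair to the *@eie list.
 * Returns 0 on success, 1 if the position lies outside this extent, or
 * -ENOMEM on allocation failure.
 */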
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

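/*
 * Scan all items of leaf @eb for file extent items whose disk bytenr matches
 * @wanted_disk_byte and collect the matching inode/offset pairs in *@eie.
 */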
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

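/*
 * A cached rbtree of prelim_refs, ordered by prelim_ref_compare() so that
 * identical refs can be merged on insert. @count is the number of unique
 * refs currently in the tree.
 */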
struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

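/*
 * Keep sc->share_count in sync when a ref's count changes from @oldcount to
 * @newcount; only transitions across zero are counted.
 */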
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

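/*
 * Return 1 if a shared data backref pointing at @bytenr was already added to
 * the direct preftree, 0 otherwise.
 */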
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {0};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

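/*
 * Walk the file extent items matching ref->key_for_search in the given root
 * and add each leaf (or, for level > 0, the node at @level) referencing
 * ref->wanted_disk_byte to the @parents ulist, optionally attaching the
 * inode/offset pairs for @extent_item_pos as the ulist aux data.
 */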
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   u64 total_refs, bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for a normal backref but the bytenr of this
	 *    leaf matches a shared data backref.
	 * For both cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < total_refs) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref.
		 */
		if (slot == 0 && is_shared_data_backref(preftrees, eb->start)) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, u64 total_refs,
				bool ignore_offset)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;
	struct btrfs_key search_key = ref->key_for_search;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_get_fs_root(fs_info, &root_key, false);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (btrfs_is_testing(fs_info)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken form the backref is much larger then the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	/* root node has been locked, we can release @subvol_srcu safely here */
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, total_refs, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos, u64 total_refs,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   total_refs, ignore_offset);
		/*
		 * We can only tolerate -ENOENT; otherwise we must catch the
		 * error and return it directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, u64 *total_refs,
			    struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		*total_refs += count;
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum; we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   u64 *total_refs, struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	*total_refs += btrfs_extent_refs(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs and behaves
 * much like the trans == NULL case; the only difference is that it will not
 * search the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	/* total of both direct AND indirect refs! */
	u64 total_refs = 0;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, &total_refs, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees,
					      &total_refs, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, total_refs, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

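/*
 * Free the inode element lists hanging off each ulist node's aux pointer and
 * then free the ulist itself.
 */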
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leaves with a reference to the specified combination of bytenr and
 * offset. The leaves will be stored in the *leafs ulist, which must be freed
 * with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

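/*
 * Wrapper around btrfs_find_all_roots_safe() that takes commit_root_sem when
 * called without a transaction handle.
 */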
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}

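/*
 * Find the first INODE_EXTREF item of @inode_objectid with an offset of at
 * least @start_off. On success *ret_extref points into the leaf held by
 * @path and, if @found_off is non-NULL, it is set to the item's offset.
 */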
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for providing
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_read(eb);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1787 1788
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);
1789 1790 1791 1792 1793 1794 1795 1796

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
1797
			BUG();
1798 1799
		return 0;
	}
1800 1801 1802 1803 1804 1805 1806 1807

	return -EIO;
}

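/*
 * Usage sketch (editorial addition): mapping a logical address to its extent
 * item and branching on the flags returned through *flags_ret.  Variable
 * names are illustrative; iterate_inodes_from_logical() below follows the
 * same pattern.
 *
 *	struct btrfs_key found_key;
 *	u64 flags = 0;
 *	int ret;
 *
 *	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
 *	btrfs_release_path(path);
 *	if (ret < 0)
 *		return ret;
 *	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 *		// metadata extent: its inline backrefs can be walked with
 *		// tree_backref_for_extent(), see the sketch after it
 *	} else {
 *		// data extent: found_key describes the extent item and
 *		// logical - found_key.objectid is the offset into it
 *	}
 */
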
/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

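/*
 * Usage sketch (editorial addition): walking every tree block backref of a
 * metadata extent.  ptr starts at 0 and is advanced internally; the loop
 * ends when the function returns 1.  eb, found_key, ei and item_size are
 * assumed to describe the extent item, as set up by extent_from_logical().
 *
 *	unsigned long ptr = 0;
 *	u64 ref_root;
 *	u8 ref_level;
 *	int ret;
 *
 *	while (1) {
 *		ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
 *					      item_size, &ref_root, &ref_level);
 *		if (ret < 0)
 *			return ret;
 *		if (ret > 0)
 *			break;	// no more backrefs
 *		// the tree block is referenced from tree ref_root at
 *		// level ref_level
 *	}
 */
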
static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}

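/*
 * Callback sketch (editorial addition): a minimal iterate_extent_inodes_t
 * implementation that just counts the (inode, offset, root) triples
 * referencing an extent.  The context struct and function name are
 * hypothetical.
 *
 *	struct ref_count_ctx {
 *		u64 count;
 *	};
 *
 *	static int count_extent_ref(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		struct ref_count_ctx *rc = ctx;
 *
 *		rc->count++;
 *		return 0;	// returning non-zero stops the iteration
 *	}
 */
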
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx, ignore_offset);

	return ret;
}

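/*
 * Usage sketch (editorial addition): resolving a logical address to all
 * referencing inodes, roughly the pattern used by the LOGICAL_INO ioctl.
 * count_extent_ref() and ref_count_ctx are the hypothetical callback and
 * context from the sketch above.
 *
 *	struct ref_count_ctx rc = { .count = 0 };
 *	int ret;
 *
 *	ret = iterate_inodes_from_logical(logical, fs_info, path,
 *					  count_extent_ref, &rc, false);
 *	if (ret == -EINVAL) {
 *		// logical points into a tree block, not a data extent
 *	}
 */
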
typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid,
				fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}
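
/*
 * Usage sketch (editorial addition): the init_ipath()/paths_from_inode()/
 * free_ipath() lifecycle, roughly the pattern used by the INO_PATHS ioctl.
 * The size and variable names are illustrative.
 *
 *	struct inode_fs_paths *ipath;
 *	u32 i;
 *	int ret;
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath))
 *		return PTR_ERR(ipath);
 *
 *	ret = paths_from_inode(inum, ipath);
 *	if (!ret) {
 *		for (i = 0; i < ipath->fspath->elem_cnt; i++) {
 *			// (char *)(unsigned long)ipath->fspath->val[i] is a
 *			// zero-terminated path to the inode
 *		}
 *	}
 *
 *	free_ipath(ipath);
 */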