// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

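/*
 * One (inode number, file offset) pair under which a resolved data extent
 * is referenced.  The elements form a singly linked list that is attached
 * to the matching ulist node through its aux field (see
 * unode_aux_to_inode_list()).
 */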
struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

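/*
 * Record an (inode, offset) pair for a file extent item that references the
 * extent currently being resolved.  Unless @ignore_offset is set, plain
 * (uncompressed, unencoded) extents are only recorded when their byte range
 * covers @extent_item_pos, and the recorded offset is the file offset at
 * which that byte is visible in the inode.
 *
 * Returns 0 on success, 1 if the item was skipped because it does not cover
 * the position, or -ENOMEM on allocation failure.
 */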
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see whether we
	 * find one (or some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
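
/*
 * All three trees are normally initialized together before a backref walk,
 * exactly as find_parent_nodes() below does:
 *
 *	struct preftrees preftrees = {
 *		.direct = PREFTREE_INIT,
 *		.indirect = PREFTREE_INIT,
 *		.indirect_missing_keys = PREFTREE_INIT
 *	};
 */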

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}
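
/*
 * Example: when an extent written by one inode is later reflinked into
 * another file or captured by a snapshot, the backref walk finds a second
 * reference with a positive count.  share_count then reaches 2 and
 * extent_is_shared() returns BACKREF_FOUND_SHARED, which lets
 * btrfs_check_shared() bail out early.
 */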

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}
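
/*
 * Example: when prelim_ref_insert() merges a delayed BTRFS_DROP_DELAYED_REF
 * (newref->count == -1) into an existing ref with count 1, the merged count
 * transitions from 1 to 0 and share_count is decremented.
 */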

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

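/*
 * Check whether the direct preftree contains a shared data backref whose
 * parent is @bytenr, i.e. whether the leaf at @bytenr is also referenced
 * directly.  add_all_parents() uses this to skip leaves that are already
 * covered by a shared backref while resolving indirect (keyed) backrefs.
 */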
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {0};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

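/*
 * Resolve a single ref to the set of tree blocks ("parents") that hold it.
 * For metadata (level != 0) this is simply the node found at @level in
 * @path.  For data, walk the file extent items pointing at
 * ref->wanted_disk_byte and add each containing leaf to @parents; when
 * @extent_item_pos is given, an inode/offset list is attached to the ulist
 * entry via its aux pointer.
 */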
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;
	struct btrfs_key search_key = ref->key_for_search;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_get_fs_root(fs_info, &root_key, false);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (btrfs_is_testing(fs_info)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such a case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	/* root node has been locked, we can release @subvol_srcu safely here */
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * we can only tolerate ENOENT; otherwise, we should catch the error
		 * and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and behaves
 * much like the trans == NULL case; the only difference is that it does not
 * use the commit root.
 * This special case exists for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leaves with a reference to the specified combination of bytenr and
 * offset. The leaves will be stored in the leafs ulist, which must be freed
 * with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}
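
/*
 * Illustrative sketch of a caller (not verbatim from this file): a
 * fiemap-style user keeps two scratch ulists around and asks, for each
 * extent it is about to report, whether that extent is referenced from
 * anywhere else:
 *
 *	ret = btrfs_check_shared(root, btrfs_ino(inode), disk_bytenr,
 *				 roots, tmp_ulist);
 *	if (ret < 0)
 *		goto out;
 *	if (ret)
 *		flags |= FIEMAP_EXTENT_SHARED;
 *
 * Both ulists are released (not freed) before btrfs_check_shared() returns,
 * so the same pair can be reused for the next extent.
 */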

int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_read(eb);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
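
/*
 * Illustrative sketch of how a caller handles the backwards-filled buffer
 * (not verbatim from this file):
 *
 *	name = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				 eb, parent, dest, size);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *	if (name < dest)
 *		return -ENAMETOOLONG;	(the string did not fit in size bytes)
 *
 * On success the 0-terminated path starts at the returned pointer and its
 * terminating byte sits at dest + size - 1.
 */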

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);
1788 1789 1790 1791 1792 1793 1794 1795

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
1796
			BUG();
1797 1798
		return 0;
	}
1799 1800 1801 1802 1803 1804 1805 1806

	return -EIO;
}
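
/*
 * A minimal usage sketch of extent_from_logical(): allocate a path, check
 * the negative return code, then look at *flags_ret.  The function below is
 * hypothetical and only illustrates the calling convention; it is not part
 * of the original backref.c.
 */
static int __maybe_unused example_resolve_logical(struct btrfs_fs_info *fs_info,
						  u64 logical)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* on success, path points to the extent item covering @logical */
	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	if (ret < 0)
		goto out;

	if (flags & BTRFS_EXTENT_FLAG_DATA)
		btrfs_debug(fs_info, "logical %llu maps to a data extent",
			    logical);
	else
		btrfs_debug(fs_info, "logical %llu maps to a tree block",
			    logical);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}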

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
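
/*
 * A minimal usage sketch of the iterator protocol above: @ptr starts at 0,
 * is carried between calls, and iteration ends once tree_backref_for_extent()
 * returns 1.  The function below is hypothetical and assumes @eb/@key/@ei
 * already describe a metadata extent item (e.g. as located by
 * extent_from_logical()); it is not part of the original backref.c.
 */
static int __maybe_unused example_walk_tree_backrefs(struct extent_buffer *eb,
						     struct btrfs_key *key,
						     struct btrfs_extent_item *ei,
						     u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	do {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret == 0)
			btrfs_debug(eb->fs_info,
				    "extent referenced from root %llu at level %u",
				    root, level);
	} while (ret == 0);

	return 0;
}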

static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx, ignore_offset);

	return ret;
}
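
/*
 * A minimal sketch of the callback side of the two iterators above.  The
 * callback and the surrounding usage are hypothetical (not part of the
 * original backref.c); they only illustrate the iterate_extent_inodes_t
 * contract: return 0 to continue, non-zero to stop the iteration.
 */
static int __maybe_unused example_count_extent_users(u64 inum, u64 offset,
						     u64 root, void *ctx)
{
	u64 *count = ctx;

	(*count)++;
	pr_debug("extent referenced by inode %llu (root %llu) at file offset %llu\n",
		 inum, root, offset);
	return 0;
}

/*
 * A caller could then do, for a given logical byte address (assumptions as
 * above):
 *
 *	u64 count = 0;
 *	int ret;
 *
 *	ret = iterate_inodes_from_logical(logical, fs_info, path,
 *					  example_count_extent_users,
 *					  &count, false);
 */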

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid,
				fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (possibly truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}
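
/*
 * A minimal sketch of the ipath lifecycle built from the helpers above:
 * init_ipath() allocates the container, paths_from_inode() fills it, the
 * resolved strings are read from ipath->fspath->val[], and free_ipath()
 * releases everything except the caller's btrfs_path.  The function name
 * and the 4096-byte buffer size are hypothetical; this is not part of the
 * original backref.c.
 */
static int __maybe_unused example_print_inode_paths(struct btrfs_root *fs_root,
						     u64 inum)
{
	struct inode_fs_paths *ipath;
	struct btrfs_path *path;
	u32 i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* room for struct btrfs_data_container plus the path strings */
	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		goto out_path;
	}

	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto out_ipath;

	for (i = 0; i < ipath->fspath->elem_cnt; i++)
		btrfs_debug(fs_root->fs_info, "inode %llu path: %s", inum,
			    (char *)(unsigned long)ipath->fspath->val[i]);

	if (ipath->fspath->elem_missed)
		btrfs_debug(fs_root->fs_info,
			    "%u paths did not fit into the buffer",
			    ipath->fspath->elem_missed);

out_ipath:
	free_ipath(ipath);
out_path:
	btrfs_free_path(path);
	return ret;
}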