/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

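/*
 * Check whether the file extent item @fi in leaf @eb covers
 * @extent_item_pos.  For plain (uncompressed, unencrypted) extents the
 * position must fall inside [data_offset, data_offset + data_len);
 * @ignore_offset skips that range check.  On a match, a new
 * extent_inode_elem is allocated and prepended to @eie.
 *
 * Returns 0 on success, 1 if the position lies outside the extent's
 * range and -ENOMEM if the allocation fails.
 */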
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

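/*
 * Scan every item of leaf @eb for file extent items that point at
 * @wanted_disk_byte and record the matching inodes/offsets in @eie via
 * check_extent_in_eb().  A shared data ref only names the leaf, not the
 * key, which is why all slots have to be examined.
 */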
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos,
					 eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

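/*
 * An rbtree of prelim_refs, ordered by prelim_ref_compare(); @count is
 * the current number of nodes in the tree.
 */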
struct preftree {
	struct rb_root root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

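/*
 * Record a ref->count transition for the shared-extent check: a count
 * crossing from <1 to >0 increments sc->share_count, the reverse
 * transition decrements it.  A NULL @sc or a 0 -> 0 transition is a
 * no-op.
 */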
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;

	root = &preftree->root;
	p = &root->rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color(&newref->rbnode, root);
}

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref, &preftree->root,
					     rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key) {
		ref->key_for_search = *key;
		/*
		 * We can often find data backrefs with an offset that is too
		 * large (>= LLONG_MAX, maximum allowed file offset) due to
		 * underflows when subtracting a file's offset with the data
		 * offset of its corresponding extent data item. This can
		 * happen for example in the clone ioctl.
		 * So if we detect such a case we set the search key's offset
		 * to zero to make sure we will find the matching file extent
		 * item at add_all_parents(), otherwise we will miss it
		 * because the offset taken from the backref is much larger
		 * than the offset of the file extent item. This can make us
		 * scan a very large number of file extent items, but at
		 * least it will not make us miss any.
		 * This is an ugly workaround for a behaviour that should have
		 * never existed, but it does and a fix for the clone ioctl
		 * would touch a lot of places, cause backwards incompatibility
		 * and would not fix the problem for extents cloned with older
		 * kernels.
		 */
		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
		    ref->key_for_search.offset >= LLONG_MAX)
			ref->key_for_search.offset = 0;
	} else {
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
	}

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

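/*
 * Collect the parents of @ref into the @parents ulist.  For an interior
 * node (level != 0) that is simply the block at @level on @path.  For a
 * leaf, scan forward over the inode's EXTENT_DATA items, adding every
 * leaf that holds a file extent item pointing at @ref->wanted_disk_byte,
 * until @total_refs references have been seen.
 */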
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   u64 total_refs, bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with
	 * slot==nritems. In that case, go to the next leaf before we continue.
	 */
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < total_refs) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			count++;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, u64 total_refs,
				bool ignore_offset)
{
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	int index;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_get_fs_root(fs_info, &root_key, false);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = PTR_ERR(root);
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		goto out;
	}

	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
					0, 0);
	else
		ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
					    time_seq);

	/* root node has been locked, we can release @subvol_srcu safely here */
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, ref, level, time_seq,
			      extent_item_pos, total_refs, ignore_offset);
out:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos, u64 total_refs,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, ref,
					   parents, extent_item_pos,
					   total_refs, ignore_offset);
		/*
		 * we can only tolerate ENOENT; otherwise, we should catch
		 * the error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, u64 *total_refs,
			    struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct btrfs_key *op_key = NULL;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key) {
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
		op_key = &tmp_op_key;
	}

	spin_lock(&head->lock);
	for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG_ON(1);
		}
		*total_refs += count;
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   u64 *total_refs, struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	*total_refs += btrfs_extent_refs(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EINVAL;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and
 * behaves much like the trans == NULL case, the only difference being that
 * it does not search the commit root.
 * This special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned.  If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	/* total of both direct AND indirect refs! */
	u64 total_refs = 0;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, &total_refs, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees,
					      &total_refs, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, total_refs, sc,
				    ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		WARN_ON(ref->count < 0);
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}
				btrfs_tree_read_lock(eb);
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie,
							ignore_offset);
				btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

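/*
 * Free a ulist of leaves as filled in by find_parent_nodes(), including
 * the extent_inode_elem lists hanging off each node's aux field.
 */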
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leaves with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leaves will be stored in the *leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 time_seq, struct ulist **leafs,
				const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}
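
/*
 * Example usage (a sketch; the caller, its variables and the error
 * handling are hypothetical):
 *
 *	struct ulist *roots = NULL;
 *	bool shared = false;
 *	int ret;
 *
 *	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, SEQ_LAST,
 *				   &roots, false);
 *	if (!ret) {
 *		shared = (roots->nnodes > 1);
 *		ulist_free(roots);
 *	}
 */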

/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to allocate a transaction in order to account for
 * delayed refs, but continues on even when the alloc fails.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist *tmp = NULL;
	struct ulist *roots = NULL;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->objectid,
		.inum = inum,
		.share_count = 0,
	};

	tmp = ulist_alloc(GFP_NOFS);
	roots = ulist_alloc(GFP_NOFS);
	if (!tmp || !roots) {
		ulist_free(tmp);
		ulist_free(roots);
		return -ENOMEM;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
	ulist_free(tmp);
	ulist_free(roots);
	return ret;
}

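/*
 * Find the first INODE_EXTREF item for @inode_objectid whose key offset
 * is at least @start_off.  On success the item is returned through
 * @ret_extref (and its key offset through @found_off, when non-NULL);
 * -ENOENT means no further extended refs exist for this inode.
 */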
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_EXTREF item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

/*
 * this iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. The path is only given within the current file system.
 * Therefore, it never starts with a '/'. The caller is responsible to provide
 * "size" bytes in "dest". The dest buffer will be filled backwards. Finally,
 * the start point of the resulting string is returned. This pointer is within
 * dest, normally.
 * In case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. That way, the caller can determine how much space would be
 * required for the path to fit into the buffer. In that case, the returned
 * value will be smaller than dest. Callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
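
/*
 * Example (a sketch; the caller and its buffer management are
 * hypothetical): resolving a name into a PATH_MAX sized buffer and
 * detecting overflow, as described in the comment above:
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *	char *p;
 *
 *	p = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *			      eb, parent, buf, PATH_MAX);
 *	if (!IS_ERR(p) && p < buf)
 *		needed = (buf + PATH_MAX) - p;	// buffer was too small
 */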

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG_ON(1);
		return 0;
	}

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
1756
 * get_extent_inline_ref must pass the modified ptr parameter to get the
1757 1758 1759
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
1760 1761 1762 1763 1764 1765 1766
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
1767 1768 1769 1770 1771 1772 1773 1774 1775
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1776 1777 1778 1779 1780 1781 1782 1783 1784 1785
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
1786 1787 1788 1789
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EINVAL;

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
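
/*
 * Usage sketch (editorial illustration): the calling convention of
 * get_extent_inline_ref(). ptr must start at 0 and carries iterator state
 * between calls; a return of 1 means the ref just returned was the last one.
 * The function name walk_inline_refs_sketch is hypothetical.
 */
static int __maybe_unused walk_inline_refs_sketch(const struct extent_buffer *eb,
						  const struct btrfs_key *key,
						  const struct btrfs_extent_item *ei,
						  u32 item_size)
{
	unsigned long ptr = 0;	/* must be 0 for the first call */
	struct btrfs_extent_inline_ref *eiref;
	int type;
	int ret;

	do {
		ret = get_extent_inline_ref(&ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return ret;	/* -ENOENT or -EINVAL */
		/* eiref/type describe one inline ref here, even when ret == 1 */
	} while (ret == 0);

	return 0;
}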

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see the get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error. see the usage sketch after this function.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
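
/*
 * Usage sketch (editorial illustration): resolve the (root, level) pair of
 * every tree block backref of a metadata extent, the pattern scrub uses for
 * error reporting. The function name is hypothetical.
 */
static void __maybe_unused report_tree_backrefs_sketch(struct extent_buffer *eb,
						       struct btrfs_key *key,
						       struct btrfs_extent_item *ei,
						       u32 item_size)
{
	unsigned long ptr = 0;	/* iterator state, 0 for the first call */
	u64 root;
	u8 level;

	while (tree_backref_for_extent(&ptr, eb, key, ei, item_size,
				       &root, &level) == 0) {
		/* one (root, level) pair per successful call */
	}
}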

static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
			     struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		btrfs_debug(fs_info,
			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
			    extent_item_objectid, eie->inum,
			    eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			btrfs_debug(fs_info,
				    "stopping iteration for %llu due to ret=%d",
				    extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 * see the callback sketch after this function.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
				u64 extent_item_objectid, u64 extent_item_pos,
				int search_commit_root,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
			extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	} else {
		down_read(&fs_info->commit_root_sem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}
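
/*
 * Callback sketch (editorial illustration): a minimal iterate_extent_inodes_t
 * implementation. Real callers, e.g. the LOGICAL_INO ioctl path, pack the
 * triples into a btrfs_data_container instead; this name and ctx layout are
 * hypothetical.
 */
static int __maybe_unused count_extent_inodes_sketch(u64 inum, u64 offset,
						     u64 root, void *ctx)
{
	u64 *count = ctx;

	/* called once per (inode, offset, root) triple that was resolved */
	(*count)++;

	return 0;	/* returning non-zero would stop the iteration */
}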

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
					extent_item_pos, search_commit_root,
					iterate, ctx, ignore_offset);

	return ret;
}

typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
				&found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				"following ref at offset %u for inode %llu in tree %llu",
				cur, found_key.objectid, fs_root->objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		extent_buffer_get(eb);

		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += btrfs_inode_extref_name_len(eb, extref);
			cur_offset += sizeof(*extref);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (possibly truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt paths available in
 * ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths. see the usage sketch after this
 * function.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}
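
/*
 * Usage sketch (editorial illustration): the init_ipath() /
 * paths_from_inode() / free_ipath() lifecycle, as driven by the
 * BTRFS_IOC_INO_PATHS ioctl. The 4096-byte size and the function name are
 * arbitrary example values; init_ipath() and free_ipath() are declared in
 * backref.h.
 */
static int __maybe_unused ipath_lifecycle_sketch(struct btrfs_root *fs_root,
						 struct btrfs_path *path,
						 u64 inum)
{
	struct inode_fs_paths *ipath;
	int ret;
	u32 i;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath))
		return PTR_ERR(ipath);

	ret = paths_from_inode(inum, ipath);
	if (!ret) {
		for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
			/* each val[i] holds a zero-terminated path */
			const char *p = (const char *)(unsigned long)
					ipath->fspath->val[i];

			btrfs_debug(fs_root->fs_info, "path %u: %s", i, p);
		}
	}

	free_ipath(ipath);
	return ret;
}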

struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}
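
/*
 * Worked example (editorial): if total_bytes is 4096 and the
 * btrfs_data_container header takes 16 bytes, the container offers
 * bytes_left = 4080 for path data and bytes_missing = 0; if total_bytes
 * were only 8, bytes_left would be 0 and bytes_missing would record the
 * 8-byte shortfall. The header size is illustrative.
 */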

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
					struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}