/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 *
55 56 57
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
58 59
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
60 61
};
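
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * starts with the weakest force level and escalates only when the
 * allocator comes up empty, e.g.
 *
 *	ret = do_chunk_alloc(trans, extent_root,
 *			     btrfs_get_alloc_profile(root, 1),
 *			     CHUNK_ALLOC_NO_FORCE);
 *
 * while paths that know they are about to run out of space pass
 * CHUNK_ALLOC_FORCE.
 */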

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
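
/*
 * Illustrative sketch, not part of the original file: a successful
 * allocation and its later release pair up as
 *
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, 0);
 *	...
 *	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE, 0);
 *
 * with RESERVE_ALLOC_NO_ACCOUNT used when the caller has already done
 * the ENOSPC accounting itself.
 */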

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 bytenr,
			      u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins,
				     int no_quota);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve,
				       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}
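
/*
 * Example (illustrative, not in the original file): a group created for
 * metadata in RAID1 has both bits set, so
 * block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA) is true while
 * block_group_bits(cache, BTRFS_BLOCK_GROUP_DATA) is not.
 */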

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
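
/*
 * Example (illustrative, not in the original file): with block groups
 * covering [0, 1G) and [1G, 2G), a contains == 1 search for bytenr
 * 1G + 4K returns the [1G, 2G) group, while a contains == 0 search for
 * a bytenr beyond the last group returns NULL.
 */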

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}
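
/*
 * Illustrative note, not in the original file: for the block group that
 * starts at logical address 0, the first branch above excludes the
 * [0, BTRFS_SUPER_INFO_OFFSET) range outright, and each mirror copy
 * found by btrfs_rmap_block() is clipped to the block group boundaries
 * before being added to bytes_super.
 */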

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
				struct btrfs_block_group_cache *block_group)
{
	u64 start = block_group->key.objectid;
	u64 len = block_group->key.offset;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		root->nodesize : root->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by cache_block_group. Since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, as their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
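
/*
 * Worked example (illustrative, not in the original file): caching a
 * range [0, 1M) while the pinned extent tree holds [256K, 512K) adds
 * [0, 256K) and [512K, 1M) to the free space cache and returns 768K
 * in total_added.
 */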

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = -ENOMEM;
	bool wakeup = true;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(extent_root, block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto err;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(extent_root, block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(extent_root, block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;
err:
	btrfs_free_path(path);
	up_read(&fs_info->commit_root_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->caching_ctl = NULL;
		block_group->cached = BTRFS_CACHE_ERROR;
		spin_unlock(&block_group->lock);
	}
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(fs_info->extent_root,
						     cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(fs_info->extent_root, cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}
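
/*
 * Illustrative usage, not in the original file: callers that only want
 * the cheap load of the on-disk space cache pass load_cache_only == 1
 * and never wait on the caching kthread, while the allocator's slow
 * path kicks off a full scan with
 *
 *	ret = cache_block_group(block_group, 0);
 */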

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}
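
/*
 * Example (illustrative, not in the original file): a return value of 0
 * means an EXTENT_ITEM keyed (start, BTRFS_EXTENT_ITEM_KEY, len) exists
 * in the extent tree; a positive return means no such extent is
 * allocated there.
 */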

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
808
			     u64 offset, int metadata, u64 *refs, u64 *flags)
809 810 811 812 813 814 815 816 817 818 819 820
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs is for pointers in tree blocks not
 * referenced by their owner trees. The location of tree block is recorded
 * in the back refs. Actually the full back refs is generic, and can be
 * used in all cases the implicit back refs is used. The major shortcoming
 * of the full back refs is its overhead. Every time a tree block gets
 * COWed, we have to update back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs is used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointers in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs is used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs is used
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs is used, information about the lowest key and
 * level of the tree block is required. This information is stored in
 * the tree block info structure.
 */
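
/*
 * Worked example, illustrative and not in the original file: a data
 * extent at bytenr 128M referenced by subvolume root 256, inode 257 at
 * file offset 0 carries the implicit back ref key
 *
 *	(128M, BTRFS_EXTENT_DATA_REF_KEY,
 *	 hash_extent_data_ref(256, 257, 0))
 *
 * whereas a tree block that lost its owner tree's reference gets a full
 * back ref keyed (bytenr, BTRFS_SHARED_BLOCK_REF_KEY, parent bytenr).
 */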

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
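
/*
 * Illustrative example, not in the original file: refs that differ only
 * in the file offset hash to different key offsets, so
 * hash_extent_data_ref(256, 257, 0) and
 * hash_extent_data_ref(256, 257, 1ULL << 20) normally land in separate
 * EXTENT_DATA_REF items for the same extent.
 */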

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
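
/*
 * Illustrative mapping, not in the original file:
 *
 *	metadata (owner < BTRFS_FIRST_FREE_OBJECTID):
 *		parent == 0 -> BTRFS_TREE_BLOCK_REF_KEY   (implicit)
 *		parent != 0 -> BTRFS_SHARED_BLOCK_REF_KEY (full)
 *	data (owner >= BTRFS_FIRST_FREE_OBJECTID):
 *		parent == 0 -> BTRFS_EXTENT_DATA_REF_KEY  (implicit)
 *		parent != 0 -> BTRFS_SHARED_DATA_REF_KEY  (full)
 */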

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)

{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);
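	/*
	 * Illustrative sketch: for an item that already holds inline refs
	 * A and C, inserting B at 'ptr' shifts C right by 'size' bytes so
	 * the refs stay sorted:  [ei][A][C] -> [ei][A][B][C]
	 */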

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update or remove an inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
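		/*
		 * Illustrative sketch: dropping inline ref B from
		 * [ei][A][B][C] shifts C left over B, then the item is
		 * truncated by 'size' bytes, leaving [ei][A][C].
		 */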
		btrfs_truncate_item(root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(root, path, iref,
					     refs_to_add, extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(root, path, iref,
					     -refs_to_drop, NULL, last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

#define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
			       u64 *discarded_bytes)
{
	int j, ret = 0;
	u64 bytes_left, end;
	u64 aligned_start = ALIGN(start, 1 << 9);

	if (WARN_ON(start != aligned_start)) {
		len -= aligned_start - start;
		len = round_down(len, 1 << 9);
		start = aligned_start;
	}

	*discarded_bytes = 0;

	if (!len)
		return 0;

	end = start + len;
	bytes_left = len;

	/* Skip any superblocks on this device. */
	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
		u64 sb_start = btrfs_sb_offset(j);
		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
		u64 size = sb_start - start;

		if (!in_range(sb_start, start, bytes_left) &&
		    !in_range(sb_end, start, bytes_left) &&
		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
			continue;

		/*
		 * Superblock spans beginning of range.  Adjust start and
		 * try again.
		 */
		if (sb_start <= start) {
			start += sb_end - start;
			if (start > end) {
				bytes_left = 0;
				break;
			}
			bytes_left = end - start;
			continue;
		}

		if (size) {
			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
						   GFP_NOFS, 0);
			if (!ret)
				*discarded_bytes += size;
			else if (ret != -EOPNOTSUPP)
				return ret;
		}

		start = sb_end;
		if (start > end) {
			bytes_left = 0;
			break;
		}
		bytes_left = end - start;
	}

	if (bytes_left) {
		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
					   GFP_NOFS, 0);
		if (!ret)
			*discarded_bytes += bytes_left;
	}
	return ret;
}
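
/*
 * Example (illustrative): the >> 9 shifts convert byte offsets and lengths
 * into the 512-byte sectors blkdev_issue_discard() expects, so trimming
 * 1MiB at byte offset 1MiB issues sector 2048, count 2048.  Any superblock
 * copy inside the range splits the discard around BTRFS_SUPER_INFO_SIZE
 * bytes at that btrfs_sb_offset().
 */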

int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
			 u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			u64 bytes;
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length,
						  &bytes);
			if (!ret)
				discarded_bytes += bytes;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM; -EIO should not happen here */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		btrfs_put_bbio(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}
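
/*
 * Note (illustrative): btrfs_map_block() returns one stripe per on-disk
 * copy, so with a RAID1 profile both mirrors are trimmed and the value
 * reported through *actual_bytes can exceed num_bytes.
 */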

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset,
			 int no_quota)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, no_quota);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, no_quota);
	}
	return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_delayed_ref_node *node,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	struct btrfs_key key;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	u64 refs;
	int ret;
	int no_quota = node->no_quota;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!is_fstree(root_objectid) || !root->fs_info->quota_enabled)
		no_quota = 1;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if ((ret < 0 && ret != -EAGAIN) || !ret)
		goto out;

	/*
	 * Ok, we had -EAGAIN, which means we didn't have space to insert an
	 * inline extent ref, so just update the reference count and add a
	 * normal backref.
	 */
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;
	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return ret;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (trans->aborted)
		return 0;

	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = node->num_bytes;
	}

again:
	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == node->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == node->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = node->bytenr;
				key.offset = node->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	ref_root = ref->root;

	ins.objectid = node->bytenr;
	if (skinny_metadata) {
		ins.offset = ref->level;
		ins.type = BTRFS_METADATA_ITEM_KEY;
	} else {
		ins.offset = node->num_bytes;
		ins.type = BTRFS_EXTENT_ITEM_KEY;
	}

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins,
						node->no_quota);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node,
					     parent, ref_root,
					     ref->level, 0, 1,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node,
					  parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (trans->aborted) {
		if (insert_reserved)
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
		return 0;
	}

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		trace_run_delayed_ref_head(node, head, node->action);

		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
			}
		}
		return ret;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static inline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;

	if (list_empty(&head->ref_list))
		return NULL;

	/*
	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * This is to prevent a ref count from going down to zero, which deletes
	 * the extent item from the extent tree, when there still are references
	 * to add, which would fail because they would not find the extent item.
	 */
	list_for_each_entry(ref, &head->ref_list, list) {
		if (ref->action == BTRFS_ADD_DELAYED_REF)
			return ref;
	}

	return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
			  list);
}
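
/*
 * Illustrative: if a head queues [DROP, ADD] against an extent item that
 * currently holds a single reference, running the DROP first would delete
 * the item and the ADD would then fail to find it; picking the ADD first
 * keeps the item alive until every queued ref has been applied.
 */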

/*
 * Returns 0 on success or if called with an already aborted transaction.
 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
 */
static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     unsigned long nr)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_fs_info *fs_info = root->fs_info;
	ktime_t start = ktime_get();
	int ret;
	unsigned long count = 0;
	unsigned long actual_count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			if (count >= nr)
				break;

			spin_lock(&delayed_refs->lock);
			locked_ref = btrfs_select_ref_head(trans);
			if (!locked_ref) {
				spin_unlock(&delayed_refs->lock);
				break;
			}

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);
			spin_unlock(&delayed_refs->lock);
			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		spin_lock(&locked_ref->lock);

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);

		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
			spin_unlock(&locked_ref->lock);
			btrfs_delayed_ref_unlock(locked_ref);
			spin_lock(&delayed_refs->lock);
			locked_ref->processing = 0;
			delayed_refs->num_heads_ready++;
			spin_unlock(&delayed_refs->lock);
			locked_ref = NULL;
			cond_resched();
			count++;
			continue;
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		if (!ref) {
			/* All delayed refs have been processed, go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				btrfs_free_delayed_extent_op(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&locked_ref->lock);
				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				btrfs_free_delayed_extent_op(extent_op);

				if (ret) {
					/*
					 * Need to reset must_insert_reserved if
					 * there was an error so the abort stuff
					 * can cleanup the reserved space
					 * properly.
					 */
					if (must_insert_reserved)
						locked_ref->must_insert_reserved = 1;
					locked_ref->processing = 0;
					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
					btrfs_delayed_ref_unlock(locked_ref);
					return ret;
				}
				continue;
			}

			/*
			 * Need to drop our head ref lock and re-acquire the
			 * delayed ref lock and then re-check to make sure
			 * nobody got added.
			 */
			spin_unlock(&locked_ref->lock);
			spin_lock(&delayed_refs->lock);
			spin_lock(&locked_ref->lock);
			if (!list_empty(&locked_ref->ref_list) ||
			    locked_ref->extent_op) {
				spin_unlock(&locked_ref->lock);
				spin_unlock(&delayed_refs->lock);
				continue;
			}
			ref->in_tree = 0;
			delayed_refs->num_heads--;
			rb_erase(&locked_ref->href_node,
				 &delayed_refs->href_root);
			spin_unlock(&delayed_refs->lock);
		} else {
			actual_count++;
			ref->in_tree = 0;
			list_del(&ref->list);
		}
		atomic_dec(&delayed_refs->num_entries);

		if (!btrfs_delayed_ref_is_head(ref)) {
			/*
			 * when we play the delayed ref, also correct the
			 * ref_mod on head
			 */
			switch (ref->action) {
			case BTRFS_ADD_DELAYED_REF:
			case BTRFS_ADD_DELAYED_EXTENT:
				locked_ref->node.ref_mod -= ref->ref_mod;
				break;
			case BTRFS_DROP_DELAYED_REF:
				locked_ref->node.ref_mod += ref->ref_mod;
				break;
			default:
				WARN_ON(1);
			}
		}
		spin_unlock(&locked_ref->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			locked_ref->processing = 0;
			btrfs_delayed_ref_unlock(locked_ref);
			btrfs_put_delayed_ref(ref);
			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
			return ret;
		}

		/*
		 * If this node is a head, that means all the refs in this head
		 * have been dealt with, and we will pick the next head to deal
		 * with, so we must unlock the head and drop it from the cluster
		 * list before we release it.
		 */
		if (btrfs_delayed_ref_is_head(ref)) {
			if (locked_ref->is_data &&
			    locked_ref->total_ref_mod < 0) {
				spin_lock(&delayed_refs->lock);
				delayed_refs->pending_csums -= ref->num_bytes;
				spin_unlock(&delayed_refs->lock);
			}
			btrfs_delayed_ref_unlock(locked_ref);
			locked_ref = NULL;
		}
		btrfs_put_delayed_ref(ref);
		count++;
		cond_resched();
	}

	/*
	 * We don't want to include ref heads since we can have empty ref heads
	 * and those will drastically skew our runtime down since we just do
	 * accounting, no actual extent tree updates.
	 */
	if (actual_count > 0) {
		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
		u64 avg;

		/*
		 * We weigh the current average higher than our current runtime
		 * to avoid large swings in the average.
		 */
		spin_lock(&delayed_refs->lock);
		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
		spin_unlock(&delayed_refs->lock);
	}
	return 0;
}
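
/*
 * The runtime averaging above is an exponential moving average weighted
 * 3:1 toward the old value, e.g. (illustrative numbers) an old average of
 * 8ms and a 4ms run give (3 * 8 + 4) / 4 = 7ms.
 */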

#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif

static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
{
	u64 num_bytes;

	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
			     sizeof(struct btrfs_extent_inline_ref));
	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		num_bytes += heads * sizeof(struct btrfs_tree_block_info);

	/*
	 * We don't ever fill up leaves all the way so multiply by 2 just to be
	 * closer to what we're really going to want to use.
	 */
	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
}
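
/*
 * Worked example (illustrative): with skinny metadata each head costs
 * sizeof(extent item) + sizeof(inline ref), about 33 bytes, so a 16k leaf
 * covers on the order of 490 heads; the caller doubles the byte estimate
 * to account for half-empty leaves.
 */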

/*
 * Takes the number of bytes to be csummed and figures out how many leaves
 * it would require to store the csums for that many bytes.
 */
u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
{
	u64 csum_size;
	u64 num_csums_per_leaf;
	u64 num_csums;

	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = div64_u64(csum_size,
			(u64)btrfs_super_csum_size(root->fs_info->super_copy));
	num_csums = div64_u64(csum_bytes, root->sectorsize);
	num_csums += num_csums_per_leaf - 1;
	num_csums = div64_u64(num_csums, num_csums_per_leaf);
	return num_csums;
}
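
/*
 * Worked example (illustrative, assuming 4k sectors and 4-byte crc32c
 * csums with a 16k nodesize): a leaf holds roughly 16k / 4 ~ 4000 csums,
 * so 1GiB of data (262144 sectors) needs about 65 csum leaves.
 */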

int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_rsv *global_rsv;
	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
	u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
	u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
	u64 num_bytes, num_dirty_bgs_bytes;
	int ret = 0;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	num_heads = heads_to_leaves(root, num_heads);
	if (num_heads > 1)
		num_bytes += (num_heads - 1) * root->nodesize;
	num_bytes <<= 1;
	num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
							     num_dirty_bgs);
	global_rsv = &root->fs_info->global_block_rsv;

	/*
	 * If we can't allocate any more chunks lets make sure we have _lots_ of
	 * wiggle room since running delayed refs can create more delayed refs.
	 */
	if (global_rsv->space_info->full) {
		num_dirty_bgs_bytes <<= 1;
		num_bytes <<= 1;
	}

	spin_lock(&global_rsv->lock);
	if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
		ret = 1;
	spin_unlock(&global_rsv->lock);
	return ret;
}

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime;
	u64 val;

	smp_mb();
	avg_runtime = fs_info->avg_delayed_ref_runtime;
	val = num_entries * avg_runtime;
	if (num_entries * avg_runtime >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;

	return btrfs_check_space_for_delayed_refs(trans, root);
}

struct async_delayed_refs {
	struct btrfs_root *root;
	int count;
	int error;
	int sync;
	struct completion wait;
	struct btrfs_work work;
};

static void delayed_ref_async_start(struct btrfs_work *work)
{
	struct async_delayed_refs *async;
	struct btrfs_trans_handle *trans;
	int ret;

	async = container_of(work, struct async_delayed_refs, work);

	trans = btrfs_join_transaction(async->root);
	if (IS_ERR(trans)) {
		async->error = PTR_ERR(trans);
		goto done;
	}

	/*
	 * trans->sync means that when we call end_transaction, we won't
	 * wait on delayed refs
	 */
	trans->sync = true;
	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
	if (ret)
		async->error = ret;

	ret = btrfs_end_transaction(trans, async->root);
	if (ret && !async->error)
		async->error = ret;
done:
	if (async->sync)
		complete(&async->wait);
	else
		kfree(async);
}

int btrfs_async_run_delayed_refs(struct btrfs_root *root,
				 unsigned long count, int wait)
{
	struct async_delayed_refs *async;
	int ret;

	async = kmalloc(sizeof(*async), GFP_NOFS);
	if (!async)
		return -ENOMEM;

	async->root = root->fs_info->tree_root;
	async->count = count;
	async->error = 0;
	if (wait)
		async->sync = 1;
	else
		async->sync = 0;
	init_completion(&async->wait);

	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
			delayed_ref_async_start, NULL, NULL);

	btrfs_queue_work(root->fs_info->extent_workers, &async->work);

	if (wait) {
		wait_for_completion(&async->wait);
		ret = async->error;
		kfree(async);
		return ret;
	}
	return 0;
}
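
/*
 * Usage sketch (illustrative): fire-and-forget from a context that must
 * not block, vs. a synchronous call that waits for the worker and returns
 * its error:
 *
 *	btrfs_async_run_delayed_refs(root, 64, 0);
 *	ret = btrfs_async_run_delayed_refs(root, 64, 1);
 */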

/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	int ret;
	int run_all = count == (unsigned long)-1;
	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;

	/* We'll clean this up in btrfs_cleanup_transaction */
	if (trans->aborted)
		return 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	delayed_refs = &trans->transaction->delayed_refs;
	if (count == 0)
		count = atomic_read(&delayed_refs->num_entries) * 2;

again:
#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
	trans->can_flush_pending_bgs = false;
	ret = __btrfs_run_delayed_refs(trans, root, count);
	if (ret < 0) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (run_all) {
		if (!list_empty(&trans->new_bgs))
			btrfs_create_pending_block_groups(trans, root);

		spin_lock(&delayed_refs->lock);
		node = rb_first(&delayed_refs->href_root);
		if (!node) {
			spin_unlock(&delayed_refs->lock);
			goto out;
		}
		count = (unsigned long)-1;

		while (node) {
			head = rb_entry(node, struct btrfs_delayed_ref_head,
					href_node);
			if (btrfs_delayed_ref_is_head(&head->node)) {
				struct btrfs_delayed_ref_node *ref;

				ref = &head->node;
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
				cond_resched();
				goto again;
			} else {
				WARN_ON(1);
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		cond_resched();
		goto again;
	}
out:
	assert_qgroups_uptodate(trans);
	trans->can_flush_pending_bgs = can_flush_pending_bgs;
	return 0;
}

int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int level, int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;
	extent_op->level = level;

	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
					  num_bytes, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}

static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		return 0;
	}

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	spin_unlock(&delayed_refs->lock);

	spin_lock(&head->lock);
	list_for_each_entry(ref, &head->ref_list, list) {
		/* If it's a shared ref we know a cross reference exists */
		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
			ret = 1;
			break;
		}

		data_ref = btrfs_delayed_node_to_data_ref(ref);

		/*
		 * If our ref doesn't match the one we're currently looking at
		 * then we have a cross reference.
		 */
		if (data_ref->root != root->root_key.objectid ||
		    data_ref->objectid != objectid ||
		    data_ref->offset != offset) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&head->lock);
	mutex_unlock(&head->mutex);
	return ret;
}

static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0); /* Corruption */

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}
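
/*
 * Illustrative: the above answers "is this file extent referenced by
 * anything other than (root, objectid, offset)?" by checking the committed
 * extent tree first and then pending delayed refs, retrying while
 * check_delayed_ref() has to drop its locks (-EAGAIN); any result other
 * than 0 is treated by callers as a possible cross reference.
 */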

static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64, int);

	if (btrfs_test_is_dummy_root(root))
		return 0;

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (key.type != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset, 1);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = root->nodesize;
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0,
					   1);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
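
/*
 * Illustrative: after COWing a shared tree block, the new copy points at
 * the same children, so btrfs_inc_ref() walks the buffer and adds one
 * reference per child (file extents in a leaf, child blocks in a node);
 * btrfs_dec_ref() is the mirror operation when a block is dropped.
 */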

static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
fail:
	btrfs_release_path(path);
	return ret;
}

static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;

	spin_lock(&root->fs_info->block_group_cache_lock);

	/* If our block group was removed, we need a full search. */
	if (RB_EMPTY_NODE(&cache->cache_node)) {
		const u64 next_bytenr = cache->key.objectid + cache->key.offset;

		spin_unlock(&root->fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		cache = btrfs_lookup_first_block_group(root->fs_info,
						       next_bytenr);
		return cache;
	}
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}

static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	u64 num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching the
	 * block group.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

	if (trans->aborted)
		return 0;
again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/* We've already setup this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up next
	 * time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		/*
		 * So theoretically we could recover from this, simply set the
		 * super cache generation to 0 so we know to invalidate the
		 * cache, but then we'd have to keep track of the block groups
		 * that fail this way so we know we _have_ to reset this cache
		 * before the next commit or risk reading stale cache.  So to
		 * limit our exposure to horrible edge cases lets just abort the
		 * transaction, this only happens in really bad situations
		 * anyway.
		 */
		btrfs_abort_transaction(trans, root, ret);
		goto out_put;
	}
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_check_trunc_cache_free_space(root,
					&root->fs_info->global_block_rsv);
		if (ret)
			goto out_put;

		ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(root, SPACE_CACHE)) {
		/*
		 * don't bother trying to write stuff out _if_
		 * a) we're not cached,
		 * b) we're with nospace_cache mount option.
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
	num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;
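	/*
	 * Worked example (illustrative): a 1GiB block group gives
	 * div_u64(1GiB, 256MiB) = 4, so 4 * 16 = 64 pages, i.e. 256KiB of
	 * cache space preallocated with 4KiB pages.
	 */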

	ret = btrfs_check_data_free_space(inode, num_pages, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	btrfs_free_reserved_data_space(inode, num_pages);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	return ret;
}

int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache, *tmp;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_path *path;

	if (list_empty(&cur_trans->dirty_bgs) ||
	    !btrfs_test_opt(root, SPACE_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Could add new block groups, use _safe just in case */
	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
				 dirty_list) {
		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
			cache_save_setup(cache, trans, path);
	}

	btrfs_free_path(path);
	return 0;
}

/*
 * transaction commit does final block group cache writeback during a
 * critical section where nothing is allowed to change the FS.  This is
 * required in order for the cache to actually match the block group,
 * but can introduce a lot of latency into the commit.
 *
 * So, btrfs_start_dirty_block_groups is here to kick off block group
 * cache IO.  There's a chance we'll have to redo some of it if the
 * block group changes again during the commit, but it greatly reduces
 * the commit latency by getting rid of the easy block groups while
 * we're still allowing others to join the commit.
 */
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path = NULL;
	LIST_HEAD(dirty);
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;
	int loops = 0;

	spin_lock(&cur_trans->dirty_bgs_lock);
	if (list_empty(&cur_trans->dirty_bgs)) {
		spin_unlock(&cur_trans->dirty_bgs_lock);
		return 0;
	}
	list_splice_init(&cur_trans->dirty_bgs, &dirty);
	spin_unlock(&cur_trans->dirty_bgs_lock);

3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514
again:
	/*
	 * make sure all the block groups on our dirty list actually
	 * exist
	 */
	btrfs_create_pending_block_groups(trans, root);

	if (!path) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	}

	/*
	 * cache_write_mutex is here only to save us from balance or automatic
	 * removal of empty block groups deleting this block group while we are
	 * writing out the cache
	 */
	mutex_lock(&trans->transaction->cache_write_mutex);
	while (!list_empty(&dirty)) {
		cache = list_first_entry(&dirty,
					 struct btrfs_block_group_cache,
					 dirty_list);
		/*
		 * this can happen if something re-dirties a block
		 * group that is already under IO.  Just wait for it to
		 * finish and then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(root, trans, cache,
					    &cache->io_ctl, path,
					    cache->key.objectid);
			btrfs_put_block_group(cache);
		}


		/*
		 * btrfs_wait_cache_io uses the cache->dirty_list to decide
		 * if it should update the cache_state.  Don't delete
		 * until after we wait.
		 *
		 * Since we're not running in the commit critical section
		 * we need the dirty_bgs_lock to protect from update_block_group
		 */
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_del_init(&cache->dirty_list);
		spin_unlock(&cur_trans->dirty_bgs_lock);

		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(root, trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;

				/*
				 * the cache_write_mutex is protecting
				 * the io_list
				 */
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * if we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, root, path, cache);
			/*
			 * Our block group might still be attached to the list
			 * of new block groups in the transaction handle of some
			 * other task (struct btrfs_trans_handle->new_bgs). This
			 * means its block group item isn't yet in the extent
			 * tree. If this happens ignore the error, as we will
			 * try again later in the critical section of the
			 * transaction commit.
			 */
			if (ret == -ENOENT) {
				ret = 0;
				spin_lock(&cur_trans->dirty_bgs_lock);
				if (list_empty(&cache->dirty_list)) {
					list_add_tail(&cache->dirty_list,
						      &cur_trans->dirty_bgs);
					btrfs_get_block_group(cache);
				}
				spin_unlock(&cur_trans->dirty_bgs_lock);
			} else if (ret) {
				btrfs_abort_transaction(trans, root, ret);
			}
		}

		/* if it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);

		if (ret)
			break;

		/*
		 * Avoid blocking other tasks for too long. It might even save
		 * us from writing caches for block groups that are going to be
		 * removed.
		 */
		mutex_unlock(&trans->transaction->cache_write_mutex);
		mutex_lock(&trans->transaction->cache_write_mutex);
	}
	mutex_unlock(&trans->transaction->cache_write_mutex);

	/*
	 * go through delayed refs for all the stuff we've just kicked off
	 * and then loop back (just once)
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (!ret && loops == 0) {
		loops++;
		spin_lock(&cur_trans->dirty_bgs_lock);
		list_splice_init(&cur_trans->dirty_bgs, &dirty);
		/*
		 * dirty_bgs_lock protects us from concurrent block group
		 * deletes too (not just cache_write_mutex).
		 */
		if (!list_empty(&dirty)) {
			spin_unlock(&cur_trans->dirty_bgs_lock);
			goto again;
		}
		spin_unlock(&cur_trans->dirty_bgs_lock);
	}

	btrfs_free_path(path);
	return ret;
}

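/*
 * Write out the caches and block group items for any block groups still
 * dirty at commit time.  Unlike btrfs_start_dirty_block_groups(), this runs
 * in the critical section of the transaction commit, so the dirty_bgs list
 * cannot change underneath us.
 */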
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_transaction *cur_trans = trans->transaction;
	int ret = 0;
	int should_put;
	struct btrfs_path *path;
	struct list_head *io = &cur_trans->io_bgs;
	int num_started = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * We don't need the lock here since we are protected by the transaction
	 * commit.  We want to do the cache_save_setup first and then run the
	 * delayed refs to make sure we have the best chance at doing this all
	 * in one shot.
	 */
	while (!list_empty(&cur_trans->dirty_bgs)) {
		cache = list_first_entry(&cur_trans->dirty_bgs,
					 struct btrfs_block_group_cache,
					 dirty_list);

		/*
		 * this can happen if cache_save_setup re-dirties a block
		 * group that is already under IO.  Just wait for it to
		 * finish and then do it all again
		 */
		if (!list_empty(&cache->io_list)) {
			list_del_init(&cache->io_list);
			btrfs_wait_cache_io(root, trans, cache,
					    &cache->io_ctl, path,
					    cache->key.objectid);
			btrfs_put_block_group(cache);
		}

		/*
		 * don't remove from the dirty list until after we've waited
		 * on any pending IO
		 */
		list_del_init(&cache->dirty_list);
		should_put = 1;

		cache_save_setup(cache, trans, path);

		if (!ret)
			ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);

		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
			cache->io_ctl.inode = NULL;
			ret = btrfs_write_out_cache(root, trans, cache, path);
			if (ret == 0 && cache->io_ctl.inode) {
				num_started++;
				should_put = 0;
				list_add_tail(&cache->io_list, io);
			} else {
				/*
				 * if we failed to write the cache, the
				 * generation will be bad and life goes on
				 */
				ret = 0;
			}
		}
		if (!ret) {
			ret = write_one_cache_group(trans, root, path, cache);
			if (ret)
				btrfs_abort_transaction(trans, root, ret);
		}

		/* if it's not on the io list, we need to put the block group */
		if (should_put)
			btrfs_put_block_group(cache);
	}

	while (!list_empty(io)) {
		cache = list_first_entry(io, struct btrfs_block_group_cache,
					 io_list);
		list_del_init(&cache->io_list);
		btrfs_wait_cache_io(root, trans, cache,
				    &cache->io_ctl, path, cache->key.objectid);
		btrfs_put_block_group(cache);
	}

	btrfs_free_path(path);
	return ret;
}

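/*
 * Returns 1 if the block group containing @bytenr is read-only (or cannot be
 * found), 0 otherwise.
 */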
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

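/*
 * Human-readable name for a space_info's flags, used as the name of its
 * sysfs kobject below.
 */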
static const char *alloc_name(u64 flags)
{
	switch (flags) {
	case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
		return "mixed";
	case BTRFS_BLOCK_GROUP_METADATA:
		return "metadata";
	case BTRFS_BLOCK_GROUP_DATA:
		return "data";
	case BTRFS_BLOCK_GROUP_SYSTEM:
		return "system";
	default:
		WARN_ON(1);
		return "invalid-combination";
	};
}

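/*
 * Account @total_bytes and @bytes_used to the space_info matching @flags,
 * creating (and registering in sysfs) the space_info if it doesn't exist yet.
 */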
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;
	int ret;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		if (total_bytes > 0)
			found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
	if (ret) {
		kfree(found);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->max_extent_size = 0;
	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
	found->chunk_alloc = 0;
	found->flush = 0;
	init_waitqueue_head(&found->wait);
	INIT_LIST_HEAD(&found->ro_bgs);

	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
				    info->space_info_kobj, "%s",
				    alloc_name(found->flags));
	if (ret) {
		kfree(found);
		return ret;
	}

	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = found;

	return ret;
}

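/* Record the extended profile bits of @flags as available for allocation. */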
static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress
 *
 * should be called with either volume_mutex or balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format.  If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	u64 num_devices = root->fs_info->fs_devices->rw_devices;
	u64 target;
	u64 raid_type;
	u64 allowed = 0;

	/*
	 * see if restripe for this chunk_type is in progress, if so
	 * try to reduce to the target profile
	 */
	spin_lock(&root->fs_info->balance_lock);
	target = get_restripe_target(root->fs_info, flags);
	if (target) {
		/* pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&root->fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&root->fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
			allowed |= btrfs_raid_group[raid_type];
	}
	allowed &= flags;

	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
		allowed = BTRFS_BLOCK_GROUP_RAID6;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
		allowed = BTRFS_BLOCK_GROUP_RAID5;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
		allowed = BTRFS_BLOCK_GROUP_RAID10;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
		allowed = BTRFS_BLOCK_GROUP_RAID1;
	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
		allowed = BTRFS_BLOCK_GROUP_RAID0;

	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;

	return extended_to_chunk(flags | allowed);
}

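/*
 * Expand @orig_flags with the profile bits currently available for that
 * block group type, then reduce the result to a profile we can actually
 * allocate with.
 */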
static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
{
	unsigned seq;
	u64 flags;

	do {
		flags = orig_flags;
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= root->fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= root->fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;
	u64 ret;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	ret = get_alloc_profile(root, flags);
	return ret;
}

/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 used;
	int ret = 0;
	int need_commit = 2;
	int have_pinned_space;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, root->sectorsize);

	if (btrfs_is_free_space_inode(inode)) {
		need_commit = 0;
		ASSERT(current->journal_info);
	}

	data_sinfo = fs_info->data_sinfo;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			/*
			 * It is ugly that we don't call nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context, the common join transaction
			 * just increases the counter of the current
			 * transaction handle and doesn't try to acquire the
			 * trans_lock of the fs.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else {
					have_pinned_space = 1;
					goto commit_trans;
				}
			}

			if (!data_sinfo)
				data_sinfo = fs_info->data_sinfo;

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation, and no chunk was removed in the current
		 * transaction, don't bother committing the transaction.
		 */
		have_pinned_space = percpu_counter_compare(
			&data_sinfo->total_bytes_pinned,
			used + bytes - data_sinfo->total_bytes);
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (need_commit &&
		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
			need_commit--;

			if (need_commit > 0)
				btrfs_wait_ordered_roots(fs_info, -1);

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			if (have_pinned_space >= 0 ||
			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
				     &trans->transaction->flags) ||
			    need_commit > 0) {
				ret = btrfs_commit_transaction(trans, root);
				if (ret)
					return ret;
				/*
				 * make sure that all running delayed iput are
				 * done
				 */
				down_write(&root->fs_info->delayed_iput_sem);
				up_write(&root->fs_info->delayed_iput_sem);
				goto again;
			} else {
				btrfs_end_transaction(trans, root);
			}
		}

		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	ret = btrfs_qgroup_reserve(root, write_bytes);
	if (ret)
		goto out;
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
out:
	spin_unlock(&data_sinfo->lock);

	return ret;
}

/*
 * Called if we need to clear a data reservation for this inode.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, root->sectorsize);

	data_sinfo = root->fs_info->data_sinfo;
	spin_lock(&data_sinfo->lock);
	WARN_ON(data_sinfo->bytes_may_use < bytes);
	data_sinfo->bytes_may_use -= bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, bytes, 0);
	spin_unlock(&data_sinfo->lock);
}

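/*
 * Force a chunk allocation for all metadata space_infos the next time
 * do_chunk_alloc() is called for them.
 */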
static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

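/*
 * Decide whether a new chunk should be allocated for @sinfo, given the
 * force level passed down from do_chunk_alloc().
 */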
static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, int force)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space.  Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
		num_allocated += calc_global_rsv_need_space(global_rsv);

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
		thresh = max_t(u64, 64 * 1024 * 1024,
			       div_factor_fine(thresh, 1));

		if (num_bytes - num_allocated < thresh)
			return 1;
	}

	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
		return 0;
	return 1;
}

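/*
 * Return the number of devices a chunk of the given profile @type touches,
 * i.e. how many device items may need updating when it is allocated or
 * removed.
 */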
static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
{
	u64 num_dev;

	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		num_dev = root->fs_info->fs_devices->rw_devices;
	else if (type & BTRFS_BLOCK_GROUP_RAID1)
		num_dev = 2;
	else
		num_dev = 1;	/* DUP or single */

	return num_dev;
}

/*
 * Reserve space in the system space_info for allocating or removing a chunk
 * of profile @type.  The caller must hold the chunk mutex.
 */
void check_system_chunk(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			u64 type)
{
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;
	int ret = 0;
	u64 num_devs;

	/*
	 * Needed because we can end up allocating a system chunk and for an
	 * atomic and race free space reservation in the chunk block reserve.
	 */
	ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));

	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
		info->bytes_reserved - info->bytes_readonly -
		info->bytes_may_use;
	spin_unlock(&info->lock);

	num_devs = get_profile_num_devs(root, type);

	/* num_devs device items to update and 1 chunk item to add or remove */
	thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
		btrfs_calc_trans_metadata_size(root, 1);

	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
			left, thresh, type);
		dump_space_info(info, 0, 0);
	}

	if (left < thresh) {
		u64 flags;

		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
		/*
		 * Ignore failure to create system chunk. We might end up not
		 * needing it, as we might not need to COW all nodes/leafs from
		 * the paths we visit in the chunk tree (they were already COWed
		 * or created in the current transaction for example).
		 */
		ret = btrfs_alloc_chunk(trans, root, flags);
	}

	if (!ret) {
		ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
					  &root->fs_info->chunk_block_rsv,
					  thresh, BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			trans->chunk_bytes_reserved += thresh;
	}
}

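/*
 * Allocate a new chunk for @flags if should_alloc_chunk() agrees.  Returns
 * 1 if a chunk was allocated, 0 if none was needed, and a negative errno
 * (including -ENOSPC) on failure.
 */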
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int wait_for_alloc = 0;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret); /* -ENOMEM */
	}
	BUG_ON(!space_info); /* Logic error */

again:
	spin_lock(&space_info->lock);
	if (force < space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		if (should_alloc_chunk(extent_root, space_info, force))
			ret = -ENOSPC;
		else
			ret = 0;
		spin_unlock(&space_info->lock);
		return ret;
	}

	if (!should_alloc_chunk(extent_root, space_info, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, extent_root, flags);

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0 && ret != -ENOSPC)
		goto out;
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	/*
	 * When we allocate a new chunk we reserve space in the chunk block
	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
	 * add new nodes/leafs to it if we end up needing to do it when
	 * inserting the chunk item and updating device items as part of the
	 * second phase of chunk allocation, performed by
	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
	 * large number of new block groups to create in our transaction
	 * handle's new_bgs list to avoid exhausting the chunk block reserve
	 * in extreme cases - like having a single transaction create many new
	 * block groups when starting to write out the free space caches of all
	 * the block groups that were made dirty during the lifetime of the
	 * transaction.
	 */
	if (trans->can_flush_pending_bgs &&
	    trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
		btrfs_create_pending_block_groups(trans, trans->root);
		btrfs_trans_release_chunk_metadata(trans);
	}
	return ret;
}

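/*
 * Return 1 if it is safe to hand out @bytes of metadata reservations beyond
 * what the space_info currently has, based on the free (unallocated) device
 * space and how aggressively we are allowed to flush.
 */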
static int can_overcommit(struct btrfs_root *root,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 profile = btrfs_get_alloc_profile(root, 0);
	u64 space_size;
	u64 avail;
	u64 used;

	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly;

	/*
	 * We only want to allow over committing if we have lots of actual space
	 * free, but if we don't have enough space to handle the global reserve
	 * space then we could end up having a real enospc problem when trying
	 * to allocate a chunk or some other such important allocation.
	 */
	spin_lock(&global_rsv->lock);
	space_size = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + space_size >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	spin_lock(&root->fs_info->free_chunk_lock);
	avail = root->fs_info->free_chunk_space;
	spin_unlock(&root->fs_info->free_chunk_lock);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math
	 */
	if (profile & (BTRFS_BLOCK_GROUP_DUP |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID10))
		avail >>= 1;

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space.  If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
					 unsigned long nr_pages, int nr_items)
{
	struct super_block *sb = root->fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * even though we don't acquire the ->s_umount mutex, because
		 * the filesystem guarantees that the delalloc inode list is
		 * empty once it is read-only (all dirty pages have been
		 * written to disk).
		 */
		btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
		if (!current->journal_info)
			btrfs_wait_ordered_roots(root->fs_info, nr_items);
	}
}

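/* Convert a byte count into a number of metadata items to reclaim. */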
static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
{
	u64 bytes;
	int nr;

	bytes = btrfs_calc_trans_metadata_size(root, 1);
	nr = (int)div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	(256 * 1024)

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
			    bool wait_ordered)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 max_reclaim;
	long time_left;
	unsigned long nr_pages;
	int loops;
	int items;
	enum btrfs_reserve_flush_enum flush;

	/* Calculate the number of pages we need to flush for this reservation */
	items = calc_reclaim_items_nr(root, to_reclaim);
	to_reclaim = items * EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	block_rsv = &root->fs_info->delalloc_block_rsv;
	space_info = block_rsv->space_info;

	delalloc_bytes = percpu_counter_sum_positive(
						&root->fs_info->delalloc_bytes);
	if (delalloc_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_ordered_roots(root->fs_info, items);
		return;
	}

	loops = 0;
	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
		/*
		 * We need to wait for the async pages to actually start before
		 * we do anything.
		 */
		max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
		if (!max_reclaim)
			goto skip_async;

		if (max_reclaim <= nr_pages)
			max_reclaim = 0;
		else
			max_reclaim -= nr_pages;

		wait_event(root->fs_info->async_submit_wait,
			   atomic_read(&root->fs_info->async_delalloc_pages) <=
			   (int)max_reclaim);
skip_async:
		if (!trans)
			flush = BTRFS_RESERVE_FLUSH_ALL;
		else
			flush = BTRFS_RESERVE_NO_FLUSH;
		spin_lock(&space_info->lock);
		if (can_overcommit(root, space_info, orig, flush)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(root->fs_info, items);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&root->fs_info->delalloc_bytes);
	}
}

/**
 * may_commit_transaction - possibly commit the transaction if it's OK to
 * @root - the root we're allocating for
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_root *root,
				  struct btrfs_space_info *space_info,
				  u64 bytes, int force)
{
	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	if (force)
		goto commit;

	/* See if there is enough pinned space to make this reservation */
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&delayed_rsv->lock);
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes - delayed_rsv->size) >= 0) {
		spin_unlock(&delayed_rsv->lock);
		return -ENOSPC;
	}
	spin_unlock(&delayed_rsv->lock);

commit:
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans, root);
}

enum flush_state {
	FLUSH_DELAYED_ITEMS_NR	=	1,
	FLUSH_DELAYED_ITEMS	=	2,
	FLUSH_DELALLOC		=	3,
	FLUSH_DELALLOC_WAIT	=	4,
	ALLOC_CHUNK		=	5,
	COMMIT_TRANS		=	6,
};

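/*
 * Run one state of the space flushing state machine, trying to make
 * @orig_bytes of space available in @space_info.
 */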
static int flush_space(struct btrfs_root *root,
		       struct btrfs_space_info *space_info, u64 num_bytes,
		       u64 orig_bytes, int state)
{
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, root, nr);
		btrfs_end_transaction(trans, root);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(root, num_bytes * 2, orig_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     btrfs_get_alloc_profile(root, 0),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans, root);
		if (ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	return ret;
}

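/*
 * Work out how much metadata space the async reclaim worker should try to
 * free, returning 0 if reservations can already be satisfied by
 * overcommitting.
 */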
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 expected;
	u64 to_reclaim;

	to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
				16 * 1024 * 1024);
	spin_lock(&space_info->lock);
	if (can_overcommit(root, space_info, to_reclaim,
			   BTRFS_RESERVE_FLUSH_ALL)) {
		to_reclaim = 0;
		goto out;
	}

	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_pinned + space_info->bytes_readonly +
	       space_info->bytes_may_use;
	if (can_overcommit(root, space_info, 1024 * 1024,
			   BTRFS_RESERVE_FLUSH_ALL))
		expected = div_factor_fine(space_info->total_bytes, 95);
	else
		expected = div_factor_fine(space_info->total_bytes, 90);

	if (used > expected)
		to_reclaim = used - expected;
	else
		to_reclaim = 0;
	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
				     space_info->bytes_reserved);
out:
	spin_unlock(&space_info->lock);

	return to_reclaim;
}

static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
					struct btrfs_fs_info *fs_info, u64 used)
{
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);

	/* If we're just plain full then async reclaim just slows us down. */
	if (space_info->bytes_used >= thresh)
		return 0;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
				       struct btrfs_fs_info *fs_info,
				       int flush_state)
{
	u64 used;

	spin_lock(&space_info->lock);
	/*
	 * We run out of space and have not got any free space via flush_space,
	 * so don't bother doing async reclaim.
	 */
	if (flush_state > COMMIT_TRANS && space_info->full) {
		spin_unlock(&space_info->lock);
		return 0;
	}

	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_pinned + space_info->bytes_readonly +
	       space_info->bytes_may_use;
	if (need_do_async_reclaim(space_info, fs_info, used)) {
		spin_unlock(&space_info->lock);
		return 1;
	}
	spin_unlock(&space_info->lock);

	return 0;
}

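/*
 * Background worker that walks the flush states until enough metadata space
 * has been reclaimed or flushing stops making progress.
 */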
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	int flush_state;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
						      space_info);
	if (!to_reclaim)
		return;

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info->fs_root, space_info, to_reclaim,
			    to_reclaim, flush_state);
		flush_state++;
		if (!btrfs_need_do_async_reclaim(space_info, fs_info,
						 flush_state))
			return;
	} while (flush_state < COMMIT_TRANS);
}

void btrfs_init_async_reclaim_work(struct work_struct *work)
{
	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
}

/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 used;
	u64 num_bytes = orig_bytes;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;
	int ret = 0;
	bool flushing = false;

again:
	ret = 0;
	spin_lock(&space_info->lock);
	/*
	 * We only want to wait if somebody other than us is flushing and we
	 * are actually allowed to flush all things.
	 */
	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
	       space_info->flush) {
		spin_unlock(&space_info->lock);
		/*
		 * If we have a trans handle we can't wait because the flusher
		 * may have to commit the transaction, which would mean we would
		 * deadlock since we are waiting for the flusher to finish, but
		 * hold the current transaction open.
		 */
		if (current->journal_info)
			return -EAGAIN;
		ret = wait_event_killable(space_info->wait, !space_info->flush);
		/* Must have been killed, return */
		if (ret)
			return -EINTR;

		spin_lock(&space_info->lock);
	}

	ret = -ENOSPC;
	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly +
		space_info->bytes_may_use;

	/*
	 * The idea here is that if we've not already over-reserved the block
	 * group then we can go ahead and save our reservation first and then
	 * start flushing if we need to.  Otherwise if we've already
	 * overcommitted lets start flushing stuff first and then come back
	 * and try to make our reservation.
	 */
	if (used <= space_info->total_bytes) {
		if (used + orig_bytes <= space_info->total_bytes) {
			space_info->bytes_may_use += orig_bytes;
			trace_btrfs_space_reservation(root->fs_info,
				"space_info", space_info->flags, orig_bytes, 1);
			ret = 0;
		} else {
			/*
			 * Ok set num_bytes to orig_bytes since we aren't
			 * overcommitted, this way we only try and reclaim what
			 * we need.
			 */
			num_bytes = orig_bytes;
		}
	} else {
		/*
		 * Ok we're over committed, set num_bytes to the overcommitted
		 * amount plus the amount of bytes that we need for this
		 * reservation.
		 */
		num_bytes = used - space_info->total_bytes +
			(orig_bytes * 2);
	}

	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(root->fs_info, "space_info",
					      space_info->flags, orig_bytes,
					      1);
		ret = 0;
	}

	/*
	 * Couldn't make our reservation, save our place so while we're trying
	 * to reclaim space we can actually use it instead of somebody else
	 * stealing it from us.
	 *
	 * We make the other tasks wait for the flush only when we can flush
	 * all things.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		flushing = true;
		space_info->flush = 1;
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!root->fs_info->log_root_recovering &&
		    need_do_async_reclaim(space_info, root->fs_info, used) &&
		    !work_busy(&root->fs_info->async_reclaim_work))
			queue_work(system_unbound_wq,
				   &root->fs_info->async_reclaim_work);
	}
	spin_unlock(&space_info->lock);

	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		goto out;

	ret = flush_space(root, space_info, num_bytes, orig_bytes,
			  flush_state);
	flush_state++;

	/*
	 * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
	 * would happen. So skip delalloc flush.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
	    (flush_state == FLUSH_DELALLOC ||
	     flush_state == FLUSH_DELALLOC_WAIT))
		flush_state = ALLOC_CHUNK;

	if (!ret)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
		 flush_state < COMMIT_TRANS)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		 flush_state <= COMMIT_TRANS)
		goto again;

out:
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		struct btrfs_block_rsv *global_rsv =
			&root->fs_info->global_block_rsv;

		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC)
		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      space_info->flags, orig_bytes, 1);
	if (flushing) {
		spin_lock(&space_info->lock);
		space_info->flush = 0;
		wake_up_all(&space_info->wait);
		spin_unlock(&space_info->lock);
	}
	return ret;
}

static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv = NULL;

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    (root == root->fs_info->csum_root && trans->adding_csums) ||
	     (root == root->fs_info->uuid_root))
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}

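/* Take @num_bytes out of @block_rsv's reserved space, if it has that much. */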
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;
	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

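/*
 * Migrate @num_bytes from the global block reserve into @dest, but only if
 * that leaves the global reserve with at least @min_factor tenths of its
 * size still reserved.
 */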
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, 1);
	return 0;
}

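/*
 * Shrink @block_rsv by @num_bytes and give any excess reservation back to
 * @dest (if it has room) or to the space_info's bytes_may_use.
 */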
static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes) {
			spin_lock(&space_info->lock);
			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
					space_info->flags, num_bytes, 0);
			spin_unlock(&space_info->lock);
		}
	}
}

static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
					      unsigned short type)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv, type);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);
	return block_rsv;
}

void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	kfree(rsv);
}

void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
{
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	if (global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
				num_bytes);
}

/*
 * helper to calculate the size of the global block reservation.
 * the desired value is the sum of the space used by the extent tree,
 * the checksum tree and the root tree.
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(fs_info->super_copy);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
		data_used = 0;
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
}
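/*
 * Recompute the target size of the global reservation (capped at 512M) and
 * grow ->reserved towards it with whatever the metadata space info can
 * spare, keeping bytes_may_use in step.  Any excess over ->size is handed
 * back to the space info.
 */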
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly +
		    sinfo->bytes_may_use;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_may_use += num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
				      sinfo->flags, num_bytes, 1);
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
				      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

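/*
 * Wire each fs-wide block reservation to its space info and point the main
 * trees' block_rsv at the global reservation (the chunk tree gets the chunk
 * reservation).
 */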
static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

5299
static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
J
Josef Bacik 已提交
5300
{
J
Josef Bacik 已提交
5301 5302
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
5303 5304 5305 5306 5307 5308
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5309 5310
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5311 5312
}

5313 5314
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
J
Josef Bacik 已提交
5315
{
5316 5317 5318
	if (!trans->block_rsv)
		return;

5319 5320
	if (!trans->bytes_reserved)
		return;
J
Josef Bacik 已提交
5321

5322
	trace_btrfs_space_reservation(root->fs_info, "transaction",
5323
				      trans->transid, trans->bytes_reserved, 0);
5324
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5325 5326
	trans->bytes_reserved = 0;
}
J
Josef Bacik 已提交
5327

/*
 * To be called after all the new block groups attached to the transaction
 * handle have been created (btrfs_create_pending_block_groups()).
 */
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->root->fs_info;

	if (!trans->chunk_bytes_reserved)
		return;

	WARN_ON_ONCE(!list_empty(&trans->new_bgs));

	block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
				trans->chunk_bytes_reserved);
	trans->chunk_bytes_reserved = 0;
}

/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 0);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion.  Those operations are different from the common
 * file/directory operations: they change two fs/file trees and the root
 * tree, and the number of items that the qgroup reserves is different from
 * the free space reservation.  So we can not use the space reservation
 * mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved,
				     bool use_global_rsv)
{
	u64 num_bytes;
	int ret;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (root->fs_info->quota_enabled) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * root->nodesize;
		ret = btrfs_qgroup_reserve(root, num_bytes);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}

	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(root, items);
	rsv->space_info = __find_space_info(root->fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);

	if (ret) {
		if (*qgroup_reserved)
			btrfs_qgroup_free(root, *qgroup_reserved);
	}

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv,
				      u64 qgroup_reserved)
{
	btrfs_block_rsv_release(root, rsv, (u64)-1);
}

/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 * @num_bytes: the number of bytes we're releasing.
 *
 * This is called when we are freeing up an outstanding extent, either after
 * an error or after an extent is written.  This will return the number of
 * reserved extents that need to be freed.  This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;
	unsigned num_extents = 0;

	num_extents = (unsigned)div64_u64(num_bytes +
					  BTRFS_MAX_EXTENT_SIZE - 1,
					  BTRFS_MAX_EXTENT_SIZE);
	ASSERT(num_extents);
	ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
	BTRFS_I(inode)->outstanding_extents -= num_extents;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			       &BTRFS_I(inode)->runtime_flags))
		drop_inode_space = 1;

	/*
	 * If we have at least as many outstanding extents as we have
	 * reserved then we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}

/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *	reserved/freed for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed.  We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure
 * out how many checksums will be required.  If we are adding bytes then the
 * number may go up and we will return the number of additional bytes that
 * must be reserved.  If it is going down we will return the number of bytes
 * that must be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 old_csums, num_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
}
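/*
 * Reserve metadata space for num_bytes of delalloc data: the worst-case
 * number of new outstanding extent items, the checksum items they imply,
 * and (once per inode) one item for the inode update at IO completion time.
 */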
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
	u64 to_reserve = 0;
	u64 csum_bytes;
	unsigned nr_extents = 0;
	int extra_reserve = 0;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;
	u64 to_free = 0;
	unsigned dropped;

	/*
	 * If we are a free space inode we need to not flush since we will be
	 * in the middle of a transaction commit.  We also don't need the
	 * delalloc mutex since we won't race with anybody.  We need this
	 * mostly to make lockdep shut its filthy mouth.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	}

	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(root->fs_info))
		schedule_timeout(1);

	if (delalloc_lock)
		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, root->sectorsize);

	spin_lock(&BTRFS_I(inode)->lock);
	nr_extents = (unsigned)div64_u64(num_bytes +
					 BTRFS_MAX_EXTENT_SIZE - 1,
					 BTRFS_MAX_EXTENT_SIZE);
	BTRFS_I(inode)->outstanding_extents += nr_extents;
	nr_extents = 0;

	if (BTRFS_I(inode)->outstanding_extents >
	    BTRFS_I(inode)->reserved_extents)
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;

	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
		      &BTRFS_I(inode)->runtime_flags)) {
		nr_extents++;
		extra_reserve = 1;
	}

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
	csum_bytes = BTRFS_I(inode)->csum_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, nr_extents * root->nodesize);
		if (ret)
			goto out_fail;
	}

	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
	if (unlikely(ret)) {
		if (root->fs_info->quota_enabled)
			btrfs_qgroup_free(root, nr_extents * root->nodesize);
		goto out_fail;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			&BTRFS_I(inode)->runtime_flags);
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);

	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);

	if (to_reserve)
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_reserve, 1);
	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;

out_fail:
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode, num_bytes);
	/*
	 * If the inode's csum_bytes is the same as the original
	 * csum_bytes then we know we haven't raced with any free()ers
	 * so we can just reduce our inode's csum bytes and carry on.
	 */
	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
		calc_csum_metadata_size(inode, num_bytes, 0);
	} else {
		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
		u64 bytes;

		/*
		 * This is tricky, but first we need to figure out how much we
		 * freed from any free-ers that occurred during this
		 * reservation, so we reset ->csum_bytes to the csum_bytes
		 * before we dropped our lock, and then call the free for the
		 * number of bytes that were freed while we were trying our
		 * reservation.
		 */
		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
		BTRFS_I(inode)->csum_bytes = csum_bytes;
		to_free = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now we need to see how much we would have freed had we not
		 * been making this reservation and our ->csum_bytes were not
		 * artificially inflated.
		 */
		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
		bytes = csum_bytes - orig_csum_bytes;
		bytes = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now reset ->csum_bytes to what it should be.  If bytes is
		 * more than to_free then we would have freed more space had we
		 * not had an artificially high ->csum_bytes, so we need to
		 * free the remainder.  If bytes is the same or less then we
		 * don't need to do anything, the other free-ers did the
		 * correct thing.
		 */
		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
		if (bytes > to_free)
			to_free = bytes - to_free;
		else
			to_free = 0;
	}
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	if (to_free) {
		btrfs_block_rsv_release(root, block_rsv, to_free);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_free, 0);
	}
	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
	return ret;
}

/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode.  This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode, num_bytes);

	if (num_bytes)
		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	if (btrfs_test_is_dummy_root(root))
		return;

	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), to_free, 0);

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}

/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on the number of
 *   outstanding extents and how many csums will be needed
 * o add to the inode's ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes, num_bytes);
	if (ret)
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}

/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore, for example when there is an error or when we insert an inline
 * extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
}

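/*
 * Propagate an allocation or free of num_bytes at bytenr into the usage
 * counters: the superblock's bytes_used, the block group item and its space
 * info.  On a free the range is also pinned until commit, a block group that
 * drops to zero used bytes is queued on the unused_bgs list, and the block
 * group is marked dirty either way.
 */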
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 bytenr,
			      u64 num_bytes, int alloc)
{
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	int factor;

	/* block accounting for super block */
	spin_lock(&info->delalloc_root_lock);
	old_val = btrfs_super_bytes_used(info->super_copy);
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
	btrfs_set_super_bytes_used(info->super_copy, old_val);
	spin_unlock(&info->delalloc_root_lock);

	while (total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache)
			return -ENOENT;
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
			cache_block_group(cache, 1);

		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);

		spin_lock(&cache->space_info->lock);
		spin_lock(&cache->lock);

		if (btrfs_test_opt(root, SPACE_CACHE) &&
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

		old_val = btrfs_block_group_used(&cache->item);
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
		} else {
			old_val -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			cache->space_info->disk_used -= num_bytes * factor;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);

			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
			/*
			 * No longer have used bytes in this block group, queue
			 * it for deletion.
			 */
			if (old_val == 0) {
				spin_lock(&info->unused_bgs_lock);
				if (list_empty(&cache->bg_list)) {
					btrfs_get_block_group(cache);
					list_add_tail(&cache->bg_list,
						      &info->unused_bgs);
				}
				spin_unlock(&info->unused_bgs_lock);
			}
		}

		spin_lock(&trans->transaction->dirty_bgs_lock);
		if (list_empty(&cache->dirty_list)) {
			list_add_tail(&cache->dirty_list,
				      &trans->transaction->dirty_bgs);
			trans->transaction->num_dirty_bgs++;
			btrfs_get_block_group(cache);
		}
		spin_unlock(&trans->transaction->dirty_bgs_lock);

		btrfs_put_block_group(cache);
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
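/*
 * Return the logical offset of the first allocated byte, using the cached
 * fs_info->first_logical_byte when it is valid and otherwise falling back
 * to the first block group at or after search_start.
 */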
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
	struct btrfs_block_group_cache *cache;
	u64 bytenr;

	spin_lock(&root->fs_info->block_group_cache_lock);
	bytenr = root->fs_info->first_logical_byte;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
		return 0;

	bytenr = cache->key.objectid;
	btrfs_put_block_group(cache);

	return bytenr;
}

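/*
 * Account num_bytes at bytenr as pinned (moving it out of reserved when
 * @reserved is set) and mark the range dirty in the pinned_extents tree so
 * it is unpinned again at transaction commit.
 */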
static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
{
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
	if (reserved)
		trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
	return 0;
}

5913 5914 5915 5916 5917 5918 5919
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;
J
Josef Bacik 已提交
5920

5921
	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5922
	BUG_ON(!cache); /* Logic error */
5923 5924 5925 5926

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
5927 5928 5929
	return 0;
}

/*
 * this function must be called within a transaction
 */
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;
	int ret;

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!cache)
		return -EINVAL;

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
	cache_block_group(cache, 1);

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
	btrfs_put_block_group(cache);
	return ret;
}

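/*
 * During log replay, make sure a logged extent is not handed out again:
 * remove it from the free space already cached for its block group and mark
 * the part the caching thread has not reached yet as excluded.
 */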
static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(root->fs_info, start);
	if (!block_group)
		return -EINVAL;

	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}

int btrfs_exclude_logged_extents(struct btrfs_root *log,
				 struct extent_buffer *eb)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;

	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		__exclude_logged_extent(log, key.objectid, key.offset);
	}

	return 0;
}

/**
 * btrfs_update_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @reserve:	One of the reservation enums
 * @delalloc:   The blocks are allocated for the delalloc write
 *
 * This is called by the allocator when it reserves space, or by somebody who is
 * freeing space that was never actually used on disk.  For example if you
 * reserve some space for a new leaf in transaction A and before transaction A
 * commits you free that leaf, you call this with reserve set to 0 in order to
 * clear the reservation.
 *
 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting.  For data we handle the reservation through clearing the
 * delalloc bits in the io_tree.  We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
 */
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve, int delalloc)
{
	struct btrfs_space_info *space_info = cache->space_info;
	int ret = 0;

	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (reserve != RESERVE_FREE) {
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
			cache->reserved += num_bytes;
			space_info->bytes_reserved += num_bytes;
			if (reserve == RESERVE_ALLOC) {
				trace_btrfs_space_reservation(cache->fs_info,
						"space_info", space_info->flags,
						num_bytes, 0);
				space_info->bytes_may_use -= num_bytes;
			}

			if (delalloc)
				cache->delalloc_bytes += num_bytes;
		}
	} else {
		if (cache->ro)
			space_info->bytes_readonly += num_bytes;
		cache->reserved -= num_bytes;
		space_info->bytes_reserved -= num_bytes;

		if (delalloc)
			cache->delalloc_bytes -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
	return ret;
}
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;

	down_write(&fs_info->commit_root_sem);

	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
		} else {
			cache->last_byte_to_unpin = caching_ctl->progress;
		}
	}

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->commit_root_sem);

	update_global_block_rsv(fs_info);
}

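/*
 * Unpin the byte range [start, end]: hand the space back to the free space
 * cache when return_free_space is set, drop the pinned counters, and let a
 * not-yet-full global block reservation absorb part of what was released.
 */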
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
			      const bool return_free_space)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 len;
	bool readonly;

	while (start <= end) {
		readonly = false;
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			if (return_free_space)
				btrfs_add_free_space(cache, start, len);
		}

		start += len;
		space_info = cache->space_info;

		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		space_info->bytes_pinned -= len;
		space_info->max_extent_size = 0;
		percpu_counter_add(&space_info->total_bytes_pinned, -len);
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
		spin_unlock(&cache->lock);
		if (!readonly && global_rsv->space_info == space_info) {
			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				len = min(len, global_rsv->size -
					  global_rsv->reserved);
				global_rsv->reserved += len;
				space_info->bytes_may_use += len;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
			}
			spin_unlock(&global_rsv->lock);
		}
		spin_unlock(&space_info->lock);
	}

	if (cache)
		btrfs_put_block_group(cache);
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *block_group, *tmp;
	struct list_head *deleted_bgs;
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

	while (!trans->aborted) {
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY, NULL);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			break;
		}

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		unpin_extent_range(root, start, end, true);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		cond_resched();
	}

	/*
	 * Transaction is finished.  We don't need the lock anymore.  We
	 * do need to clean up the block groups in case of a transaction
	 * abort.
	 */
	deleted_bgs = &trans->transaction->deleted_bgs;
	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
		u64 trimmed = 0;

		ret = -EROFS;
		if (!trans->aborted)
			ret = btrfs_discard_extent(root,
						   block_group->key.objectid,
						   block_group->key.offset,
						   &trimmed);

		list_del_init(&block_group->bg_list);
		btrfs_put_block_group_trimming(block_group);
		btrfs_put_block_group(block_group);

		if (ret) {
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
				   "Discard failed while removing blockgroup: errno=%d %s\n",
				   ret, errstr);
		}
	}

	return 0;
}

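/*
 * Adjust the total_bytes_pinned counter of the space info that owns the
 * extent: system or metadata for tree blocks, data for file extents.
 */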
static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
			     u64 owner, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	} else {
		flags = BTRFS_BLOCK_GROUP_DATA;
	}

	space_info = __find_space_info(fs_info, flags);
	BUG_ON(!space_info); /* Logic bug */
	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
}

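/*
 * Drop refs_to_drop references to the extent recorded in @node.  When the
 * reference count hits zero, the extent item itself (plus its csum items
 * for data extents) is deleted and the block group counters are updated.
 */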
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	int is_data;
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
	int no_quota = node->no_quota;
	u32 item_size;
	u64 refs;
	u64 bytenr = node->bytenr;
	u64 num_bytes = node->num_bytes;
	int last_ref = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	if (!info->quota_enabled || !is_fstree(root_objectid))
		no_quota = 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

	if (is_data)
		skinny_metadata = 0;

	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
	if (ret == 0) {
		extent_slot = path->slots[0];
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
			if (key.objectid != bytenr)
				break;
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
				found_extent = 1;
				break;
			}
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
			if (path->slots[0] - extent_slot > 5)
				break;
			extent_slot--;
		}
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
		if (!found_extent) {
			BUG_ON(iref);
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			btrfs_release_path(path);
			path->leave_spinning = 1;

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.objectid = bytenr;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

			if (ret) {
				btrfs_err(info, "umm, got %d back from search, was looking for %llu",
					ret, bytenr);
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
			}
			if (ret < 0) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
			extent_slot = path->slots[0];
		}
	} else if (WARN_ON(ret == -ENOENT)) {
		btrfs_print_leaf(extent_root, path->nodes[0]);
		btrfs_err(info,
			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
			bytenr, parent, root_objectid, owner_objectid,
			owner_offset);
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	} else {
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		btrfs_release_path(path);
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
				ret, bytenr);
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, extent_slot,
			    struct btrfs_extent_item);
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}

	refs = btrfs_extent_refs(leaf, ei);
	if (refs < refs_to_drop) {
		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
			  "for bytenr %Lu", refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}
	refs -= refs_to_drop;

	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
		 */
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data, &last_ref);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}
		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
				 root_objectid);
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
		}

		last_ref = 1;
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
		btrfs_release_path(path);

		if (is_data) {
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
		}

		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
	}
	btrfs_release_path(path);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * when we free a block, it is possible (and likely) that we free the last
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out_delayed_unlock;

	spin_lock(&head->lock);
	if (!list_empty(&head->ref_list))
		goto out;

	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
		btrfs_free_delayed_extent_op(head->extent_op);
		head->extent_op = NULL;
	}

	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->href_node, &delayed_refs->href_root);

	atomic_dec(&delayed_refs->num_entries);

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
	head->processing = 0;
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);

	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
	btrfs_put_delayed_ref(&head->node);
	return ret;
out:
	spin_unlock(&head->lock);

out_delayed_unlock:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

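/*
 * Drop the last reference on a tree block.  Blocks allocated in the current
 * transaction that were never written to disk can be returned to the free
 * space cache immediately; everything else stays pinned (directly or via a
 * delayed ref) until the transaction commits.
 */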
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   u64 parent, int last_ref)
{
	int pin = 1;
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					buf->start, buf->len,
					parent, root->root_key.objectid,
					btrfs_header_level(buf),
					BTRFS_DROP_DELAYED_REF, NULL, 0);
		BUG_ON(ret); /* -ENOMEM */
	}

	if (!last_ref)
		return;

	if (btrfs_header_generation(buf) == trans->transid) {
		struct btrfs_block_group_cache *cache;

		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
				goto out;
		}

		cache = btrfs_lookup_block_group(root->fs_info, buf->start);

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
			btrfs_put_block_group(cache);
			goto out;
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
		btrfs_put_block_group(cache);
		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
		pin = 0;
	}
out:
	if (pin)
		add_pinned_bytes(root->fs_info, buf->len,
				 btrfs_header_level(buf),
				 root->root_key.objectid);

	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
}

/* Can return -ENOMEM */
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset, int no_quota)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (btrfs_test_is_dummy_root(root))
		return 0;

	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);

	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
		/* unlocks the pinned mutex */
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
		ret = 0;
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_DROP_DELAYED_REF, NULL, no_quota);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						num_bytes,
						parent, root_objectid, owner,
						offset, BTRFS_DROP_DELAYED_REF,
						NULL, no_quota);
	}
	return ret;
}

/*
 * when we wait for progress in the block group caching, it's because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
 */
static noinline void
J
Josef Bacik 已提交
6729 6730 6731
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
6732
	struct btrfs_caching_control *caching_ctl;
J
Josef Bacik 已提交
6733

6734 6735
	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
6736
		return;
J
Josef Bacik 已提交
6737

6738
	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6739
		   (cache->free_space_ctl->free_space >= num_bytes));
6740 6741 6742 6743 6744 6745 6746 6747

	put_caching_control(caching_ctl);
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
6748
	int ret = 0;
6749 6750 6751

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
6752
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6753 6754

	wait_event(caching_ctl->wait, block_group_cache_done(cache));
6755 6756
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
6757
	put_caching_control(caching_ctl);
6758
	return ret;
J
Josef Bacik 已提交
6759 6760
}

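/*
 * Map block group profile flags to the btrfs_raid_types index used by the
 * allocator and the tables below.
 */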
int __get_raid_index(u64 flags)
{
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
		return BTRFS_RAID_RAID10;
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
		return BTRFS_RAID_RAID1;
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
		return BTRFS_RAID_DUP;
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
		return BTRFS_RAID_RAID0;
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
		return BTRFS_RAID_RAID5;
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
		return BTRFS_RAID_RAID6;

	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}

int get_block_group_index(struct btrfs_block_group_cache *cache)
{
	return __get_raid_index(cache->flags);
}

static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10]	= "raid10",
	[BTRFS_RAID_RAID1]	= "raid1",
	[BTRFS_RAID_DUP]	= "dup",
	[BTRFS_RAID_RAID0]	= "raid0",
	[BTRFS_RAID_SINGLE]	= "single",
	[BTRFS_RAID_RAID5]	= "raid5",
	[BTRFS_RAID_RAID6]	= "raid6",
};

static const char *get_raid_name(enum btrfs_raid_types type)
{
	if (type >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_type_names[type];
}

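/*
 * Example of how the three helpers above combine (hypothetical
 * snippet): map a block group to its allocation list index, then to a
 * printable profile name.
 *
 *	int index = get_block_group_index(cache);
 *	const char *name = get_raid_name(index);
 *	list_for_each_entry(cache, &space_info->block_groups[index], list)
 *		...
 */
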
enum btrfs_loop_type {
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
};

static inline void
btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static inline void
btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
		       int delalloc)
{
	btrfs_get_block_group(cache);
	if (delalloc)
		down_read(&cache->data_rwsem);
}

static struct btrfs_block_group_cache *
btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
		   struct btrfs_free_cluster *cluster,
		   int delalloc)
{
	struct btrfs_block_group_cache *used_bg;
	bool locked = false;
again:
	spin_lock(&cluster->refill_lock);
	if (locked) {
		if (used_bg == cluster->block_group)
			return used_bg;

		up_read(&used_bg->data_rwsem);
		btrfs_put_block_group(used_bg);
	}

	used_bg = cluster->block_group;
	if (!used_bg)
		return NULL;

	if (used_bg == block_group)
		return used_bg;

	btrfs_get_block_group(used_bg);

	if (!delalloc)
		return used_bg;

	if (down_read_trylock(&used_bg->data_rwsem))
		return used_bg;

	spin_unlock(&cluster->refill_lock);
	down_read(&used_bg->data_rwsem);
	locked = true;
	goto again;
}

static inline void
btrfs_release_block_group(struct btrfs_block_group_cache *cache,
			 int delalloc)
{
	if (delalloc)
		up_read(&cache->data_rwsem);
	btrfs_put_block_group(cache);
}

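/*
 * Sketch of the intended pairing for the helpers above (drawn from
 * their use in find_free_extent() below): grab and release are called
 * with the same delalloc value, so data_rwsem is only held across the
 * allocation attempt for delalloc reservations.
 *
 *	btrfs_grab_block_group(block_group, delalloc);
 *	offset = btrfs_find_space_for_alloc(block_group, ...);
 *	...
 *	btrfs_release_block_group(block_group, delalloc);
 */
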
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == start position
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == the size of the hole.
 * Any available blocks before search_start are skipped.
 *
 * If there is no suitable free space, we will record the max size of
 * the free space extent we found during the search.
 */
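/*
 * Worked example of a successful search (hypothetical numbers): a 1MiB
 * request satisfied at logical byte 136314880 leaves the caller with
 *
 *	ins->objectid == 136314880	(start of the reserved hole)
 *	ins->type     == BTRFS_EXTENT_ITEM_KEY
 *	ins->offset   == 1048576	(size of the reservation)
 */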
static noinline int find_free_extent(struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 flags, int delalloc)
{
	int ret = 0;
	struct btrfs_root *root = orig_root->fs_info->extent_root;
	struct btrfs_free_cluster *last_ptr = NULL;
	struct btrfs_block_group_cache *block_group = NULL;
	u64 search_start = 0;
	u64 max_extent_size = 0;
	int empty_cluster = 2 * 1024 * 1024;
	struct btrfs_space_info *space_info;
	int loop = 0;
	int index = __get_raid_index(flags);
	int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
	bool failed_cluster_refill = false;
	bool failed_alloc = false;
	bool use_cluster = true;
	bool have_caching_bg = false;

	WARN_ON(num_bytes < root->sectorsize);
	ins->type = BTRFS_EXTENT_ITEM_KEY;
	ins->objectid = 0;
	ins->offset = 0;

	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);

	space_info = __find_space_info(root->fs_info, flags);
	if (!space_info) {
		btrfs_err(root->fs_info, "No space info for %llu", flags);
		return -ENOSPC;
	}

	/*
	 * If our free space is heavily fragmented we may not be able to make
	 * big contiguous allocations, so instead of doing the expensive search
	 * for free space, simply return ENOSPC with our max_extent_size so we
	 * can go ahead and search for a more manageable chunk.
	 *
	 * If our max_extent_size is large enough for our allocation simply
	 * disable clustering since we will likely not be able to find enough
	 * space to create a cluster and induce latency trying.
	 */
	if (unlikely(space_info->max_extent_size)) {
		spin_lock(&space_info->lock);
		if (space_info->max_extent_size &&
		    num_bytes > space_info->max_extent_size) {
			ins->offset = space_info->max_extent_size;
			spin_unlock(&space_info->lock);
			return -ENOSPC;
		} else if (space_info->max_extent_size) {
			use_cluster = false;
		}
		spin_unlock(&space_info->lock);
	}

	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

	if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
		last_ptr = &root->fs_info->meta_alloc_cluster;
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
	}

	if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
	    btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->data_alloc_cluster;
	}

	if (last_ptr) {
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	search_start = max(search_start, hint_byte);

	if (!last_ptr)
		empty_cluster = 0;

	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if it's not cached.
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
		 */
		if (block_group && block_group_bits(block_group, flags) &&
		    block_group->cached != BTRFS_CACHE_NO) {
			down_read(&space_info->groups_sem);
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
			} else {
				index = get_block_group_index(block_group);
				btrfs_lock_block_group(block_group, delalloc);
				goto have_block_group;
			}
		} else if (block_group) {
			btrfs_put_block_group(block_group);
		}
	}
search:
	have_caching_bg = false;
	down_read(&space_info->groups_sem);
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
		u64 offset;
		int cached;

		btrfs_grab_block_group(block_group, delalloc);
		search_start = block_group->key.objectid;

		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
		if (!block_group_bits(block_group, flags)) {
		    u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6 |
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
			if ((flags & extra) && !(block_group->flags & extra))
				goto loop;
		}

have_block_group:
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			ret = cache_block_group(block_group, 0);
			BUG_ON(ret < 0);
			ret = 0;
		}

		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
			goto loop;
		if (unlikely(block_group->ro))
			goto loop;

		/*
		 * Ok we want to try and use the cluster allocator, so
		 * let's look there
		 */
		if (last_ptr) {
			struct btrfs_block_group_cache *used_block_group;
			unsigned long aligned_cluster;
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			used_block_group = btrfs_lock_cluster(block_group,
							      last_ptr,
							      delalloc);
			if (!used_block_group)
				goto refill_cluster;

			if (used_block_group != block_group &&
			    (used_block_group->ro ||
			     !block_group_bits(used_block_group, flags)))
				goto release_cluster;

			offset = btrfs_alloc_from_cluster(used_block_group,
						last_ptr,
						num_bytes,
						used_block_group->key.objectid,
						&max_extent_size);
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
				trace_btrfs_reserve_extent_cluster(root,
						used_block_group,
						search_start, num_bytes);
				if (used_block_group != block_group) {
					btrfs_release_block_group(block_group,
								  delalloc);
					block_group = used_block_group;
				}
				goto checks;
			}

			WARN_ON(last_ptr->block_group != used_block_group);
release_cluster:
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new cluster, so let's just skip it
			 * and let the allocator find whatever block
			 * it can find.  If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation.  */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    used_block_group != block_group) {
				spin_unlock(&last_ptr->refill_lock);
				btrfs_release_block_group(used_block_group,
							  delalloc);
				goto unclustered_alloc;
			}

			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

			if (used_block_group != block_group)
				btrfs_release_block_group(used_block_group,
							  delalloc);
refill_cluster:
			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

			aligned_cluster = max_t(unsigned long,
						empty_cluster + empty_size,
					      block_group->full_stripe_len);

			/* allocate a cluster in this block group */
			ret = btrfs_find_space_cluster(root, block_group,
						       last_ptr, search_start,
						       num_bytes,
						       aligned_cluster);
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
							last_ptr,
							num_bytes,
							search_start,
							&max_extent_size);
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
					trace_btrfs_reserve_extent_cluster(root,
						block_group, search_start,
						num_bytes);
					goto checks;
				}
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
				spin_unlock(&last_ptr->refill_lock);

				failed_cluster_refill = true;
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
			}

			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
			spin_unlock(&last_ptr->refill_lock);
			goto loop;
		}

unclustered_alloc:
		spin_lock(&block_group->free_space_ctl->tree_lock);
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
			if (block_group->free_space_ctl->free_space >
			    max_extent_size)
				max_extent_size =
					block_group->free_space_ctl->free_space;
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

		offset = btrfs_find_space_for_alloc(block_group, search_start,
						    num_bytes, empty_size,
						    &max_extent_size);
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
			wait_block_group_cache_progress(block_group,
						num_bytes + empty_size);
			failed_alloc = true;
			goto have_block_group;
		} else if (!offset) {
			if (!cached)
				have_caching_bg = true;
			goto loop;
		}
checks:
		search_start = ALIGN(offset, root->stripesize);

		/* move on to the next group */
		if (search_start + num_bytes >
		    block_group->key.objectid + block_group->key.offset) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		if (offset < search_start)
			btrfs_add_free_space(block_group, offset,
					     search_start - offset);
		BUG_ON(offset > search_start);

		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
						  alloc_type, delalloc);
		if (ret == -EAGAIN) {
			btrfs_add_free_space(block_group, offset, num_bytes);
			goto loop;
		}

		/* we are all good, let's return */
		ins->objectid = search_start;
		ins->offset = num_bytes;

		trace_btrfs_reserve_extent(orig_root, block_group,
					   search_start, num_bytes);
		btrfs_release_block_group(block_group, delalloc);
		break;
loop:
		failed_cluster_refill = false;
		failed_alloc = false;
		BUG_ON(index != get_block_group_index(block_group));
		btrfs_release_block_group(block_group, delalloc);
	}
	up_read(&space_info->groups_sem);

	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

	/*
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
	 */
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
		index = 0;
		loop++;
		if (loop == LOOP_ALLOC_CHUNK) {
			struct btrfs_trans_handle *trans;
			int exist = 0;

			trans = current->journal_info;
			if (trans)
				exist = 1;
			else
				trans = btrfs_join_transaction(root);

			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}

			ret = do_chunk_alloc(trans, root, flags,
					     CHUNK_ALLOC_FORCE);
			/*
			 * Do not bail out on ENOSPC since we
			 * can do more things.
			 */
			if (ret < 0 && ret != -ENOSPC)
				btrfs_abort_transaction(trans,
							root, ret);
			else
				ret = 0;
			if (!exist)
				btrfs_end_transaction(trans, root);
			if (ret)
				goto out;
		}

		if (loop == LOOP_NO_EMPTY_SIZE) {
			empty_size = 0;
			empty_cluster = 0;
		}

		goto search;
	} else if (!ins->objectid) {
		ret = -ENOSPC;
	} else if (ins->objectid) {
		ret = 0;
	}
out:
	if (ret == -ENOSPC) {
		spin_lock(&space_info->lock);
		space_info->max_extent_size = max_extent_size;
		spin_unlock(&space_info->lock);
		ins->offset = max_extent_size;
	}
	return ret;
}

static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
{
	struct btrfs_block_group_cache *cache;
	int index = 0;

	spin_lock(&info->lock);
	printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
	       info->flags,
	       info->total_bytes - info->bytes_used - info->bytes_pinned -
	       info->bytes_reserved - info->bytes_readonly,
	       (info->full) ? "" : "not ");
	printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
	       info->total_bytes, info->bytes_used, info->bytes_pinned,
	       info->bytes_reserved, info->bytes_may_use,
	       info->bytes_readonly);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		printk(KERN_INFO "BTRFS: "
			   "block group %llu has %llu bytes, "
			   "%llu used %llu pinned %llu reserved %s\n",
		       cache->key.objectid, cache->key.offset,
		       btrfs_block_group_used(&cache->item), cache->pinned,
		       cache->reserved, cache->ro ? "[readonly]" : "");
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

int btrfs_reserve_extent(struct btrfs_root *root,
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
			 struct btrfs_key *ins, int is_data, int delalloc)
{
	bool final_tried = num_bytes == min_alloc_size;
	u64 flags;
	int ret;

	flags = btrfs_get_alloc_profile(root, is_data);
again:
	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
			       flags, delalloc);

	if (ret == -ENOSPC) {
		if (!final_tried && ins->offset) {
			num_bytes = min(num_bytes >> 1, ins->offset);
			num_bytes = round_down(num_bytes, root->sectorsize);
			num_bytes = max(num_bytes, min_alloc_size);
			if (num_bytes == min_alloc_size)
				final_tried = true;
			goto again;
		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;

			sinfo = __find_space_info(root->fs_info, flags);
			btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
				flags, num_bytes);
			if (sinfo)
				dump_space_info(sinfo, num_bytes, 1);
		}
	}

	return ret;
}

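/*
 * Example caller pattern for btrfs_reserve_extent() above (values are
 * hypothetical): ask for up to 8MiB of data space but accept as little
 * as 64KiB; on -ENOSPC the request is halved (and clamped to
 * ins->offset, the largest free extent the search saw) until
 * min_alloc_size is reached.
 *
 *	ret = btrfs_reserve_extent(root, 8 * 1024 * 1024, 64 * 1024,
 *				   0, 0, &ins, 1, 0);
 */
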
static int __btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len,
					int pin, int delalloc)
{
	struct btrfs_block_group_cache *cache;
	int ret = 0;

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
		btrfs_err(root->fs_info, "Unable to find block group for %llu",
			start);
		return -ENOSPC;
	}

	if (pin)
		pin_down_extent(root, cache, start, len, 1);
	else {
		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start, len, NULL);
		btrfs_add_free_space(cache, start, len);
		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
	}

	btrfs_put_block_group(cache);

	trace_btrfs_reserved_extent_free(root, start, len);

	return ret;
}

int btrfs_free_reserved_extent(struct btrfs_root *root,
			       u64 start, u64 len, int delalloc)
{
	return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 1, 0);
}

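/*
 * The two wrappers above differ only in the pin argument they pass to
 * __btrfs_free_reserved_extent(): a plain free returns the range to
 * the free space cache (optionally discarding it first), while
 * free-and-pin defers the space to the pinned set so it cannot be
 * reused before the current transaction commits.
 */
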
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int type;
	u32 size;

	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;

	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			ins->objectid, ins->offset);
		BUG();
	}
	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
	return ret;
}

static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins,
				     int no_quota)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	u64 num_bytes = ins->offset;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	if (!skinny_metadata)
		size += sizeof(*block_info);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
						   root->nodesize);
		return -ENOMEM;
	}

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_path(path);
		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
						   root->nodesize);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
		num_bytes = root->nodesize;
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, key);
		btrfs_set_tree_block_level(leaf, block_info, level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(trans, root, ins->objectid, root->nodesize,
				 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			ins->objectid, ins->offset);
		BUG();
	}

	trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
	return ret;
}

int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
					 ins->offset, 0,
					 root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
	return ret;
}

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;

	/*
	 * Mixed block groups will exclude before processing the log so we only
	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
		if (ret)
			return ret;
	}

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	if (!block_group)
		return -EINVAL;

	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
					  RESERVE_ALLOC_NO_ACCOUNT, 0);
	BUG_ON(ret); /* logic error */
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	btrfs_put_block_group(block_group);
	return ret;
}

static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root->fs_info, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		buf->log_index = root->log_transid % 2;
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bits to differentiate dirty pages.
		 */
		if (buf->log_index == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
					buf->start + buf->len - 1, GFP_NOFS);
	} else {
		buf->log_index = -1;
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
			 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}

static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		update_global_block_rsv(root->fs_info);
		goto again;
	}

	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"BTRFS: block rsv returned %d\n", ret);
	}
try_reserve:
	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
				     BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes try and use some from
	 * the global reserve if its space type is the same as the global
	 * reservation.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	return ERR_PTR(ret);
}

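/*
 * Summary of the fallback order in use_block_rsv() above: (1) take
 * blocksize bytes from the root's block_rsv; (2) if that is the global
 * reserve, refresh its size once and retry; (3) reserve fresh metadata
 * bytes with BTRFS_RESERVE_NO_FLUSH; (4) as a last resort, borrow from
 * the global reserve when it shares the same space_info.
 */
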
static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}

/*
 * finds a free extent and does all the dirty work required for allocation.
 * returns the tree buffer or an ERR_PTR on error.
 */
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	struct btrfs_delayed_extent_op *extent_op;
	u64 flags = 0;
	int ret;
	u32 blocksize = root->nodesize;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	if (btrfs_test_is_dummy_root(root)) {
		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
					    level);
		if (!IS_ERR(buf))
			root->alloc_bytenr += blocksize;
		return buf;
	}

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(root, blocksize, blocksize,
				   empty_size, hint, &ins, 0, 0);
	if (ret)
		goto out_unuse;

	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_free_reserved;
	}

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		extent_op = btrfs_alloc_delayed_extent_op();
		if (!extent_op) {
			ret = -ENOMEM;
			goto out_free_buf;
		}
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		if (skinny_metadata)
			extent_op->update_key = 0;
		else
			extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;
		extent_op->level = level;

		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
						 ins.objectid, ins.offset,
						 parent, root_objectid, level,
						 BTRFS_ADD_DELAYED_EXTENT,
						 extent_op, 0);
		if (ret)
			goto out_free_delayed;
	}
	return buf;

out_free_delayed:
	btrfs_free_delayed_extent_op(extent_op);
out_free_buf:
	free_extent_buffer(buf);
out_free_reserved:
	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
out_unuse:
	unuse_block_rsv(root->fs_info, block_rsv, blocksize);
	return ERR_PTR(ret);
}

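/*
 * Note on the error unwinding in btrfs_alloc_tree_block() above: the
 * out_* labels undo the setup steps in reverse order (delayed extent
 * op, then the buffer, then the reserved extent, and finally the block
 * reservation), so a failure at any point leaves no leaked state.
 */
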
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int for_reloc;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2

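/*
 * Sketch of the stage machine implemented by the walkers below: a drop
 * starts in DROP_REFERENCE; when do_walk_down() meets a shared block
 * whose backrefs still need updating, it switches to UPDATE_BACKREF
 * for the subtree rooted there, and walk_up_proc() switches back to
 * DROP_REFERENCE once that subtree has been processed.
 */
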
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = root->nodesize;

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr,
					       wc->level - 1, 1, &refs,
					       &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		readahead_tree_block(root, bytenr);
		nread++;
	}
	wc->reada_slot = slot;
}

/*
 * TODO: Modify related functions to add related node/leaf to dirty_extent_root,
 * for later qgroup accounting.
 *
 * Currently, this function does nothing.
 */
static int account_leaf_items(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *eb)
{
	int nr = btrfs_header_nritems(eb);
	int i, extent_type;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	u64 bytenr, num_bytes;

	for (i = 0; i < nr; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		/* filter out non qgroup-accountable extents  */
		extent_type = btrfs_file_extent_type(eb, fi);

		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;

		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
		if (!bytenr)
			continue;

		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
	}
	return 0;
}

/*
 * Walk up the tree from the bottom, freeing leaves and any interior
 * nodes which have had all slots visited. If a node (leaf or
 * interior) is freed, the node above it will have its slot
 * incremented. The root node will never be freed.
 *
 * At the end of this function, we should have a path which has all
 * slots incremented to the next position for a search. If we need to
 * read a new node it will be NULL and the node above it will have the
 * correct slot selected for a later read.
 *
 * If we increment the root nodes slot counter past the number of
 * elements, 1 is returned to signal completion of the search.
 */
static int adjust_slots_upwards(struct btrfs_root *root,
				struct btrfs_path *path, int root_level)
{
	int level = 0;
	int nr, slot;
	struct extent_buffer *eb;

	if (root_level == 0)
		return 1;

	while (level <= root_level) {
		eb = path->nodes[level];
		nr = btrfs_header_nritems(eb);
		path->slots[level]++;
		slot = path->slots[level];
		if (slot >= nr || level == 0) {
			/*
			 * Don't free the root - we will detect this
			 * condition after our loop and return a
			 * positive value for caller to stop walking the tree.
			 */
			if (level != root_level) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;

				free_extent_buffer(eb);
				path->nodes[level] = NULL;
				path->slots[level] = 0;
			}
		} else {
			/*
			 * We have a valid slot to walk back down
			 * from. Stop here so caller can process these
			 * new nodes.
			 */
			break;
		}

		level++;
	}

	eb = path->nodes[root_level];
	if (path->slots[root_level] >= btrfs_header_nritems(eb))
		return 1;

	return 0;
}

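/*
 * Worked example for adjust_slots_upwards() above (hypothetical
 * two-level tree): once the leaf at level 0 is exhausted it is freed
 * and its slot reset; the root's slot is then incremented, and when
 * that increment pushes path->slots[root_level] to nritems the final
 * check returns 1 to signal that the walk is complete.
 */
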
/*
 * root_eb is the subtree root and is locked before this function is called.
 * TODO: Modify this function to mark all (including completely shared
 * nodes) to dirty_extent_root to allow it to get accounted in qgroup.
 */
static int account_shared_subtree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct extent_buffer *root_eb,
				  u64 root_gen,
				  int root_level)
{
	int ret = 0;
	int level;
	struct extent_buffer *eb = root_eb;
	struct btrfs_path *path = NULL;

	BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
	BUG_ON(root_eb == NULL);

	if (!root->fs_info->quota_enabled)
		return 0;

	if (!extent_buffer_uptodate(root_eb)) {
		ret = btrfs_read_buffer(root_eb, root_gen);
		if (ret)
			goto out;
	}

	if (root_level == 0) {
		ret = account_leaf_items(trans, root, root_eb);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * Walk down the tree.  Missing extent blocks are filled in as
	 * we go. Metadata is accounted every time we read a new
	 * extent block.
	 *
	 * When we reach a leaf, we account for file extent items in it,
	 * walk back up the tree (adjusting slot pointers as we go)
	 * and restart the search process.
	 */
	extent_buffer_get(root_eb); /* For path */
	path->nodes[root_level] = root_eb;
	path->slots[root_level] = 0;
	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
	level = root_level;
	while (level >= 0) {
		if (path->nodes[level] == NULL) {
			int parent_slot;
			u64 child_gen;
			u64 child_bytenr;

			/* We need to get child blockptr/gen from
			 * parent before we can read it. */
			eb = path->nodes[level + 1];
			parent_slot = path->slots[level + 1];
			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
			child_gen = btrfs_node_ptr_generation(eb, parent_slot);

			eb = read_tree_block(root, child_bytenr, child_gen);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				goto out;
			} else if (!extent_buffer_uptodate(eb)) {
				free_extent_buffer(eb);
				ret = -EIO;
				goto out;
			}

			path->nodes[level] = eb;
			path->slots[level] = 0;

			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
		}

		if (level == 0) {
			ret = account_leaf_items(trans, root, path->nodes[level]);
			if (ret)
				goto out;

			/* Nonzero return here means we completed our search */
			ret = adjust_slots_upwards(root, path, root_level);
			if (ret)
				break;

			/* Restart search with new slots */
			goto walk_down;
		}

		level--;
	}

	ret = 0;
out:
	btrfs_free_path(path);

	return ret;
}

/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, level, 1,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag,
						  btrfs_header_level(eb), 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}

/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update back refs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;
	bool need_account = false;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = root->nodesize;

	next = btrfs_find_tree_block(root->fs_info, bytenr);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr);
		if (!next)
			return -ENOMEM;
		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
					       level - 1);
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0) {
		btrfs_tree_unlock(next);
		return ret;
	}

	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(root->fs_info, "Missing references.");
		BUG();
	}
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			need_account = true;
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, generation);
		if (IS_ERR(next)) {
			return PTR_ERR(next);
		} else if (!extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		if (need_account) {
			ret = account_shared_subtree(trans, root, next,
						     generation, level - 1);
			if (ret) {
				btrfs_err_rl(root->fs_info,
					"Error "
					"%d accounting shared subtree. Quota "
					"is out of sync, rescan required.",
					ret);
			}
		}
		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
				root->root_key.objectid, level - 1, 0, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}

/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = account_leaf_items(trans, root, eb);
			if (ret) {
				btrfs_err_rl(root->fs_info,
					"error "
					"%d accounting leaf items. Quota "
					"is out of sync, rescan required.",
					ret);
			}
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(trans, root->fs_info, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}

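/*
 * walk down the tree starting at wc->level, processing blocks via
 * walk_down_proc()/do_walk_down() until we hit a leaf or a block that
 * should not be descended into; returns 0 when it is time to walk back
 * up, or < 0 on error.
 */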
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}

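/*
 * walk back up the tree, dropping and freeing fully-processed blocks with
 * walk_up_proc(); returns 0 when the walk should continue downward again
 * at wc->level, and 1 once everything up to max_level has been released.
 */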
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}

/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are only
 * referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			 struct btrfs_block_rsv *block_rsv, int update_ref,
			 int for_reloc)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;

	btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
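		/*
		 * An earlier, interrupted drop recorded how far it got in
		 * drop_progress/drop_level; resume from there rather than
		 * starting over from the root node.
		 */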
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						level, 1, &wc->refs[level],
						&wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->for_reloc = for_reloc;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

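	/*
	 * Walk down to a block that can be freed, then walk back up
	 * freeing blocks on the way.  drop_progress is updated after each
	 * pass so an interrupted drop can resume, and the transaction is
	 * ended and restarted whenever it grows too big or the cleaner
	 * needs to sleep.
	 */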
	while (1) {

		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(root))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, tree_root, ret);
				err = ret;
				goto out_end_trans;
			}

			btrfs_end_transaction_throttle(trans, tree_root);
			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
				pr_debug("BTRFS: drop snapshot early exit\n");
				err = -EAGAIN;
				goto out_free;
			}

			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, tree_root, ret);
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, tree_root, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
		btrfs_add_dropped_root(trans, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		btrfs_put_fs_root(root);
	}
	root_dropped = true;
out_end_trans:
	btrfs_end_transaction_throttle(trans, tree_root);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later.  This also cleans up roots if we
	 * don't have it in the radix (like when we recover after a power fail
	 * or unmount) so we don't leak memory.
	 */
	if (!for_reloc && root_dropped == false)
		btrfs_add_dead_root(root);
	if (err && err != -EAGAIN)
		btrfs_std_error(root->fs_info, err, NULL);
	return err;
}

/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 * only used by relocation code
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->for_reloc = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}

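/*
 * pick the block group profile to use after a balance or restripe: use
 * the restripe target if one is set, otherwise adjust the current flags
 * to the number of rw devices (e.g. raid1 -> dup on a single device, dup
 * -> raid1 once more than one device is available).
 */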
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * if restripe for this chunk_type is on, pick target profile and
	 * return, otherwise do the usual balance
	 */
	stripped = get_restripe_target(root->fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	num_devices = root->fs_info->fs_devices->rw_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}

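/*
 * mark a block group read-only, moving its unallocated space into
 * bytes_readonly.  Unless @force is set, fail with -ENOSPC if doing so
 * would leave the space_info without enough room (a small reserve is
 * kept for metadata and system space so chunk allocation can't starve).
 */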
static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;

	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases until we force to set
	 * it to be readonly.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = 1 * 1024 * 1024;
	else
		min_allocable_bytes = 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		cache->ro++;
		ret = 0;
		goto out;
	}

	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro++;
		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}

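/*
 * set a block group read-only, allocating replacement chunks first so
 * allocations in this space_info can continue, and backing off to the
 * next transaction commit if the dirty block group cache has already
 * started writing in this one.
 */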
int btrfs_inc_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

again:
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * we're not allowed to set block groups readonly after the dirty
	 * block groups cache has started writing.  If it already started,
	 * back off and let this transaction commit
	 */
	mutex_lock(&root->fs_info->ro_block_group_mutex);
	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
		u64 transid = trans->transid;

		mutex_unlock(&root->fs_info->ro_block_group_mutex);
		btrfs_end_transaction(trans, root);

		ret = btrfs_wait_for_commit(root, transid);
		if (ret)
			return ret;
		goto again;
	}

	/*
	 * if we are changing raid levels, try to allocate a corresponding
	 * block group with the new raid level.
	 */
	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, root, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		/*
		 * ENOSPC is allowed here, we may have enough space
		 * already allocated at the new raid level to
		 * carry on
		 */
		if (ret == -ENOSPC)
			ret = 0;
		if (ret < 0)
			goto out;
	}

	ret = inc_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = inc_block_group_ro(cache, 0);
out:
	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
		alloc_flags = update_block_group_flags(root, cache->flags);
		lock_chunks(root->fs_info->chunk_root);
		check_system_chunk(trans, root, alloc_flags);
		unlock_chunks(root->fs_info->chunk_root);
	}
	mutex_unlock(&root->fs_info->ro_block_group_mutex);

	btrfs_end_transaction(trans, root);
	return ret;
}

int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type)
{
	u64 alloc_flags = get_alloc_profile(root, type);
	return do_chunk_alloc(trans, root, alloc_flags,
			      CHUNK_ALLOC_FORCE);
}

/*
 * helper to account the unused space of all the readonly block groups in
 * the space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	/* It's df, we don't care if it's racy */
	if (list_empty(&sinfo->ro_bgs))
		return 0;

	spin_lock(&sinfo->lock);
	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}
	spin_unlock(&sinfo->lock);

	return free_bytes;
}

void btrfs_dec_block_group_ro(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	if (!--cache->ro) {
		num_bytes = cache->key.offset - cache->reserved -
			    cache->pinned - cache->bytes_super -
			    btrfs_block_group_used(&cache->item);
		sinfo->bytes_readonly -= num_bytes;
		list_del_init(&cache->ro_list);
	}
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	struct btrfs_trans_handle *trans;
	u64 min_free;
	u64 dev_min = 1;
	u64 dev_nr = 0;
	u64 target;
	int index;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	min_free = btrfs_block_group_used(&block_group->item);

	/* no bytes used, we're good */
	if (!min_free)
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     min_free < space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  if this block
	 * group is going to be restriped, run checks against the target
	 * profile instead of the current one.
	 */
	ret = -1;

	/*
	 * index:
	 *      0: raid10
	 *      1: raid1
	 *      2: dup
	 *      3: raid0
	 *      4: single
	 */
	target = get_restripe_target(root->fs_info, block_group->flags);
	if (target) {
		index = __get_raid_index(extended_to_chunk(target));
	} else {
		/*
		 * this is just a balance, so if we were marked as full
		 * we know there is no space for a new chunk
		 */
		if (full)
			goto out;

		index = get_block_group_index(block_group);
	}

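	/*
	 * estimate how many devices need free room and how much each must
	 * hold: raid10 stripes mirrored data across at least 4 devices so
	 * each holds half, raid1 needs two devices, dup needs double the
	 * space on one device, and raid0 spreads the data evenly across
	 * all rw devices.
	 */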
	if (index == BTRFS_RAID_RAID10) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == BTRFS_RAID_RAID1) {
		dev_min = 2;
	} else if (index == BTRFS_RAID_DUP) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == BTRFS_RAID_RAID0) {
		dev_min = fs_devices->rw_devices;
		min_free = div64_u64(min_free, dev_min);
	}

	/* We need to do this so that we can look at pending chunks */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free &&
		    !device->is_tgtdev_for_dev_replace) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;

			if (dev_nr >= dev_min)
				break;

			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
	btrfs_end_transaction(trans, root);
out:
	btrfs_put_block_group(block_group);
	return ret;
}

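/*
 * find the first block group item whose objectid is >= key->objectid,
 * walking forward leaf by leaf; returns 0 with the path pointing at the
 * item, > 0 if there is none, or < 0 on error.
 */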
static int find_first_block_group(struct btrfs_root *root,
		struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}

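/*
 * drop the inode reference (iref) that block groups keep on their free
 * space cache inode, iput()ing each cached inode in turn.
 */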
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->commit_root_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->commit_root_sem);

	spin_lock(&info->unused_bgs_lock);
	while (!list_empty(&info->unused_bgs)) {
		block_group = list_first_entry(&info->unused_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&info->unused_bgs_lock);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		RB_CLEAR_NODE(&block_group->cache_node);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			free_excluded_extents(info->extent_root, block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		int i;

		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
			if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_reserved > 0 ||
			    space_info->bytes_may_use > 0)) {
				dump_space_info(space_info, 0, 0);
			}
		}
		list_del(&space_info->list);
		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
			struct kobject *kobj;
			kobj = space_info->block_group_kobjs[i];
			space_info->block_group_kobjs[i] = NULL;
			if (kobj) {
				kobject_del(kobj);
				kobject_put(kobj);
			}
		}
		kobject_del(&space_info->kobj);
		kobject_put(&space_info->kobj);
	}
	return 0;
}

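/*
 * add the block group to its space_info's list for the matching RAID
 * index; the first group of a given RAID type also creates the
 * corresponding sysfs kobject.
 */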
static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);
	bool first = false;

	down_write(&space_info->groups_sem);
	if (list_empty(&space_info->block_groups[index]))
		first = true;
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);

	if (first) {
		struct raid_kobject *rkobj;
		int ret;

		rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
		if (!rkobj)
			goto out_err;
		rkobj->raid_type = index;
		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
				  "%s", get_raid_name(index));
		if (ret) {
			kobject_put(&rkobj->kobj);
			goto out_err;
		}
		space_info->block_group_kobjs[index] = &rkobj->kobj;
	}

	return;
out_err:
	pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
}

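/*
 * allocate and initialize the in-memory description of the block group
 * starting at @start, including its free space control structure.
 */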
static struct btrfs_block_group_cache *
btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
{
	struct btrfs_block_group_cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return NULL;

	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return NULL;
	}

	cache->key.objectid = start;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;

	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       start);
	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	init_rwsem(&cache->data_rwsem);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->bg_list);
	INIT_LIST_HEAD(&cache->ro_list);
	INIT_LIST_HEAD(&cache->dirty_list);
	INIT_LIST_HEAD(&cache->io_list);
	btrfs_init_free_space_ctl(cache);
	atomic_set(&cache->trimming, 0);

	return cache;
}

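/*
 * read all block group items from the extent tree at mount time and
 * build the in-memory block group caches and space_info accounting.
 */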
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;

	root = info->extent_root;
	key.objectid = 0;
	key.offset = 0;
	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = 1;

	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
	if (btrfs_test_opt(root, SPACE_CACHE) &&
	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
		need_clear = 1;
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;

	while (1) {
		ret = find_first_block_group(root, path, &key);
		if (ret > 0)
			break;
		if (ret != 0)
			goto error;

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		cache = btrfs_create_block_group_cache(root, found_key.objectid,
						       found_key.offset);
		if (!cache) {
			ret = -ENOMEM;
			goto error;
		}

		if (need_clear) {
			/*
			 * When we mount with old space cache, we need to
			 * set BTRFS_DC_CLEAR and set dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    setup a new one.
			 * b) Setting 'dirty flag' makes sure that we flush
			 *    the new space cache info onto disk.
			 */
			if (btrfs_test_opt(root, SPACE_CACHE))
				cache->disk_cache_state = BTRFS_DC_CLEAR;
		}

		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
		cache->flags = btrfs_block_group_flags(&cache->item);

		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);

		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
		ret = exclude_super_stripes(root, cache);
		if (ret) {
			/*
			 * We may have excluded something, so call this just in
			 * case.
			 */
			free_excluded_extents(root, cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us a lot of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			spin_lock(&info->block_group_cache_lock);
			rb_erase(&cache->cache_node,
				 &info->block_group_cache_tree);
			RB_CLEAR_NODE(&cache->cache_node);
			spin_unlock(&info->block_group_cache_lock);
			btrfs_put_block_group(cache);
			goto error;
		}

		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
			inc_block_group_ro(cache, 1);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			spin_lock(&info->unused_bgs_lock);
			/* Should always be true but just in case. */
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &info->unused_bgs);
			}
			spin_unlock(&info->unused_bgs_lock);
		}
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * avoid allocating from un-mirrored block group if there are
		 * mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			inc_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			inc_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

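/*
 * insert the block group items for all block groups created in this
 * transaction; can_flush_pending_bgs is cleared while we iterate so the
 * insertions themselves can't re-trigger a flush of the same list.
 */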
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;
	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;

	trans->can_flush_pending_bgs = false;
	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
		if (ret)
			goto next;

		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
		ret = btrfs_finish_chunk_alloc(trans, extent_root,
					       key.objectid, key.offset);
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
next:
		list_del_init(&block_group->bg_list);
	}
	trans->can_flush_pending_bgs = can_flush_pending_bgs;
}

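/*
 * create the in-memory state for a newly allocated chunk; the block
 * group item is inserted into the extent tree later, from the
 * trans->new_bgs list, by btrfs_create_pending_block_groups().
 */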
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	btrfs_set_log_full_commit(root->fs_info, trans);

	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
	if (!cache)
		return -ENOMEM;

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	btrfs_set_block_group_flags(&cache->item, type);

	cache->flags = type;
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	ret = exclude_super_stripes(root, cache);
	if (ret) {
		/*
		 * We may have excluded something, so call this just in
		 * case.
		 */
		free_excluded_extents(root, cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(root, cache)) {
		u64 new_bytes_used = size - bytes_used;

		bytes_used += new_bytes_used >> 1;
		fragment_free_space(root, cache);
	}
#endif
	/*
	 * Call to ensure the corresponding space_info object is created and
	 * assigned to our block group, but don't update its counters just yet.
	 * We want our bg to be added to the rbtree with its ->space_info set.
	 */
	ret = update_space_info(root->fs_info, cache->flags, 0, 0,
				&cache->space_info);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	/*
	 * Now that our block group has its ->space_info set and is inserted in
	 * the rbtree, update the space info's counters.
	 */
	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		spin_lock(&root->fs_info->block_group_cache_lock);
		rb_erase(&cache->cache_node,
			 &root->fs_info->block_group_cache_tree);
		RB_CLEAR_NODE(&cache->cache_node);
		spin_unlock(&root->fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return ret;
	}
	update_global_block_rsv(root->fs_info);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	list_add_tail(&cache->bg_list, &trans->new_bgs);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}

static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

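/*
 * remove an empty, read-only block group: drop its free space cache
 * inode and item, unlink it from all lists and the rbtree, fix up the
 * space_info accounting and delete the block group item.  The chunk's
 * extent map stays pinned while a trim is still running against it.
 */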
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start,
			     struct extent_map *em)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(tree_root, block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(root, trans, block_group,
				    &block_group->io_ctl, path,
				    block_group->key.objectid);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	if (root->fs_info->first_logical_byte == block_group->key.objectid)
		root->fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(root->fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&root->fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &root->fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					atomic_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&root->fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			put_caching_control(caching_ctl);
			put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->key.offset);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->key.offset);
		WARN_ON(block_group->space_info->disk_total
			< block_group->key.offset * factor);
	}
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;

	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	lock_chunks(root);
	if (!list_empty(&em->list)) {
		/* We're in the transaction->pending_chunks list. */
		free_extent_map(em);
	}
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming can't start on this block group, because we
	 * removed the block group from the tree fs_info->block_group_cache_tree
	 * so no one can find it anymore and even if someone already got this
	 * block group before we removed it from the rbtree, they have already
	 * incremented block_group->trimming - if they didn't, they won't find
	 * any free space entries because we already removed them all when we
	 * called btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is because our
	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->trimming) == 0);
	/*
	 * Make sure a trimmer task always sees the em in the pinned_chunks list
	 * if it sees block_group->removed == 1 (needs to lock block_group->lock
	 * before checking block_group->removed).
	 */
	if (!remove_em) {
		/*
		 * Our em might be in trans->transaction->pending_chunks which
		 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
		 * and so is the fs_info->pinned_chunks list.
		 *
		 * So at this point we must be holding the chunk_mutex to avoid
		 * any races with chunk allocation (more specifically at
		 * volumes.c:contains_pending_extent()), to ensure it always
		 * sees the em, either in the pending_chunks list or in the
		 * pinned_chunks list.
		 */
		list_move_tail(&em->list, &root->fs_info->pinned_chunks);
	}
	spin_unlock(&block_group->lock);

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &root->fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		/*
		 * The em might be in the pending_chunks list, so make sure the
		 * chunk mutex is locked, since remove_extent_mapping() will
		 * delete us from that list.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

	unlock_chunks(root);

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107
/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (!fs_info->open)
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		u64 start, end;
10108
		int trimming;
10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		space_info = block_group->space_info;
		list_del_init(&block_group->bg_list);
		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

10121 10122
		mutex_lock(&root->fs_info->delete_unused_bgs_mutex);

10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 10140 10141
		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);
		spin_lock(&block_group->lock);
		if (block_group->reserved ||
		    btrfs_block_group_used(&block_group->item) ||
		    block_group->ro) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group.  We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
10142
		ret = inc_block_group_ro(block_group, 0);
10143 10144 10145 10146 10147 10148 10149 10150 10151 10152
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
10153 10154
		/* 1 for btrfs_orphan_reserve_metadata() */
		trans = btrfs_start_transaction(root, 1);
10155
		if (IS_ERR(trans)) {
10156
			btrfs_dec_block_group_ro(root, block_group);
10157 10158 10159 10160 10161 10162 10163 10164 10165 10166
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		start = block_group->key.objectid;
		end = start + block_group->key.offset - 1;
10167 10168 10169 10170 10171 10172 10173 10174 10175 10176 10177 10178
		/*
		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
		 * btrfs_finish_extent_commit(). If we are at transaction N,
		 * another task might be running finish_extent_commit() for the
		 * previous transaction N - 1, and have seen a range belonging
		 * to the block group in freed_extents[] before we were able to
		 * clear the whole block group range from freed_extents[]. This
		 * means that task could look up the block group after we
		 * unpinned it from freed_extents[] and removed it, leading to
		 * a BUG_ON() at btrfs_unpin_extent_range().
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
				  EXTENT_DIRTY, GFP_NOFS);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(root, block_group);
			goto end_trans;
		}
		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
				  EXTENT_DIRTY, GFP_NOFS);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(root, block_group);
			goto end_trans;
		}
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		space_info->bytes_pinned -= block_group->pinned;
		space_info->bytes_readonly += block_group->pinned;
		percpu_counter_add(&space_info->total_bytes_pinned,
				   -block_group->pinned);
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/* DISCARD can flip during remount */
		trimming = btrfs_test_opt(root, DISCARD);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_get_block_group_trimming(block_group);

		/*
		 * btrfs_remove_chunk() will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, root,
					 block_group->key.objectid);

		if (ret) {
			if (trimming)
				btrfs_put_block_group_trimming(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			WARN_ON(!list_empty(&block_group->bg_list));
			spin_lock(&trans->transaction->deleted_bgs_lock);
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&trans->transaction->deleted_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans, root);
next:
		mutex_unlock(&root->fs_info->delete_unused_bgs_mutex);
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}
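
/*
 * For reference, a sketch of the enqueue side (illustrative only, kept out
 * of the build with #if 0): a block group typically lands on
 * fs_info->unused_bgs when its used byte count drops to zero while extents
 * are freed.  "fs_info" and "block_group" are assumed from that caller's
 * context; the real call site and its surrounding locking live in the
 * block group accounting code elsewhere in this file.
 */
#if 0
	spin_lock(&fs_info->unused_bgs_lock);
	if (list_empty(&block_group->bg_list)) {
		/* Pin the group while it is queued for the cleaner. */
		btrfs_get_block_group(block_group);
		list_add_tail(&block_group->bg_list, &fs_info->unused_bgs);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
#endif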

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}

int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space.  Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time.  We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released in
 * the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses.  For that, we need to take a reference to the
 * transaction.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device,
				   u64 minlen, u64 *trimmed)
{
	u64 start = 0, len = 0;
	int ret;

	*trimmed = 0;

	/* Not writeable = nothing to do. */
	if (!device->writeable)
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
		struct btrfs_transaction *trans;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			return ret;

		down_read(&fs_info->commit_root_sem);

		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			atomic_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ret = find_free_dev_extent_start(trans, device, minlen, start,
						 &start, &len);
		if (trans)
			btrfs_put_transaction(trans);

		if (ret) {
			up_read(&fs_info->commit_root_sem);
			mutex_unlock(&fs_info->chunk_mutex);
			if (ret == -ENOSPC)
				ret = 0;
			break;
		}

		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
		up_read(&fs_info->commit_root_sem);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_device *device;
	struct list_head *devices;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * Try to trim all FS space; our first block group may start at a
	 * non-zero offset.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
				cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	devices = &root->fs_info->fs_devices->alloc_list;
	list_for_each_entry(device, devices, dev_alloc_list) {
		ret = btrfs_trim_free_extents(device, range->minlen,
					      &group_trimmed);
		if (ret)
			break;

		trimmed += group_trimmed;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	range->len = trimmed;
	return ret;
}
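
/*
 * A minimal usage sketch (illustrative only, kept out of the build with
 * #if 0): btrfs_trim_fs() is driven from the FITRIM ioctl path, which
 * fills in an fstrim_range before calling it.  Trimming the whole
 * filesystem with a 1MiB minimum free-extent size would look roughly like
 * this; "root", "ret" and error handling are assumed from the caller.
 */
#if 0
	struct fstrim_range range = {
		.start	= 0,
		.len	= (u64)-1,	/* clamped to the FS size by the ioctl */
		.minlen	= 1024ULL * 1024,
	};

	ret = btrfs_trim_fs(root, &range);
	/* On return, range.len holds the number of bytes actually trimmed. */
#endif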

/*
 * btrfs_{start,end}_write_no_snapshoting() are similar to
 * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
 * data into the page cache via nocow before the subvolume is snapshotted
 * (that data is only flushed to disk after the snapshot is created), and to
 * prevent operations while a snapshot is ongoing that would leave the
 * snapshot inconsistent (writes followed by expanding truncates, for
 * example).
 */
void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&root->subv_writers->wait))
		wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
{
	if (atomic_read(&root->will_be_snapshoted))
		return 0;

	percpu_counter_inc(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we check for snapshot creation.
	 */
	smp_mb();
	if (atomic_read(&root->will_be_snapshoted)) {
		btrfs_end_write_no_snapshoting(root);
		return 0;
	}
	return 1;
}
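
/*
 * A minimal caller sketch (illustrative only, kept out of the build with
 * #if 0): writers that want to go the nocow route take the counter first
 * and fall back to cow if a snapshot is pending.  do_nocow_write() is a
 * hypothetical helper standing in for the real write path; "root",
 * "inode", "pos", "len" and "ret" are assumed from the caller's context.
 */
#if 0
	if (!btrfs_start_write_no_snapshoting(root))
		return 0;	/* snapshot pending; caller falls back to cow */

	ret = do_nocow_write(inode, pos, len);	/* hypothetical helper */
	btrfs_end_write_no_snapshoting(root);
#endif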