/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};

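/*
 * Illustrative use (a sketch, not an exhaustive list of call sites):
 * ENOSPC flushing paths pass CHUNK_ALLOC_NO_FORCE and let the
 * should_alloc_chunk() heuristic decide, while the allocator's
 * last-resort loop passes CHUNK_ALLOC_FORCE to insist on a new chunk.
 */
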
/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
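
/*
 * Illustrative flow (a sketch, not an exhaustive list of call sites):
 * when an allocation is carved out of a block group,
 * btrfs_update_reserved_bytes() is called with RESERVE_ALLOC (or
 * RESERVE_ALLOC_NO_ACCOUNT when the ENOSPC accounting was already
 * handled elsewhere), and with RESERVE_FREE when that reservation is
 * released again.
 */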

static int update_block_group(struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * this is only called by cache_block_group; since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, because their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
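
/*
 * Worked example for add_new_free_space() (illustrative numbers only):
 * caching a block group that covers [1000, 2000) while the range
 * [1200, 1300] is pinned adds [1000, 1200) to the free space cache,
 * skips the pinned bytes, and the tail case adds [1301, 2000).
 */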

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = -ENOMEM;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched()) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto err;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->leafsize;
			else
				last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->caching_ctl = NULL;
		block_group->cached = BTRFS_CACHE_ERROR;
		spin_unlock(&block_group->lock);
	}
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}
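
/*
 * Note that the flags are masked with BTRFS_BLOCK_GROUP_TYPE_MASK before
 * the comparison, so e.g. __find_space_info(info, BTRFS_BLOCK_GROUP_METADATA)
 * matches the metadata space_info regardless of any replication bits
 * (RAID1, DUP, ...) set in the caller's flags.
 */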

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	if (ret > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == start &&
		    key.type == BTRFS_METADATA_ITEM_KEY)
			ret = 0;
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->leafsize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (metadata) {
		key.objectid = bytenr;
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = offset;
	} else {
		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = offset;
	}

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		metadata = 0;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->leafsize)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = root->leafsize;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
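
/*
 * For example (illustrative): if the extent item on disk records 2
 * references and the delayed ref head for this bytenr still has a queued
 * ref_mod of +1, btrfs_lookup_extent_info() reports 3, i.e. the count as
 * it would be once all delayed refs are processed.
 */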

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic and can
 * be used in all cases where implicit back refs are used. Their major
 * shortcoming is overhead: every time a tree block gets COWed, we have to
 * update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block and increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block and increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is used to differentiate between types of back refs. The
 * meaning of the key offset depends on the type of back ref.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */
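
/*
 * A concrete example (made-up numbers): a data extent at bytenr 12845056
 * referenced once by inode 257 at file offset 0 in subvolume 5 is
 * described by an extent item at (12845056, BTRFS_EXTENT_ITEM_KEY, len)
 * carrying an implicit btrfs_extent_data_ref with root = 5,
 * objectid = 257, offset = 0 and count = 1.  If the pointer instead
 * lives in a shared (full back ref) leaf, the reference becomes a
 * BTRFS_SHARED_DATA_REF_KEY whose key offset is that leaf's bytenr.
 */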

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
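
/*
 * The combined 63-bit value (31 bits of high_crc, 32 bits of low_crc) is
 * used as the key offset for implicit data back refs; hash collisions are
 * handled by the callers, which step key.offset forward and compare the
 * stored root/objectid/offset fields directly.
 */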

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
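
/*
 * For example: a file extent pointer in a leaf still owned by subvolume 5
 * gets an implicit BTRFS_EXTENT_DATA_REF_KEY back ref, while the same
 * pointer in a shared (full back ref) leaf gets BTRFS_SHARED_DATA_REF_KEY
 * keyed on the leaf's bytenr; tree block pointers get the analogous
 * BTRFS_TREE_BLOCK_REF_KEY or BTRFS_SHARED_BLOCK_REF_KEY.
 */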

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
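
/*
 * Sketch of the typical calling pattern (see insert_inline_extent_backref
 * below): a return of 0 means the inline ref exists and can be updated in
 * place via update_inline_extent_backref(); -ENOENT means *ref_ret points
 * at the insertion point for setup_inline_extent_backref(); -EAGAIN means
 * the extent item has no room and the caller must fall back to a separate
 * keyed back ref item.
 */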

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

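/*
 * Look up a backref for the given extent: try the inline form first
 * and, when no inline ref matches, fall back to searching for a keyed
 * tree block ref or extent data ref item.
 */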
static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size =  btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

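/*
 * Insert a new inline backref or bump the refcount on an existing one.
 * Propagates -EAGAIN from the lookup when the extent item has no room
 * left, so the caller can fall back to a keyed backref.
 */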
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(root, path, iref,
					     refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

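/*
 * Insert a keyed (non-inline) backref item: a tree block ref for
 * metadata or an extent data ref for file data.
 */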
static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

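/*
 * Drop refs_to_drop references from a backref.  An inline ref is
 * adjusted in place, a keyed data ref is decremented or removed, and a
 * keyed tree block ref is deleted outright.
 */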
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(root, path, iref,
					     -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

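/* start and len are in bytes; the block layer wants 512-byte sectors */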
static int btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	/* Error condition is -ENOMEM */
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break; /* Logic errors or -ENOMEM; stop on any other error */

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	}
	return ret;
}

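/*
 * Add refs_to_add references to an existing extent item.  The inline
 * backref is tried first; on -EAGAIN the refcount is updated on the
 * extent item here and a separate keyed backref is inserted instead.
 */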
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret != -EAGAIN)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
out:
	btrfs_free_path(path);
	return ret;
}

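/*
 * Process a single delayed data ref: allocate the reserved file
 * extent, add a reference, or drop one, depending on the action.
 */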
static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	trace_run_delayed_data_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op)
			flags |= extent_op->flags_to_set;
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

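/*
 * Apply a delayed extent op (flag and/or key updates) directly to the
 * extent item.  With skinny metadata the lookup tries the
 * METADATA_ITEM_KEY form first and falls back to EXTENT_ITEM_KEY.
 */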
static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;
	int metadata = !extent_op->is_data;

	if (trans->aborted)
		return 0;

	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		metadata = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;

	if (metadata) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = extent_op->level;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = node->num_bytes;
	}

again:
	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		if (metadata) {
			if (path->slots[0] > 0) {
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == node->bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == node->num_bytes)
					ret = 0;
			}
			if (ret > 0) {
				btrfs_release_path(path);
				metadata = 0;

				key.objectid = node->bytenr;
				key.offset = node->num_bytes;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				goto again;
			}
		} else {
			err = -EIO;
			goto out;
		}
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

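/*
 * Process a single delayed tree block ref, mirroring the data ref
 * path: allocate the reserved block, add a reference, or drop one.
 */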
static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	ref = btrfs_delayed_node_to_tree_ref(node);
	trace_run_delayed_tree_ref(node, ref, node->action);

	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	ins.objectid = node->bytenr;
	if (skinny_metadata) {
		ins.offset = ref->level;
		ins.type = BTRFS_METADATA_ITEM_KEY;
	} else {
		ins.offset = node->num_bytes;
		ins.type = BTRFS_EXTENT_ITEM_KEY;
	}

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret = 0;

	if (trans->aborted) {
		if (insert_reserved)
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
		return 0;
	}

	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		trace_run_delayed_ref_head(node, head, node->action);

		if (insert_reserved) {
			btrfs_pin_extent(root, node->bytenr,
					 node->num_bytes, 1);
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
			}
		}
		return ret;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero while
	 * there are still pending delayed refs.
	 */
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
				rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}

/*
 * Returns the number of delayed refs processed on success, including
 * when called with an already aborted transaction.
 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
 */
static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * We need to try and merge add/drops of the same ref since we
		 * can run into issues with relocate dropping the implicit ref
		 * and then it being added back again before the drop can
		 * finish.  If we merged anything we need to re-loop so we can
		 * get a good ref.
		 */
		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
					 locked_ref);

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);

		if (ref && ref->seq &&
		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
			/*
			 * there are still refs with lower seq numbers in the
			 * process of being added. Don't run this ref yet.
			 */
			list_del_init(&locked_ref->cluster);
			btrfs_delayed_ref_unlock(locked_ref);
			locked_ref = NULL;
			delayed_refs->num_heads_ready++;
			spin_unlock(&delayed_refs->lock);
			cond_resched();
			spin_lock(&delayed_refs->lock);
			continue;
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		if (!ref) {
			/* All delayed refs have been processed, go ahead
			 * and send the head node to run_one_delayed_ref,
			 * so that any accounting fixes can happen
			 */
			ref = &locked_ref->node;

			if (extent_op && must_insert_reserved) {
				btrfs_free_delayed_extent_op(extent_op);
				extent_op = NULL;
			}

			if (extent_op) {
				spin_unlock(&delayed_refs->lock);

				ret = run_delayed_extent_op(trans, root,
							    ref, extent_op);
				btrfs_free_delayed_extent_op(extent_op);

				if (ret) {
					/*
					 * Need to reset must_insert_reserved if
					 * there was an error so the abort stuff
					 * can cleanup the reserved space
					 * properly.
					 */
					if (must_insert_reserved)
						locked_ref->must_insert_reserved = 1;
					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
					spin_lock(&delayed_refs->lock);
					btrfs_delayed_ref_unlock(locked_ref);
					return ret;
				}

				goto next;
			}
		}

		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;
		if (!btrfs_delayed_ref_is_head(ref)) {
			/*
			 * when we play the delayed ref, also correct the
			 * ref_mod on head
			 */
			switch (ref->action) {
			case BTRFS_ADD_DELAYED_REF:
			case BTRFS_ADD_DELAYED_EXTENT:
				locked_ref->node.ref_mod -= ref->ref_mod;
				break;
			case BTRFS_DROP_DELAYED_REF:
				locked_ref->node.ref_mod += ref->ref_mod;
				break;
			default:
				WARN_ON(1);
			}
		} else {
			list_del_init(&locked_ref->cluster);
		}
		spin_unlock(&delayed_refs->lock);

		ret = run_one_delayed_ref(trans, root, ref, extent_op,
					  must_insert_reserved);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			btrfs_delayed_ref_unlock(locked_ref);
			btrfs_put_delayed_ref(ref);
			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
			spin_lock(&delayed_refs->lock);
			return ret;
		}

		/*
		 * If this node is a head, that means all the refs in this head
		 * have been dealt with, and we will pick the next head to deal
		 * with, so we must unlock the head and drop it from the cluster
		 * list before we release it.
		 */
		if (btrfs_delayed_ref_is_head(ref)) {
			btrfs_delayed_ref_unlock(locked_ref);
			locked_ref = NULL;
		}
		btrfs_put_delayed_ref(ref);
		count++;
next:
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	return count;
}

#ifdef SCRAMBLE_DELAYED_REFS
/*
 * Normally delayed refs get processed in ascending bytenr order. This
 * correlates in most cases to the order added. To expose dependencies on this
 * order, we start to process the tree in the middle instead of the beginning
 */
static u64 find_middle(struct rb_root *root)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int alt = 1;
	u64 middle;
	u64 first = 0, last = 0;

	n = rb_first(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		first = entry->bytenr;
	}
	n = rb_last(root);
	if (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		last = entry->bytenr;
	}
	n = root->rb_node;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		middle = entry->bytenr;

		if (alt)
			n = n->rb_left;
		else
			n = n->rb_right;

		alt = 1 - alt;
	}
	return middle;
}
#endif

int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct qgroup_update *qgroup_update;
	int ret = 0;

	if (list_empty(&trans->qgroup_ref_list) !=
	    !trans->delayed_ref_elem.seq) {
		/* list without seq or seq without list */
		btrfs_err(fs_info,
			"qgroup accounting update error, list is%s empty, seq is %#x.%x",
			list_empty(&trans->qgroup_ref_list) ? "" : " not",
			(u32)(trans->delayed_ref_elem.seq >> 32),
			(u32)trans->delayed_ref_elem.seq);
		BUG();
	}

	if (!trans->delayed_ref_elem.seq)
		return 0;

	while (!list_empty(&trans->qgroup_ref_list)) {
		qgroup_update = list_first_entry(&trans->qgroup_ref_list,
						 struct qgroup_update, list);
		list_del(&qgroup_update->list);
		if (!ret)
			ret = btrfs_qgroup_account_ref(
					trans, fs_info, qgroup_update->node,
					qgroup_update->extent_op);
		kfree(qgroup_update);
	}

	btrfs_put_tree_mod_seq(fs_info, &trans->delayed_ref_elem);

	return ret;
}

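/*
 * Return 1 if the global ref sequence has moved outside the window
 * [seq, seq + count), i.e. other threads have run enough delayed refs
 * since @seq was sampled.
 */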
static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
		      int count)
{
	int val = atomic_read(&delayed_refs->ref_seq);

	if (val < seq || val >= seq + count)
		return 1;
	return 0;
}

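/*
 * Estimate how many extent tree leaves the given number of delayed ref
 * heads will touch when they are run.
 */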
static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
{
	u64 num_bytes;

	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
			     sizeof(struct btrfs_extent_inline_ref));
	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
		num_bytes += heads * sizeof(struct btrfs_tree_block_info);

	/*
	 * We don't ever fill up leaves all the way so multiply by 2 just to be
	 * closer to what we're really going to want to use.
	 */
	return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
}

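/*
 * Decide whether callers should help flush delayed refs: returns 1
 * once the worst-case metadata cost of the queued ref heads gets close
 * to what is left in the global block reserve.
 */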
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_rsv *global_rsv;
	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
	u64 num_bytes;
	int ret = 0;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	num_heads = heads_to_leaves(root, num_heads);
	if (num_heads > 1)
		num_bytes += (num_heads - 1) * root->leafsize;
	num_bytes <<= 1;
	global_rsv = &root->fs_info->global_block_rsv;

	/*
	 * If we can't allocate any more chunks lets make sure we have _lots_ of
	 * wiggle room since running delayed refs can create more delayed refs.
	 */
	if (global_rsv->space_info->full)
		num_bytes <<= 1;

	spin_lock(&global_rsv->lock);
	if (global_rsv->reserved <= num_bytes)
		ret = 1;
	spin_unlock(&global_rsv->lock);
	return ret;
}

/*
 * this starts processing the delayed reference count updates and
 * extent insertions we have queued up so far.  count can be
 * 0, which means to process everything in the tree at the start
 * of the run (but not newly added entries), or it can be some target
 * number you'd like to process.
 *
 * Returns 0 on success or if called with an aborted transaction
 * Returns <0 on error and aborts the transaction
 */
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long count)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct list_head cluster;
	int ret;
	u64 delayed_start;
	int run_all = count == (unsigned long)-1;
	int run_most = 0;
	int loops;

	/* We'll clean this up in btrfs_cleanup_transaction */
	if (trans->aborted)
		return 0;

	if (root == root->fs_info->extent_root)
		root = root->fs_info->tree_root;

	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

	delayed_refs = &trans->transaction->delayed_refs;
	INIT_LIST_HEAD(&cluster);
	if (count == 0) {
		count = delayed_refs->num_entries * 2;
		run_most = 1;
	}

	if (!run_all && !run_most) {
		int old;
		int seq = atomic_read(&delayed_refs->ref_seq);

progress:
		old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
		if (old) {
			DEFINE_WAIT(__wait);
			if (delayed_refs->flushing ||
			    !btrfs_should_throttle_delayed_refs(trans, root))
				return 0;

			prepare_to_wait(&delayed_refs->wait, &__wait,
					TASK_UNINTERRUPTIBLE);

			old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
			if (old) {
				schedule();
				finish_wait(&delayed_refs->wait, &__wait);

				if (!refs_newer(delayed_refs, seq, 256))
					goto progress;
				else
					return 0;
			} else {
				finish_wait(&delayed_refs->wait, &__wait);
				goto again;
			}
		}

	} else {
		atomic_inc(&delayed_refs->procs_running_refs);
	}

again:
	loops = 0;
	spin_lock(&delayed_refs->lock);

#ifdef SCRAMBLE_DELAYED_REFS
	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif

	while (1) {
		if (!(run_all || run_most) &&
		    !btrfs_should_throttle_delayed_refs(trans, root))
			break;

		/*
		 * go find something we can process in the rbtree.  We start at
		 * the beginning of the tree, and then build a cluster
		 * of refs to process starting at the first one we are able to
		 * lock
		 */
		delayed_start = delayed_refs->run_delayed_start;
		ret = btrfs_find_ref_cluster(trans, &cluster,
					     delayed_refs->run_delayed_start);
		if (ret)
			break;

		ret = run_clustered_refs(trans, root, &cluster);
		if (ret < 0) {
			btrfs_release_ref_cluster(&cluster);
			spin_unlock(&delayed_refs->lock);
			btrfs_abort_transaction(trans, root, ret);
			atomic_dec(&delayed_refs->procs_running_refs);
			wake_up(&delayed_refs->wait);
			return ret;
		}

		atomic_add(ret, &delayed_refs->ref_seq);

		count -= min_t(unsigned long, ret, count);

		if (count == 0)
			break;

		if (delayed_start >= delayed_refs->run_delayed_start) {
			if (loops == 0) {
				/*
				 * btrfs_find_ref_cluster looped. let's do one
				 * more cycle. if we don't run any delayed ref
				 * during that cycle (because we can't because
				 * all of them are blocked), bail out.
				 */
				loops = 1;
			} else {
				/*
				 * no runnable refs left, stop trying
				 */
				BUG_ON(run_all);
				break;
			}
		}
		if (ret) {
			/* refs were run, let's reset staleness detection */
			loops = 0;
		}
	}

	if (run_all) {
		if (!list_empty(&trans->new_bgs)) {
			spin_unlock(&delayed_refs->lock);
			btrfs_create_pending_block_groups(trans, root);
			spin_lock(&delayed_refs->lock);
		}

2775
		node = rb_first(&delayed_refs->root);
2776
		if (!node)
2777
			goto out;
2778
		count = (unsigned long)-1;
2779

2780 2781 2782 2783 2784
		while (node) {
			ref = rb_entry(node, struct btrfs_delayed_ref_node,
				       rb_node);
			if (btrfs_delayed_ref_is_head(ref)) {
				struct btrfs_delayed_ref_head *head;
2785

2786 2787 2788 2789
				head = btrfs_delayed_node_to_head(ref);
				atomic_inc(&ref->refs);

				spin_unlock(&delayed_refs->lock);
2790 2791 2792 2793
				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
2794 2795 2796 2797
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);

				btrfs_put_delayed_ref(ref);
2798
				cond_resched();
2799 2800 2801 2802 2803 2804 2805
				goto again;
			}
			node = rb_next(node);
		}
		spin_unlock(&delayed_refs->lock);
		schedule_timeout(1);
		goto again;
2806
	}
out:
	atomic_dec(&delayed_refs->procs_running_refs);
	smp_mb();
	if (waitqueue_active(&delayed_refs->wait))
		wake_up(&delayed_refs->wait);

	spin_unlock(&delayed_refs->lock);
	assert_qgroups_uptodate(trans);
	return 0;
}

int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 flags,
				int level, int is_data)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = flags;
	extent_op->update_flags = 1;
	extent_op->update_key = 0;
	extent_op->is_data = is_data ? 1 : 0;
	extent_op->level = level;

	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
					  num_bytes, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}

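/*
 * Check the delayed ref queue for references to this data extent from
 * other owners.  Returns 0 when the only pending ref is our own, 1
 * when the extent may be cross-referenced, -ENOENT when no pending
 * head exists, and -EAGAIN when the head mutex was contended and the
 * caller should retry.
 */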
static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_data_ref *data_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	int ret = 0;

	ret = -ENOENT;
	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	if (!mutex_trylock(&head->mutex)) {
		atomic_inc(&head->node.refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out_unlock;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	if (ref->bytenr != bytenr)
		goto out_unlock;

	ret = 1;
	if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
		goto out_unlock;

	data_ref = btrfs_delayed_node_to_data_ref(ref);

	node = rb_prev(node);
	if (node) {
		int seq = ref->seq;

		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (ref->bytenr == bytenr && ref->seq == seq)
			goto out_unlock;
	}

	if (data_ref->root != root->root_key.objectid ||
	    data_ref->objectid != objectid || data_ref->offset != offset)
		goto out_unlock;

	ret = 0;
out_unlock:
	mutex_unlock(&head->mutex);
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

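/*
 * Check the committed extent tree for references to this data extent
 * from other owners.  Returns 0 when the only ref belongs to
 * @root/@objectid/@offset, 1 when the extent may be shared, and
 * -ENOENT when no matching extent item exists.
 */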
static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 item_size;
	int ret;

	key.objectid = bytenr;
	key.offset = (u64)-1;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0); /* Corruption */

	ret = -ENOENT;
	if (path->slots[0] == 0)
		goto out;

	path->slots[0]--;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
		goto out;

	ret = 1;
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		goto out;
	}
#endif
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);

	if (item_size != sizeof(*ei) +
	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
		goto out;

	if (btrfs_extent_generation(leaf, ei) <=
	    btrfs_root_last_snapshot(&root->root_item))
		goto out;

	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
	if (btrfs_extent_inline_ref_type(leaf, iref) !=
	    BTRFS_EXTENT_DATA_REF_KEY)
		goto out;

	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		goto out;

	ret = 0;
out:
	return ret;
}

int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  u64 objectid, u64 offset, u64 bytenr)
{
	struct btrfs_path *path;
	int ret;
	int ret2;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOENT;

	do {
		ret = check_committed_ref(trans, root, path, objectid,
					  offset, bytenr);
		if (ret && ret != -ENOENT)
			goto out;

		ret2 = check_delayed_ref(trans, root, path, objectid,
					 offset, bytenr);
	} while (ret2 == -EAGAIN);

	if (ret2 && ret2 != -ENOENT) {
		ret = ret2;
		goto out;
	}

	if (ret != -ENOENT || ret2 != -ENOENT)
		ret = 0;
out:
	btrfs_free_path(path);
	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
		WARN_ON(ret > 0);
	return ret;
}

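/*
 * Walk every pointer in @buf and add or drop one reference on each
 * extent it refers to: file extents in a leaf, child blocks in a node.
 * This backs btrfs_inc_ref() and btrfs_dec_ref() below.
 */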
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
			   int full_backref, int inc, int for_cow)
{
	u64 bytenr;
	u64 num_bytes;
	u64 parent;
	u64 ref_root;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret = 0;
	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
			    u64, u64, u64, u64, u64, u64, int);

	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);

	if (!root->ref_cows && level == 0)
		return 0;

	if (inc)
		process_func = btrfs_inc_extent_ref;
	else
		process_func = btrfs_free_extent;

	if (full_backref)
		parent = buf->start;
	else
		parent = 0;

	for (i = 0; i < nritems; i++) {
		if (level == 0) {
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, key.objectid,
					   key.offset, for_cow);
			if (ret)
				goto fail;
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = btrfs_level_size(root, level - 1);
			ret = process_func(trans, root, bytenr, num_bytes,
					   parent, ref_root, level - 1, 0,
					   for_cow);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	return ret;
}

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref, int for_cow)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
}

int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int full_backref, int for_cow)
{
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
}

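/*
 * Write the in-memory block group item for @cache back to its slot in
 * the extent tree.
 */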
static int write_one_cache_group(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_block_group_cache *cache)
{
	int ret;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	unsigned long bi;
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
	if (ret < 0)
		goto fail;
	BUG_ON(ret); /* Corruption */

	leaf = path->nodes[0];
	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);
fail:
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	return 0;

}

static struct btrfs_block_group_cache *
next_block_group(struct btrfs_root *root,
		 struct btrfs_block_group_cache *cache)
{
	struct rb_node *node;
	spin_lock(&root->fs_info->block_group_cache_lock);
	node = rb_next(&cache->cache_node);
	btrfs_put_block_group(cache);
	if (node) {
		cache = rb_entry(node, struct btrfs_block_group_cache,
				 cache_node);
		btrfs_get_block_group(cache);
	} else
		cache = NULL;
	spin_unlock(&root->fs_info->block_group_cache_lock);
	return cache;
}

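/*
 * Get the free space cache inode for a block group ready for this
 * transaction: create or truncate it as needed and preallocate room
 * for the cache, sized from the block group.
 */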
static int cache_save_setup(struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path)
{
	struct btrfs_root *root = block_group->fs_info->tree_root;
	struct inode *inode = NULL;
	u64 alloc_hint = 0;
	int dcs = BTRFS_DC_ERROR;
	int num_pages = 0;
	int retries = 0;
	int ret = 0;

	/*
	 * If this block group is smaller than 100 megs don't bother caching the
	 * block group.
	 */
	if (block_group->key.offset < (100 * 1024 * 1024)) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		return 0;
	}

again:
	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
		ret = PTR_ERR(inode);
		btrfs_release_path(path);
		goto out;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retries);
		retries++;

		if (block_group->ro)
			goto out_free;

		ret = create_free_space_inode(root, trans, block_group, path);
		if (ret)
			goto out_free;
		goto again;
	}

	/* We've already setup this transaction, go ahead and exit */
	if (block_group->cache_generation == trans->transid &&
	    i_size_read(inode)) {
		dcs = BTRFS_DC_SETUP;
		goto out_put;
	}

	/*
	 * We want to set the generation to 0, that way if anything goes wrong
	 * from here on out we know not to trust this cache when we load up next
	 * time.
	 */
	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	WARN_ON(ret);

	if (i_size_read(inode) > 0) {
		ret = btrfs_check_trunc_cache_free_space(root,
					&root->fs_info->global_block_rsv);
		if (ret)
			goto out_put;

		ret = btrfs_truncate_free_space_cache(root, trans, inode);
		if (ret)
			goto out_put;
	}

	spin_lock(&block_group->lock);
	if (block_group->cached != BTRFS_CACHE_FINISHED ||
	    !btrfs_test_opt(root, SPACE_CACHE)) {
		/*
		 * don't bother trying to write stuff out _if_
		 * a) we're not cached, or
		 * b) we were mounted with the nospace_cache option.
		 */
		dcs = BTRFS_DC_WRITTEN;
		spin_unlock(&block_group->lock);
		goto out_put;
	}
	spin_unlock(&block_group->lock);

	/*
	 * Try to preallocate enough space based on how big the block group is.
	 * Keep in mind this has to include any pinned space which could end up
	 * taking up quite a bit since it's not folded into the other space
	 * cache.
	 */
	num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024);
	if (!num_pages)
		num_pages = 1;

	num_pages *= 16;
	num_pages *= PAGE_CACHE_SIZE;

	ret = btrfs_check_data_free_space(inode, num_pages);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
					      num_pages, num_pages,
					      &alloc_hint);
	if (!ret)
		dcs = BTRFS_DC_SETUP;
	btrfs_free_reserved_data_space(inode, num_pages);

out_put:
	iput(inode);
out_free:
	btrfs_release_path(path);
out:
	spin_lock(&block_group->lock);
	if (!ret && dcs == BTRFS_DC_SETUP)
		block_group->cache_generation = trans->transid;
	block_group->disk_cache_state = dcs;
	spin_unlock(&block_group->lock);

	return ret;
}

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	struct btrfs_block_group_cache *cache;
	int err = 0;
	struct btrfs_path *path;
	u64 last = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}
		err = cache_save_setup(cache, trans, path);
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}

	while (1) {
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			if (err) /* File system offline */
				goto out;
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
				btrfs_put_block_group(cache);
				goto again;
			}

			if (cache->dirty)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		if (cache->disk_cache_state == BTRFS_DC_SETUP)
			cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
		cache->dirty = 0;
		last = cache->key.objectid + cache->key.offset;

		err = write_one_cache_group(trans, root, path, cache);
		btrfs_put_block_group(cache);
		if (err) /* File system offline */
			goto out;
	}

	while (1) {
		/*
		 * I don't think this is needed since we're just marking our
		 * preallocated extent as written, but just in case it can't
		 * hurt.
		 */
		if (last == 0) {
			err = btrfs_run_delayed_refs(trans, root,
						     (unsigned long)-1);
			if (err) /* File system offline */
				goto out;
		}

		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		while (cache) {
			/*
			 * Really this shouldn't happen, but it could if we
			 * couldn't write the entire preallocated extent and
			 * splitting the extent resulted in a new block.
			 */
			if (cache->dirty) {
				btrfs_put_block_group(cache);
				goto again;
			}
			if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
				break;
			cache = next_block_group(root, cache);
		}
		if (!cache) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		err = btrfs_write_out_cache(root, trans, cache, path);

		/*
		 * If we didn't have an error then the cache state is still
		 * NEED_WRITE, so we can set it to WRITTEN.
		 */
		if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
			cache->disk_cache_state = BTRFS_DC_WRITTEN;
		last = cache->key.objectid + cache->key.offset;
		btrfs_put_block_group(cache);
	}
out:

	btrfs_free_path(path);
	return err;
}

int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	int readonly = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = 1;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}

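/*
 * Account new capacity against the space_info matching @flags,
 * allocating and initializing a fresh space_info if none exists yet.
 * The factor covers profiles that keep two copies of every byte.
 */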
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int i;
	int factor;
	int ret;

	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
		     BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	found = __find_space_info(info, flags);
	if (found) {
		spin_lock(&found->lock);
		found->total_bytes += total_bytes;
		found->disk_total += total_bytes * factor;
		found->bytes_used += bytes_used;
		found->disk_used += bytes_used * factor;
		found->full = 0;
		spin_unlock(&found->lock);
		*space_info = found;
		return 0;
	}
	found = kzalloc(sizeof(*found), GFP_NOFS);
	if (!found)
		return -ENOMEM;

	ret = percpu_counter_init(&found->total_bytes_pinned, 0);
	if (ret) {
		kfree(found);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&found->block_groups[i]);
	init_rwsem(&found->groups_sem);
	spin_lock_init(&found->lock);
	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	found->total_bytes = total_bytes;
	found->disk_total = total_bytes * factor;
	found->bytes_used = bytes_used;
	found->disk_used = bytes_used * factor;
	found->bytes_pinned = 0;
	found->bytes_reserved = 0;
	found->bytes_readonly = 0;
	found->bytes_may_use = 0;
	found->full = 0;
	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
	found->chunk_alloc = 0;
	found->flush = 0;
	init_waitqueue_head(&found->wait);
	*space_info = found;
	list_add_rcu(&found->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = found;
	return 0;
}

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits |= extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits |= extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

/*
 * returns target flags in extended format or 0 if restripe for this
 * chunk_type is not in progress
 *
 * should be called with either volume_mutex or balance_lock held
 */
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	u64 target = 0;

	if (!bctl)
		return 0;

	if (flags & BTRFS_BLOCK_GROUP_DATA &&
	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
	}

	return target;
}

/*
 * @flags: available profiles in extended format (see ctree.h)
 *
 * Returns reduced profile in chunk format.  If profile changing is in
 * progress (either running or paused) picks the target profile (if it's
 * already available), otherwise falls back to plain reducing.
 */
static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
{
	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	u64 num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;
	u64 target;
	u64 tmp;

	/*
	 * see if restripe for this chunk_type is in progress, if so
	 * try to reduce to the target profile
	 */
	spin_lock(&root->fs_info->balance_lock);
	target = get_restripe_target(root->fs_info, flags);
	if (target) {
		/* pick target profile only if it's already available */
		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
			spin_unlock(&root->fs_info->balance_lock);
			return extended_to_chunk(target);
		}
	}
	spin_unlock(&root->fs_info->balance_lock);

	/* First, mask out the RAID levels which aren't possible */
	if (num_devices == 1)
		flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
			   BTRFS_BLOCK_GROUP_RAID5);
	if (num_devices < 3)
		flags &= ~BTRFS_BLOCK_GROUP_RAID6;
	if (num_devices < 4)
		flags &= ~BTRFS_BLOCK_GROUP_RAID10;

	tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
		       BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
	flags &= ~tmp;

	if (tmp & BTRFS_BLOCK_GROUP_RAID6)
		tmp = BTRFS_BLOCK_GROUP_RAID6;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
		tmp = BTRFS_BLOCK_GROUP_RAID5;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
		tmp = BTRFS_BLOCK_GROUP_RAID10;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
		tmp = BTRFS_BLOCK_GROUP_RAID1;
	else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
		tmp = BTRFS_BLOCK_GROUP_RAID0;

	return extended_to_chunk(flags | tmp);
}

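/*
 * Fold the currently available allocation bits for the given block group
 * type into @flags.  The read is done under a seqlock retry loop so a
 * concurrent update of the avail_*_alloc_bits cannot be observed
 * half-applied.
 */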
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
	unsigned seq;

	do {
		seq = read_seqbegin(&root->fs_info->profiles_lock);

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			flags |= root->fs_info->avail_data_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
			flags |= root->fs_info->avail_system_alloc_bits;
		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
			flags |= root->fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&root->fs_info->profiles_lock, seq));

	return btrfs_reduce_alloc_profile(root, flags);
}

u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
	u64 flags;
	u64 ret;

	if (data)
		flags = BTRFS_BLOCK_GROUP_DATA;
	else if (root == root->fs_info->chunk_root)
		flags = BTRFS_BLOCK_GROUP_SYSTEM;
	else
		flags = BTRFS_BLOCK_GROUP_METADATA;

	ret = get_alloc_profile(root, flags);
	return ret;
}

/*
 * This will check the space that the inode allocates from to make sure we have
 * enough space for bytes.
 */
int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
{
	struct btrfs_space_info *data_sinfo;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 used;
	int ret = 0, committed = 0, alloc_chunk = 1;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, root->sectorsize);

	if (btrfs_is_free_space_inode(inode)) {
		committed = 1;
		ASSERT(current->journal_info);
	}

	data_sinfo = fs_info->data_sinfo;
	if (!data_sinfo)
		goto alloc;

again:
	/* make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
		data_sinfo->bytes_may_use;

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * if we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full && alloc_chunk) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);
alloc:
			alloc_target = btrfs_get_alloc_profile(root, 1);
			/*
			 * It is ugly that we don't call nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context, the common join transaction
			 * just increases the counter of the current
			 * transaction handle and doesn't try to acquire the
			 * trans_lock of the fs.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
					     alloc_target,
					     CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans, root);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else
					goto commit_trans;
			}

			if (!data_sinfo)
				data_sinfo = fs_info->data_sinfo;

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation don't bother committing the transaction.
		 */
		if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
					   bytes) < 0)
			committed = 1;
		spin_unlock(&data_sinfo->lock);

		/* commit the current transaction and try again */
commit_trans:
		if (!committed &&
		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
			committed = 1;

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			ret = btrfs_commit_transaction(trans, root);
			if (ret)
				return ret;
			goto again;
		}

		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	data_sinfo->bytes_may_use += bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return 0;
}

/*
 * Called if we need to clear a data reservation for this inode.
 */
void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_space_info *data_sinfo;

	/* make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, root->sectorsize);

	data_sinfo = root->fs_info->data_sinfo;
	spin_lock(&data_sinfo->lock);
	WARN_ON(data_sinfo->bytes_may_use < bytes);
	data_sinfo->bytes_may_use -= bytes;
	trace_btrfs_space_reservation(root->fs_info, "space_info",
				      data_sinfo->flags, bytes, 0);
	spin_unlock(&data_sinfo->lock);
}

static void force_metadata_allocation(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
			found->force_alloc = CHUNK_ALLOC_FORCE;
	}
	rcu_read_unlock();
}

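/*
 * Space the global reserve is assumed to need: twice its current size, a
 * deliberately conservative cushion so callers checking for overcommit or
 * chunk allocation leave room for the reserve to grow.
 */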
static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
{
	return (global->size << 1);
}

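/*
 * Decide whether a new chunk of this type should be allocated now.
 * CHUNK_ALLOC_FORCE always allocates; CHUNK_ALLOC_LIMITED only tops up a
 * small free-space cushion (about 1% of the FS); otherwise allocate once
 * roughly 80% of the existing space is in use.
 */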
static int should_alloc_chunk(struct btrfs_root *root,
			      struct btrfs_space_info *sinfo, int force)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
	u64 thresh;

	if (force == CHUNK_ALLOC_FORCE)
		return 1;

	/*
	 * We need to take into account the global rsv because for all intents
	 * and purposes it's used space.  Don't worry about locking the
	 * global_rsv, it doesn't change except when the transaction commits.
	 */
	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
		num_allocated += calc_global_rsv_need_space(global_rsv);

	/*
	 * in limited mode, we want to have some free space up to
	 * about 1% of the FS size.
	 */
	if (force == CHUNK_ALLOC_LIMITED) {
		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
		thresh = max_t(u64, 64 * 1024 * 1024,
			       div_factor_fine(thresh, 1));

		if (num_bytes - num_allocated < thresh)
			return 1;
	}

	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
		return 0;
	return 1;
}

static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
{
	u64 num_dev;

	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
		    BTRFS_BLOCK_GROUP_RAID0 |
		    BTRFS_BLOCK_GROUP_RAID5 |
		    BTRFS_BLOCK_GROUP_RAID6))
		num_dev = root->fs_info->fs_devices->rw_devices;
	else if (type & BTRFS_BLOCK_GROUP_RAID1)
		num_dev = 2;
	else
		num_dev = 1;	/* DUP or single */

	/* metadata for updating devices and chunk tree */
	return btrfs_calc_trans_metadata_size(root, num_dev + 1);
}

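/*
 * Make sure the SYSTEM space info can absorb the chunk tree and device
 * updates that allocating a chunk of @type will generate, allocating a
 * fresh SYSTEM chunk first if it cannot.
 */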
static void check_system_chunk(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, u64 type)
{
	struct btrfs_space_info *info;
	u64 left;
	u64 thresh;

	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	spin_lock(&info->lock);
	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
		info->bytes_reserved - info->bytes_readonly;
	spin_unlock(&info->lock);

	thresh = get_system_chunk_thresh(root, type);
	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
			left, thresh, type);
		dump_space_info(info, 0, 0);
	}

	if (left < thresh) {
		u64 flags;

		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
		btrfs_alloc_chunk(trans, root, flags);
	}
}

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int wait_for_alloc = 0;
	int ret = 0;

	/* Don't re-enter if we're already allocating a chunk */
	if (trans->allocating_chunk)
		return -ENOSPC;

	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret); /* -ENOMEM */
	}
	BUG_ON(!space_info); /* Logic error */

again:
	spin_lock(&space_info->lock);
	if (force < space_info->force_alloc)
		force = space_info->force_alloc;
	if (space_info->full) {
		if (should_alloc_chunk(extent_root, space_info, force))
			ret = -ENOSPC;
		else
			ret = 0;
		spin_unlock(&space_info->lock);
		return ret;
	}

	if (!should_alloc_chunk(extent_root, space_info, force)) {
		spin_unlock(&space_info->lock);
		return 0;
	} else if (space_info->chunk_alloc) {
		wait_for_alloc = 1;
	} else {
		space_info->chunk_alloc = 1;
	}

	spin_unlock(&space_info->lock);

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * The chunk_mutex is held throughout the entirety of a chunk
	 * allocation, so once we've acquired the chunk_mutex we know that the
	 * other guy is done and we need to recheck and see if we should
	 * allocate.
	 */
	if (wait_for_alloc) {
		mutex_unlock(&fs_info->chunk_mutex);
		wait_for_alloc = 0;
		goto again;
	}

	trans->allocating_chunk = true;

	/*
	 * If we have mixed data/metadata chunks we want to make sure we keep
	 * allocating mixed chunks instead of individual chunks.
	 */
	if (btrfs_mixed_space_info(space_info))
		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);

	/*
	 * if we're doing a data chunk, go ahead and make sure that
	 * we keep a reasonable number of metadata chunks allocated in the
	 * FS as well.
	 */
	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
		fs_info->data_chunk_allocations++;
		if (!(fs_info->data_chunk_allocations %
		      fs_info->metadata_ratio))
			force_metadata_allocation(fs_info);
	}

	/*
	 * Check if we have enough space in SYSTEM chunk because we may need
	 * to update devices.
	 */
	check_system_chunk(trans, extent_root, flags);

	ret = btrfs_alloc_chunk(trans, extent_root, flags);
	trans->allocating_chunk = false;

	spin_lock(&space_info->lock);
	if (ret < 0 && ret != -ENOSPC)
		goto out;
	if (ret)
		space_info->full = 1;
	else
		ret = 1;

	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
out:
	space_info->chunk_alloc = 0;
	spin_unlock(&space_info->lock);
	mutex_unlock(&fs_info->chunk_mutex);
	return ret;
}

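/*
 * Return 1 if a reservation of @bytes would still fit after allowing this
 * space info to overcommit into the profile-adjusted share of unallocated
 * device space, 0 otherwise.
 */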
static int can_overcommit(struct btrfs_root *root,
			  struct btrfs_space_info *space_info, u64 bytes,
			  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	u64 profile = btrfs_get_alloc_profile(root, 0);
	u64 space_size;
	u64 avail;
	u64 used;

	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly;

	/*
	 * We only want to allow over committing if we have lots of actual space
	 * free, but if we don't have enough space to handle the global reserve
	 * space then we could end up having a real enospc problem when trying
	 * to allocate a chunk or some other such important allocation.
	 */
	spin_lock(&global_rsv->lock);
	space_size = calc_global_rsv_need_space(global_rsv);
	spin_unlock(&global_rsv->lock);
	if (used + space_size >= space_info->total_bytes)
		return 0;

	used += space_info->bytes_may_use;

	spin_lock(&root->fs_info->free_chunk_lock);
	avail = root->fs_info->free_chunk_space;
	spin_unlock(&root->fs_info->free_chunk_lock);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math
	 */
	if (profile & (BTRFS_BLOCK_GROUP_DUP |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID10))
		avail >>= 1;

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * half of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}

static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
					 unsigned long nr_pages)
{
	struct super_block *sb = root->fs_info->sb;

	if (down_read_trylock(&sb->s_umount)) {
		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
		up_read(&sb->s_umount);
	} else {
		/*
		 * We needn't worry about the filesystem going from r/w to r/o
		 * though we don't acquire ->s_umount mutex, because the
		 * filesystem should guarantee the delalloc inodes list be
		 * empty after the filesystem is readonly (all dirty pages are
		 * written to the disk).
		 */
		btrfs_start_all_delalloc_inodes(root->fs_info, 0);
		if (!current->journal_info)
			btrfs_wait_all_ordered_extents(root->fs_info);
	}
}

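/*
 * Translate @to_reclaim bytes into a number of metadata items to flush,
 * rounding up so we always reclaim at least one item's worth.
 */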
static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
{
	u64 bytes;
	int nr;

	bytes = btrfs_calc_trans_metadata_size(root, 1);
	nr = (int)div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	(256 * 1024)

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
			    bool wait_ordered)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_space_info *space_info;
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 max_reclaim;
	long time_left;
	unsigned long nr_pages;
	int loops;
	enum btrfs_reserve_flush_enum flush;

	/* Calculate the number of pages we need to flush for this reservation */
	to_reclaim = calc_reclaim_items_nr(root, to_reclaim);
	to_reclaim *= EXTENT_SIZE_PER_ITEM;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	block_rsv = &root->fs_info->delalloc_block_rsv;
	space_info = block_rsv->space_info;

	delalloc_bytes = percpu_counter_sum_positive(
						&root->fs_info->delalloc_bytes);
	if (delalloc_bytes == 0) {
		if (trans)
			return;
		if (wait_ordered)
			btrfs_wait_all_ordered_extents(root->fs_info);
		return;
	}

	loops = 0;
	while (delalloc_bytes && loops < 3) {
		max_reclaim = min(delalloc_bytes, to_reclaim);
		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
		btrfs_writeback_inodes_sb_nr(root, nr_pages);
		/*
		 * We need to wait for the async pages to actually start before
		 * we do anything.
		 */
		wait_event(root->fs_info->async_submit_wait,
			   !atomic_read(&root->fs_info->async_delalloc_pages));

		if (!trans)
			flush = BTRFS_RESERVE_FLUSH_ALL;
		else
			flush = BTRFS_RESERVE_NO_FLUSH;
		spin_lock(&space_info->lock);
		if (can_overcommit(root, space_info, orig, flush)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_all_ordered_extents(root->fs_info);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}
		delalloc_bytes = percpu_counter_sum_positive(
						&root->fs_info->delalloc_bytes);
	}
}

/**
 * may_commit_transaction - possibly commit the transaction if it's ok to
 * @root - the root we're allocating for
 * @bytes - the number of bytes we want to reserve
 * @force - force the commit
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does.  Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_root *root,
				  struct btrfs_space_info *space_info,
				  u64 bytes, int force)
{
	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
	struct btrfs_trans_handle *trans;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	if (force)
		goto commit;

	/* See if there is enough pinned space to make this reservation */
	spin_lock(&space_info->lock);
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes) >= 0) {
		spin_unlock(&space_info->lock);
		goto commit;
	}
	spin_unlock(&space_info->lock);

	/*
	 * See if there is some space in the delayed insertion reservation for
	 * this reservation.
	 */
	if (space_info != delayed_rsv->space_info)
		return -ENOSPC;

	spin_lock(&space_info->lock);
	spin_lock(&delayed_rsv->lock);
	if (percpu_counter_compare(&space_info->total_bytes_pinned,
				   bytes - delayed_rsv->size) >= 0) {
		spin_unlock(&delayed_rsv->lock);
		spin_unlock(&space_info->lock);
		return -ENOSPC;
	}
	spin_unlock(&delayed_rsv->lock);
	spin_unlock(&space_info->lock);

commit:
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return -ENOSPC;

	return btrfs_commit_transaction(trans, root);
}

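/*
 * Flush states are tried in ascending order by reserve_metadata_bytes():
 * cheap delayed-item flushing first, then delalloc writeback, then chunk
 * allocation, with a transaction commit only as the last resort.
 */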
enum flush_state {
	FLUSH_DELAYED_ITEMS_NR	=	1,
	FLUSH_DELAYED_ITEMS	=	2,
	FLUSH_DELALLOC		=	3,
	FLUSH_DELALLOC_WAIT	=	4,
	ALLOC_CHUNK		=	5,
	COMMIT_TRANS		=	6,
};

static int flush_space(struct btrfs_root *root,
		       struct btrfs_space_info *space_info, u64 num_bytes,
		       u64 orig_bytes, int state)
{
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, root, nr);
		btrfs_end_transaction(trans, root);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(root, num_bytes, orig_bytes,
				state == FLUSH_DELALLOC_WAIT);
		break;
	case ALLOC_CHUNK:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     btrfs_get_alloc_profile(root, 0),
				     CHUNK_ALLOC_NO_FORCE);
		btrfs_end_transaction(trans, root);
		if (ret == -ENOSPC)
			ret = 0;
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	return ret;
}

/**
 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
 * @root - the root we're allocating for
 * @block_rsv - the block_rsv we're allocating for
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int reserve_metadata_bytes(struct btrfs_root *root,
				  struct btrfs_block_rsv *block_rsv,
				  u64 orig_bytes,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;
	u64 used;
	u64 num_bytes = orig_bytes;
	int flush_state = FLUSH_DELAYED_ITEMS_NR;
	int ret = 0;
	bool flushing = false;

again:
	ret = 0;
	spin_lock(&space_info->lock);
	/*
	 * We only want to wait if somebody other than us is flushing and we
	 * are actually allowed to flush all things.
	 */
	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
	       space_info->flush) {
		spin_unlock(&space_info->lock);
		/*
		 * If we have a trans handle we can't wait because the flusher
		 * may have to commit the transaction, which would mean we would
		 * deadlock since we are waiting for the flusher to finish, but
		 * hold the current transaction open.
		 */
		if (current->journal_info)
			return -EAGAIN;
		ret = wait_event_killable(space_info->wait, !space_info->flush);
		/* Must have been killed, return */
		if (ret)
			return -EINTR;

		spin_lock(&space_info->lock);
	}

	ret = -ENOSPC;
	used = space_info->bytes_used + space_info->bytes_reserved +
		space_info->bytes_pinned + space_info->bytes_readonly +
		space_info->bytes_may_use;

	/*
	 * The idea here is that if we've not already over-reserved the
	 * block group we can go ahead and save our reservation first and
	 * then start flushing if we need to.  Otherwise if we've already
	 * overcommitted lets start flushing stuff first and then come back
	 * and try to make our reservation.
	 */
	if (used <= space_info->total_bytes) {
		if (used + orig_bytes <= space_info->total_bytes) {
			space_info->bytes_may_use += orig_bytes;
			trace_btrfs_space_reservation(root->fs_info,
				"space_info", space_info->flags, orig_bytes, 1);
			ret = 0;
		} else {
			/*
			 * Ok set num_bytes to orig_bytes since we aren't
			 * overcommitted, this way we only try and reclaim what
			 * we need.
			 */
			num_bytes = orig_bytes;
		}
	} else {
		/*
		 * Ok we're over committed, set num_bytes to the overcommitted
		 * amount plus the amount of bytes that we need for this
		 * reservation.
		 */
		num_bytes = used - space_info->total_bytes +
			(orig_bytes * 2);
	}

	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
		space_info->bytes_may_use += orig_bytes;
		trace_btrfs_space_reservation(root->fs_info, "space_info",
					      space_info->flags, orig_bytes,
					      1);
		ret = 0;
	}

	/*
	 * Couldn't make our reservation, save our place so while we're trying
	 * to reclaim space we can actually use it instead of somebody else
	 * stealing it from us.
	 *
	 * We make the other tasks wait for the flush only when we can flush
	 * all things.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		flushing = true;
		space_info->flush = 1;
	}

	spin_unlock(&space_info->lock);

	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		goto out;

	ret = flush_space(root, space_info, num_bytes, orig_bytes,
			  flush_state);
	flush_state++;

	/*
	 * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
	 * would happen. So skip delalloc flush.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
	    (flush_state == FLUSH_DELALLOC ||
	     flush_state == FLUSH_DELALLOC_WAIT))
		flush_state = ALLOC_CHUNK;

	if (!ret)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
		 flush_state < COMMIT_TRANS)
		goto again;
	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
		 flush_state <= COMMIT_TRANS)
		goto again;

out:
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		struct btrfs_block_rsv *global_rsv =
			&root->fs_info->global_block_rsv;

		if (block_rsv != global_rsv &&
		    !block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC)
		trace_btrfs_space_reservation(root->fs_info,
					      "space_info:enospc",
					      space_info->flags, orig_bytes, 1);
	if (flushing) {
		spin_lock(&space_info->lock);
		space_info->flush = 0;
		wake_up_all(&space_info->wait);
		spin_unlock(&space_info->lock);
	}
	return ret;
}

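/*
 * Pick the block reserve that allocations for @root should be charged to:
 * the transaction's reserve for COW-able roots (and for csum/uuid tree
 * updates), then the root's own reserve, falling back to the empty rsv.
 */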
static struct btrfs_block_rsv *get_block_rsv(
					const struct btrfs_trans_handle *trans,
					const struct btrfs_root *root)
{
	struct btrfs_block_rsv *block_rsv = NULL;

	if (root->ref_cows)
		block_rsv = trans->block_rsv;

	if (root == root->fs_info->csum_root && trans->adding_csums)
		block_rsv = trans->block_rsv;

	if (root == root->fs_info->uuid_root)
		block_rsv = trans->block_rsv;

	if (!block_rsv)
		block_rsv = root->block_rsv;

	if (!block_rsv)
		block_rsv = &root->fs_info->empty_block_rsv;

	return block_rsv;
}

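/*
 * Consume @num_bytes from @block_rsv's reserved pool, failing with -ENOSPC
 * rather than letting the reserve go negative.
 */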
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes)
{
	int ret = -ENOSPC;
	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved >= num_bytes) {
		block_rsv->reserved -= num_bytes;
		if (block_rsv->reserved < block_rsv->size)
			block_rsv->full = 0;
		ret = 0;
	}
	spin_unlock(&block_rsv->lock);
	return ret;
}

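/*
 * Return @num_bytes to @block_rsv.  If @update_size the reserve's target
 * size grows along with it, otherwise the reserve may simply become full.
 */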
static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
				u64 num_bytes, int update_size)
{
	spin_lock(&block_rsv->lock);
	block_rsv->reserved += num_bytes;
	if (update_size)
		block_rsv->size += num_bytes;
	else if (block_rsv->reserved >= block_rsv->size)
		block_rsv->full = 1;
	spin_unlock(&block_rsv->lock);
}

int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
			     struct btrfs_block_rsv *dest, u64 num_bytes,
			     int min_factor)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != dest->space_info)
		return -ENOSPC;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, min_factor);
	if (global_rsv->reserved < min_bytes + num_bytes) {
		spin_unlock(&global_rsv->lock);
		return -ENOSPC;
	}
	global_rsv->reserved -= num_bytes;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	block_rsv_add_bytes(dest, num_bytes, 1);
	return 0;
}

static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
				    struct btrfs_block_rsv *block_rsv,
				    struct btrfs_block_rsv *dest, u64 num_bytes)
{
	struct btrfs_space_info *space_info = block_rsv->space_info;

	spin_lock(&block_rsv->lock);
	if (num_bytes == (u64)-1)
		num_bytes = block_rsv->size;
	block_rsv->size -= num_bytes;
	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	} else {
		num_bytes = 0;
	}
	spin_unlock(&block_rsv->lock);

	if (num_bytes > 0) {
		if (dest) {
			spin_lock(&dest->lock);
			if (!dest->full) {
				u64 bytes_to_add;

				bytes_to_add = dest->size - dest->reserved;
				bytes_to_add = min(num_bytes, bytes_to_add);
				dest->reserved += bytes_to_add;
				if (dest->reserved >= dest->size)
					dest->full = 1;
				num_bytes -= bytes_to_add;
			}
			spin_unlock(&dest->lock);
		}
		if (num_bytes) {
			spin_lock(&space_info->lock);
			space_info->bytes_may_use -= num_bytes;
			trace_btrfs_space_reservation(fs_info, "space_info",
					space_info->flags, num_bytes, 0);
			spin_unlock(&space_info->lock);
		}
	}
}

static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
				   struct btrfs_block_rsv *dst, u64 num_bytes)
{
	int ret;

	ret = block_rsv_use_bytes(src, num_bytes);
	if (ret)
		return ret;

	block_rsv_add_bytes(dst, num_bytes, 1);
	return 0;
}

void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
{
	memset(rsv, 0, sizeof(*rsv));
	spin_lock_init(&rsv->lock);
	rsv->type = type;
}

struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
					      unsigned short type)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
	if (!block_rsv)
		return NULL;

	btrfs_init_block_rsv(block_rsv, type);
	block_rsv->space_info = __find_space_info(fs_info,
						  BTRFS_BLOCK_GROUP_METADATA);
	return block_rsv;
}

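/*
 * Typical life cycle (illustrative only): a reserve from
 * btrfs_alloc_block_rsv() is filled via btrfs_block_rsv_add(), trimmed or
 * emptied with btrfs_block_rsv_release(), and finally freed below; released
 * bytes flow back to the space info or top up the global reserve.
 */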
void btrfs_free_block_rsv(struct btrfs_root *root,
			  struct btrfs_block_rsv *rsv)
{
	if (!rsv)
		return;
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	kfree(rsv);
}

int btrfs_block_rsv_add(struct btrfs_root *root,
			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
			enum btrfs_reserve_flush_enum flush)
{
	int ret;

	if (num_bytes == 0)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 1);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_check(struct btrfs_root *root,
			  struct btrfs_block_rsv *block_rsv, int min_factor)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = div_factor(block_rsv->size, min_factor);
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	spin_unlock(&block_rsv->lock);

	return ret;
}

int btrfs_block_rsv_refill(struct btrfs_root *root,
			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
			   enum btrfs_reserve_flush_enum flush)
{
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	if (!block_rsv)
		return 0;

	spin_lock(&block_rsv->lock);
	num_bytes = min_reserved;
	if (block_rsv->reserved >= num_bytes)
		ret = 0;
	else
		num_bytes -= block_rsv->reserved;
	spin_unlock(&block_rsv->lock);

	if (!ret)
		return 0;

	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
	if (!ret) {
		block_rsv_add_bytes(block_rsv, num_bytes, 0);
		return 0;
	}

	return ret;
}

int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
			    struct btrfs_block_rsv *dst_rsv,
			    u64 num_bytes)
{
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_block_rsv_release(struct btrfs_root *root,
			     struct btrfs_block_rsv *block_rsv,
			     u64 num_bytes)
{
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	if (global_rsv->full || global_rsv == block_rsv ||
	    block_rsv->space_info != global_rsv->space_info)
		global_rsv = NULL;
	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
				num_bytes);
}

/*
 * helper to calculate size of global block reservation.
 * the desired value is sum of space used by extent tree,
 * checksum tree and root tree
 */
static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *sinfo;
	u64 num_bytes;
	u64 meta_used;
	u64 data_used;
	int csum_size = btrfs_super_csum_size(fs_info->super_copy);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
	spin_lock(&sinfo->lock);
	data_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	spin_lock(&sinfo->lock);
	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
		data_used = 0;
	meta_used = sinfo->bytes_used;
	spin_unlock(&sinfo->lock);

	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
		    csum_size * 2;
	num_bytes += div64_u64(data_used + meta_used, 50);

	if (num_bytes * 3 > meta_used)
		num_bytes = div64_u64(meta_used, 3);

	return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}

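/*
 * Recompute the global reserve's target size (clamped at 512M) and top it
 * up from, or give surplus back to, the metadata space info.
 */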
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
	struct btrfs_space_info *sinfo = block_rsv->space_info;
	u64 num_bytes;

	num_bytes = calc_global_metadata_size(fs_info);

	spin_lock(&sinfo->lock);
	spin_lock(&block_rsv->lock);

	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);

	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
		    sinfo->bytes_reserved + sinfo->bytes_readonly +
		    sinfo->bytes_may_use;

	if (sinfo->total_bytes > num_bytes) {
		num_bytes = sinfo->total_bytes - num_bytes;
		block_rsv->reserved += num_bytes;
		sinfo->bytes_may_use += num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
				      sinfo->flags, num_bytes, 1);
	}

	if (block_rsv->reserved >= block_rsv->size) {
		num_bytes = block_rsv->reserved - block_rsv->size;
		sinfo->bytes_may_use -= num_bytes;
		trace_btrfs_space_reservation(fs_info, "space_info",
				      sinfo->flags, num_bytes, 0);
		block_rsv->reserved = block_rsv->size;
		block_rsv->full = 1;
	}

	spin_unlock(&block_rsv->lock);
	spin_unlock(&sinfo->lock);
}

static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
	fs_info->chunk_block_rsv.space_info = space_info;

	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	fs_info->global_block_rsv.space_info = space_info;
	fs_info->delalloc_block_rsv.space_info = space_info;
	fs_info->trans_block_rsv.space_info = space_info;
	fs_info->empty_block_rsv.space_info = space_info;
	fs_info->delayed_block_rsv.space_info = space_info;

	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
	if (fs_info->quota_root)
		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;

	update_global_block_rsv(fs_info);
}

static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
{
	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
				(u64)-1);
	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
	WARN_ON(fs_info->trans_block_rsv.size > 0);
	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
	WARN_ON(fs_info->chunk_block_rsv.size > 0);
	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
	WARN_ON(fs_info->delayed_block_rsv.size > 0);
	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
}

void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	if (!trans->block_rsv)
		return;

	if (!trans->bytes_reserved)
		return;

	trace_btrfs_space_reservation(root->fs_info, "transaction",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
	trans->bytes_reserved = 0;
}

/* Can only return 0 or -ENOSPC */
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
				  struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;

	/*
	 * We need to hold space in order to delete our orphan item once we've
	 * added it, so this takes the reservation so we can release it later
	 * when we are truly done with the orphan item.
	 */
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 1);
	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
}

void btrfs_orphan_release_metadata(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	trace_btrfs_space_reservation(root->fs_info, "orphan",
				      btrfs_ino(inode), num_bytes, 0);
	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}

/*
 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
 * root: the root of the parent directory
 * rsv: block reservation
 * items: the number of items that we need to reserve
 * qgroup_reserved: used to return the reserved size in qgroup
 *
 * This function is used to reserve the space for snapshot/subvolume
 * creation and deletion. Those operations are different from the
 * common file/directory operations: they change two fs/file trees
 * and the root tree, and the number of items that the qgroup reserves
 * differs from the free space reservation. So we can not use
 * the space reservation mechanism in start_transaction().
 */
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
				     struct btrfs_block_rsv *rsv,
				     int items,
				     u64 *qgroup_reserved,
				     bool use_global_rsv)
{
	u64 num_bytes;
	int ret;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;

	if (root->fs_info->quota_enabled) {
		/* One for parent inode, two for dir entries */
		num_bytes = 3 * root->leafsize;
		ret = btrfs_qgroup_reserve(root, num_bytes);
		if (ret)
			return ret;
	} else {
		num_bytes = 0;
	}

	*qgroup_reserved = num_bytes;

	num_bytes = btrfs_calc_trans_metadata_size(root, items);
	rsv->space_info = __find_space_info(root->fs_info,
					    BTRFS_BLOCK_GROUP_METADATA);
	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
				  BTRFS_RESERVE_FLUSH_ALL);

	if (ret == -ENOSPC && use_global_rsv)
		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);

	if (ret) {
		if (*qgroup_reserved)
			btrfs_qgroup_free(root, *qgroup_reserved);
	}

	return ret;
}

void btrfs_subvolume_release_metadata(struct btrfs_root *root,
				      struct btrfs_block_rsv *rsv,
				      u64 qgroup_reserved)
{
	btrfs_block_rsv_release(root, rsv, (u64)-1);
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
}

/**
 * drop_outstanding_extent - drop an outstanding extent
 * @inode: the inode we're dropping the extent for
 *
 * This is called when we are freeing up an outstanding extent, either called
 * after an error or after an extent is written.  This will return the number of
 * reserved extents that need to be freed.  This must be called with
 * BTRFS_I(inode)->lock held.
 */
static unsigned drop_outstanding_extent(struct inode *inode)
{
	unsigned drop_inode_space = 0;
	unsigned dropped_extents = 0;

	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
	BTRFS_I(inode)->outstanding_extents--;

	if (BTRFS_I(inode)->outstanding_extents == 0 &&
	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			       &BTRFS_I(inode)->runtime_flags))
		drop_inode_space = 1;

	/*
	 * If we have the same or more outstanding extents than we have
	 * reserved then we need to leave the reserved extents count alone.
	 */
	if (BTRFS_I(inode)->outstanding_extents >=
	    BTRFS_I(inode)->reserved_extents)
		return drop_inode_space;

	dropped_extents = BTRFS_I(inode)->reserved_extents -
		BTRFS_I(inode)->outstanding_extents;
	BTRFS_I(inode)->reserved_extents -= dropped_extents;
	return dropped_extents + drop_inode_space;
}

/**
 * calc_csum_metadata_size - return the amount of metadata space that must be
 *	reserved/free'd for the given bytes.
 * @inode: the inode we're manipulating
 * @num_bytes: the number of bytes in question
 * @reserve: 1 if we are reserving space, 0 if we are freeing space
 *
 * This adjusts the number of csum_bytes in the inode and then returns the
 * correct amount of metadata that must either be reserved or freed.  We
 * calculate how many checksums we can fit into one leaf and then divide the
 * number of bytes that will need to be checksummed by this value to figure
 * out how many checksums will be required.  If we are adding bytes then the
 * number may go up and we will return the number of additional bytes that
 * must be reserved.  If it is going down we will return the number of bytes
 * that must be freed.
 *
 * This must be called with BTRFS_I(inode)->lock held.
 */
static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
				   int reserve)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 csum_size;
	int num_csums_per_leaf;
	int num_csums;
	int old_csums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
	    BTRFS_I(inode)->csum_bytes == 0)
		return 0;

	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	if (reserve)
		BTRFS_I(inode)->csum_bytes += num_bytes;
	else
		BTRFS_I(inode)->csum_bytes -= num_bytes;
	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
	num_csums_per_leaf = (int)div64_u64(csum_size,
					    sizeof(struct btrfs_csum_item) +
					    sizeof(struct btrfs_disk_key));
	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
	num_csums = num_csums + num_csums_per_leaf - 1;
	num_csums = num_csums / num_csums_per_leaf;

	old_csums = old_csums + num_csums_per_leaf - 1;
	old_csums = old_csums / num_csums_per_leaf;

	/* No change, no need to reserve more */
	if (old_csums == num_csums)
		return 0;

	if (reserve)
		return btrfs_calc_trans_metadata_size(root,
						      num_csums - old_csums);

	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4947
}
Y
4949 4950 4951 4952
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4953
	u64 to_reserve = 0;
4954
	u64 csum_bytes;
4955
	unsigned nr_extents = 0;
4956
	int extra_reserve = 0;
M
Miao Xie 已提交
4957
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
4958
	int ret = 0;
4959
	bool delalloc_lock = true;
4960 4961
	u64 to_free = 0;
	unsigned dropped;
4962

4963 4964 4965 4966 4967 4968
	/* If we are a free space inode we need to not flush since we will be in
	 * the middle of a transaction commit.  We also don't need the delalloc
	 * mutex since we won't race with anybody.  We need this mostly to make
	 * lockdep shut its filthy mouth.
	 */
	if (btrfs_is_free_space_inode(inode)) {
M
Miao Xie 已提交
4969
		flush = BTRFS_RESERVE_NO_FLUSH;
4970 4971
		delalloc_lock = false;
	}
4972

M
Miao Xie 已提交
4973 4974
	if (flush != BTRFS_RESERVE_NO_FLUSH &&
	    btrfs_transaction_in_commit(root->fs_info))
4975
		schedule_timeout(1);
4976

4977 4978 4979
	if (delalloc_lock)
		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);

4980
	num_bytes = ALIGN(num_bytes, root->sectorsize);
4981

4982 4983 4984 4985
	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;

	if (BTRFS_I(inode)->outstanding_extents >
4986
	    BTRFS_I(inode)->reserved_extents)
4987 4988
		nr_extents = BTRFS_I(inode)->outstanding_extents -
			BTRFS_I(inode)->reserved_extents;
4989

4990 4991 4992 4993
	/*
	 * Add an item to reserve for updating the inode when we complete the
	 * delalloc io.
	 */
4994 4995
	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
		      &BTRFS_I(inode)->runtime_flags)) {
4996
		nr_extents++;
4997
		extra_reserve = 1;
4998
	}
4999 5000

	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5001
	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5002
	csum_bytes = BTRFS_I(inode)->csum_bytes;
5003
	spin_unlock(&BTRFS_I(inode)->lock);
5004

5005
	if (root->fs_info->quota_enabled) {
5006 5007
		ret = btrfs_qgroup_reserve(root, num_bytes +
					   nr_extents * root->leafsize);
5008 5009 5010
		if (ret)
			goto out_fail;
	}
5011

5012 5013 5014
	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
	if (unlikely(ret)) {
		if (root->fs_info->quota_enabled)
5015 5016
			btrfs_qgroup_free(root, num_bytes +
						nr_extents * root->leafsize);
5017
		goto out_fail;
5018
	}
5019

5020 5021
	spin_lock(&BTRFS_I(inode)->lock);
	if (extra_reserve) {
5022 5023
		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
			&BTRFS_I(inode)->runtime_flags);
5024 5025 5026 5027
		nr_extents--;
	}
	BTRFS_I(inode)->reserved_extents += nr_extents;
	spin_unlock(&BTRFS_I(inode)->lock);
5028 5029 5030

	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5031

J
Josef Bacik 已提交
5032
	if (to_reserve)
5033
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
J
Josef Bacik 已提交
5034
					      btrfs_ino(inode), to_reserve, 1);
5035 5036 5037
	block_rsv_add_bytes(block_rsv, to_reserve, 1);

	return 0;
5038 5039 5040 5041 5042 5043 5044 5045 5046

out_fail:
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);
	/*
	 * If the inodes csum_bytes is the same as the original
	 * csum_bytes then we know we haven't raced with any free()ers
	 * so we can just reduce our inodes csum bytes and carry on.
	 */
5047
	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5048
		calc_csum_metadata_size(inode, num_bytes, 0);
5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088
	} else {
		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
		u64 bytes;

		/*
		 * This is tricky, but first we need to figure out how much we
		 * free'd from any free-ers that occured during this
		 * reservation, so we reset ->csum_bytes to the csum_bytes
		 * before we dropped our lock, and then call the free for the
		 * number of bytes that were freed while we were trying our
		 * reservation.
		 */
		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
		BTRFS_I(inode)->csum_bytes = csum_bytes;
		to_free = calc_csum_metadata_size(inode, bytes, 0);


		/*
		 * Now we need to see how much we would have freed had we not
		 * been making this reservation and our ->csum_bytes were not
		 * artificially inflated.
		 */
		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
		bytes = csum_bytes - orig_csum_bytes;
		bytes = calc_csum_metadata_size(inode, bytes, 0);

		/*
		 * Now reset ->csum_bytes to what it should be.  If bytes is
		 * more than to_free then we would have free'd more space had we
		 * not had an artificially high ->csum_bytes, so we need to free
		 * the remainder.  If bytes is the same or less then we don't
		 * need to do anything, the other free-ers did the correct
		 * thing.
		 */
		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
		if (bytes > to_free)
			to_free = bytes - to_free;
		else
			to_free = 0;
	}
5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	if (to_free) {
		btrfs_block_rsv_release(root, block_rsv, to_free);
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), to_free, 0);
	}
	if (delalloc_lock)
		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
	return ret;
5101 5102
}

/**
 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
 * @inode: the inode to release the reservation for
 * @num_bytes: the number of bytes we're releasing
 *
 * This will release the metadata reservation for an inode.  This can be called
 * once we complete IO for a given set of bytes to release their metadata
 * reservations.
 */
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 to_free = 0;
	unsigned dropped;

	num_bytes = ALIGN(num_bytes, root->sectorsize);
	spin_lock(&BTRFS_I(inode)->lock);
	dropped = drop_outstanding_extent(inode);

	if (num_bytes)
		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
	spin_unlock(&BTRFS_I(inode)->lock);
	if (dropped > 0)
		to_free += btrfs_calc_trans_metadata_size(root, dropped);

	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), to_free, 0);
	if (root->fs_info->quota_enabled) {
		btrfs_qgroup_free(root, num_bytes +
					dropped * root->leafsize);
	}

	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
				to_free);
}

5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153
/**
 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
 * @inode: inode we're writing to
 * @num_bytes: the number of bytes we want to allocate
 *
 * This will do the following things
 *
 * o reserve space in the data space info for num_bytes
 * o reserve space in the metadata space info based on number of outstanding
 *   extents and how much csums will be needed
 * o add to the inodes ->delalloc_bytes
 * o add it to the fs_info's delalloc inodes list.
 *
 * This will return 0 for success and -ENOSPC if there is no space left.
 */
5154 5155 5156 5157 5158
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
{
	int ret;

	ret = btrfs_check_data_free_space(inode, num_bytes);
C
Chris Mason 已提交
5159
	if (ret)
5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170
		return ret;

	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
	if (ret) {
		btrfs_free_reserved_data_space(inode, num_bytes);
		return ret;
	}

	return 0;
}

5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183
/**
 * btrfs_delalloc_release_space - release data and metadata space for delalloc
 * @inode: inode we're releasing space for
 * @num_bytes: the number of bytes we want to free up
 *
 * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
 * called in the case that we don't need the metadata AND data reservations
 * anymore.  So if there is an error or we insert an inline extent.
 *
 * This function will release the metadata space that was not used and will
 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
 * list if there are no delalloc bytes left.
 */
5184 5185 5186 5187
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
{
	btrfs_delalloc_release_metadata(inode, num_bytes);
	btrfs_free_reserved_data_space(inode, num_bytes);
5188 5189
}

5190
static int update_block_group(struct btrfs_root *root,
5191
			      u64 bytenr, u64 num_bytes, int alloc)
C
Chris Mason 已提交
5192
{
5193
	struct btrfs_block_group_cache *cache = NULL;
C
Chris Mason 已提交
5194
	struct btrfs_fs_info *info = root->fs_info;
5195
	u64 total = num_bytes;
C
Chris Mason 已提交
5196
	u64 old_val;
5197
	u64 byte_in_group;
5198
	int factor;
C
Chris Mason 已提交
5199

5200
	/* block accounting for super block */
5201
	spin_lock(&info->delalloc_root_lock);
5202
	old_val = btrfs_super_bytes_used(info->super_copy);
5203 5204 5205 5206
	if (alloc)
		old_val += num_bytes;
	else
		old_val -= num_bytes;
5207
	btrfs_set_super_bytes_used(info->super_copy, old_val);
5208
	spin_unlock(&info->delalloc_root_lock);
5209

C
Chris Mason 已提交
5210
	while (total) {
5211
		cache = btrfs_lookup_block_group(info, bytenr);
5212
		if (!cache)
5213
			return -ENOENT;
5214 5215 5216 5217 5218 5219
		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
				    BTRFS_BLOCK_GROUP_RAID1 |
				    BTRFS_BLOCK_GROUP_RAID10))
			factor = 2;
		else
			factor = 1;
5220 5221 5222 5223 5224 5225 5226
		/*
		 * If this block group has free space cache written out, we
		 * need to make sure to load it if we are removing space.  This
		 * is because we need the unpinning stage to actually add the
		 * space back to the block group, otherwise we will leak space.
		 */
		if (!alloc && cache->cached == BTRFS_CACHE_NO)
5227
			cache_block_group(cache, 1);
5228

5229 5230
		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);
C
Chris Mason 已提交
5231

5232
		spin_lock(&cache->space_info->lock);
5233
		spin_lock(&cache->lock);
5234

5235
		if (btrfs_test_opt(root, SPACE_CACHE) &&
5236 5237 5238
		    cache->disk_cache_state < BTRFS_DC_CLEAR)
			cache->disk_cache_state = BTRFS_DC_CLEAR;

J
Josef Bacik 已提交
5239
		cache->dirty = 1;
C
Chris Mason 已提交
5240
		old_val = btrfs_block_group_used(&cache->item);
5241
		num_bytes = min(total, cache->key.offset - byte_in_group);
C
Chris Mason 已提交
5242
		if (alloc) {
5243
			old_val += num_bytes;
5244 5245 5246
			btrfs_set_block_group_used(&cache->item, old_val);
			cache->reserved -= num_bytes;
			cache->space_info->bytes_reserved -= num_bytes;
5247 5248
			cache->space_info->bytes_used += num_bytes;
			cache->space_info->disk_used += num_bytes * factor;
5249
			spin_unlock(&cache->lock);
5250
			spin_unlock(&cache->space_info->lock);
C
Chris Mason 已提交
5251
		} else {
5252
			old_val -= num_bytes;
5253
			btrfs_set_block_group_used(&cache->item, old_val);
5254 5255
			cache->pinned += num_bytes;
			cache->space_info->bytes_pinned += num_bytes;
5256
			cache->space_info->bytes_used -= num_bytes;
5257
			cache->space_info->disk_used -= num_bytes * factor;
5258
			spin_unlock(&cache->lock);
5259
			spin_unlock(&cache->space_info->lock);
5260

5261 5262 5263
			set_extent_dirty(info->pinned_extents,
					 bytenr, bytenr + num_bytes - 1,
					 GFP_NOFS | __GFP_NOFAIL);
C
Chris Mason 已提交
5264
		}
5265
		btrfs_put_block_group(cache);
5266 5267
		total -= num_bytes;
		bytenr += num_bytes;
C
Chris Mason 已提交
5268 5269 5270
	}
	return 0;
}
5271

5272 5273
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
J
Josef Bacik 已提交
5274
	struct btrfs_block_group_cache *cache;
5275
	u64 bytenr;
J
Josef Bacik 已提交
5276

5277 5278 5279 5280 5281 5282 5283
	spin_lock(&root->fs_info->block_group_cache_lock);
	bytenr = root->fs_info->first_logical_byte;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	if (bytenr < (u64)-1)
		return bytenr;

J
Josef Bacik 已提交
5284 5285
	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
	if (!cache)
5286
		return 0;
J
Josef Bacik 已提交
5287

5288
	bytenr = cache->key.objectid;
5289
	btrfs_put_block_group(cache);
5290 5291

	return bytenr;
5292 5293
}

5294 5295 5296
static int pin_down_extent(struct btrfs_root *root,
			   struct btrfs_block_group_cache *cache,
			   u64 bytenr, u64 num_bytes, int reserved)
5297
{
5298 5299 5300 5301 5302 5303 5304 5305 5306 5307
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	cache->space_info->bytes_pinned += num_bytes;
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);
J
Josef Bacik 已提交
5308

5309 5310
	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
J
Josef Bacik 已提交
5311 5312
	if (reserved)
		trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
5313 5314
	return 0;
}
J
Josef Bacik 已提交
5315

5316 5317 5318 5319 5320 5321 5322
/*
 * this function must be called within transaction
 */
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved)
{
	struct btrfs_block_group_cache *cache;
J
Josef Bacik 已提交
5323

5324
	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5325
	BUG_ON(!cache); /* Logic error */
5326 5327 5328 5329

	pin_down_extent(root, cache, bytenr, num_bytes, reserved);

	btrfs_put_block_group(cache);
5330 5331 5332
	return 0;
}

5333
/*
5334 5335
 * this function must be called within transaction
 */
5336
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
5337 5338 5339
				    u64 bytenr, u64 num_bytes)
{
	struct btrfs_block_group_cache *cache;
5340
	int ret;
5341 5342

	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
5343 5344
	if (!cache)
		return -EINVAL;
5345 5346 5347 5348 5349 5350 5351

	/*
	 * pull in the free space cache (if any) so that our pin
	 * removes the free space from the cache.  We have load_only set
	 * to one because the slow code to read in the free extents does check
	 * the pinned extents.
	 */
5352
	cache_block_group(cache, 1);
5353 5354 5355 5356

	pin_down_extent(root, cache, bytenr, num_bytes, 0);

	/* remove us from the free space cache (if we're there at all) */
5357
	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
5358
	btrfs_put_block_group(cache);
5359
	return ret;
5360 5361
}

5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435
static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_caching_control *caching_ctl;

	block_group = btrfs_lookup_block_group(root->fs_info, start);
	if (!block_group)
		return -EINVAL;

	cache_block_group(block_group, 0);
	caching_ctl = get_caching_control(block_group);

	if (!caching_ctl) {
		/* Logic error */
		BUG_ON(!block_group_cache_done(block_group));
		ret = btrfs_remove_free_space(block_group, start, num_bytes);
	} else {
		mutex_lock(&caching_ctl->mutex);

		if (start >= caching_ctl->progress) {
			ret = add_excluded_extent(root, start, num_bytes);
		} else if (start + num_bytes <= caching_ctl->progress) {
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
		} else {
			num_bytes = caching_ctl->progress - start;
			ret = btrfs_remove_free_space(block_group,
						      start, num_bytes);
			if (ret)
				goto out_lock;

			num_bytes = (start + num_bytes) -
				caching_ctl->progress;
			start = caching_ctl->progress;
			ret = add_excluded_extent(root, start, num_bytes);
		}
out_lock:
		mutex_unlock(&caching_ctl->mutex);
		put_caching_control(caching_ctl);
	}
	btrfs_put_block_group(block_group);
	return ret;
}

int btrfs_exclude_logged_extents(struct btrfs_root *log,
				 struct extent_buffer *eb)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key key;
	int found_type;
	int i;

	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
		return 0;

	for (i = 0; i < btrfs_header_nritems(eb); i++) {
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		__exclude_logged_extent(log, key.objectid, key.offset);
	}

	return 0;
}

5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456
/**
 * btrfs_update_reserved_bytes - update the block_group and space info counters
 * @cache:	The cache we are manipulating
 * @num_bytes:	The number of bytes in question
 * @reserve:	One of the reservation enums
 *
 * This is called by the allocator when it reserves space, or by somebody who is
 * freeing space that was never actually used on disk.  For example if you
 * reserve some space for a new leaf in transaction A and before transaction A
 * commits you free that leaf, you call this with reserve set to 0 in order to
 * clear the reservation.
 *
 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
 * ENOSPC accounting.  For data we handle the reservation through clearing the
 * delalloc bits in the io_tree.  We have to do this since we could end up
 * allocating less disk space for the amount of data we have reserved in the
 * case of compression.
 *
 * If this is a reservation and the block group has become read only we cannot
 * make the reservation and return -EAGAIN, otherwise this function always
 * succeeds.
5457
 */
5458 5459
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve)
5460
{
5461
	struct btrfs_space_info *space_info = cache->space_info;
5462
	int ret = 0;
5463

5464 5465 5466
	spin_lock(&space_info->lock);
	spin_lock(&cache->lock);
	if (reserve != RESERVE_FREE) {
5467 5468 5469
		if (cache->ro) {
			ret = -EAGAIN;
		} else {
5470 5471 5472
			cache->reserved += num_bytes;
			space_info->bytes_reserved += num_bytes;
			if (reserve == RESERVE_ALLOC) {
J
Josef Bacik 已提交
5473
				trace_btrfs_space_reservation(cache->fs_info,
5474 5475
						"space_info", space_info->flags,
						num_bytes, 0);
5476 5477
				space_info->bytes_may_use -= num_bytes;
			}
5478
		}
5479 5480 5481 5482 5483
	} else {
		if (cache->ro)
			space_info->bytes_readonly += num_bytes;
		cache->reserved -= num_bytes;
		space_info->bytes_reserved -= num_bytes;
5484
	}
5485 5486
	spin_unlock(&cache->lock);
	spin_unlock(&space_info->lock);
5487
	return ret;
5488
}
C
Chris Mason 已提交
5489

5490
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
5491
				struct btrfs_root *root)
5492 5493
{
	struct btrfs_fs_info *fs_info = root->fs_info;
5494 5495 5496
	struct btrfs_caching_control *next;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_block_group_cache *cache;
5497
	struct btrfs_space_info *space_info;
5498

5499
	down_write(&fs_info->extent_commit_sem);
5500

5501 5502 5503 5504 5505 5506 5507
	list_for_each_entry_safe(caching_ctl, next,
				 &fs_info->caching_block_groups, list) {
		cache = caching_ctl->block_group;
		if (block_group_cache_done(cache)) {
			cache->last_byte_to_unpin = (u64)-1;
			list_del_init(&caching_ctl->list);
			put_caching_control(caching_ctl);
5508
		} else {
5509
			cache->last_byte_to_unpin = caching_ctl->progress;
5510 5511
		}
	}
5512 5513 5514 5515 5516 5517 5518

	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		fs_info->pinned_extents = &fs_info->freed_extents[1];
	else
		fs_info->pinned_extents = &fs_info->freed_extents[0];

	up_write(&fs_info->extent_commit_sem);
5519

5520 5521 5522
	list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
		percpu_counter_set(&space_info->total_bytes_pinned, 0);

5523
	update_global_block_rsv(fs_info);
5524 5525
}

5526
static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
C
Chris Mason 已提交
5527
{
5528 5529
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
5530 5531
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5532
	u64 len;
5533
	bool readonly;
C
Chris Mason 已提交
5534

5535
	while (start <= end) {
5536
		readonly = false;
5537 5538 5539 5540 5541
		if (!cache ||
		    start >= cache->key.objectid + cache->key.offset) {
			if (cache)
				btrfs_put_block_group(cache);
			cache = btrfs_lookup_block_group(fs_info, start);
5542
			BUG_ON(!cache); /* Logic error */
5543 5544 5545 5546 5547 5548 5549 5550 5551 5552
		}

		len = cache->key.objectid + cache->key.offset - start;
		len = min(len, end + 1 - start);

		if (start < cache->last_byte_to_unpin) {
			len = min(len, cache->last_byte_to_unpin - start);
			btrfs_add_free_space(cache, start, len);
		}

5553
		start += len;
5554
		space_info = cache->space_info;
5555

5556
		spin_lock(&space_info->lock);
5557 5558
		spin_lock(&cache->lock);
		cache->pinned -= len;
5559 5560 5561 5562 5563
		space_info->bytes_pinned -= len;
		if (cache->ro) {
			space_info->bytes_readonly += len;
			readonly = true;
		}
5564
		spin_unlock(&cache->lock);
5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577
		if (!readonly && global_rsv->space_info == space_info) {
			spin_lock(&global_rsv->lock);
			if (!global_rsv->full) {
				len = min(len, global_rsv->size -
					  global_rsv->reserved);
				global_rsv->reserved += len;
				space_info->bytes_may_use += len;
				if (global_rsv->reserved >= global_rsv->size)
					global_rsv->full = 1;
			}
			spin_unlock(&global_rsv->lock);
		}
		spin_unlock(&space_info->lock);
C
Chris Mason 已提交
5578
	}
5579 5580 5581

	if (cache)
		btrfs_put_block_group(cache);
C
Chris Mason 已提交
5582 5583 5584 5585
	return 0;
}

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
5586
			       struct btrfs_root *root)
5587
{
5588 5589
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *unpin;
5590 5591
	u64 start;
	u64 end;
5592 5593
	int ret;

5594 5595 5596
	if (trans->aborted)
		return 0;

5597 5598 5599 5600 5601
	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
		unpin = &fs_info->freed_extents[1];
	else
		unpin = &fs_info->freed_extents[0];

C
Chris Mason 已提交
5602
	while (1) {
5603
		ret = find_first_extent_bit(unpin, 0, &start, &end,
5604
					    EXTENT_DIRTY, NULL);
5605
		if (ret)
5606
			break;
5607

5608 5609 5610
		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_discard_extent(root, start,
						   end + 1 - start, NULL);
5611

5612
		clear_extent_dirty(unpin, start, end, GFP_NOFS);
5613
		unpin_extent_range(root, start, end);
5614
		cond_resched();
5615
	}
J
Josef Bacik 已提交
5616

C
Chris Mason 已提交
5617 5618 5619
	return 0;
}

5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640
static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
			     u64 owner, u64 root_objectid)
{
	struct btrfs_space_info *space_info;
	u64 flags;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
			flags = BTRFS_BLOCK_GROUP_SYSTEM;
		else
			flags = BTRFS_BLOCK_GROUP_METADATA;
	} else {
		flags = BTRFS_BLOCK_GROUP_DATA;
	}

	space_info = __find_space_info(fs_info, flags);
	BUG_ON(!space_info); /* Logic bug */
	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
}


5641 5642 5643 5644 5645 5646
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytenr, u64 num_bytes, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extent_op)
5647
{
C
Chris Mason 已提交
5648
	struct btrfs_key key;
5649
	struct btrfs_path *path;
5650 5651
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
5652
	struct extent_buffer *leaf;
5653 5654
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
5655
	int ret;
5656
	int is_data;
5657 5658 5659
	int extent_slot = 0;
	int found_extent = 0;
	int num_to_del = 1;
5660 5661
	u32 item_size;
	u64 refs;
5662 5663
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);
C
Chris Mason 已提交
5664

5665
	path = btrfs_alloc_path();
5666 5667
	if (!path)
		return -ENOMEM;
5668

5669
	path->reada = 1;
5670
	path->leave_spinning = 1;
5671 5672 5673 5674

	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
	BUG_ON(!is_data && refs_to_drop != 1);

5675 5676 5677
	if (is_data)
		skinny_metadata = 0;

5678 5679 5680 5681
	ret = lookup_extent_backref(trans, extent_root, path, &iref,
				    bytenr, num_bytes, parent,
				    root_objectid, owner_objectid,
				    owner_offset);
5682
	if (ret == 0) {
5683
		extent_slot = path->slots[0];
5684 5685
		while (extent_slot >= 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
5686
					      extent_slot);
5687
			if (key.objectid != bytenr)
5688
				break;
5689 5690
			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes) {
5691 5692 5693
				found_extent = 1;
				break;
			}
5694 5695 5696 5697 5698
			if (key.type == BTRFS_METADATA_ITEM_KEY &&
			    key.offset == owner_objectid) {
				found_extent = 1;
				break;
			}
5699 5700
			if (path->slots[0] - extent_slot > 5)
				break;
5701
			extent_slot--;
5702
		}
5703 5704 5705 5706 5707
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
		if (found_extent && item_size < sizeof(*ei))
			found_extent = 0;
#endif
Z
Zheng Yan 已提交
5708
		if (!found_extent) {
5709
			BUG_ON(iref);
5710
			ret = remove_extent_backref(trans, extent_root, path,
5711 5712
						    NULL, refs_to_drop,
						    is_data);
5713 5714 5715 5716
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
5717
			btrfs_release_path(path);
5718
			path->leave_spinning = 1;
5719 5720 5721 5722 5723

			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;

5724 5725 5726 5727 5728
			if (!is_data && skinny_metadata) {
				key.type = BTRFS_METADATA_ITEM_KEY;
				key.offset = owner_objectid;
			}

Z
Zheng Yan 已提交
5729 5730
			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753
			if (ret > 0 && skinny_metadata && path->slots[0]) {
				/*
				 * Couldn't find our skinny metadata item,
				 * see if we have ye olde extent item.
				 */
				path->slots[0]--;
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
				if (key.objectid == bytenr &&
				    key.type == BTRFS_EXTENT_ITEM_KEY &&
				    key.offset == num_bytes)
					ret = 0;
			}

			if (ret > 0 && skinny_metadata) {
				skinny_metadata = false;
				key.type = BTRFS_EXTENT_ITEM_KEY;
				key.offset = num_bytes;
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
			}

5754
			if (ret) {
5755
				btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5756
					ret, bytenr);
5757 5758 5759
				if (ret > 0)
					btrfs_print_leaf(extent_root,
							 path->nodes[0]);
5760
			}
5761 5762 5763 5764
			if (ret < 0) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
Z
Zheng Yan 已提交
5765 5766
			extent_slot = path->slots[0];
		}
5767
	} else if (WARN_ON(ret == -ENOENT)) {
5768
		btrfs_print_leaf(extent_root, path->nodes[0]);
5769 5770
		btrfs_err(info,
			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
5771 5772
			bytenr, parent, root_objectid, owner_objectid,
			owner_offset);
5773
	} else {
5774 5775
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
5776
	}
5777 5778

	leaf = path->nodes[0];
5779 5780 5781 5782 5783 5784
	item_size = btrfs_item_size_nr(leaf, extent_slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		BUG_ON(found_extent || extent_slot != path->slots[0]);
		ret = convert_extent_item_v0(trans, extent_root, path,
					     owner_objectid, 0);
5785 5786 5787 5788
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
5789

5790
		btrfs_release_path(path);
5791 5792 5793 5794 5795 5796 5797 5798 5799
		path->leave_spinning = 1;

		key.objectid = bytenr;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = num_bytes;

		ret = btrfs_search_slot(trans, extent_root, &key, path,
					-1, 1);
		if (ret) {
5800
			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
5801
				ret, bytenr);
5802 5803
			btrfs_print_leaf(extent_root, path->nodes[0]);
		}
5804 5805 5806 5807 5808
		if (ret < 0) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}

5809 5810 5811 5812 5813 5814
		extent_slot = path->slots[0];
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, extent_slot);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
5815
	ei = btrfs_item_ptr(leaf, extent_slot,
C
Chris Mason 已提交
5816
			    struct btrfs_extent_item);
5817 5818
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
	    key.type == BTRFS_EXTENT_ITEM_KEY) {
5819 5820 5821 5822 5823
		struct btrfs_tree_block_info *bi;
		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
	}
5824

5825
	refs = btrfs_extent_refs(leaf, ei);
5826 5827 5828 5829 5830 5831 5832
	if (refs < refs_to_drop) {
		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
			  "for bytenr %Lu\n", refs_to_drop, refs, bytenr);
		ret = -EINVAL;
		btrfs_abort_transaction(trans, extent_root, ret);
		goto out;
	}
5833
	refs -= refs_to_drop;
5834

5835 5836 5837 5838 5839 5840
	if (refs > 0) {
		if (extent_op)
			__run_delayed_extent_op(extent_op, leaf, ei);
		/*
		 * In the case of inline back ref, reference count will
		 * be updated by remove_extent_backref
5841
		 */
5842 5843 5844 5845 5846 5847 5848 5849 5850 5851
		if (iref) {
			BUG_ON(!found_extent);
		} else {
			btrfs_set_extent_refs(leaf, ei, refs);
			btrfs_mark_buffer_dirty(leaf);
		}
		if (found_extent) {
			ret = remove_extent_backref(trans, extent_root, path,
						    iref, refs_to_drop,
						    is_data);
5852 5853 5854 5855
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
5856
		}
5857 5858
		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
				 root_objectid);
5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869
	} else {
		if (found_extent) {
			BUG_ON(is_data && refs_to_drop !=
			       extent_data_ref_count(root, path, iref));
			if (iref) {
				BUG_ON(path->slots[0] != extent_slot);
			} else {
				BUG_ON(path->slots[0] != extent_slot + 1);
				path->slots[0] = extent_slot;
				num_to_del = 2;
			}
C
Chris Mason 已提交
5870
		}
5871

5872 5873
		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
				      num_to_del);
5874 5875 5876 5877
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
5878
		btrfs_release_path(path);
5879

5880
		if (is_data) {
5881
			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
5882 5883 5884 5885
			if (ret) {
				btrfs_abort_transaction(trans, extent_root, ret);
				goto out;
			}
5886 5887
		}

5888
		ret = update_block_group(root, bytenr, num_bytes, 0);
5889 5890 5891 5892
		if (ret) {
			btrfs_abort_transaction(trans, extent_root, ret);
			goto out;
		}
5893
	}
5894
out:
5895
	btrfs_free_path(path);
5896 5897 5898
	return ret;
}

5899
/*
5900
 * when we free an block, it is possible (and likely) that we free the last
5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911
 * delayed ref for that extent as well.  This searches the delayed ref tree for
 * a given extent, and if there are no other delayed refs to be processed, it
 * removes it from the tree.
 */
static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
5912
	int ret = 0;
5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (!head)
		goto out;

	node = rb_prev(&head->node.rb_node);
	if (!node)
		goto out;

	ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

	/* there are still entries for this ref, we can't drop it */
	if (ref->bytenr == bytenr)
		goto out;

5930 5931 5932
	if (head->extent_op) {
		if (!head->must_insert_reserved)
			goto out;
5933
		btrfs_free_delayed_extent_op(head->extent_op);
5934 5935 5936
		head->extent_op = NULL;
	}

5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949
	/*
	 * waiting for the lock here would deadlock.  If someone else has it
	 * locked they are already in the process of dropping it anyway
	 */
	if (!mutex_trylock(&head->mutex))
		goto out;

	/*
	 * at this point we have a head with no other entries.  Go
	 * ahead and process it.
	 */
	head->node.in_tree = 0;
	rb_erase(&head->node.rb_node, &delayed_refs->root);
5950

5951 5952 5953 5954 5955 5956
	delayed_refs->num_entries--;

	/*
	 * we don't take a ref on the node because we're removing it from the
	 * tree, so we just steal the ref the tree was holding.
	 */
5957 5958 5959 5960 5961
	delayed_refs->num_heads--;
	if (list_empty(&head->cluster))
		delayed_refs->num_heads_ready--;

	list_del_init(&head->cluster);
5962 5963
	spin_unlock(&delayed_refs->lock);

5964 5965 5966 5967 5968
	BUG_ON(head->extent_op);
	if (head->must_insert_reserved)
		ret = 1;

	mutex_unlock(&head->mutex);
5969
	btrfs_put_delayed_ref(&head->node);
5970
	return ret;
5971 5972 5973 5974 5975
out:
	spin_unlock(&delayed_refs->lock);
	return 0;
}

5976 5977 5978
void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *buf,
5979
			   u64 parent, int last_ref)
5980 5981
{
	struct btrfs_block_group_cache *cache = NULL;
5982
	int pin = 1;
5983 5984 5985
	int ret;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
A
Arne Jansen 已提交
5986 5987 5988 5989
		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					buf->start, buf->len,
					parent, root->root_key.objectid,
					btrfs_header_level(buf),
5990
					BTRFS_DROP_DELAYED_REF, NULL, 0);
5991
		BUG_ON(ret); /* -ENOMEM */
5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002
	}

	if (!last_ref)
		return;

	cache = btrfs_lookup_block_group(root->fs_info, buf->start);

	if (btrfs_header_generation(buf) == trans->transid) {
		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
			ret = check_ref_cleanup(trans, root, buf->start);
			if (!ret)
6003
				goto out;
6004 6005 6006 6007
		}

		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
			pin_down_extent(root, cache, buf->start, buf->len, 1);
6008
			goto out;
6009 6010 6011 6012 6013
		}

		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));

		btrfs_add_free_space(cache, buf->start, buf->len);
6014
		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
J
Josef Bacik 已提交
6015
		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6016
		pin = 0;
6017 6018
	}
out:
6019 6020 6021 6022 6023
	if (pin)
		add_pinned_bytes(root->fs_info, buf->len,
				 btrfs_header_level(buf),
				 root->root_key.objectid);

6024 6025 6026 6027 6028
	/*
	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
	 * anymore.
	 */
	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6029 6030 6031
	btrfs_put_block_group(cache);
}

6032
/* Can return -ENOMEM */
A
Arne Jansen 已提交
6033 6034 6035
int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
		      u64 owner, u64 offset, int for_cow)
6036 6037
{
	int ret;
A
Arne Jansen 已提交
6038
	struct btrfs_fs_info *fs_info = root->fs_info;
6039

6040 6041
	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);

6042 6043 6044 6045
	/*
	 * tree log blocks never actually go into the extent allocation
	 * tree, just update pinning info and exit early.
	 */
6046 6047
	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6048
		/* unlocks the pinned mutex */
6049
		btrfs_pin_extent(root, bytenr, num_bytes, 1);
6050
		ret = 0;
6051
	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
A
Arne Jansen 已提交
6052 6053
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
6054
					parent, root_objectid, (int)owner,
A
Arne Jansen 已提交
6055
					BTRFS_DROP_DELAYED_REF, NULL, for_cow);
6056
	} else {
A
Arne Jansen 已提交
6057 6058 6059 6060 6061
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
						num_bytes,
						parent, root_objectid, owner,
						offset, BTRFS_DROP_DELAYED_REF,
						NULL, for_cow);
6062
	}
6063 6064 6065
	return ret;
}

D
David Woodhouse 已提交
6066 6067 6068
static u64 stripe_align(struct btrfs_root *root,
			struct btrfs_block_group_cache *cache,
			u64 val, u64 num_bytes)
6069
{
6070
	u64 ret = ALIGN(val, root->stripesize);
6071 6072 6073
	return ret;
}

J
Josef Bacik 已提交
6074 6075 6076 6077 6078 6079 6080 6081 6082 6083
/*
 * when we wait for progress in the block group caching, its because
 * our allocation attempt failed at least once.  So, we must sleep
 * and let some progress happen before we try again.
 *
 * This function will sleep at least once waiting for new free space to
 * show up, and then it will check the block group free space numbers
 * for our min num_bytes.  Another option is to have it go ahead
 * and look in the rbtree for a free extent of a given size, but this
 * is a good start.
6084 6085 6086
 *
 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
 * any of the information in this block group.
J
Josef Bacik 已提交
6087
 */
6088
static noinline void
J
Josef Bacik 已提交
6089 6090 6091
wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
				u64 num_bytes)
{
6092
	struct btrfs_caching_control *caching_ctl;
J
Josef Bacik 已提交
6093

6094 6095
	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
6096
		return;
J
Josef Bacik 已提交
6097

6098
	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6099
		   (cache->free_space_ctl->free_space >= num_bytes));
6100 6101 6102 6103 6104 6105 6106 6107

	put_caching_control(caching_ctl);
}

static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *caching_ctl;
6108
	int ret = 0;
6109 6110 6111

	caching_ctl = get_caching_control(cache);
	if (!caching_ctl)
6112
		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6113 6114

	wait_event(caching_ctl->wait, block_group_cache_done(cache));
6115 6116
	if (cache->cached == BTRFS_CACHE_ERROR)
		ret = -EIO;
6117
	put_caching_control(caching_ctl);
6118
	return ret;
J
Josef Bacik 已提交
6119 6120
}

6121
int __get_raid_index(u64 flags)
6122
{
6123
	if (flags & BTRFS_BLOCK_GROUP_RAID10)
6124
		return BTRFS_RAID_RAID10;
6125
	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6126
		return BTRFS_RAID_RAID1;
6127
	else if (flags & BTRFS_BLOCK_GROUP_DUP)
6128
		return BTRFS_RAID_DUP;
6129
	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6130
		return BTRFS_RAID_RAID0;
D
David Woodhouse 已提交
6131
	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6132
		return BTRFS_RAID_RAID5;
D
David Woodhouse 已提交
6133
	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6134
		return BTRFS_RAID_RAID6;
6135

6136
	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6137 6138
}

6139 6140
static int get_block_group_index(struct btrfs_block_group_cache *cache)
{
6141
	return __get_raid_index(cache->flags);
6142 6143
}

J
Josef Bacik 已提交
6144
enum btrfs_loop_type {
6145 6146 6147 6148
	LOOP_CACHING_NOWAIT = 0,
	LOOP_CACHING_WAIT = 1,
	LOOP_ALLOC_CHUNK = 2,
	LOOP_NO_EMPTY_SIZE = 3,
J
Josef Bacik 已提交
6149 6150
};

6151 6152 6153
/*
 * walks the btree of allocated extents and find a hole of a given size.
 * The key ins is changed to record the hole:
6154
 * ins->objectid == start position
6155
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
6156
 * ins->offset == the size of the hole.
6157
 * Any available blocks before search_start are skipped.
6158 6159 6160
 *
 * If there is no suitable free space, we will record the max size of
 * the free space extent currently.
6161
 */
6162
static noinline int find_free_extent(struct btrfs_root *orig_root,
6163 6164
				     u64 num_bytes, u64 empty_size,
				     u64 hint_byte, struct btrfs_key *ins,
6165
				     u64 flags)
6166
{
6167
	int ret = 0;
C
Chris Mason 已提交
6168
	struct btrfs_root *root = orig_root->fs_info->extent_root;
6169
	struct btrfs_free_cluster *last_ptr = NULL;
6170
	struct btrfs_block_group_cache *block_group = NULL;
6171
	struct btrfs_block_group_cache *used_block_group;
6172
	u64 search_start = 0;
6173
	u64 max_extent_size = 0;
6174
	int empty_cluster = 2 * 1024 * 1024;
6175
	struct btrfs_space_info *space_info;
6176
	int loop = 0;
6177 6178
	int index = __get_raid_index(flags);
	int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
6179
		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
J
Josef Bacik 已提交
6180
	bool found_uncached_bg = false;
6181
	bool failed_cluster_refill = false;
6182
	bool failed_alloc = false;
6183
	bool use_cluster = true;
6184
	bool have_caching_bg = false;
6185

6186
	WARN_ON(num_bytes < root->sectorsize);
6187
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
6188 6189
	ins->objectid = 0;
	ins->offset = 0;
6190

6191
	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
J
Josef Bacik 已提交
6192

6193
	space_info = __find_space_info(root->fs_info, flags);
6194
	if (!space_info) {
6195
		btrfs_err(root->fs_info, "No space info for %llu", flags);
6196 6197
		return -ENOSPC;
	}
J
Josef Bacik 已提交
6198

6199 6200 6201 6202 6203 6204 6205
	/*
	 * If the space info is for both data and metadata it means we have a
	 * small filesystem and we can't use the clustering stuff.
	 */
	if (btrfs_mixed_space_info(space_info))
		use_cluster = false;

6206
	if (flags & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
6207
		last_ptr = &root->fs_info->meta_alloc_cluster;
6208 6209
		if (!btrfs_test_opt(root, SSD))
			empty_cluster = 64 * 1024;
6210 6211
	}

6212
	if ((flags & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
6213
	    btrfs_test_opt(root, SSD)) {
6214 6215
		last_ptr = &root->fs_info->data_alloc_cluster;
	}
J
Josef Bacik 已提交
6216

6217
	if (last_ptr) {
6218 6219 6220 6221
		spin_lock(&last_ptr->lock);
		if (last_ptr->block_group)
			hint_byte = last_ptr->window_start;
		spin_unlock(&last_ptr->lock);
6222
	}
6223

6224
	search_start = max(search_start, first_logical_byte(root, 0));
6225
	search_start = max(search_start, hint_byte);
6226

J
Josef Bacik 已提交
6227
	if (!last_ptr)
6228 6229
		empty_cluster = 0;

J
Josef Bacik 已提交
6230 6231 6232
	if (search_start == hint_byte) {
		block_group = btrfs_lookup_block_group(root->fs_info,
						       search_start);
6233
		used_block_group = block_group;
J
Josef Bacik 已提交
6234 6235 6236
		/*
		 * we don't want to use the block group if it doesn't match our
		 * allocation bits, or if its not cached.
6237 6238 6239
		 *
		 * However if we are re-searching with an ideal block group
		 * picked out then we don't care that the block group is cached.
J
Josef Bacik 已提交
6240
		 */
6241
		if (block_group && block_group_bits(block_group, flags) &&
6242
		    block_group->cached != BTRFS_CACHE_NO) {
J
Josef Bacik 已提交
6243
			down_read(&space_info->groups_sem);
6244 6245 6246 6247 6248 6249 6250 6251 6252 6253
			if (list_empty(&block_group->list) ||
			    block_group->ro) {
				/*
				 * someone is removing this block group,
				 * we can't jump into the have_block_group
				 * target because our list pointers are not
				 * valid
				 */
				btrfs_put_block_group(block_group);
				up_read(&space_info->groups_sem);
6254
			} else {
6255
				index = get_block_group_index(block_group);
6256
				goto have_block_group;
6257
			}
J
Josef Bacik 已提交
6258
		} else if (block_group) {
6259
			btrfs_put_block_group(block_group);
J
Josef Bacik 已提交
6260
		}
6261
	}
J
Josef Bacik 已提交
6262
search:
6263
	have_caching_bg = false;
6264
	down_read(&space_info->groups_sem);
6265 6266
	list_for_each_entry(block_group, &space_info->block_groups[index],
			    list) {
6267
		u64 offset;
J
Josef Bacik 已提交
6268
		int cached;
6269

6270
		used_block_group = block_group;
6271
		btrfs_get_block_group(block_group);
J
Josef Bacik 已提交
6272
		search_start = block_group->key.objectid;
6273

6274 6275 6276 6277 6278
		/*
		 * this can happen if we end up cycling through all the
		 * raid types, but we want to make sure we only allocate
		 * for the proper type.
		 */
6279
		if (!block_group_bits(block_group, flags)) {
6280 6281
		    u64 extra = BTRFS_BLOCK_GROUP_DUP |
				BTRFS_BLOCK_GROUP_RAID1 |
D
David Woodhouse 已提交
6282 6283
				BTRFS_BLOCK_GROUP_RAID5 |
				BTRFS_BLOCK_GROUP_RAID6 |
6284 6285 6286 6287 6288 6289 6290
				BTRFS_BLOCK_GROUP_RAID10;

			/*
			 * if they asked for extra copies and this block group
			 * doesn't provide them, bail.  This does allow us to
			 * fill raid0 from raid1.
			 */
6291
			if ((flags & extra) && !(block_group->flags & extra))
6292 6293 6294
				goto loop;
		}

J
Josef Bacik 已提交
6295
have_block_group:
6296 6297 6298
		cached = block_group_cache_done(block_group);
		if (unlikely(!cached)) {
			found_uncached_bg = true;
6299
			ret = cache_block_group(block_group, 0);
6300 6301
			BUG_ON(ret < 0);
			ret = 0;
J
Josef Bacik 已提交
6302 6303
		}

6304 6305
		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
			goto loop;
6306
		if (unlikely(block_group->ro))
J
Josef Bacik 已提交
6307
			goto loop;
J
Josef Bacik 已提交
6308

6309
		/*
6310 6311
		 * Ok we want to try and use the cluster allocator, so
		 * lets look there
6312
		 */
6313
		if (last_ptr) {
6314
			unsigned long aligned_cluster;
6315 6316 6317 6318 6319
			/*
			 * the refill lock keeps out other
			 * people trying to start a new cluster
			 */
			spin_lock(&last_ptr->refill_lock);
6320 6321 6322 6323
			used_block_group = last_ptr->block_group;
			if (used_block_group != block_group &&
			    (!used_block_group ||
			     used_block_group->ro ||
6324
			     !block_group_bits(used_block_group, flags))) {
6325
				used_block_group = block_group;
6326
				goto refill_cluster;
6327 6328 6329 6330
			}

			if (used_block_group != block_group)
				btrfs_get_block_group(used_block_group);
6331

6332
			offset = btrfs_alloc_from_cluster(used_block_group,
6333 6334 6335 6336
						last_ptr,
						num_bytes,
						used_block_group->key.objectid,
						&max_extent_size);
6337 6338 6339
			if (offset) {
				/* we have a block, we're done */
				spin_unlock(&last_ptr->refill_lock);
J
Josef Bacik 已提交
6340 6341
				trace_btrfs_reserve_extent_cluster(root,
					block_group, search_start, num_bytes);
6342 6343 6344
				goto checks;
			}

6345 6346 6347 6348
			WARN_ON(last_ptr->block_group != used_block_group);
			if (used_block_group != block_group) {
				btrfs_put_block_group(used_block_group);
				used_block_group = block_group;
6349
			}
6350
refill_cluster:
6351
			BUG_ON(used_block_group != block_group);
6352 6353 6354 6355 6356 6357 6358 6359
			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
			 * set up a new clusters, so lets just skip it
			 * and let the allocator find whatever block
			 * it can find.  If we reach this point, we
			 * will have tried the cluster allocator
			 * plenty of times and not have found
			 * anything, so we are likely way too
			 * fragmented for the clustering stuff to find
6360 6361 6362 6363 6364 6365 6366 6367 6368
			 * anything.
			 *
			 * However, if the cluster is taken from the
			 * current block group, release the cluster
			 * first, so that we stand a better chance of
			 * succeeding in the unclustered
			 * allocation.  */
			if (loop >= LOOP_NO_EMPTY_SIZE &&
			    last_ptr->block_group != block_group) {
6369 6370 6371 6372
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

6373 6374 6375 6376 6377 6378
			/*
			 * this cluster didn't work out, free it and
			 * start over
			 */
			btrfs_return_cluster_to_free_space(NULL, last_ptr);

6379 6380 6381 6382 6383
			if (loop >= LOOP_NO_EMPTY_SIZE) {
				spin_unlock(&last_ptr->refill_lock);
				goto unclustered_alloc;
			}

6384 6385 6386 6387
			aligned_cluster = max_t(unsigned long,
						empty_cluster + empty_size,
					      block_group->full_stripe_len);

6388
			/* allocate a cluster in this block group */
6389 6390 6391 6392
			ret = btrfs_find_space_cluster(root, block_group,
						       last_ptr, search_start,
						       num_bytes,
						       aligned_cluster);
6393 6394 6395 6396 6397 6398
			if (ret == 0) {
				/*
				 * now pull our allocation out of this
				 * cluster
				 */
				offset = btrfs_alloc_from_cluster(block_group,
6399 6400 6401 6402
							last_ptr,
							num_bytes,
							search_start,
							&max_extent_size);
6403 6404 6405
				if (offset) {
					/* we found one, proceed */
					spin_unlock(&last_ptr->refill_lock);
J
Josef Bacik 已提交
6406 6407 6408
					trace_btrfs_reserve_extent_cluster(root,
						block_group, search_start,
						num_bytes);
6409 6410
					goto checks;
				}
6411 6412
			} else if (!cached && loop > LOOP_CACHING_NOWAIT
				   && !failed_cluster_refill) {
J
Josef Bacik 已提交
6413 6414
				spin_unlock(&last_ptr->refill_lock);

6415
				failed_cluster_refill = true;
J
Josef Bacik 已提交
6416 6417 6418
				wait_block_group_cache_progress(block_group,
				       num_bytes + empty_cluster + empty_size);
				goto have_block_group;
6419
			}
J
Josef Bacik 已提交
6420

6421 6422 6423 6424 6425 6426
			/*
			 * at this point we either didn't find a cluster
			 * or we weren't able to allocate a block from our
			 * cluster.  Free the cluster we've been trying
			 * to use, and go to the next block group
			 */
6427
			btrfs_return_cluster_to_free_space(NULL, last_ptr);
6428
			spin_unlock(&last_ptr->refill_lock);
6429
			goto loop;
6430 6431
		}

6432
unclustered_alloc:
6433 6434 6435 6436
		spin_lock(&block_group->free_space_ctl->tree_lock);
		if (cached &&
		    block_group->free_space_ctl->free_space <
		    num_bytes + empty_cluster + empty_size) {
6437 6438 6439 6440
			if (block_group->free_space_ctl->free_space >
			    max_extent_size)
				max_extent_size =
					block_group->free_space_ctl->free_space;
6441 6442 6443 6444 6445
			spin_unlock(&block_group->free_space_ctl->tree_lock);
			goto loop;
		}
		spin_unlock(&block_group->free_space_ctl->tree_lock);

6446
		offset = btrfs_find_space_for_alloc(block_group, search_start,
6447 6448
						    num_bytes, empty_size,
						    &max_extent_size);
6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459
		/*
		 * If we didn't find a chunk, and we haven't failed on this
		 * block group before, and this block group is in the middle of
		 * caching and we are ok with waiting, then go ahead and wait
		 * for progress to be made, and set failed_alloc to true.
		 *
		 * If failed_alloc is true then we've already waited on this
		 * block group once and should move on to the next block group.
		 */
		if (!offset && !failed_alloc && !cached &&
		    loop > LOOP_CACHING_NOWAIT) {
J
Josef Bacik 已提交
6460
			wait_block_group_cache_progress(block_group,
6461 6462
						num_bytes + empty_size);
			failed_alloc = true;
J
Josef Bacik 已提交
6463
			goto have_block_group;
6464
		} else if (!offset) {
6465 6466
			if (!cached)
				have_caching_bg = true;
6467
			goto loop;
J
Josef Bacik 已提交
6468
		}
6469
checks:
D
David Woodhouse 已提交
6470 6471
		search_start = stripe_align(root, used_block_group,
					    offset, num_bytes);
6472

J
Josef Bacik 已提交
6473 6474
		/* move on to the next group */
		if (search_start + num_bytes >
6475 6476
		    used_block_group->key.objectid + used_block_group->key.offset) {
			btrfs_add_free_space(used_block_group, offset, num_bytes);
J
Josef Bacik 已提交
6477
			goto loop;
6478
		}
6479

6480
		if (offset < search_start)
6481
			btrfs_add_free_space(used_block_group, offset,
6482 6483
					     search_start - offset);
		BUG_ON(offset > search_start);
J
Josef Bacik 已提交
6484

6485
		ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
6486
						  alloc_type);
6487
		if (ret == -EAGAIN) {
6488
			btrfs_add_free_space(used_block_group, offset, num_bytes);
J
Josef Bacik 已提交
6489
			goto loop;
J
Josef Bacik 已提交
6490
		}
6491

6492
		/* we are all good, lets return */
J
Josef Bacik 已提交
6493 6494
		ins->objectid = search_start;
		ins->offset = num_bytes;
6495

J
Josef Bacik 已提交
6496 6497
		trace_btrfs_reserve_extent(orig_root, block_group,
					   search_start, num_bytes);
6498 6499
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
6500
		btrfs_put_block_group(block_group);
J
Josef Bacik 已提交
6501 6502
		break;
loop:
6503
		failed_cluster_refill = false;
6504
		failed_alloc = false;
6505
		BUG_ON(index != get_block_group_index(block_group));
6506 6507
		if (used_block_group != block_group)
			btrfs_put_block_group(used_block_group);
6508
		btrfs_put_block_group(block_group);
J
Josef Bacik 已提交
6509 6510 6511
	}
	up_read(&space_info->groups_sem);

6512 6513 6514
	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
		goto search;

6515 6516 6517
	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
		goto search;

6518
	/*
6519 6520
	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
	 *			caching kthreads as we move along
J
Josef Bacik 已提交
6521 6522 6523 6524
	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
	 *			again
6525
	 */
6526
	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
6527
		index = 0;
6528
		loop++;
J
Josef Bacik 已提交
6529
		if (loop == LOOP_ALLOC_CHUNK) {
6530 6531 6532 6533 6534 6535 6536 6537
			struct btrfs_trans_handle *trans;

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}

6538
			ret = do_chunk_alloc(trans, root, flags,
6539 6540 6541 6542 6543
					     CHUNK_ALLOC_FORCE);
			/*
			 * Do not bail out on ENOSPC since we
			 * can do more things.
			 */
6544
			if (ret < 0 && ret != -ENOSPC)
6545 6546
				btrfs_abort_transaction(trans,
							root, ret);
6547 6548 6549 6550
			else
				ret = 0;
			btrfs_end_transaction(trans, root);
			if (ret)
6551
				goto out;
J
Josef Bacik 已提交
6552 6553
		}

6554 6555 6556
		if (loop == LOOP_NO_EMPTY_SIZE) {
			empty_size = 0;
			empty_cluster = 0;
6557
		}
6558 6559

		goto search;
J
Josef Bacik 已提交
6560 6561
	} else if (!ins->objectid) {
		ret = -ENOSPC;
6562
	} else if (ins->objectid) {
6563
		ret = 0;
C
Chris Mason 已提交
6564
	}
6565
out:
6566 6567
	if (ret == -ENOSPC)
		ins->offset = max_extent_size;
C
Chris Mason 已提交
6568
	return ret;
6569
}
6570

J
Josef Bacik 已提交
6571 6572
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups)
J
Josef Bacik 已提交
6573 6574
{
	struct btrfs_block_group_cache *cache;
6575
	int index = 0;
J
Josef Bacik 已提交
6576

J
Josef Bacik 已提交
6577
	spin_lock(&info->lock);
6578
	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
6579 6580 6581
	       info->flags,
	       info->total_bytes - info->bytes_used - info->bytes_pinned -
	       info->bytes_reserved - info->bytes_readonly,
C
Chris Mason 已提交
6582
	       (info->full) ? "" : "not ");
6583 6584
	printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
6585 6586 6587
	       info->total_bytes, info->bytes_used, info->bytes_pinned,
	       info->bytes_reserved, info->bytes_may_use,
	       info->bytes_readonly);
J
Josef Bacik 已提交
6588 6589 6590 6591
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;
J
Josef Bacik 已提交
6592

6593
	down_read(&info->groups_sem);
6594 6595
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
J
Josef Bacik 已提交
6596
		spin_lock(&cache->lock);
6597
		printk(KERN_INFO "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %s\n",
6598 6599 6600
		       cache->key.objectid, cache->key.offset,
		       btrfs_block_group_used(&cache->item), cache->pinned,
		       cache->reserved, cache->ro ? "[readonly]" : "");
J
Josef Bacik 已提交
6601 6602 6603
		btrfs_dump_free_space(cache, bytes);
		spin_unlock(&cache->lock);
	}
6604 6605
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
6606
	up_read(&info->groups_sem);
J
Josef Bacik 已提交
6607
}
6608

6609
int btrfs_reserve_extent(struct btrfs_root *root,
6610 6611
			 u64 num_bytes, u64 min_alloc_size,
			 u64 empty_size, u64 hint_byte,
6612
			 struct btrfs_key *ins, int is_data)
6613
{
6614
	bool final_tried = false;
6615
	u64 flags;
6616
	int ret;
6617

6618
	flags = btrfs_get_alloc_profile(root, is_data);
6619
again:
6620
	WARN_ON(num_bytes < root->sectorsize);
6621 6622
	ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
			       flags);
6623

6624
	if (ret == -ENOSPC) {
6625 6626
		if (!final_tried && ins->offset) {
			num_bytes = min(num_bytes >> 1, ins->offset);
6627
			num_bytes = round_down(num_bytes, root->sectorsize);
6628 6629 6630 6631 6632 6633 6634
			num_bytes = max(num_bytes, min_alloc_size);
			if (num_bytes == min_alloc_size)
				final_tried = true;
			goto again;
		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
			struct btrfs_space_info *sinfo;

6635
			sinfo = __find_space_info(root->fs_info, flags);
6636
			btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
6637
				flags, num_bytes);
6638 6639
			if (sinfo)
				dump_space_info(sinfo, num_bytes, 1);
6640
		}
6641
	}
J
Josef Bacik 已提交
6642 6643

	return ret;
6644 6645
}

6646 6647
static int __btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len, int pin)
6648
{
J
Josef Bacik 已提交
6649
	struct btrfs_block_group_cache *cache;
6650
	int ret = 0;
J
Josef Bacik 已提交
6651 6652 6653

	cache = btrfs_lookup_block_group(root->fs_info, start);
	if (!cache) {
6654
		btrfs_err(root->fs_info, "Unable to find block group for %llu",
6655
			start);
J
Josef Bacik 已提交
6656 6657
		return -ENOSPC;
	}
6658

6659 6660
	if (btrfs_test_opt(root, DISCARD))
		ret = btrfs_discard_extent(root, start, len, NULL);
6661

6662 6663 6664 6665 6666 6667
	if (pin)
		pin_down_extent(root, cache, start, len, 1);
	else {
		btrfs_add_free_space(cache, start, len);
		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
	}
6668
	btrfs_put_block_group(cache);
J
Josef Bacik 已提交
6669

6670 6671
	trace_btrfs_reserved_extent_free(root, start, len);

6672 6673 6674
	return ret;
}

6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686
int btrfs_free_reserved_extent(struct btrfs_root *root,
					u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 0);
}

int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
				       u64 start, u64 len)
{
	return __btrfs_free_reserved_extent(root, start, len, 1);
}

6687 6688 6689 6690 6691
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod)
6692 6693
{
	int ret;
6694
	struct btrfs_fs_info *fs_info = root->fs_info;
6695
	struct btrfs_extent_item *extent_item;
6696
	struct btrfs_extent_inline_ref *iref;
6697
	struct btrfs_path *path;
6698 6699 6700
	struct extent_buffer *leaf;
	int type;
	u32 size;
6701

6702 6703 6704 6705
	if (parent > 0)
		type = BTRFS_SHARED_DATA_REF_KEY;
	else
		type = BTRFS_EXTENT_DATA_REF_KEY;
6706

6707
	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
6708 6709

	path = btrfs_alloc_path();
T
Tsutomu Itoh 已提交
6710 6711
	if (!path)
		return -ENOMEM;
6712

6713
	path->leave_spinning = 1;
6714 6715
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
6716 6717 6718 6719
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}
J
Josef Bacik 已提交
6720

6721 6722
	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
6723
				     struct btrfs_extent_item);
6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743
	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_DATA);

	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (parent > 0) {
		struct btrfs_shared_data_ref *ref;
		ref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
	} else {
		struct btrfs_extent_data_ref *ref;
		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
	}
6744 6745

	btrfs_mark_buffer_dirty(path->nodes[0]);
6746
	btrfs_free_path(path);
6747

6748
	ret = update_block_group(root, ins->objectid, ins->offset, 1);
6749
	if (ret) { /* -ENOENT, logic error */
6750
		btrfs_err(fs_info, "update block group failed for %llu %llu",
6751
			ins->objectid, ins->offset);
6752 6753
		BUG();
	}
J
Josef Bacik 已提交
6754
	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
6755 6756 6757
	return ret;
}

static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_extent_item *extent_item;
	struct btrfs_tree_block_info *block_info;
	struct btrfs_extent_inline_ref *iref;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u32 size = sizeof(*extent_item) + sizeof(*iref);
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	if (!skinny_metadata)
		size += sizeof(*block_info);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
						   root->leafsize);
		return -ENOMEM;
	}

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
				      ins, size);
	if (ret) {
		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
						   root->leafsize);
		btrfs_free_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	extent_item = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, extent_item, 1);
	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
	btrfs_set_extent_flags(leaf, extent_item,
			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);

	if (skinny_metadata) {
		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
	} else {
		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
		btrfs_set_tree_block_key(leaf, block_info, key);
		btrfs_set_tree_block_level(leaf, block_info, level);
		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
	}

	if (parent > 0) {
		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_SHARED_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_type(leaf, iref,
						 BTRFS_TREE_BLOCK_REF_KEY);
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}

	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	ret = update_block_group(root, ins->objectid, root->leafsize, 1);
	if (ret) { /* -ENOENT, logic error */
		btrfs_err(fs_info, "update block group failed for %llu %llu",
			ins->objectid, ins->offset);
		BUG();
	}

	trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->leafsize);
	return ret;
}

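/*
 * record a newly allocated data extent by queueing a delayed ref; the
 * extent item itself is inserted later, when the delayed refs are run.
 */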
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 root_objectid, u64 owner,
				     u64 offset, struct btrfs_key *ins)
{
	int ret;

	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);

	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
					 ins->offset, 0,
					 root_objectid, owner, offset,
					 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
	return ret;
}

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   u64 root_objectid, u64 owner, u64 offset,
				   struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;

	/*
	 * Mixed block groups will exclude before processing the log so we only
	 * need to do the exclude dance if this fs isn't mixed.
	 */
	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
		if (ret)
			return ret;
	}

	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	if (!block_group)
		return -EINVAL;

	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
					  RESERVE_ALLOC_NO_ACCOUNT);
	BUG_ON(ret); /* logic error */
	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
					 0, owner, offset, ins, 1);
	btrfs_put_block_group(block_group);
	return ret;
}

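/*
 * set up the extent buffer for a freshly allocated tree block: lock it,
 * mark it dirty in the proper extent io tree and hand it back locked.
 */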
static struct extent_buffer *
btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      u64 bytenr, u32 blocksize, int level)
{
	struct extent_buffer *buf;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return ERR_PTR(-ENOMEM);
	btrfs_set_header_generation(buf, trans->transid);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
	btrfs_tree_lock(buf);
	clean_tree_block(trans, root, buf);
	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);

	btrfs_set_lock_blocking(buf);
	btrfs_set_buffer_uptodate(buf);

	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
		/*
		 * we allow two log transactions at a time, use different
		 * EXTENT bits to differentiate dirty pages.
		 */
		if (root->log_transid % 2 == 0)
			set_extent_dirty(&root->dirty_log_pages, buf->start,
					buf->start + buf->len - 1, GFP_NOFS);
		else
			set_extent_new(&root->dirty_log_pages, buf->start,
					buf->start + buf->len - 1, GFP_NOFS);
	} else {
		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
			 buf->start + buf->len - 1, GFP_NOFS);
	}
	trans->blocks_used++;
	/* this returns a buffer locked for blocking */
	return buf;
}

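/*
 * reserve space for one tree block out of the transaction's block rsv,
 * falling back to the global reserve when the preferred rsv is empty.
 */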
static struct btrfs_block_rsv *
use_block_rsv(struct btrfs_trans_handle *trans,
	      struct btrfs_root *root, u32 blocksize)
{
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
	int ret;
	bool global_updated = false;

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;

	if (block_rsv->failfast)
		return ERR_PTR(ret);

	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		update_global_block_rsv(root->fs_info);
		goto again;
	}

	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
		static DEFINE_RATELIMIT_STATE(_rs,
				DEFAULT_RATELIMIT_INTERVAL * 10,
				/*DEFAULT_RATELIMIT_BURST*/ 1);
		if (__ratelimit(&_rs))
			WARN(1, KERN_DEBUG
				"btrfs: block rsv returned %d\n", ret);
	}
try_reserve:
	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
				     BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * If we couldn't reserve metadata bytes try and use some from
	 * the global reserve if its space type is the same as the global
	 * reservation.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	return ERR_PTR(ret);
}

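/* give one tree block worth of reservation back to the block rsv */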
static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
{
	block_rsv_add_bytes(block_rsv, blocksize, 0);
	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
}

/*
 * finds a free extent and does all the dirty work required for allocation.
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent as the return value.
 *
 * returns the tree buffer or an error pointer on failure.
 */
struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u32 blocksize,
					u64 parent, u64 root_objectid,
					struct btrfs_disk_key *key, int level,
					u64 hint, u64 empty_size)
{
	struct btrfs_key ins;
	struct btrfs_block_rsv *block_rsv;
	struct extent_buffer *buf;
	u64 flags = 0;
	int ret;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	block_rsv = use_block_rsv(trans, root, blocksize);
	if (IS_ERR(block_rsv))
		return ERR_CAST(block_rsv);

	ret = btrfs_reserve_extent(root, blocksize, blocksize,
				   empty_size, hint, &ins, 0);
	if (ret) {
		unuse_block_rsv(root->fs_info, block_rsv, blocksize);
		return ERR_PTR(ret);
	}

	buf = btrfs_init_new_buffer(trans, root, ins.objectid,
				    blocksize, level);
	BUG_ON(IS_ERR(buf)); /* -ENOMEM */

	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent == 0)
			parent = ins.objectid;
		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
	} else
		BUG_ON(parent > 0);

	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		struct btrfs_delayed_extent_op *extent_op;
		extent_op = btrfs_alloc_delayed_extent_op();
		BUG_ON(!extent_op); /* -ENOMEM */
		if (key)
			memcpy(&extent_op->key, key, sizeof(extent_op->key));
		else
			memset(&extent_op->key, 0, sizeof(extent_op->key));
		extent_op->flags_to_set = flags;
		if (skinny_metadata)
			extent_op->update_key = 0;
		else
			extent_op->update_key = 1;
		extent_op->update_flags = 1;
		extent_op->is_data = 0;
		extent_op->level = level;

		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
					ins.objectid,
					ins.offset, parent, root_objectid,
					level, BTRFS_ADD_DELAYED_EXTENT,
					extent_op, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	return buf;
}
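
/*
 * state shared by the tree walk below: per-level ref counts and flags,
 * the current stage (DROP_REFERENCE or UPDATE_BACKREF) and the
 * readahead bookkeeping used by reada_walk_down.
 */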
struct walk_control {
	u64 refs[BTRFS_MAX_LEVEL];
	u64 flags[BTRFS_MAX_LEVEL];
	struct btrfs_key update_progress;
	int stage;
	int level;
	int shared_level;
	int update_ref;
	int keep_locks;
	int reada_slot;
	int reada_count;
	int for_reloc;
};

#define DROP_REFERENCE	1
#define UPDATE_BACKREF	2

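/*
 * issue readahead against the node pointers we are about to visit,
 * skipping children that the current stage would skip anyway.
 */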
static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct walk_control *wc,
				     struct btrfs_path *path)
{
	u64 bytenr;
	u64 generation;
	u64 refs;
	u64 flags;
	u32 nritems;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *eb;
	int ret;
	int slot;
	int nread = 0;

	if (path->slots[wc->level] < wc->reada_slot) {
		wc->reada_count = wc->reada_count * 2 / 3;
		wc->reada_count = max(wc->reada_count, 2);
	} else {
		wc->reada_count = wc->reada_count * 3 / 2;
		wc->reada_count = min_t(int, wc->reada_count,
					BTRFS_NODEPTRS_PER_BLOCK(root));
	}

	eb = path->nodes[wc->level];
	nritems = btrfs_header_nritems(eb);
	blocksize = btrfs_level_size(root, wc->level - 1);

	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
		if (nread >= wc->reada_count)
			break;

		cond_resched();
		bytenr = btrfs_node_blockptr(eb, slot);
		generation = btrfs_node_ptr_generation(eb, slot);

		if (slot == path->slots[wc->level])
			goto reada;

		if (wc->stage == UPDATE_BACKREF &&
		    generation <= root->root_key.offset)
			continue;

		/* We don't lock the tree block, it's OK to be racy here */
		ret = btrfs_lookup_extent_info(trans, root, bytenr,
					       wc->level - 1, 1, &refs,
					       &flags);
		/* We don't care about errors in readahead. */
		if (ret < 0)
			continue;
		BUG_ON(refs == 0);

		if (wc->stage == DROP_REFERENCE) {
			if (refs == 1)
				goto reada;

			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				continue;
			btrfs_node_key_to_cpu(eb, &key, slot);
			ret = btrfs_comp_cpu_keys(&key,
						  &wc->update_progress);
			if (ret < 0)
				continue;
		} else {
			if (wc->level == 1 &&
			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				continue;
		}
reada:
		ret = readahead_tree_block(root, bytenr, blocksize,
					   generation);
		if (ret)
			break;
		nread++;
	}
	wc->reada_slot = slot;
}

/*
 * helper to process tree block while walking down the tree.
 *
 * when wc->stage == UPDATE_BACKREF, this function updates
 * back refs for pointers in the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc, int lookup_info)
{
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	int ret;

	if (wc->stage == UPDATE_BACKREF &&
	    btrfs_header_owner(eb) != root->root_key.objectid)
		return 1;

	/*
	 * when reference count of tree block is 1, it won't increase
	 * again. once full backref flag is set, we never clear it.
	 */
	if (lookup_info &&
	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_lookup_extent_info(trans, root,
					       eb->start, level, 1,
					       &wc->refs[level],
					       &wc->flags[level]);
		BUG_ON(ret == -ENOMEM);
		if (ret)
			return ret;
		BUG_ON(wc->refs[level] == 0);
	}

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level] > 1)
			return 1;

		if (path->locks[level] && !wc->keep_locks) {
			btrfs_tree_unlock_rw(eb, path->locks[level]);
			path->locks[level] = 0;
		}
		return 0;
	}

	/* wc->stage == UPDATE_BACKREF */
	if (!(wc->flags[level] & flag)) {
		BUG_ON(!path->locks[level]);
		ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
		BUG_ON(ret); /* -ENOMEM */
		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
						  eb->len, flag,
						  btrfs_header_level(eb), 0);
		BUG_ON(ret); /* -ENOMEM */
		wc->flags[level] |= flag;
	}

	/*
	 * the block is shared by multiple trees, so it's not good to
	 * keep the tree lock
	 */
	if (path->locks[level] && level > 0) {
		btrfs_tree_unlock_rw(eb, path->locks[level]);
		path->locks[level] = 0;
	}
	return 0;
}

/*
 * helper to process tree block pointer.
 *
 * when wc->stage == DROP_REFERENCE, this function checks
 * reference count of the block pointed to. if the block
 * is shared and we need to update back refs for the subtree
 * rooted at the block, this function changes wc->stage to
 * UPDATE_BACKREF. if the block is shared and there is no
 * need to update backrefs, this function drops the reference
 * to the block.
 *
 * NOTE: return value 1 means we should stop walking down.
 */
static noinline int do_walk_down(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int *lookup_info)
{
	u64 bytenr;
	u64 generation;
	u64 parent;
	u32 blocksize;
	struct btrfs_key key;
	struct extent_buffer *next;
	int level = wc->level;
	int reada = 0;
	int ret = 0;

	generation = btrfs_node_ptr_generation(path->nodes[level],
					       path->slots[level]);
	/*
	 * if the lower level block was created before the snapshot
	 * was created, we know there is no need to update back refs
	 * for the subtree
	 */
	if (wc->stage == UPDATE_BACKREF &&
	    generation <= root->root_key.offset) {
		*lookup_info = 1;
		return 1;
	}

	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
	blocksize = btrfs_level_size(root, level - 1);

	next = btrfs_find_tree_block(root, bytenr, blocksize);
	if (!next) {
		next = btrfs_find_create_tree_block(root, bytenr, blocksize);
		if (!next)
			return -ENOMEM;
		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
					       level - 1);
		reada = 1;
	}
	btrfs_tree_lock(next);
	btrfs_set_lock_blocking(next);

	ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
				       &wc->refs[level - 1],
				       &wc->flags[level - 1]);
	if (ret < 0) {
		btrfs_tree_unlock(next);
		return ret;
	}

	if (unlikely(wc->refs[level - 1] == 0)) {
		btrfs_err(root->fs_info, "Missing references.");
		BUG();
	}
	*lookup_info = 0;

	if (wc->stage == DROP_REFERENCE) {
		if (wc->refs[level - 1] > 1) {
			if (level == 1 &&
			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
				goto skip;

			if (!wc->update_ref ||
			    generation <= root->root_key.offset)
				goto skip;

			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
			if (ret < 0)
				goto skip;

			wc->stage = UPDATE_BACKREF;
			wc->shared_level = level - 1;
		}
	} else {
		if (level == 1 &&
		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
			goto skip;
	}

	if (!btrfs_buffer_uptodate(next, generation, 0)) {
		btrfs_tree_unlock(next);
		free_extent_buffer(next);
		next = NULL;
		*lookup_info = 1;
	}

	if (!next) {
		if (reada && level == 1)
			reada_walk_down(trans, root, wc, path);
		next = read_tree_block(root, bytenr, blocksize, generation);
		if (!next || !extent_buffer_uptodate(next)) {
			free_extent_buffer(next);
			return -EIO;
		}
		btrfs_tree_lock(next);
		btrfs_set_lock_blocking(next);
	}

	level--;
	BUG_ON(level != btrfs_header_level(next));
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	wc->level = level;
	if (wc->level == 1)
		wc->reada_slot = 0;
	return 0;
skip:
	wc->refs[level - 1] = 0;
	wc->flags[level - 1] = 0;
	if (wc->stage == DROP_REFERENCE) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level]));
			parent = 0;
		}

		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
				root->root_key.objectid, level - 1, 0, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
	*lookup_info = 1;
	return 1;
}

/*
 * helper to process tree block while walking up the tree.
 *
 * when wc->stage == DROP_REFERENCE, this function drops
 * reference count on the block.
 *
 * when wc->stage == UPDATE_BACKREF, this function changes
 * wc->stage back to DROP_REFERENCE if we changed wc->stage
 * to UPDATE_BACKREF previously while processing the block.
 *
 * NOTE: return value 1 means we should stop walking up.
 */
static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc)
{
	int ret;
	int level = wc->level;
	struct extent_buffer *eb = path->nodes[level];
	u64 parent = 0;

	if (wc->stage == UPDATE_BACKREF) {
		BUG_ON(wc->shared_level < level);
		if (level < wc->shared_level)
			goto out;

		ret = find_next_key(path, level + 1, &wc->update_progress);
		if (ret > 0)
			wc->update_ref = 0;

		wc->stage = DROP_REFERENCE;
		wc->shared_level = -1;
		path->slots[level] = 0;

		/*
		 * check reference count again if the block isn't locked.
		 * we should start walking down the tree again if reference
		 * count is one.
		 */
		if (!path->locks[level]) {
			BUG_ON(level == 0);
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						       eb->start, level, 1,
						       &wc->refs[level],
						       &wc->flags[level]);
			if (ret < 0) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return ret;
			}
			BUG_ON(wc->refs[level] == 0);
			if (wc->refs[level] == 1) {
				btrfs_tree_unlock_rw(eb, path->locks[level]);
				path->locks[level] = 0;
				return 1;
			}
		}
	}

	/* wc->stage == DROP_REFERENCE */
	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);

	if (wc->refs[level] == 1) {
		if (level == 0) {
			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
				ret = btrfs_dec_ref(trans, root, eb, 1,
						    wc->for_reloc);
			else
				ret = btrfs_dec_ref(trans, root, eb, 0,
						    wc->for_reloc);
			BUG_ON(ret); /* -ENOMEM */
		}
		/* make block locked assertion in clean_tree_block happy */
		if (!path->locks[level] &&
		    btrfs_header_generation(eb) == trans->transid) {
			btrfs_tree_lock(eb);
			btrfs_set_lock_blocking(eb);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		}
		clean_tree_block(trans, root, eb);
	}

	if (eb == root->node) {
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = eb->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(eb));
	} else {
		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
			parent = path->nodes[level + 1]->start;
		else
			BUG_ON(root->root_key.objectid !=
			       btrfs_header_owner(path->nodes[level + 1]));
	}

	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
out:
	wc->refs[level] = 0;
	wc->flags[level] = 0;
	return 0;
}

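/*
 * walk down from wc->level toward the leaves, processing each block
 * with walk_down_proc/do_walk_down until one of them asks us to stop.
 */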
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct walk_control *wc)
{
	int level = wc->level;
	int lookup_info = 1;
	int ret;

	while (level >= 0) {
		ret = walk_down_proc(trans, root, path, wc, lookup_info);
		if (ret > 0)
			break;

		if (level == 0)
			break;

		if (path->slots[level] >=
		    btrfs_header_nritems(path->nodes[level]))
			break;

		ret = do_walk_down(trans, root, path, wc, &lookup_info);
		if (ret > 0) {
			path->slots[level]++;
			continue;
		} else if (ret < 0)
			return ret;
		level = wc->level;
	}
	return 0;
}

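/*
 * walk back up toward max_level, finishing each fully visited block
 * via walk_up_proc.  returns 1 once nothing is left to walk.
 */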
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct walk_control *wc, int max_level)
{
	int level = wc->level;
	int ret;

	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
	while (level < max_level && path->nodes[level]) {
		wc->level = level;
		if (path->slots[level] + 1 <
		    btrfs_header_nritems(path->nodes[level])) {
			path->slots[level]++;
			return 0;
		} else {
			ret = walk_up_proc(trans, root, path, wc);
			if (ret > 0)
				return 0;

			if (path->locks[level]) {
				btrfs_tree_unlock_rw(path->nodes[level],
						     path->locks[level]);
				path->locks[level] = 0;
			}
			free_extent_buffer(path->nodes[level]);
			path->nodes[level] = NULL;
			level++;
		}
	}
	return 1;
}

/*
 * drop a subvolume tree.
 *
 * this function traverses the tree freeing any blocks that are
 * only referenced by the tree.
 *
 * when a shared tree block is found, this function decreases its
 * reference count by one. if update_ref is true, this function
 * also makes sure backrefs for the shared block and all lower level
 * blocks are properly updated.
 *
 * If called with for_reloc == 0, may exit early with -EAGAIN
 */
int btrfs_drop_snapshot(struct btrfs_root *root,
			 struct btrfs_block_rsv *block_rsv, int update_ref,
			 int for_reloc)
{
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_root_item *root_item = &root->root_item;
	struct walk_control *wc;
	struct btrfs_key key;
	int err = 0;
	int ret;
	int level;
	bool root_dropped = false;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		err = -ENOMEM;
		goto out;
	}

	trans = btrfs_start_transaction(tree_root, 0);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	if (block_rsv)
		trans->block_rsv = block_rsv;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		level = btrfs_header_level(root->node);
		path->nodes[level] = btrfs_lock_root_node(root);
		btrfs_set_lock_blocking(path->nodes[level]);
		path->slots[level] = 0;
		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
		memset(&wc->update_progress, 0,
		       sizeof(wc->update_progress));
	} else {
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		memcpy(&wc->update_progress, &key,
		       sizeof(wc->update_progress));

		level = root_item->drop_level;
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out_end_trans;
		}
		WARN_ON(ret > 0);

		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		btrfs_unlock_up_safe(path, 0);

		level = btrfs_header_level(root->node);
		while (1) {
			btrfs_tree_lock(path->nodes[level]);
			btrfs_set_lock_blocking(path->nodes[level]);
			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

			ret = btrfs_lookup_extent_info(trans, root,
						path->nodes[level]->start,
						level, 1, &wc->refs[level],
						&wc->flags[level]);
			if (ret < 0) {
				err = ret;
				goto out_end_trans;
			}
			BUG_ON(wc->refs[level] == 0);

			if (level == root_item->drop_level)
				break;

			btrfs_tree_unlock(path->nodes[level]);
			path->locks[level] = 0;
			WARN_ON(wc->refs[level] != 1);
			level--;
		}
	}

	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->for_reloc = for_reloc;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0) {
			err = ret;
			break;
		}

		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret < 0) {
			err = ret;
			break;
		}

		if (ret > 0) {
			BUG_ON(wc->stage != DROP_REFERENCE);
			break;
		}

		if (wc->stage == DROP_REFERENCE) {
			level = wc->level;
			btrfs_node_key(path->nodes[level],
				       &root_item->drop_progress,
				       path->slots[level]);
			root_item->drop_level = level;
		}

		BUG_ON(wc->level == 0);
		if (btrfs_should_end_transaction(trans, tree_root) ||
		    (!for_reloc && btrfs_need_cleaner_sleep(root))) {
			ret = btrfs_update_root(trans, tree_root,
						&root->root_key,
						root_item);
			if (ret) {
				btrfs_abort_transaction(trans, tree_root, ret);
				err = ret;
				goto out_end_trans;
			}

			btrfs_end_transaction_throttle(trans, tree_root);
			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
				pr_debug("btrfs: drop snapshot early exit\n");
				err = -EAGAIN;
				goto out_free;
			}

			trans = btrfs_start_transaction(tree_root, 0);
			if (IS_ERR(trans)) {
				err = PTR_ERR(trans);
				goto out_free;
			}
			if (block_rsv)
				trans->block_rsv = block_rsv;
		}
	}
	btrfs_release_path(path);
	if (err)
		goto out_end_trans;

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, tree_root, ret);
		goto out_end_trans;
	}

	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
		ret = btrfs_find_root(tree_root, &root->root_key, path,
				      NULL, NULL);
		if (ret < 0) {
			btrfs_abort_transaction(trans, tree_root, ret);
			err = ret;
			goto out_end_trans;
		} else if (ret > 0) {
			/* if we fail to delete the orphan item this time
			 * around, it'll get picked up the next time.
			 *
			 * The most common failure here is just -ENOENT.
			 */
			btrfs_del_orphan_item(trans, tree_root,
					      root->root_key.objectid);
		}
	}

	if (root->in_radix) {
		btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
	} else {
		free_extent_buffer(root->node);
		free_extent_buffer(root->commit_root);
		btrfs_put_fs_root(root);
	}
	root_dropped = true;
out_end_trans:
	btrfs_end_transaction_throttle(trans, tree_root);
out_free:
	kfree(wc);
	btrfs_free_path(path);
out:
	/*
	 * So if we need to stop dropping the snapshot for whatever reason we
	 * need to make sure to add it back to the dead root list so that we
	 * keep trying to do the work later.  This also cleans up roots if we
	 * don't have it in the radix (like when we recover after a power fail
	 * or unmount) so we don't leak memory.
	 */
	if (!for_reloc && root_dropped == false)
		btrfs_add_dead_root(root);
	if (err)
		btrfs_std_error(root->fs_info, err);
	return err;
}

/*
 * drop subtree rooted at tree block 'node'.
 *
 * NOTE: this function will unlock and release tree block 'node'
 * only used by relocation code
 */
int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct extent_buffer *node,
			struct extent_buffer *parent)
{
	struct btrfs_path *path;
	struct walk_control *wc;
	int level;
	int parent_level;
	int ret = 0;
	int wret;

	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	wc = kzalloc(sizeof(*wc), GFP_NOFS);
	if (!wc) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	btrfs_assert_tree_locked(parent);
	parent_level = btrfs_header_level(parent);
	extent_buffer_get(parent);
	path->nodes[parent_level] = parent;
	path->slots[parent_level] = btrfs_header_nritems(parent);

	btrfs_assert_tree_locked(node);
	level = btrfs_header_level(node);
	path->nodes[level] = node;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;

	wc->refs[parent_level] = 1;
	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = 0;
	wc->keep_locks = 1;
	wc->for_reloc = 1;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

	while (1) {
		wret = walk_down_tree(trans, root, path, wc);
		if (wret < 0) {
			ret = wret;
			break;
		}

		wret = walk_up_tree(trans, root, path, wc, parent_level);
		if (wret < 0)
			ret = wret;
		if (wret != 0)
			break;
	}

	kfree(wc);
	btrfs_free_path(path);
	return ret;
}

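/*
 * pick the restripe target profile for this chunk type if one is set,
 * otherwise degrade the raid profile to something the currently
 * available devices can still honor.
 */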
static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
	u64 num_devices;
	u64 stripped;

	/*
	 * if restripe for this chunk_type is on pick target profile and
	 * return, otherwise do the usual balance
	 */
	stripped = get_restripe_target(root->fs_info, flags);
	if (stripped)
		return extended_to_chunk(stripped);

	/*
	 * we add in the count of missing devices because we want
	 * to make sure that any RAID levels on a degraded FS
	 * continue to be honored.
	 */
	num_devices = root->fs_info->fs_devices->rw_devices +
		root->fs_info->fs_devices->missing_devices;

	stripped = BTRFS_BLOCK_GROUP_RAID0 |
		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* turn raid0 into single device chunks */
		if (flags & BTRFS_BLOCK_GROUP_RAID0)
			return stripped;

		/* turn mirroring into duplication */
		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
			     BTRFS_BLOCK_GROUP_RAID10))
			return stripped | BTRFS_BLOCK_GROUP_DUP;
	} else {
		/* they already had raid on here, just return */
		if (flags & stripped)
			return flags;

		stripped |= BTRFS_BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		/* switch duplicated blocks with raid1 */
		if (flags & BTRFS_BLOCK_GROUP_DUP)
			return stripped | BTRFS_BLOCK_GROUP_RAID1;

		/* this is drive concat, leave it alone */
	}

	return flags;
}

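/*
 * try to mark a block group read-only: succeeds only if the space info
 * can absorb the unused bytes, plus a small cushion for metadata and
 * system chunks unless force is set.
 */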
static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;
	u64 min_allocable_bytes;
	int ret = -ENOSPC;

	/*
	 * We need some metadata space and system metadata space for
	 * allocating chunks in some corner cases until we force to set
	 * it to be readonly.
	 */
	if ((sinfo->flags &
	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
	    !force)
		min_allocable_bytes = 1 * 1024 * 1024;
	else
		min_allocable_bytes = 0;

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);

	if (cache->ro) {
		ret = 0;
		goto out;
	}

	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);

	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
	    min_allocable_bytes <= sinfo->total_bytes) {
		sinfo->bytes_readonly += num_bytes;
		cache->ro = 1;
		ret = 0;
	}
out:
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
	return ret;
}

int btrfs_set_block_group_ro(struct btrfs_root *root,
			     struct btrfs_block_group_cache *cache)
{
	struct btrfs_trans_handle *trans;
	u64 alloc_flags;
	int ret;

	BUG_ON(cache->ro);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	alloc_flags = update_block_group_flags(root, cache->flags);
	if (alloc_flags != cache->flags) {
		ret = do_chunk_alloc(trans, root, alloc_flags,
				     CHUNK_ALLOC_FORCE);
		if (ret < 0)
			goto out;
	}

	ret = set_block_group_ro(cache, 0);
	if (!ret)
		goto out;
	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
	ret = do_chunk_alloc(trans, root, alloc_flags,
			     CHUNK_ALLOC_FORCE);
	if (ret < 0)
		goto out;
	ret = set_block_group_ro(cache, 0);
out:
	btrfs_end_transaction(trans, root);
	return ret;
}

int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 type)
{
	u64 alloc_flags = get_alloc_profile(root, type);
	return do_chunk_alloc(trans, root, alloc_flags,
			      CHUNK_ALLOC_FORCE);
}

/*
 * helper to account the unused space of all the readonly block groups in the
 * list. takes mirrors into account.
 */
static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
{
	struct btrfs_block_group_cache *block_group;
	u64 free_bytes = 0;
	int factor;

	list_for_each_entry(block_group, groups_list, list) {
		spin_lock(&block_group->lock);

		if (!block_group->ro) {
			spin_unlock(&block_group->lock);
			continue;
		}

		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
					  BTRFS_BLOCK_GROUP_RAID10 |
					  BTRFS_BLOCK_GROUP_DUP))
			factor = 2;
		else
			factor = 1;

		free_bytes += (block_group->key.offset -
			       btrfs_block_group_used(&block_group->item)) *
			       factor;

		spin_unlock(&block_group->lock);
	}

	return free_bytes;
}

/*
 * helper to account the unused space of all the readonly block groups in the
 * space_info. takes mirrors into account.
 */
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
{
	int i;
	u64 free_bytes = 0;

	spin_lock(&sinfo->lock);

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		if (!list_empty(&sinfo->block_groups[i]))
			free_bytes += __btrfs_get_ro_block_group_free_space(
						&sinfo->block_groups[i]);

	spin_unlock(&sinfo->lock);

	return free_bytes;
}

void btrfs_set_block_group_rw(struct btrfs_root *root,
			      struct btrfs_block_group_cache *cache)
{
	struct btrfs_space_info *sinfo = cache->space_info;
	u64 num_bytes;

	BUG_ON(!cache->ro);

	spin_lock(&sinfo->lock);
	spin_lock(&cache->lock);
	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
		    cache->bytes_super - btrfs_block_group_used(&cache->item);
	sinfo->bytes_readonly -= num_bytes;
	cache->ro = 0;
	spin_unlock(&cache->lock);
	spin_unlock(&sinfo->lock);
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
	struct btrfs_device *device;
	struct btrfs_trans_handle *trans;
	u64 min_free;
	u64 dev_min = 1;
	u64 dev_nr = 0;
	u64 target;
	int index;
	int full = 0;
	int ret = 0;

	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

	/* odd, couldn't find the block group, leave it alone */
	if (!block_group)
		return -1;

	min_free = btrfs_block_group_used(&block_group->item);

	/* no bytes used, we're good */
	if (!min_free)
		goto out;

	space_info = block_group->space_info;
	spin_lock(&space_info->lock);

	full = space_info->full;

	/*
	 * if this is the last block group we have in this space, we can't
	 * relocate it unless we're able to allocate a new chunk below.
	 *
	 * Otherwise, we need to make sure we have room in the space to handle
	 * all of the extents from this block group.  If we can, we're good
	 */
	if ((space_info->total_bytes != block_group->key.offset) &&
	    (space_info->bytes_used + space_info->bytes_reserved +
	     space_info->bytes_pinned + space_info->bytes_readonly +
	     min_free < space_info->total_bytes)) {
		spin_unlock(&space_info->lock);
		goto out;
	}
	spin_unlock(&space_info->lock);

	/*
	 * ok we don't have enough space, but maybe we have free space on our
	 * devices to allocate new chunks for relocation, so loop through our
	 * alloc devices and guess if we have enough space.  if this block
	 * group is going to be restriped, run checks against the target
	 * profile instead of the current one.
	 */
	ret = -1;

	/*
	 * index:
	 *      0: raid10
	 *      1: raid1
	 *      2: dup
	 *      3: raid0
	 *      4: single
	 */
	target = get_restripe_target(root->fs_info, block_group->flags);
	if (target) {
		index = __get_raid_index(extended_to_chunk(target));
	} else {
		/*
		 * this is just a balance, so if we were marked as full
		 * we know there is no space for a new chunk
		 */
		if (full)
			goto out;

		index = get_block_group_index(block_group);
	}

	if (index == BTRFS_RAID_RAID10) {
		dev_min = 4;
		/* Divide by 2 */
		min_free >>= 1;
	} else if (index == BTRFS_RAID_RAID1) {
		dev_min = 2;
	} else if (index == BTRFS_RAID_DUP) {
		/* Multiply by 2 */
		min_free <<= 1;
	} else if (index == BTRFS_RAID_RAID0) {
		dev_min = fs_devices->rw_devices;
		do_div(min_free, dev_min);
	}

	/* We need to do this so that we can look at pending chunks */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	mutex_lock(&root->fs_info->chunk_mutex);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 dev_offset;

		/*
		 * check to make sure we can actually find a chunk with enough
		 * space to fit our block group in.
		 */
		if (device->total_bytes > device->bytes_used + min_free &&
		    !device->is_tgtdev_for_dev_replace) {
			ret = find_free_dev_extent(trans, device, min_free,
						   &dev_offset, NULL);
			if (!ret)
				dev_nr++;

			if (dev_nr >= dev_min)
				break;

			ret = -1;
		}
	}
	mutex_unlock(&root->fs_info->chunk_mutex);
	btrfs_end_transaction(trans, root);
out:
	btrfs_put_block_group(block_group);
	return ret;
}

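/*
 * position the path at the first BLOCK_GROUP_ITEM with an objectid
 * greater than or equal to the search key.
 */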
static int find_first_block_group(struct btrfs_root *root,
		struct btrfs_path *path, struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;

	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid >= key->objectid &&
		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
			ret = 0;
			goto out;
		}
		path->slots[0]++;
	}
out:
	return ret;
}

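/*
 * iterate all block groups and drop the inode reference each one holds
 * on its free space cache inode.
 */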
void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	u64 last = 0;

	while (1) {
		struct inode *inode;

		block_group = btrfs_lookup_first_block_group(info, last);
		while (block_group) {
			spin_lock(&block_group->lock);
			if (block_group->iref)
				break;
			spin_unlock(&block_group->lock);
			block_group = next_block_group(info->tree_root,
						       block_group);
		}
		if (!block_group) {
			if (last == 0)
				break;
			last = 0;
			continue;
		}

		inode = block_group->inode;
		block_group->iref = 0;
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
		last = block_group->key.objectid + block_group->key.offset;
		btrfs_put_block_group(block_group);
	}
}

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_caching_control *caching_ctl;
	struct rb_node *n;

	down_write(&info->extent_commit_sem);
	while (!list_empty(&info->caching_block_groups)) {
		caching_ctl = list_entry(info->caching_block_groups.next,
					 struct btrfs_caching_control, list);
		list_del(&caching_ctl->list);
		put_caching_control(caching_ctl);
	}
	up_write(&info->extent_commit_sem);

	spin_lock(&info->block_group_cache_lock);
	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
		block_group = rb_entry(n, struct btrfs_block_group_cache,
				       cache_node);
		rb_erase(&block_group->cache_node,
			 &info->block_group_cache_tree);
		spin_unlock(&info->block_group_cache_lock);

		down_write(&block_group->space_info->groups_sem);
		list_del(&block_group->list);
		up_write(&block_group->space_info->groups_sem);

		if (block_group->cached == BTRFS_CACHE_STARTED)
			wait_block_group_cache_done(block_group);

		/*
		 * We haven't cached this block group, which means we could
		 * possibly have excluded extents on this block group.
		 */
		if (block_group->cached == BTRFS_CACHE_NO ||
		    block_group->cached == BTRFS_CACHE_ERROR)
			free_excluded_extents(info->extent_root, block_group);

		btrfs_remove_free_space_cache(block_group);
		btrfs_put_block_group(block_group);

		spin_lock(&info->block_group_cache_lock);
	}
	spin_unlock(&info->block_group_cache_lock);

	/* now that all the block groups are freed, go through and
	 * free all the space_info structs.  This is only called during
	 * the final stages of unmount, and so we know nobody is
	 * using them.  We call synchronize_rcu() once before we start,
	 * just to be on the safe side.
	 */
	synchronize_rcu();

	release_global_block_rsv(info);

	while (!list_empty(&info->space_info)) {
		space_info = list_entry(info->space_info.next,
					struct btrfs_space_info,
					list);
		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
			if (WARN_ON(space_info->bytes_pinned > 0 ||
			    space_info->bytes_reserved > 0 ||
			    space_info->bytes_may_use > 0)) {
				dump_space_info(space_info, 0, 0);
			}
		}
		percpu_counter_destroy(&space_info->total_bytes_pinned);
		list_del(&space_info->list);
		kfree(space_info);
	}
	return 0;
}

static void __link_block_group(struct btrfs_space_info *space_info,
			       struct btrfs_block_group_cache *cache)
{
	int index = get_block_group_index(cache);

	down_write(&space_info->groups_sem);
	list_add_tail(&cache->list, &space_info->block_groups[index]);
	up_write(&space_info->groups_sem);
}

C
Chris Mason 已提交
8354 8355 8356 8357 8358
int btrfs_read_block_groups(struct btrfs_root *root)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_block_group_cache *cache;
C
Chris Mason 已提交
8359
	struct btrfs_fs_info *info = root->fs_info;
8360
	struct btrfs_space_info *space_info;
C
Chris Mason 已提交
8361 8362
	struct btrfs_key key;
	struct btrfs_key found_key;
8363
	struct extent_buffer *leaf;
8364 8365
	int need_clear = 0;
	u64 cache_gen;
8366

C
Chris Mason 已提交
8367
	root = info->extent_root;
C
Chris Mason 已提交
8368
	key.objectid = 0;
8369
	key.offset = 0;
C
Chris Mason 已提交
8370 8371 8372 8373
	btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
J
Josef Bacik 已提交
8374
	path->reada = 1;
C
Chris Mason 已提交
8375

8376
	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
8377
	if (btrfs_test_opt(root, SPACE_CACHE) &&
8378
	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
8379
		need_clear = 1;
8380 8381
	if (btrfs_test_opt(root, CLEAR_CACHE))
		need_clear = 1;
8382

C
Chris Mason 已提交
8383
	while (1) {
8384
		ret = find_first_block_group(root, path, &key);
8385 8386
		if (ret > 0)
			break;
8387 8388
		if (ret != 0)
			goto error;
8389 8390
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8391
		cache = kzalloc(sizeof(*cache), GFP_NOFS);
C
Chris Mason 已提交
8392
		if (!cache) {
8393
			ret = -ENOMEM;
8394
			goto error;
C
Chris Mason 已提交
8395
		}
8396 8397 8398 8399 8400 8401 8402
		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
						GFP_NOFS);
		if (!cache->free_space_ctl) {
			kfree(cache);
			ret = -ENOMEM;
			goto error;
		}
C
Chris Mason 已提交
8403

8404
		atomic_set(&cache->count, 1);
8405
		spin_lock_init(&cache->lock);
J
Josef Bacik 已提交
8406
		cache->fs_info = info;
J
Josef Bacik 已提交
8407
		INIT_LIST_HEAD(&cache->list);
8408
		INIT_LIST_HEAD(&cache->cluster_list);
8409

8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420
		if (need_clear) {
			/*
			 * When we mount with old space cache, we need to
			 * set BTRFS_DC_CLEAR and set dirty flag.
			 *
			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
			 *    truncate the old free space cache inode and
			 *    setup a new one.
			 * b) Setting 'dirty flag' makes sure that we flush
			 *    the new space cache info onto disk.
			 */
8421
			cache->disk_cache_state = BTRFS_DC_CLEAR;
8422 8423 8424
			if (btrfs_test_opt(root, SPACE_CACHE))
				cache->dirty = 1;
		}
8425

8426 8427 8428
		read_extent_buffer(leaf, &cache->item,
				   btrfs_item_ptr_offset(leaf, path->slots[0]),
				   sizeof(cache->item));
C
Chris Mason 已提交
8429
		memcpy(&cache->key, &found_key, sizeof(found_key));
8430

C
Chris Mason 已提交
8431
		key.objectid = found_key.objectid + found_key.offset;
8432
		btrfs_release_path(path);
8433
		cache->flags = btrfs_block_group_flags(&cache->item);
J
Josef Bacik 已提交
8434
		cache->sectorsize = root->sectorsize;
D
David Woodhouse 已提交
8435 8436 8437
		cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       found_key.objectid);
8438 8439
		btrfs_init_free_space_ctl(cache);

8440 8441 8442 8443 8444
		/*
		 * We need to exclude the super stripes now so that the space
		 * info has super bytes accounted for, otherwise we'll think
		 * we have more space than we actually do.
		 */
8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455
		ret = exclude_super_stripes(root, cache);
		if (ret) {
			/*
			 * We may have excluded something, so call this just in
			 * case.
			 */
			free_excluded_extents(root, cache);
			kfree(cache->free_space_ctl);
			kfree(cache);
			goto error;
		}
8456

J
Josef Bacik 已提交
8457 8458 8459 8460 8461 8462 8463 8464
		/*
		 * check for two cases, either we are full, and therefore
		 * don't need to bother with the caching work since we won't
		 * find any space, or we are empty, and we can just add all
		 * the space in and be done with it.  This saves us _alot_ of
		 * time, particularly in the full case.
		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8465
			cache->last_byte_to_unpin = (u64)-1;
J
Josef Bacik 已提交
8466
			cache->cached = BTRFS_CACHE_FINISHED;
8467
			free_excluded_extents(root, cache);
J
Josef Bacik 已提交
8468
		} else if (btrfs_block_group_used(&cache->item) == 0) {
8469
			cache->last_byte_to_unpin = (u64)-1;
J
Josef Bacik 已提交
8470 8471 8472 8473 8474
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
8475
			free_excluded_extents(root, cache);
J
Josef Bacik 已提交
8476
		}
8477

8478 8479 8480 8481 8482 8483 8484
		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			goto error;
		}

8485 8486 8487
		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
8488 8489 8490 8491 8492 8493 8494 8495 8496 8497
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			spin_lock(&info->block_group_cache_lock);
			rb_erase(&cache->cache_node,
				 &info->block_group_cache_tree);
			spin_unlock(&info->block_group_cache_lock);
			btrfs_put_block_group(cache);
			goto error;
		}

8498
		cache->space_info = space_info;
8499
		spin_lock(&cache->space_info->lock);
8500
		cache->space_info->bytes_readonly += cache->bytes_super;
8501 8502
		spin_unlock(&cache->space_info->lock);

8503
		__link_block_group(space_info, cache);
J
Josef Bacik 已提交
8504

8505
		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid))
			set_block_group_ro(cache, 1);
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * Avoid allocating from an un-mirrored block group if there
		 * are mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			set_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			set_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

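/*
 * Write out the block group items for all block groups created in this
 * transaction: insert each one into the extent tree and finish its chunk
 * allocation.  Any failure here aborts the transaction.
 */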
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_group_cache *block_group, *tmp;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;

	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
				 new_bg_list) {
		list_del_init(&block_group->new_bg_list);

		if (ret)
			continue;

		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
		ret = btrfs_finish_chunk_alloc(trans, extent_root,
					       key.objectid, key.offset);
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
	}
}

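/*
 * Create an in-memory block group for the chunk at [chunk_offset,
 * chunk_offset + size) and link it into the block group cache and its
 * space_info.  The on-disk block group item is not inserted here; the
 * group is queued on trans->new_bgs and written out later by
 * btrfs_create_pending_block_groups().
 */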
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

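	/*
	 * A freshly created block group cannot be replayed through the
	 * tree log, so force the next log sync in this transaction to
	 * fall back to a full transaction commit.
	 */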
	root->fs_info->last_trans_log_full_commit = trans->transid;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	if (!cache)
		return -ENOMEM;
	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
					GFP_NOFS);
	if (!cache->free_space_ctl) {
		kfree(cache);
		return -ENOMEM;
	}

	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
	cache->sectorsize = root->sectorsize;
	cache->fs_info = root->fs_info;
	cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       chunk_offset);

	atomic_set(&cache->count, 1);
	spin_lock_init(&cache->lock);
	INIT_LIST_HEAD(&cache->list);
	INIT_LIST_HEAD(&cache->cluster_list);
	INIT_LIST_HEAD(&cache->new_bg_list);

	btrfs_init_free_space_ctl(cache);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	ret = exclude_super_stripes(root, cache);
	if (ret) {
		/*
		 * We may have excluded something, so call this just in
		 * case.
		 */
		free_excluded_extents(root, cache);
		kfree(cache->free_space_ctl);
		kfree(cache);
		return ret;
	}
	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		spin_lock(&root->fs_info->block_group_cache_lock);
		rb_erase(&cache->cache_node,
			 &root->fs_info->block_group_cache_tree);
		spin_unlock(&root->fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return ret;
	}
	update_global_block_rsv(root->fs_info);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);
	list_add_tail(&cache->new_bg_list, &trans->new_bgs);
	set_avail_alloc_bits(extent_root->fs_info, type);
	return 0;
}

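/*
 * Clear the extended profile bits of @flags from the per-type
 * avail_*_alloc_bits fields, under the profiles seqlock.  Called when
 * the last block group of a given kind goes away.
 */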
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;
	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

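/*
 * Remove the read-only block group starting at @group_start: drop its
 * free space cache inode, unlink it from the block group cache and its
 * space_info, and delete its block group item from the extent tree.
 */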
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	int ret;
	int index;
	int factor;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);
	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;
	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	inode = lookup_free_space_inode(tree_root, block_group, path);
	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block group's ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

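	/*
	 * Delete the free space cache header item for this block group
	 * from the tree root; its key uses the block group's start offset
	 * as the key offset.
	 */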
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);

	if (root->fs_info->first_logical_byte == block_group->key.objectid)
		root->fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * We must use list_del_init so that callers can check whether the
	 * block group is still on the list after taking the semaphore.
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index]))
		clear_avail_alloc_bits(root->fs_info, block_group->flags);
	up_write(&block_group->space_info->groups_sem);

	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);

	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;
	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	btrfs_clear_space_info_full(root->fs_info);

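	/*
	 * Drop two references: one for the rb-tree entry erased above and
	 * one for the lookup we did at the start of this function.
	 */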
	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

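/*
 * Create the initial space_info entries: SYSTEM, plus either a single
 * mixed METADATA|DATA entry or separate METADATA and DATA entries,
 * depending on the MIXED_GROUPS incompat feature.
 */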
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return 1;
	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;
	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}

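/*
 * Thin wrappers around unpin_extent_range() and btrfs_discard_extent()
 * used by the error-handling/cleanup paths.
 */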
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end);
}

int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 *actual_bytes)
{
	return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
}

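/*
 * Trim free space in every block group that overlaps
 * [range->start, range->start + range->len), honouring range->minlen.
 * On return, range->len is set to the number of bytes actually trimmed.
 */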
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	int ret = 0;

	/*
	 * Try to trim all of the FS space; our first block group may start
	 * at a non-zero offset.
	 */
	if (range->len == total_bytes)
		cache = btrfs_lookup_first_block_group(fs_info, range->start);
	else
		cache = btrfs_lookup_block_group(fs_info, range->start);

	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
				cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	range->len = trimmed;
	return ret;
}